ext | sha | content |
---|---|---|
py | 1a496439e7d076a45b460a8701e207ba06a0d9ed | from clubsandwich.geom import Size
from .level_generator import generate_dungeon
from .level_state import LevelState
LEVEL_SIZE = Size(100, 60)
# Originally, Rogue Basement was going to have several levels. This was going
# to be the object that tracked the current level and let you switch between
# them.
#
# It fulfills that purpose, but there is no actual "change current level"
# method! It would be as simple as setting `self.active_id = NEW_VALUE`.
# The screen is redrawn completely every frame, so there is no need to do
# anything else.
#
# This object also tracks the score. LevelState keeps a weak reference to this
# object, so the active LevelState object is what actually increments the
# score.
class GameState:
def __init__(self):
self.level_states_by_id = {}
self.score = 0
self.active_id = self.add_level().uuid
@property
def level(self):
return self.level_states_by_id[self.active_id]
def add_level(self):
level_state = LevelState(generate_dungeon(LEVEL_SIZE), self)
self.level_states_by_id[level_state.uuid] = level_state
return level_state
# Your next stop should be level_state.py.
|
py | 1a4964ea10068f01b909d2381c082cd56cc51101 | import base64
import io
import logging
import os
import numpy as np
import torch
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms
logger = logging.getLogger(__name__)
class DIYSegmentation:
"""
DIYSegmentation handler class.
"""
def __init__(self):
self.model = None
self.mapping = None
self.device = None
self.initialized = False
def initialize(self, ctx):
"""
load eager mode state_dict based model
"""
properties = ctx.system_properties
self.device = torch.device(
"cuda:" + str(properties.get("gpu_id"))
if torch.cuda.is_available()
else "cpu"
)
logger.info(f"Device on initialization is: {self.device}")
model_dir = properties.get("model_dir")
manifest = ctx.manifest
logger.error(manifest)
serialized_file = manifest["model"]["serializedFile"]
model_pt_path = os.path.join(model_dir, serialized_file)
if not os.path.isfile(model_pt_path):
            raise RuntimeError("Missing the serialized model weights file")
logger.debug(model_pt_path)
from model import DynamicUnetDIY
state_dict = torch.load(model_pt_path, map_location=self.device)
self.model = DynamicUnetDIY()
self.model.load_state_dict(state_dict)
self.model.to(self.device)
self.model.eval()
logger.debug("Model file {0} loaded successfully".format(model_pt_path))
self.initialized = True
def preprocess(self, data):
"""
        Scales and normalizes a PIL image for a U-Net model
"""
image = data[0].get("data")
if image is None:
image = data[0].get("body")
image_transform = transforms.Compose(
[
# must be consistent with model training
transforms.Resize((96, 128)),
transforms.ToTensor(),
# default statistics from imagenet
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
image = Image.open(io.BytesIO(image)).convert(
"RGB"
) # in case of an alpha channel
image = image_transform(image).unsqueeze_(0)
return image
def inference(self, img):
"""
Predict the chip stack mask of an image using a trained deep learning model.
"""
logger.info(f"Device on inference is: {self.device}")
self.model.eval()
inputs = Variable(img).to(self.device)
outputs = self.model.forward(inputs)
        logger.debug(outputs.shape)
return outputs
def postprocess(self, inference_output):
if torch.cuda.is_available():
inference_output = inference_output[0].argmax(dim=0).cpu()
else:
inference_output = inference_output[0].argmax(dim=0)
return [
{
"base64_prediction": base64.b64encode(
inference_output.numpy().astype(np.uint8)
).decode("utf-8")
}
]
_service = DIYSegmentation()
def handle(data, context):
if not _service.initialized:
_service.initialize(context)
if data is None:
return None
data = _service.preprocess(data)
data = _service.inference(data)
data = _service.postprocess(data)
return data
|
py | 1a496509320bd1c50d95dce87ec012ff375739dd |
import os
from zipfile import ZipFile
bag_of_words = open('spans-pred_charbert.txt', 'r')
charBert = open('40_0.4_spans-pred.txt', 'r').readlines()
zipObj = ZipFile('spans-pred.zip', 'w')
def charList_to_intList(line):
line = line.split('\t')
line = line[1][1:-1].split(' ')
span = []
for elem in line:
if len(elem) >= 2:
span.append(int(elem[:-1]))
return span
with open('spans-pred.txt', 'w') as combined_preds:  # write the file that gets zipped below
for i, line in enumerate(bag_of_words):
spans = []
span1 = charList_to_intList(line)
span2 = charList_to_intList(charBert[i])
for elem in span1:
spans.append(elem)
for elem in span2:
spans.append(elem)
spans = sorted(set(spans))
combined_preds.write(str(i) + '\t' + str(spans) + '\n')
zipObj.write('spans-pred.txt')
zipObj.close()  # finalize the archive
# os.remove('spans-pred.txt')
|
py | 1a496550cad23c4d2ee93846076be201cfe69056 | # To test a single translator use the -k parameter followed by either
# timescale or crate.
# See https://docs.pytest.org/en/stable/example/parametrize.html
from datetime import datetime
from conftest import crate_translator, timescale_translator
from utils.common import TIME_INDEX_NAME
from utils.tests.common import create_random_entities
import pytest
translators = [
pytest.lazy_fixture('crate_translator'),
pytest.lazy_fixture('timescale_translator')
]
@pytest.mark.parametrize("translator", translators, ids=["crate", "timescale"])
def test_delete_entity_defaults(translator):
num_types = 2
num_ids_per_type = 2
num_updates = 5
entities = create_random_entities(num_types, num_ids_per_type, num_updates)
translator.insert(entities)
deleted_type = entities[0]['type']
deleted_id = entities[0]['id']
total = translator.query()
assert len(total) == num_types * num_ids_per_type
selected = translator.query(entity_type=deleted_type, entity_id=deleted_id)
assert len(selected[0]['index']) == num_updates
n_deleted = translator.delete_entity(deleted_id, entity_type=deleted_type)
assert n_deleted == num_updates
remaining = translator.query()
assert len(remaining) == (len(total) - 1)
survivors = translator.query(
entity_type=deleted_type, entity_id=deleted_id)
assert len(survivors) == 0
translator.clean()
@pytest.mark.parametrize("translator", translators, ids=["crate", "timescale"])
def test_delete_entity_customs(translator):
entities = create_random_entities(num_types=1,
num_ids_per_type=2,
num_updates=10)
for i, e in enumerate(entities):
t = datetime(2018, 1, 1 + i).isoformat(timespec='milliseconds')
e[TIME_INDEX_NAME] = t
translator.insert(entities)
deleted_type = entities[-1]['type']
deleted_id = entities[-1]['id']
res = translator.delete_entity(entity_id=deleted_id,
entity_type=deleted_type,
from_date=datetime(2018, 1, 8).isoformat(),
to_date=datetime(2018, 1, 16).isoformat())
assert res == 5
affected = translator.query(entity_id=deleted_id, entity_type=deleted_type)
assert len(affected) == 1
affected = affected[0]
assert affected['id'] == deleted_id
assert affected['type'] == deleted_type
assert len(affected['index']) == 10 - 5
res = translator.query(entity_type=deleted_type)
assert len(res) == 2
unaffected = res[0] if res[0]['id'] != deleted_id else res[1]
assert unaffected['id'] != deleted_id
assert unaffected['type'] == deleted_type
assert len(unaffected['index']) == 10
translator.clean()
@pytest.mark.parametrize("translator", translators, ids=["crate", "timescale"])
def test_delete_entity_with_tenancy(translator):
entities = create_random_entities(num_types=2,
num_ids_per_type=2,
num_updates=5)
fs = 'fs'
fsp = 'fsp'
translator.insert(entities, fiware_service=fs, fiware_servicepath=fsp)
to_delete = entities[0]
deleted_type = to_delete['type']
deleted_id = to_delete['id']
# No fs nor fsp -> no deletion
res = translator.delete_entity(deleted_id, entity_type=deleted_type)
assert res == 0
# No fsp -> no deletion
res = translator.delete_entity(deleted_id,
entity_type=deleted_type,
fiware_service=fs)
assert res == 0
# Matching fs & fsp -> deletion
res = translator.delete_entity(deleted_id,
entity_type=deleted_type,
fiware_service=fs,
fiware_servicepath=fsp)
assert res == 5
translator.clean(fs)
@pytest.mark.parametrize("translator", translators, ids=["crate", "timescale"])
def test_delete_entities_defaults(translator):
entities = create_random_entities(num_types=3,
num_ids_per_type=2,
num_updates=20)
translator.insert(entities)
type_to_delete = entities[0]['type']
res = translator.delete_entities(type_to_delete)
assert res == 20 * 2
remaining = translator.query()
assert len(remaining) == (3 - 1) * 2
assert all([r['type'] != type_to_delete for r in remaining])
translator.clean()
@pytest.mark.parametrize("translator", translators, ids=["crate", "timescale"])
def test_delete_entities_customs(translator):
entities = create_random_entities(num_types=4,
num_ids_per_type=1,
num_updates=4)
for i, e in enumerate(entities):
time_index = datetime(2018, 1, 1 + i).isoformat()[:-3]
e[TIME_INDEX_NAME] = time_index
translator.insert(entities)
type_to_delete = entities[-1]['type']
res = translator.delete_entities(type_to_delete,
from_date=datetime(
2018, 1, 4).isoformat(),
to_date=datetime(2018, 1, 12).isoformat())
assert res == 3
remaining = translator.query()
assert sum([len(r['index']) for r in remaining]) == ((4 * 4) - 3)
translator.clean()
@pytest.mark.parametrize("translator", translators, ids=["crate", "timescale"])
def test_delete_entities_with_tenancy(translator):
fs = 'fs'
fsp = 'fsp'
entities = create_random_entities(num_types=3,
num_ids_per_type=1,
num_updates=10)
translator.insert(entities, fiware_service=fs, fiware_servicepath=fsp)
type_to_delete = entities[0]['type']
res = translator.delete_entities(type_to_delete)
assert res == 0
res = translator.delete_entities(type_to_delete,
fiware_service=fs,
fiware_servicepath='another/path')
assert res == 0
res = translator.delete_entities(type_to_delete,
fiware_service=fs,
fiware_servicepath=fsp)
assert res == 10
translator.clean(fs)
|
py | 1a49656488f281f78144f849fba8eb0ff6f79f81 | import random
print("BEM VINDO AO JOGO DO PARA OU IMPAR")
print("--"*15)
vit = 0
while True:
palpite = int(input('Diga um valor entre zero e 9: '))
jogador = ''
while jogador not in ['P', 'I']:
jogador = str(input('Quer Par ou Impar? ')).strip().upper()[0]
jogada = random.choice(['PAR','IMPAR'])
if (jogador == 'P' and jogada == 'PAR') or (jogador == 'I' and jogada == 'IMPAR'):
print("Saiu {}. Você Venceu!! Vamos novamente.".format(jogada))
vit += 1
else:
print("Saiu {}. Você Perdeu!!".format(jogada))
break
print("Você teve {} vitórias consecutivas.".format(vit) if vit > 0 else "Infelizmente você não teve vitórias desta vez.")
|
py | 1a49667bad8f424e75aeefc7ffd9e5354e470843 | """The dhcp integration."""
from abc import abstractmethod
from datetime import timedelta
import fnmatch
from ipaddress import ip_address as make_ip_address
import logging
import os
import threading
from aiodiscover import DiscoverHosts
from aiodiscover.discovery import (
HOSTNAME as DISCOVERY_HOSTNAME,
IP_ADDRESS as DISCOVERY_IP_ADDRESS,
MAC_ADDRESS as DISCOVERY_MAC_ADDRESS,
)
from scapy.arch.common import compile_filter
from scapy.config import conf
from scapy.error import Scapy_Exception
from scapy.layers.dhcp import DHCP
from scapy.layers.inet import IP
from scapy.layers.l2 import Ether
from scapy.sendrecv import AsyncSniffer
from homeassistant.components.device_tracker.const import (
ATTR_HOST_NAME,
ATTR_IP,
ATTR_MAC,
ATTR_SOURCE_TYPE,
DOMAIN as DEVICE_TRACKER_DOMAIN,
SOURCE_TYPE_ROUTER,
)
from homeassistant.const import (
EVENT_HOMEASSISTANT_STARTED,
EVENT_HOMEASSISTANT_STOP,
STATE_HOME,
)
from homeassistant.core import Event, HomeAssistant, State, callback
from homeassistant.helpers.device_registry import format_mac
from homeassistant.helpers.event import (
async_track_state_added_domain,
async_track_time_interval,
)
from homeassistant.helpers.typing import ConfigType
from homeassistant.loader import async_get_dhcp
from homeassistant.util.network import is_invalid, is_link_local, is_loopback
from .const import DOMAIN
FILTER = "udp and (port 67 or 68)"
REQUESTED_ADDR = "requested_addr"
MESSAGE_TYPE = "message-type"
HOSTNAME = "hostname"
MAC_ADDRESS = "macaddress"
IP_ADDRESS = "ip"
DHCP_REQUEST = 3
SCAN_INTERVAL = timedelta(minutes=60)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the dhcp component."""
async def _initialize(_):
address_data = {}
integration_matchers = await async_get_dhcp(hass)
watchers = []
for cls in (DHCPWatcher, DeviceTrackerWatcher, NetworkWatcher):
watcher = cls(hass, address_data, integration_matchers)
await watcher.async_start()
watchers.append(watcher)
async def _async_stop(*_):
for watcher in watchers:
await watcher.async_stop()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _async_stop)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STARTED, _initialize)
return True
class WatcherBase:
"""Base class for dhcp and device tracker watching."""
def __init__(self, hass, address_data, integration_matchers):
"""Initialize class."""
super().__init__()
self.hass = hass
self._integration_matchers = integration_matchers
self._address_data = address_data
def process_client(self, ip_address, hostname, mac_address):
"""Process a client."""
made_ip_address = make_ip_address(ip_address)
if (
is_link_local(made_ip_address)
or is_loopback(made_ip_address)
or is_invalid(made_ip_address)
):
# Ignore self assigned addresses, loopback, invalid
return
data = self._address_data.get(ip_address)
if (
data
and data[MAC_ADDRESS] == mac_address
and data[HOSTNAME].startswith(hostname)
):
# If the address data is the same no need
# to process it
return
self._address_data[ip_address] = {MAC_ADDRESS: mac_address, HOSTNAME: hostname}
self.process_updated_address_data(ip_address, self._address_data[ip_address])
def process_updated_address_data(self, ip_address, data):
"""Process the address data update."""
lowercase_hostname = data[HOSTNAME].lower()
uppercase_mac = data[MAC_ADDRESS].upper()
_LOGGER.debug(
"Processing updated address data for %s: mac=%s hostname=%s",
ip_address,
uppercase_mac,
lowercase_hostname,
)
for entry in self._integration_matchers:
if MAC_ADDRESS in entry and not fnmatch.fnmatch(
uppercase_mac, entry[MAC_ADDRESS]
):
continue
if HOSTNAME in entry and not fnmatch.fnmatch(
lowercase_hostname, entry[HOSTNAME]
):
continue
_LOGGER.debug("Matched %s against %s", data, entry)
self.create_task(
self.hass.config_entries.flow.async_init(
entry["domain"],
context={"source": DOMAIN},
data={
IP_ADDRESS: ip_address,
HOSTNAME: lowercase_hostname,
MAC_ADDRESS: data[MAC_ADDRESS],
},
)
)
@abstractmethod
def create_task(self, task):
"""Pass a task to async_add_task based on which context we are in."""
class NetworkWatcher(WatcherBase):
"""Class to query ptr records routers."""
def __init__(self, hass, address_data, integration_matchers):
"""Initialize class."""
super().__init__(hass, address_data, integration_matchers)
self._unsub = None
self._discover_hosts = None
self._discover_task = None
async def async_stop(self):
"""Stop scanning for new devices on the network."""
if self._unsub:
self._unsub()
self._unsub = None
if self._discover_task:
self._discover_task.cancel()
self._discover_task = None
async def async_start(self):
"""Start scanning for new devices on the network."""
self._discover_hosts = DiscoverHosts()
self._unsub = async_track_time_interval(
self.hass, self.async_start_discover, SCAN_INTERVAL
)
self.async_start_discover()
@callback
def async_start_discover(self, *_):
"""Start a new discovery task if one is not running."""
if self._discover_task and not self._discover_task.done():
return
self._discover_task = self.create_task(self.async_discover())
async def async_discover(self):
"""Process discovery."""
for host in await self._discover_hosts.async_discover():
self.process_client(
host[DISCOVERY_IP_ADDRESS],
host[DISCOVERY_HOSTNAME],
_format_mac(host[DISCOVERY_MAC_ADDRESS]),
)
def create_task(self, task):
"""Pass a task to async_create_task since we are in async context."""
return self.hass.async_create_task(task)
class DeviceTrackerWatcher(WatcherBase):
"""Class to watch dhcp data from routers."""
def __init__(self, hass, address_data, integration_matchers):
"""Initialize class."""
super().__init__(hass, address_data, integration_matchers)
self._unsub = None
async def async_stop(self):
"""Stop watching for new device trackers."""
if self._unsub:
self._unsub()
self._unsub = None
async def async_start(self):
"""Stop watching for new device trackers."""
self._unsub = async_track_state_added_domain(
self.hass, [DEVICE_TRACKER_DOMAIN], self._async_process_device_event
)
for state in self.hass.states.async_all(DEVICE_TRACKER_DOMAIN):
self._async_process_device_state(state)
@callback
def _async_process_device_event(self, event: Event):
"""Process a device tracker state change event."""
self._async_process_device_state(event.data.get("new_state"))
@callback
def _async_process_device_state(self, state: State):
"""Process a device tracker state."""
if state.state != STATE_HOME:
return
attributes = state.attributes
if attributes.get(ATTR_SOURCE_TYPE) != SOURCE_TYPE_ROUTER:
return
ip_address = attributes.get(ATTR_IP)
hostname = attributes.get(ATTR_HOST_NAME, "")
mac_address = attributes.get(ATTR_MAC)
if ip_address is None or mac_address is None:
return
self.process_client(ip_address, hostname, _format_mac(mac_address))
def create_task(self, task):
"""Pass a task to async_create_task since we are in async context."""
return self.hass.async_create_task(task)
class DHCPWatcher(WatcherBase):
"""Class to watch dhcp requests."""
def __init__(self, hass, address_data, integration_matchers):
"""Initialize class."""
super().__init__(hass, address_data, integration_matchers)
self._sniffer = None
self._started = threading.Event()
async def async_stop(self):
"""Stop watching for new device trackers."""
await self.hass.async_add_executor_job(self._stop)
def _stop(self):
"""Stop the thread."""
if self._started.is_set():
self._sniffer.stop()
async def async_start(self):
"""Start watching for dhcp packets."""
# disable scapy promiscuous mode as we do not need it
conf.sniff_promisc = 0
try:
await self.hass.async_add_executor_job(_verify_l2socket_setup, FILTER)
except (Scapy_Exception, OSError) as ex:
if os.geteuid() == 0:
_LOGGER.error("Cannot watch for dhcp packets: %s", ex)
else:
_LOGGER.debug(
"Cannot watch for dhcp packets without root or CAP_NET_RAW: %s", ex
)
return
try:
await self.hass.async_add_executor_job(_verify_working_pcap, FILTER)
except (Scapy_Exception, ImportError) as ex:
_LOGGER.error(
"Cannot watch for dhcp packets without a functional packet filter: %s",
ex,
)
return
self._sniffer = AsyncSniffer(
filter=FILTER,
started_callback=self._started.set,
prn=self.handle_dhcp_packet,
store=0,
)
self._sniffer.start()
if self._sniffer.thread:
self._sniffer.thread.name = self.__class__.__name__
def handle_dhcp_packet(self, packet):
"""Process a dhcp packet."""
if DHCP not in packet:
return
options = packet[DHCP].options
request_type = _decode_dhcp_option(options, MESSAGE_TYPE)
if request_type != DHCP_REQUEST:
            # Not a DHCP request
return
ip_address = _decode_dhcp_option(options, REQUESTED_ADDR) or packet[IP].src
hostname = _decode_dhcp_option(options, HOSTNAME) or ""
mac_address = _format_mac(packet[Ether].src)
if ip_address is None or mac_address is None:
return
self.process_client(ip_address, hostname, mac_address)
def create_task(self, task):
"""Pass a task to hass.add_job since we are in a thread."""
return self.hass.add_job(task)
def _decode_dhcp_option(dhcp_options, key):
"""Extract and decode data from a packet option."""
for option in dhcp_options:
if len(option) < 2 or option[0] != key:
continue
value = option[1]
if value is None or key != HOSTNAME:
return value
# hostname is unicode
try:
return value.decode()
except (AttributeError, UnicodeDecodeError):
return None
def _format_mac(mac_address):
"""Format a mac address for matching."""
return format_mac(mac_address).replace(":", "")
def _verify_l2socket_setup(cap_filter):
"""Create a socket using the scapy configured l2socket.
Try to create the socket
to see if we have permissions
since AsyncSniffer will do it another
thread so we will not be able to capture
any permission or bind errors.
"""
conf.L2socket(filter=cap_filter)
def _verify_working_pcap(cap_filter):
"""Verify we can create a packet filter.
If we cannot create a filter we will be listening for
all traffic which is too intensive.
"""
compile_filter(cap_filter)
|
py | 1a49670e9ee12eddad2bfec90205a6429d907a61 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 27 15:06:40 2017
@author: Diogo Leite
"""
# here the FK values were selected in the last positions according to the Species_new object class
from DAL import *
from configuration.configuration_data import *
class _Species_sql_new(object):
"""
    This class manipulates the SPECIES table in the database
    The FKs are handled in the last positions of the parameters
"""
def __init__(self):
self.db_name = self.get_database_name()
def get_database_name(self):
"""
This method is used to get the database name used in factory
:return: database name
:rtype string
"""
conf_data_obj = Configuration_data('INPHINITY')
db_name = conf_data_obj.get_database_name()
return db_name
def select_all_species_all_attributes(self):
"""
return all the Species in the database
:return: cursor with all species
:rtype Cursor list
"""
sql_string = "SELECT id_specie_SP, designation_SP, FK_id_genus_GE_SP FROM SPECIES"
dalObj = DAL(self.db_name, sql_string)
results = dalObj.executeSelect()
return results
def select_specie_by_bacterium_id(self, id_bacterium):
"""
        return a species for a given bacterium id
        If none exists, -1 is returned
        :param id_bacterium: id of the bacterium - -1 if unknown
        :type id_bacterium: int - not required
        :return: species row (id, designation, FK genus) or -1 if not found
        :rtype Cursor list
"""
sql_string = "select id_specie_SP, designation_SP, FK_id_genus_GE_SP from SPECIES, STRAINS, ORGANISMS WHERE FK_id_specie_SP_ST = id_specie_SP and FK_id_strain_ST_OR = id_strain_ST and id_organism_OR = " + str(id_bacterium)
dalObj = DAL(self.db_name, sql_string)
results = dalObj.executeSelect()
if len(results) == 0:
return -1
else:
return results[0]
def insert_specie_if_not_exist_in_Genus(self, specieName, genus_id):
"""
        Insert a species if it does not yet exist (based on the designation)
        :param specieName: name of the species
        :param genus_id: FK of the species' genus - -1 if unknown
        :type specieName: string - required
        :type genus_id: int - required
        :return: id of the species inserted
        :rtype int
        :note:: it does not verify the complete taxonomy, only whether the species already exists in the given genus.
"""
id_specie = self.get_specie_id_by_designation_and_genus_id(specieName, genus_id)
if id_specie == -1:
sql_string = "INSERT INTO SPECIES (designation_SP, FK_id_genus_GE_SP) VALUES (%s, %s)"
params = [specieName, genus_id]
dalObj = DAL(self.db_name, sql_string)
dalObj.sqlcommand = sql_string
dalObj.parameters = params
results = dalObj.executeInsert()
return results.lastrowid
else:
print("The specie: %s already exists in the genus id: %d" %(str(specieName), genus_id))
return id_specie
def get_specie_id_by_designation_and_genus_id(self, designation, genus_id):
"""
get the id of a Specie based on its designation and genus_id
:param designation: designation of the specie
:param genus_id: FK id_genus
:type designation: string - required
:type genus_id: int - required
        :return: id of the species or -1 if it does not exist
:rtype int
"""
sql_string = "SELECT id_specie_SP FROM SPECIES WHERE designation_SP = '" + str(designation) + "' AND FK_id_genus_GE_SP = " + str(genus_id)
dalObj = DAL(self.db_name, sql_string)
results = dalObj.executeSelect()
        if len(results) == 0:
return -1
else:
return results[0][0]
def get_specie_by_id(self, id_specie):
"""
Get a specie by its id
:return: Specie elements info
:rtype List(infos species)
"""
sql_string = "SELECT id_specie_SP, designation_SP, FK_id_genus_GE_SP FROM SPECIES WHERE id_specie_SP = " + str(id_specie)
dalobj = DAL(self.db_name, sql_string)
results = dalobj.executeSelect()
return results[0]
def get_specie_by_organism_id(self, id_organism):
"""
        Get a species by an organism id
        :return: Species elements info
        :rtype List(infos species)
"""
sql_string = "SELECT id_specie_SP, designation_SP, FK_id_genus_GE_SP FROM STRAINS, SPECIES, ORGANISMS WHERE FK_id_specie_SP_ST = id_specie_SP and id_strain_ST = FK_id_strain_ST_OR and id_organism_OR = " + str(id_organism)
dalobj = DAL(self.db_name, sql_string)
results = dalobj.executeSelect()
return results[0]
def select_all_species_of_genus_id(self, id_genus):
"""
return all the Species in the database based on a genus id
:param id_genus: id of the genus - -1 if unknown
:type id_genus: int - not required
:return: cursor with all species
:rtype Cursor list
"""
sql_string = "SELECT id_specie_SP, designation_SP, FK_id_genus_GE_SP FROM SPECIES WHERE FK_id_genus_GE_SP = " + str(id_genus)
dalObj = DAL(self.db_name, sql_string)
results = dalObj.executeSelect()
return results
def select_all_species_frequency_couples_by_phage_id_positive(self, phage_id):
"""
        return the list of species frequencies for positive interactions involving a given phage id
:return: cursor with all species frequencies
:rtype Cursor list
"""
sql_string = "select id_specie_SP, designation_SP, FK_id_genus_GE_SP, count(id_specie_SP) as 'Quantity' FROM SPECIES, STRAINS, ORGANISMS, COUPLES WHERE FK_id_organism_phage_OR_CP = " + str(phage_id) + " and FK_id_organism_bact_OR_CP = id_organism_OR and FK_id_strain_ST_OR = id_strain_ST and FK_id_specie_SP_ST = id_specie_SP and interaction_CP = 1 group by id_specie_SP;"
dalObj = DAL(self.db_name, sql_string)
results = dalObj.executeSelect()
return results
|
py | 1a49673f01fe5e791784a0d469d3025f80b02ffb | import logging
import pytest
import time
from datetime import datetime
from tests.arp.arp_utils import clear_dut_arp_cache
from tests.ptf_runner import ptf_runner
from tests.common.helpers.assertions import pytest_assert
from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import]
pytestmark = [
pytest.mark.topology('t1', 't2')
]
logger = logging.getLogger(__name__)
def test_arp_unicast_reply(common_setup_teardown):
duthost, ptfhost, int_facts, intf1, intf2, intf1_indice, intf2_indice = common_setup_teardown
# Start PTF runner and send correct unicast arp packets
clear_dut_arp_cache(duthost)
params = {
'acs_mac': int_facts['ansible_interface_facts'][intf1]['macaddress'],
'port': intf1_indice
}
log_file = "/tmp/arptest.VerifyUnicastARPReply.{0}.log".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))
ptf_runner(ptfhost, 'ptftests', "arptest.VerifyUnicastARPReply", '/root/ptftests', params=params, log_file=log_file)
# Get DUT arp table
switch_arptable = duthost.switch_arptable()['ansible_facts']
pytest_assert(switch_arptable['arptable']['v4']['10.10.1.3']['macaddress'] == '00:06:07:08:09:00')
pytest_assert(switch_arptable['arptable']['v4']['10.10.1.3']['interface'] == intf1)
def test_arp_expect_reply(common_setup_teardown):
duthost, ptfhost, int_facts, intf1, intf2, intf1_indice, intf2_indice = common_setup_teardown
params = {
'acs_mac': int_facts['ansible_interface_facts'][intf1]['macaddress'],
'port': intf1_indice
}
# Start PTF runner and send correct arp packets
clear_dut_arp_cache(duthost)
log_file = "/tmp/arptest.ExpectReply.{0}.log".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))
ptf_runner(ptfhost, 'ptftests', "arptest.ExpectReply", '/root/ptftests', params=params, log_file=log_file)
switch_arptable = duthost.switch_arptable()['ansible_facts']
pytest_assert(switch_arptable['arptable']['v4']['10.10.1.3']['macaddress'] == '00:06:07:08:09:0a')
pytest_assert(switch_arptable['arptable']['v4']['10.10.1.3']['interface'] == intf1)
def test_arp_no_reply_other_intf(common_setup_teardown):
duthost, ptfhost, int_facts, intf1, intf2, intf1_indice, intf2_indice = common_setup_teardown
    # Check that the DUT doesn't reply to ARP or install an ARP entry when the ARP request comes from other interfaces
clear_dut_arp_cache(duthost)
intf2_params = {
'acs_mac': int_facts['ansible_interface_facts'][intf2]['macaddress'],
'port': intf2_indice
}
log_file = "/tmp/arptest.SrcOutRangeNoReply.{0}.log".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))
ptf_runner(ptfhost, 'ptftests', "arptest.SrcOutRangeNoReply", '/root/ptftests', params=intf2_params, log_file=log_file)
switch_arptable = duthost.switch_arptable()['ansible_facts']
for ip in switch_arptable['arptable']['v4'].keys():
pytest_assert(ip != '10.10.1.4')
def test_arp_no_reply_src_out_range(common_setup_teardown):
duthost, ptfhost, int_facts, intf1, intf2, intf1_indice, intf2_indice = common_setup_teardown
params = {
'acs_mac': int_facts['ansible_interface_facts'][intf1]['macaddress'],
'port': intf1_indice
}
# Check DUT won't reply ARP and install ARP entry when src address is not in interface subnet range
clear_dut_arp_cache(duthost)
log_file = "/tmp/arptest.SrcOutRangeNoReply.{0}.log".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))
ptf_runner(ptfhost, 'ptftests', "arptest.SrcOutRangeNoReply", '/root/ptftests', params=params, log_file=log_file)
switch_arptable = duthost.switch_arptable()['ansible_facts']
for ip in switch_arptable['arptable']['v4'].keys():
pytest_assert(ip != '10.10.1.22')
def test_arp_garp_no_update(common_setup_teardown):
duthost, ptfhost, int_facts, intf1, intf2, intf1_indice, intf2_indice = common_setup_teardown
params = {
'acs_mac': int_facts['ansible_interface_facts'][intf1]['macaddress'],
'port': intf1_indice
}
# Test Gratuitous ARP behavior, no Gratuitous ARP installed when arp was not resolved before
clear_dut_arp_cache(duthost)
log_file = "/tmp/arptest.GarpNoUpdate.{0}.log".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))
ptf_runner(ptfhost, 'ptftests', "arptest.GarpNoUpdate", '/root/ptftests', params=params, log_file=log_file)
switch_arptable = duthost.switch_arptable()['ansible_facts']
for ip in switch_arptable['arptable']['v4'].keys():
pytest_assert(ip != '10.10.1.7')
    # Test the gratuitous ARP update case: on receiving a GARP there is no ARP reply, but the ARP table is updated if the entry was resolved before
log_file = "/tmp/arptest.ExpectReply.{0}.log".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))
ptf_runner(ptfhost, 'ptftests', "arptest.ExpectReply", '/root/ptftests', params=params, log_file=log_file)
switch_arptable = duthost.switch_arptable()['ansible_facts']
pytest_assert(switch_arptable['arptable']['v4']['10.10.1.3']['macaddress'] == '00:06:07:08:09:0a')
pytest_assert(switch_arptable['arptable']['v4']['10.10.1.3']['interface'] == intf1)
time.sleep(2)
log_file = "/tmp/arptest.GarpUpdate.{0}.log".format(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))
ptf_runner(ptfhost, 'ptftests', "arptest.GarpUpdate", '/root/ptftests', params=params, log_file=log_file)
switch_arptable = duthost.switch_arptable()['ansible_facts']
pytest_assert(switch_arptable['arptable']['v4']['10.10.1.3']['macaddress'] == '00:00:07:08:09:0a')
pytest_assert(switch_arptable['arptable']['v4']['10.10.1.3']['interface'] == intf1)
|
py | 1a4967a93fabe213bc8388a7f9cc96c186bbf8c7 | # Copyright 2015 Lukas Lalinsky
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the JSON-over-HTTP RPC protocol used by Avatica."""
import re
import socket
import pprint
import math
import logging
import time
from phoenixdb import errors
from phoenixdb.avatica.proto import requests_pb2, common_pb2, responses_pb2
import requests
#from requests_gssapi import HTTPSPNEGOAuth, OPTIONAL
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
import kerberos
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
from HTMLParser import HTMLParser
except ImportError:
from html.parser import HTMLParser
__all__ = ['AvaticaClient']
logger = logging.getLogger(__name__)
class JettyErrorPageParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.path = []
self.title = []
self.message = []
def handle_starttag(self, tag, attrs):
self.path.append(tag)
def handle_endtag(self, tag):
self.path.pop()
def handle_data(self, data):
if len(self.path) > 2 and self.path[0] == 'html' and self.path[1] == 'body':
if len(self.path) == 3 and self.path[2] == 'h2':
self.title.append(data.strip())
elif len(self.path) == 4 and self.path[2] == 'p' and self.path[3] == 'pre':
self.message.append(data.strip())
def parse_url(url):
url = urlparse.urlparse(url)
if not url.scheme and not url.netloc and url.path:
netloc = url.path
if ':' not in netloc:
netloc = '{}:8765'.format(netloc)
return urlparse.ParseResult('http', netloc, '/', '', '', '')
return url
# Defined in phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
SQLSTATE_ERROR_CLASSES = [
('08', errors.OperationalError), # Connection Exception
    ('22018', errors.IntegrityError),  # Constraint violation.
('22', errors.DataError), # Data Exception
('23', errors.IntegrityError), # Constraint Violation
('24', errors.InternalError), # Invalid Cursor State
('25', errors.InternalError), # Invalid Transaction State
('42', errors.ProgrammingError), # Syntax Error or Access Rule Violation
('XLC', errors.OperationalError), # Execution exceptions
('INT', errors.InternalError), # Phoenix internal error
]
# Relevant properties as defined by https://calcite.apache.org/avatica/docs/client_reference.html
OPEN_CONNECTION_PROPERTIES = (
'user', # User for the database connection
'password', # Password for the user
)
def raise_sql_error(code, sqlstate, message):
for prefix, error_class in SQLSTATE_ERROR_CLASSES:
if sqlstate.startswith(prefix):
raise error_class(message, code, sqlstate)
def parse_and_raise_sql_error(message):
match = re.findall(r'(?:([^ ]+): )?ERROR (\d+) \(([0-9A-Z]{5})\): (.*?) ->', message)
if match is not None and len(match):
exception, code, sqlstate, message = match[0]
raise_sql_error(int(code), sqlstate, message)
def parse_error_page(html):
parser = JettyErrorPageParser()
parser.feed(html)
if parser.title == ['HTTP ERROR: 500']:
message = ' '.join(parser.message).strip()
parse_and_raise_sql_error(message)
raise errors.InternalError(message)
def parse_error_protobuf(text):
message = common_pb2.WireMessage()
message.ParseFromString(text)
err = responses_pb2.ErrorResponse()
err.ParseFromString(message.wrapped_message)
parse_and_raise_sql_error(err.error_message)
raise_sql_error(err.error_code, err.sql_state, err.error_message)
raise errors.InternalError(err.error_message)
class AvaticaClient(object):
"""Client for Avatica's RPC server.
This exposes all low-level functionality that the Avatica
server provides, using the native terminology. You most likely
    do not want to use this class directly, but rather connect
to a server using :func:`phoenixdb.connect`.
"""
def __init__(self, url, max_retries=None, auth=None):
"""Constructs a new client object.
:param url:
URL of an Avatica RPC server.
"""
self.url = parse_url(url)
self.max_retries = max_retries if max_retries is not None else 3
self.auth = auth
self.connection = None
def connect(self):
"""This method used to open a persistent TCP connection
requests does not require this"""
pass
def close(self):
"""Also does nothing per requests"""
pass
def _post_request(self, body, headers):
retry_count = self.max_retries
while True:
logger.debug("POST %s %r %r", self.url.geturl(), body, headers)
try:
if self.auth == "SPNEGO":
#response = requests.request('post', self.url.geturl(), data=body, stream=True, headers=headers, auth=HTTPSPNEGOAuth(mutual_authentication=OPTIONAL))
response = requests.request('post', self.url.geturl(), data=body, stream=True, headers=headers, auth=HTTPKerberosAuth(mutual_authentication=OPTIONAL, mech_oid=kerberos.GSS_MECH_OID_SPNEGO), timeout=7200)
else:
response = requests.request('post', self.url.geturl(), data=body, stream=True, headers=headers, timeout=7200)
except requests.HTTPError as e:
if retry_count > 0:
delay = math.exp(-retry_count)
logger.debug("HTTP protocol error, will retry in %s seconds...", delay, exc_info=True)
time.sleep(delay)
retry_count -= 1
continue
raise errors.InterfaceError('RPC request failed', cause=e)
else:
if response.status_code == requests.codes.service_unavailable:
if retry_count > 0:
delay = math.exp(-retry_count)
logger.debug("Service unavailable, will retry in %s seconds...", delay, exc_info=True)
time.sleep(delay)
retry_count -= 1
continue
return response
def _apply(self, request_data, expected_response_type=None):
logger.debug("Sending request\n%s", pprint.pformat(request_data))
request_name = request_data.__class__.__name__
message = common_pb2.WireMessage()
message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)
message.wrapped_message = request_data.SerializeToString()
body = message.SerializeToString()
headers = {'content-type': 'application/x-google-protobuf'}
response = self._post_request(body, headers)
response_body = response.raw.read()
if response.status_code != requests.codes.ok:
logger.debug("Received response\n%s", response_body)
if b'<html>' in response_body:
parse_error_page(response_body)
else:
# assume the response is in protobuf format
parse_error_protobuf(response_body)
raise errors.InterfaceError('RPC request returned invalid status code', response.status_code)
message = common_pb2.WireMessage()
message.ParseFromString(response_body)
logger.debug("Received response\n%s", message)
if expected_response_type is None:
expected_response_type = request_name.replace('Request', 'Response')
expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type
if message.name != expected_response_type:
raise errors.InterfaceError('unexpected response type "{}" expected "{}"'.format(message.name, expected_response_type))
return message.wrapped_message
def get_catalogs(self, connection_id):
request = requests_pb2.CatalogsRequest()
request.connection_id = connection_id
return self._apply(request)
def get_schemas(self, connection_id, catalog=None, schemaPattern=None):
request = requests_pb2.SchemasRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
return self._apply(request)
def get_tables(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, typeList=None):
request = requests_pb2.TablesRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
        if typeList is not None:
            # type_list is a repeated protobuf field, so extend it rather than assign to it
            request.type_list.extend(typeList)
        request.has_type_list = typeList is not None
return self._apply(request)
def get_columns(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, columnNamePattern=None):
request = requests_pb2.ColumnsRequest()
request.connection_id = connection_id
if catalog is not None:
request.catalog = catalog
if schemaPattern is not None:
request.schema_pattern = schemaPattern
if tableNamePattern is not None:
request.table_name_pattern = tableNamePattern
if columnNamePattern is not None:
request.column_name_pattern = columnNamePattern
return self._apply(request)
def get_table_types(self, connection_id):
request = requests_pb2.TableTypesRequest()
request.connection_id = connection_id
return self._apply(request)
def get_type_info(self, connection_id):
request = requests_pb2.TypeInfoRequest()
request.connection_id = connection_id
return self._apply(request)
def connection_sync(self, connection_id, connProps=None):
"""Synchronizes connection properties with the server.
:param connection_id:
ID of the current connection.
:param connProps:
Dictionary with the properties that should be changed.
:returns:
A ``common_pb2.ConnectionProperties`` object.
"""
if connProps is None:
connProps = {}
request = requests_pb2.ConnectionSyncRequest()
request.connection_id = connection_id
request.conn_props.auto_commit = connProps.get('autoCommit', False)
request.conn_props.has_auto_commit = True
request.conn_props.read_only = connProps.get('readOnly', False)
request.conn_props.has_read_only = True
request.conn_props.transaction_isolation = connProps.get('transactionIsolation', 0)
request.conn_props.catalog = connProps.get('catalog', '')
request.conn_props.schema = connProps.get('schema', '')
response_data = self._apply(request)
response = responses_pb2.ConnectionSyncResponse()
response.ParseFromString(response_data)
return response.conn_props
def open_connection(self, connection_id, info=None):
"""Opens a new connection.
:param connection_id:
ID of the connection to open.
"""
request = requests_pb2.OpenConnectionRequest()
request.connection_id = connection_id
if info is not None:
# Info is a list of repeated pairs, setting a dict directly fails
for k, v in info.items():
request.info[k] = v
response_data = self._apply(request)
response = responses_pb2.OpenConnectionResponse()
response.ParseFromString(response_data)
def close_connection(self, connection_id):
"""Closes a connection.
:param connection_id:
ID of the connection to close.
"""
request = requests_pb2.CloseConnectionRequest()
request.connection_id = connection_id
self._apply(request)
def create_statement(self, connection_id):
"""Creates a new statement.
:param connection_id:
ID of the current connection.
:returns:
New statement ID.
"""
request = requests_pb2.CreateStatementRequest()
request.connection_id = connection_id
response_data = self._apply(request)
response = responses_pb2.CreateStatementResponse()
response.ParseFromString(response_data)
return response.statement_id
def close_statement(self, connection_id, statement_id):
"""Closes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to close.
"""
request = requests_pb2.CloseStatementRequest()
request.connection_id = connection_id
request.statement_id = statement_id
self._apply(request)
def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
"""Prepares and immediately executes a statement.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to prepare.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Result set with the signature of the prepared statement and the first frame data.
"""
request = requests_pb2.PrepareAndExecuteRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
if first_frame_max_size is not None:
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request, 'ExecuteResponse')
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
def prepare(self, connection_id, sql, max_rows_total=None):
"""Prepares a statement.
:param connection_id:
ID of the current connection.
:param sql:
SQL query.
:param max_rows_total:
The maximum number of rows that will be allowed for this query.
:returns:
Signature of the prepared statement.
"""
request = requests_pb2.PrepareRequest()
request.connection_id = connection_id
request.sql = sql
if max_rows_total is not None:
request.max_rows_total = max_rows_total
response_data = self._apply(request)
response = responses_pb2.PrepareResponse()
response.ParseFromString(response_data)
return response.statement
def execute(self, connection_id, statement_id, signature, parameter_values=None, first_frame_max_size=None):
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
        rows in this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param signature:
common_pb2.Signature object
:param parameter_values:
A list of parameter values, if statement is to be executed; otherwise ``None``.
:param first_frame_max_size:
The maximum number of rows that will be returned in the first Frame returned for this query.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.ExecuteRequest()
request.statementHandle.id = statement_id
request.statementHandle.connection_id = connection_id
request.statementHandle.signature.CopyFrom(signature)
if parameter_values is not None:
request.parameter_values.extend(parameter_values)
request.has_parameter_values = True
if first_frame_max_size is not None:
request.deprecated_first_frame_max_size = first_frame_max_size
request.first_frame_max_size = first_frame_max_size
response_data = self._apply(request)
response = responses_pb2.ExecuteResponse()
response.ParseFromString(response_data)
return response.results
def fetch(self, connection_id, statement_id, offset=0, frame_max_size=None):
"""Returns a frame of rows.
The frame describes whether there may be another frame. If there is not
another frame, the current iteration is done when we have finished the
        rows in this frame.
:param connection_id:
ID of the current connection.
:param statement_id:
ID of the statement to fetch rows from.
:param offset:
Zero-based offset of first row in the requested frame.
:param frame_max_size:
Maximum number of rows to return; negative means no limit.
:returns:
Frame data, or ``None`` if there are no more.
"""
request = requests_pb2.FetchRequest()
request.connection_id = connection_id
request.statement_id = statement_id
request.offset = offset
if frame_max_size is not None:
request.frame_max_size = frame_max_size
response_data = self._apply(request)
response = responses_pb2.FetchResponse()
response.ParseFromString(response_data)
return response.frame
|
py | 1a496906183741497d7cb03f3a0e4b0a8feee713 | import numpy as np
import scipy as sp
from ._model import Model
from ..utils import safe_isinstance, record_import_error
from ..utils.transformers import parse_prefix_suffix_for_tokenizer
from .. import models
from .._serializable import Serializer, Deserializer
try:
import torch
except ImportError as e:
record_import_error("torch", "Torch could not be imported!", e)
try:
import tensorflow as tf
except ImportError as e:
record_import_error("tensorflow", "TensorFlow could not be imported!", e)
class TeacherForcing(Model):
""" Generates scores (log odds) for output text explanation algorithms using Teacher Forcing technique.
This class supports generation of log odds for transformer models as well as functions. In model agnostic
cases (model is function) it expects a similarity_model and similarity_tokenizer to approximate log odd scores
for target sentence generated by the model.
"""
def __init__(self, model, tokenizer=None, similarity_model=None, similarity_tokenizer=None, batch_size=128, device=None):
""" Build a teacher forcing model from the given text generation model.
Parameters
----------
model: object or function
A object of any pretrained transformer model or function which is to be explained.
tokenizer: object
A tokenizer object(PreTrainedTokenizer/PreTrainedTokenizerFast) which is used to tokenize source and target sentence.
similarity_model: object
A pretrained transformer model object which is used in model agnostic scenario to approximate log odds.
similarity_tokenizer: object
A tokenizer object(PreTrainedTokenizer/PreTrainedTokenizerFast) which is used to tokenize sentence in model agnostic scenario.
batch_size: int
Batch size for model inferencing and computing logodds (default=128).
device: str
            By default, it infers whether the system has a GPU and sets the device accordingly. Should be 'cpu' or 'cuda' for PyTorch models.
Returns
-------
numpy.ndarray
The scores (log odds) of generating target sentence ids using the model.
"""
super().__init__(model)
self.tokenizer = tokenizer
# set pad token if not defined
if self.tokenizer is not None and self.tokenizer.pad_token is None:
self.tokenizer.pad_token = self.tokenizer.eos_token
self.device = device
self.batch_size = batch_size
# assign text generation function
if safe_isinstance(model, "transformers.PreTrainedModel") or safe_isinstance(model, "transformers.TFPreTrainedModel"):
self.text_generate = models.TextGeneration(self.inner_model, tokenizer=self.tokenizer, device=self.device)
self.similarity_model = model
self.similarity_tokenizer = tokenizer
self.model_agnostic = False
else:
self.text_generate = models.TextGeneration(self.inner_model, device=self.device)
self.similarity_model = similarity_model
self.similarity_tokenizer = similarity_tokenizer
# set pad token for a similarity tokenizer(in a model agnostic scenario) if not defined
if self.similarity_tokenizer is not None and self.similarity_tokenizer.pad_token is None:
self.similarity_tokenizer.pad_token = self.similarity_tokenizer.eos_token
self.model_agnostic = True
# initializing target which is the target sentence/ids for every new row of explanation
self.output = None
self.output_names = None
self.similarity_model_type = None
if safe_isinstance(self.similarity_model, "transformers.PreTrainedModel"):
self.similarity_model_type = "pt"
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if self.device is None else self.device
self.similarity_model = self.similarity_model.to(self.device)
elif safe_isinstance(self.similarity_model, "transformers.TFPreTrainedModel"):
self.similarity_model_type = "tf"
def __call__(self, X, Y):
""" Computes log odds scores of generating output(text) for a given batch of input(text/image) .
Parameters
----------
X: numpy.ndarray
An array containing a list of masked inputs.
Y: numpy.ndarray
An array containing a list of target sentence/ids.
Returns
-------
numpy.ndarray
A numpy array of log odds scores for every input pair (masked_X, X)
"""
output_batch = None
# caching updates output names and target sentence ids
self.update_output_names(Y[:1])
start_batch_idx, end_batch_idx = 0, len(X)
while start_batch_idx < end_batch_idx:
X_batch = X[start_batch_idx:start_batch_idx+self.batch_size]
Y_batch = Y[start_batch_idx:start_batch_idx+self.batch_size]
logits = self.get_teacher_forced_logits(X_batch, Y_batch)
logodds = self.get_logodds(logits)
if output_batch is None:
output_batch = logodds
else:
output_batch = np.concatenate((output_batch, logodds))
start_batch_idx += self.batch_size
return output_batch
def update_output_names(self, output):
""" The function updates output tokens.
It mimics the caching mechanism to update the output tokens for every
new row of explanation that are to be explained.
Parameters
----------
output: numpy.ndarray
Output(sentence/sentence ids) for an explanation row.
"""
# check if the target sentence has been updated (occurs when explaining a new row)
if (self.output is None) or (not np.array_equal(self.output, output)):
self.output = output
self.output_names = self.get_output_names(output)
def get_output_names(self, output):
""" Gets the output tokens by computing the output sentence ids and output names using the similarity_tokenizer.
Parameters
----------
output: numpy.ndarray
Output(sentence/sentence ids) for an explanation row.
Returns
-------
list
A list of output tokens.
"""
output_ids = self.get_outputs(output)
output_names = [self.similarity_tokenizer.decode([x]).strip() for x in output_ids[0, :]]
return output_names
def get_outputs(self, X):
""" The function tokenizes output sentences and returns ids.
Parameters
----------
X: numpy.ndarray
Output(sentence/sentence ids) for an explanation row.
Returns
-------
numpy.ndarray
An array of output(target sentence) ids.
"""
# check if output is a sentence or already parsed target ids
if X.dtype.type is np.str_:
parsed_tokenizer_dict = parse_prefix_suffix_for_tokenizer(self.similarity_tokenizer)
keep_prefix, keep_suffix = parsed_tokenizer_dict['keep_prefix'], parsed_tokenizer_dict['keep_suffix']
if keep_suffix > 0:
output_ids = np.array(self.similarity_tokenizer(X.tolist(), padding=True)["input_ids"])[:, keep_prefix:-keep_suffix]
else:
output_ids = np.array(self.similarity_tokenizer(X.tolist(), padding=True)["input_ids"])[:, keep_prefix:]
else:
output_ids = X
return output_ids
def get_inputs(self, X, padding_side='right'):
""" The function tokenizes source sentences.
In model agnostic case, the function calls model(X) which is expected to
return a batch of output sentences which is tokenized to compute inputs.
Parameters
----------
X: numpy.ndarray
X could be a batch of text or images(model agnostic case).
Returns
-------
dict
Dictionary of padded source sentence ids and attention mask as tensors("pt" or "tf" based on similarity_model_type).
"""
if self.model_agnostic:
# In model agnostic case, we first pass the input through the model and then tokenize output sentence
input_sentences = np.array(self.inner_model(X))
else:
input_sentences = np.array(X)
# set tokenizer padding to prepare inputs for batch inferencing
# padding_side="left" for only decoder models text generation eg. GPT2
self.similarity_tokenizer.padding_side = padding_side
inputs = self.similarity_tokenizer(input_sentences.tolist(), return_tensors=self.similarity_model_type, padding=True)
# set tokenizer padding to default
self.similarity_tokenizer.padding_side = 'right'
return inputs
def get_logodds(self, logits):
""" Calculates log odds from logits.
This function passes the logits through softmax and then computes log odds for the output(target sentence) ids.
Parameters
----------
logits: numpy.ndarray
An array of logits generated from the model.
Returns
-------
numpy.ndarray
Computes log odds for corresponding output ids.
"""
# set output ids for which scores are to be extracted
if self.output.dtype.type is np.str_:
output_ids = self.get_outputs(self.output)[0]
else:
output_ids = self.output[0]
def calc_logodds(arr):
probs = np.exp(arr) / np.exp(arr).sum(-1)
logodds = sp.special.logit(probs)
return logodds
# pass logits through softmax, get the token corresponding score and convert back to log odds (as one vs all)
logodds = np.apply_along_axis(calc_logodds, -1, logits)
logodds_for_output_ids = logodds[:, np.array(range(logodds.shape[1])), output_ids]
return logodds_for_output_ids
def model_inference(self, inputs, output_ids):
""" This function performs model inference for tensorflow and pytorch models.
Parameters
----------
inputs: dict
Dictionary of padded source sentence ids and attention mask as tensors.
output_ids: numpy.ndarray
An array of decoder output ids.
Returns
-------
numpy.ndarray
Returns output logits from the model.
"""
if self.similarity_model_type == "pt":
# create torch tensors and move to device
inputs = inputs.to(self.device)
output_ids = torch.tensor(output_ids, dtype=torch.int64, device=self.device)
self.similarity_model.eval()
with torch.no_grad():
if self.similarity_model.config.is_encoder_decoder:
# model inference
outputs = self.similarity_model(**inputs, decoder_input_ids=output_ids, labels=output_ids, return_dict=True)
else:
# combine source and target sentence ids to pass into decoder eg: in case of distillgpt2
inputs["input_ids"] = torch.cat((inputs["input_ids"], output_ids), dim=-1)
attention_mask_for_output_ids = torch.ones(output_ids.shape, dtype=output_ids.dtype, device=self.device)
inputs["attention_mask"] = torch.cat((inputs["attention_mask"], attention_mask_for_output_ids), dim=-1)
# create position ids due to left padding for decoder models
inputs["position_ids"] = (inputs["attention_mask"].long().cumsum(-1) - 1)
inputs["position_ids"].masked_fill_(inputs["attention_mask"] == 0, 0)
# model inference
outputs = self.similarity_model(**inputs, return_dict=True)
logits = outputs.logits.detach().cpu().numpy().astype('float64')
elif self.similarity_model_type == "tf":
output_ids = tf.convert_to_tensor(output_ids, dtype=tf.int32)
if self.similarity_model.config.is_encoder_decoder:
if self.device is None:
outputs = self.similarity_model(inputs, decoder_input_ids=output_ids, labels=output_ids, return_dict=True)
else:
try:
with tf.device(self.device):
outputs = self.similarity_model(inputs, decoder_input_ids=output_ids, labels=output_ids, return_dict=True)
except RuntimeError as e:
print(e)
else:
# combine source and target sentence ids to pass into decoder eg: in case of distillgpt2
inputs["input_ids"] = tf.concat((inputs["input_ids"], output_ids), axis=-1)
attention_mask_for_output_ids = tf.ones(output_ids.shape, dtype=output_ids.dtype)
inputs["attention_mask"] = tf.concat((inputs["attention_mask"], attention_mask_for_output_ids), axis=-1)
inputs["position_ids"] = tf.math.cumsum(inputs["attention_mask"], axis=-1) - 1
inputs["position_ids"] = tf.where(inputs["attention_mask"] == 0, 0, inputs["position_ids"])
if self.device is None:
outputs = self.similarity_model(inputs, return_dict=True)
else:
try:
with tf.device(self.device):
outputs = self.similarity_model(inputs, return_dict=True)
except RuntimeError as e:
print(e)
logits = outputs.logits.numpy().astype('float64')
return logits
def get_teacher_forced_logits(self, X, Y):
""" The function generates logits for transformer models.
It generates logits for encoder-decoder models as well as decoder only models by using the teacher forcing technique.
Parameters
----------
X: numpy.ndarray
An array containing a list of masked inputs.
Y: numpy.ndarray
An array containing a list of target sentence/ids.
Returns
-------
numpy.ndarray
Decoder output logits for the output (target sentence) ids.
"""
# check if type of model architecture assigned in model config
if (hasattr(self.similarity_model.config, "is_encoder_decoder") and not self.similarity_model.config.is_encoder_decoder) \
and (hasattr(self.similarity_model.config, "is_decoder") and not self.similarity_model.config.is_decoder):
raise ValueError(
"Please assign either of is_encoder_decoder or is_decoder to True in model config for extracting target sentence ids"
)
# get output ids for teacher forcing
output_ids = self.get_outputs(Y)
if self.similarity_model.config.is_encoder_decoder:
# encode batched inputs by padding on the right side
inputs = self.get_inputs(X, padding_side='right')
# assigning decoder start token id as it is needed for encoder decoder model generation
decoder_start_token_id = None
if hasattr(self.similarity_model.config, "decoder_start_token_id") and \
self.similarity_model.config.decoder_start_token_id is not None:
decoder_start_token_id = self.similarity_model.config.decoder_start_token_id
elif hasattr(self.similarity_model.config, "bos_token_id") and self.similarity_model.config.bos_token_id is not None:
decoder_start_token_id = self.similarity_model.config.bos_token_id
elif (hasattr(self.similarity_model.config, "decoder") and hasattr(self.similarity_model.config.decoder, "bos_token_id") and \
self.similarity_model.config.decoder.bos_token_id is not None):
decoder_start_token_id = self.similarity_model.config.decoder.bos_token_id
else:
raise ValueError(
"No decoder_start_token_id or bos_token_id defined in config for encoder-decoder generation"
)
# concat decoder start token id to target sentence ids
output_start_id = np.ones((output_ids.shape[0], 1)) * decoder_start_token_id
output_ids = np.concatenate((output_start_id, output_ids), axis=-1)
# generate outputs and logits
logits = self.model_inference(inputs, output_ids)
logits = logits[:, :-1, :]
else:
# encode batched inputs by padding on the left side
inputs = self.get_inputs(X, padding_side='left')
# generate outputs and logits
logits = self.model_inference(inputs, output_ids)
# extract only logits corresponding to target sentence ids
logits = logits[:, -output_ids.shape[1]-1:-1, :]
return logits
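# Hedged shape sketch: for a batch of B masked inputs and T target ids, both
# branches end with logits of shape (B, T, vocab_size) -- one next-token
# distribution per target position -- which get_logodds() then reduces to a
# (B, T) array of per-token scores.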
def save(self, out_file):
super().save(out_file)
# Increment the version number when the encoding changes!
with Serializer(out_file, "shap.models.TeacherForcing", version=0) as s:
s.save("tokenizer", self.tokenizer)
s.save("similarity_model", self.similarity_model)
s.save("similarity_tokenizer", self.similarity_tokenizer)
s.save("batch_size", self.batch_size)
s.save("device", self.device)
@classmethod
def load(cls, in_file, instantiate=True):
if instantiate:
return cls._instantiated_load(in_file)
kwargs = super().load(in_file, instantiate=False)
with Deserializer(in_file, "shap.models.TeacherForcing", min_version=0, max_version=0) as s:
kwargs["tokenizer"] = s.load("tokenizer")
kwargs["similarity_model"] = s.load("similarity_model")
kwargs["similarity_tokenizer"] = s.load("similarity_tokenizer")
kwargs["batch_size"] = s.load("batch_size")
kwargs["device"] = s.load("device")
return kwargs
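# Hedged round-trip sketch (the file name is a placeholder):
#   with open("teacher_forcing.bin", "wb") as f:
#       wrapper.save(f)
#   with open("teacher_forcing.bin", "rb") as f:
#       restored = TeacherForcing.load(f)
# load() re-instantiates the class from the saved tokenizer, similarity model,
# similarity tokenizer, batch size and device recorded by save() above.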
|
py | 1a496a9842853b6c5781251737b008a3c4626002 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from __future__ import print_function
import os
import re
import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp
import spack.repo
import spack.stage
import spack.util.web
from spack.spec import Spec
from spack.url import (
UndetectableNameError,
UndetectableVersionError,
parse_name,
parse_version,
)
from spack.util.editor import editor
from spack.util.executable import ProcessError, which
from spack.util.naming import (
mod_to_class,
simplify_name,
valid_fully_qualified_module_name,
)
description = "create a new package file"
section = "packaging"
level = "short"
package_template = '''\
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
# ----------------------------------------------------------------------------
# If you submit this package back to Spack as a pull request,
# please first remove this boilerplate and all FIXME comments.
#
# This is a template package file for Spack. We've put "FIXME"
# next to all the things you'll want to change. Once you've handled
# them, you can save this file and test your package like this:
#
# spack install {name}
#
# You can edit this file again by typing:
#
# spack edit {name}
#
# See the Spack documentation for more information on packaging.
# ----------------------------------------------------------------------------
from spack import *
class {class_name}({base_class_name}):
"""FIXME: Put a proper description of your package here."""
# FIXME: Add a proper url for your package's homepage here.
homepage = "https://www.example.com"
{url_def}
# FIXME: Add a list of GitHub accounts to
# notify when the package is updated.
# maintainers = ['github_user1', 'github_user2']
{versions}
{dependencies}
{body_def}
'''
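# Rough sketch of the rendered output (assumption, for a hypothetical CMake-based
# package named "example"): the format() call in write() below would start the
# generated file with something like
#   class Example(CMakePackage):
#       """FIXME: Put a proper description of your package here."""
#       homepage = "https://www.example.com"
#       url = "https://www.example.com/example-1.2.3.tar.gz"
# with {versions}, {dependencies} and {body_def} filled in by whichever
# *PackageTemplate subclass below was selected.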
class BundlePackageTemplate(object):
"""
Provides the default values to be used for a bundle package file template.
"""
base_class_name = 'BundlePackage'
dependencies = """\
# FIXME: Add dependencies if required.
# depends_on('foo')"""
url_def = " # There is no URL since there is no code to download."
body_def = " # There is no need for install() since there is no code."
def __init__(self, name, versions):
self.name = name
self.class_name = mod_to_class(name)
self.versions = versions
def write(self, pkg_path):
"""Writes the new package file."""
# Write out a template for the file
with open(pkg_path, "w") as pkg_file:
pkg_file.write(package_template.format(
name=self.name,
class_name=self.class_name,
base_class_name=self.base_class_name,
url_def=self.url_def,
versions=self.versions,
dependencies=self.dependencies,
body_def=self.body_def))
class PackageTemplate(BundlePackageTemplate):
"""Provides the default values to be used for the package file template"""
base_class_name = 'Package'
body_def = """\
def install(self, spec, prefix):
# FIXME: Unknown build system
make()
make('install')"""
url_line = ' url = "{url}"'
def __init__(self, name, url, versions):
super(PackageTemplate, self).__init__(name, versions)
self.url_def = self.url_line.format(url=url)
class AutotoolsPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for Autotools-based packages
that *do* come with a ``configure`` script"""
base_class_name = 'AutotoolsPackage'
body_def = """\
def configure_args(self):
# FIXME: Add arguments other than --prefix
# FIXME: If not needed delete this function
args = []
return args"""
class AutoreconfPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for Autotools-based packages
that *do not* come with a ``configure`` script"""
base_class_name = 'AutotoolsPackage'
dependencies = """\
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
# FIXME: Add additional dependencies if required.
# depends_on('foo')"""
body_def = """\
def autoreconf(self, spec, prefix):
# FIXME: Modify the autoreconf method as necessary
autoreconf('--install', '--verbose', '--force')
def configure_args(self):
# FIXME: Add arguments other than --prefix
# FIXME: If not needed delete this function
args = []
return args"""
class CMakePackageTemplate(PackageTemplate):
"""Provides appropriate overrides for CMake-based packages"""
base_class_name = 'CMakePackage'
body_def = """\
def cmake_args(self):
# FIXME: Add arguments other than
# FIXME: CMAKE_INSTALL_PREFIX and CMAKE_BUILD_TYPE
# FIXME: If not needed delete this function
args = []
return args"""
class MesonPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for meson-based packages"""
base_class_name = 'MesonPackage'
body_def = """\
def meson_args(self):
# FIXME: If not needed delete this function
args = []
return args"""
class QMakePackageTemplate(PackageTemplate):
"""Provides appropriate overrides for QMake-based packages"""
base_class_name = 'QMakePackage'
body_def = """\
def qmake_args(self):
# FIXME: If not needed delete this function
args = []
return args"""
class MavenPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for Maven-based packages"""
base_class_name = 'MavenPackage'
body_def = """\
def build(self, spec, prefix):
# FIXME: If not needed delete this function
pass"""
class SconsPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for SCons-based packages"""
base_class_name = 'SConsPackage'
body_def = """\
def build_args(self, spec, prefix):
# FIXME: Add arguments to pass to build.
# FIXME: If not needed delete this function
args = []
return args"""
class WafPackageTemplate(PackageTemplate):
"""Provides appropriate override for Waf-based packages"""
base_class_name = 'WafPackage'
body_def = """\
# FIXME: Override configure_args(), build_args(),
# or install_args() if necessary."""
class BazelPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for Bazel-based packages"""
dependencies = """\
# FIXME: Add additional dependencies if required.
depends_on('bazel', type='build')"""
body_def = """\
def install(self, spec, prefix):
# FIXME: Add logic to build and install here.
bazel()"""
class PythonPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for python extensions"""
base_class_name = 'PythonPackage'
dependencies = """\
# FIXME: Only add the python/pip/wheel dependencies if you need specific versions
# or need to change the dependency type. Generic python/pip/wheel dependencies are
# added implicitly by the PythonPackage base class.
# depends_on('[email protected]:2.Y,3.Z:', type=('build', 'run'))
# depends_on('[email protected]:', type='build')
# depends_on('[email protected]:', type='build')
# FIXME: Add a build backend, usually defined in pyproject.toml. If no such file
# exists, use setuptools.
# depends_on('py-setuptools', type='build')
# depends_on('py-flit-core', type='build')
# depends_on('py-poetry-core', type='build')
# FIXME: Add additional dependencies if required.
# depends_on('py-foo', type=('build', 'run'))"""
body_def = """\
def global_options(self, spec, prefix):
# FIXME: Add options to pass to setup.py
# FIXME: If not needed, delete this function
options = []
return options
def install_options(self, spec, prefix):
# FIXME: Add options to pass to setup.py install
# FIXME: If not needed, delete this function
options = []
return options"""
def __init__(self, name, url, *args, **kwargs):
# If the user provided `--name py-numpy`, don't rename it py-py-numpy
if not name.startswith('py-'):
# Make it more obvious that we are renaming the package
tty.msg("Changing package name from {0} to py-{0}".format(name))
name = 'py-{0}'.format(name)
# Simple PyPI URLs:
# https://<hostname>/packages/<type>/<first character of project>/<project>/<download file>
# e.g. https://pypi.io/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://www.pypi.io/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://pypi.org/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://pypi.python.org/packages/source/n/numpy/numpy-1.19.4.zip
# e.g. https://files.pythonhosted.org/packages/source/n/numpy/numpy-1.19.4.zip
# PyPI URLs containing hash:
# https://<hostname>/packages/<two character hash>/<two character hash>/<longer hash>/<download file>
# e.g. https://pypi.io/packages/c5/63/a48648ebc57711348420670bb074998f79828291f68aebfff1642be212ec/numpy-1.19.4.zip
# e.g. https://files.pythonhosted.org/packages/c5/63/a48648ebc57711348420670bb074998f79828291f68aebfff1642be212ec/numpy-1.19.4.zip
# e.g. https://files.pythonhosted.org/packages/c5/63/a48648ebc57711348420670bb074998f79828291f68aebfff1642be212ec/numpy-1.19.4.zip#sha256=141ec3a3300ab89c7f2b0775289954d193cc8edb621ea05f99db9cb181530512
# PyPI URLs for wheels:
# https://pypi.io/packages/py3/a/azureml_core/azureml_core-1.11.0-py3-none-any.whl
# https://pypi.io/packages/py3/d/dotnetcore2/dotnetcore2-2.1.14-py3-none-macosx_10_9_x86_64.whl
# https://pypi.io/packages/py3/d/dotnetcore2/dotnetcore2-2.1.14-py3-none-manylinux1_x86_64.whl
# https://files.pythonhosted.org/packages/cp35.cp36.cp37.cp38.cp39/s/shiboken2/shiboken2-5.15.2-5.15.2-cp35.cp36.cp37.cp38.cp39-abi3-manylinux1_x86_64.whl
# https://files.pythonhosted.org/packages/f4/99/ad2ef1aeeb395ee2319bb981ea08dbbae878d30dd28ebf27e401430ae77a/azureml_core-1.36.0.post2-py3-none-any.whl#sha256=60bcad10b4380d78a8280deb7365de2c2cd66527aacdcb4a173f613876cbe739
match = re.search(
r'(?:pypi|pythonhosted)[^/]+/packages' + '/([^/#]+)' * 4,
url
)
if match:
# PyPI URLs for wheels are too complicated, ignore them for now
# https://www.python.org/dev/peps/pep-0427/#file-name-convention
if not match.group(4).endswith('.whl'):
if len(match.group(2)) == 1:
# Simple PyPI URL
url = '/'.join(match.group(3, 4))
else:
# PyPI URL containing hash
# Project name doesn't necessarily match download name, but it
# usually does, so this is the best we can do
project = parse_name(url)
url = '/'.join([project, match.group(4)])
self.url_line = ' pypi = "{url}"'
else:
# Add a reminder about spack preferring PyPI URLs
self.url_line = '''
# FIXME: ensure the package is not available through PyPI. If it is,
# re-run `spack create --force` with the PyPI URL.
''' + self.url_line
super(PythonPackageTemplate, self).__init__(name, url, *args, **kwargs)
class RPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for R extensions"""
base_class_name = 'RPackage'
dependencies = """\
# FIXME: Add dependencies if required.
# depends_on('r-foo', type=('build', 'run'))"""
body_def = """\
def configure_args(self):
# FIXME: Add arguments to pass to install via --configure-args
# FIXME: If not needed delete this function
args = []
return args"""
def __init__(self, name, url, *args, **kwargs):
# If the user provided `--name r-rcpp`, don't rename it r-r-rcpp
if not name.startswith('r-'):
# Make it more obvious that we are renaming the package
tty.msg("Changing package name from {0} to r-{0}".format(name))
name = 'r-{0}'.format(name)
r_name = parse_name(url)
cran = re.search(
r'(?:r-project|rstudio)[^/]+/src' + '/([^/]+)' * 2,
url
)
if cran:
url = r_name
self.url_line = ' cran = "{url}"'
bioc = re.search(
r'(?:bioconductor)[^/]+/packages' + '/([^/]+)' * 5,
url
)
if bioc:
self.url_line = ' url = "{0}"\n'\
' bioc = "{1}"'.format(url, r_name)
super(RPackageTemplate, self).__init__(name, url, *args, **kwargs)
class PerlmakePackageTemplate(PackageTemplate):
"""Provides appropriate overrides for Perl extensions
that come with a Makefile.PL"""
base_class_name = 'PerlPackage'
dependencies = """\
# FIXME: Add dependencies if required:
# depends_on('perl-foo', type=('build', 'run'))"""
body_def = """\
def configure_args(self):
# FIXME: Add non-standard arguments
# FIXME: If not needed delete this function
args = []
return args"""
def __init__(self, name, *args, **kwargs):
# If the user provided `--name perl-cpp`, don't rename it perl-perl-cpp
if not name.startswith('perl-'):
# Make it more obvious that we are renaming the package
tty.msg("Changing package name from {0} to perl-{0}".format(name))
name = 'perl-{0}'.format(name)
super(PerlmakePackageTemplate, self).__init__(name, *args, **kwargs)
class PerlbuildPackageTemplate(PerlmakePackageTemplate):
"""Provides appropriate overrides for Perl extensions
that come with a Build.PL instead of a Makefile.PL"""
dependencies = """\
depends_on('perl-module-build', type='build')
# FIXME: Add additional dependencies if required:
# depends_on('perl-foo', type=('build', 'run'))"""
class OctavePackageTemplate(PackageTemplate):
"""Provides appropriate overrides for octave packages"""
base_class_name = 'OctavePackage'
dependencies = """\
extends('octave')
# FIXME: Add additional dependencies if required.
# depends_on('octave-foo', type=('build', 'run'))"""
def __init__(self, name, *args, **kwargs):
# If the user provided `--name octave-splines`, don't rename it
# octave-octave-splines
if not name.startswith('octave-'):
# Make it more obvious that we are renaming the package
tty.msg("Changing package name from {0} to octave-{0}".format(name)) # noqa
name = 'octave-{0}'.format(name)
super(OctavePackageTemplate, self).__init__(name, *args, **kwargs)
class RubyPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for Ruby packages"""
base_class_name = 'RubyPackage'
dependencies = """\
# FIXME: Add dependencies if required. Only add the ruby dependency
# if you need specific versions. A generic ruby dependency is
# added implicitly by the RubyPackage class.
# depends_on('[email protected]:', type=('build', 'run'))
# depends_on('ruby-foo', type=('build', 'run'))"""
body_def = """\
def build(self, spec, prefix):
# FIXME: If not needed delete this function
pass"""
def __init__(self, name, *args, **kwargs):
# If the user provided `--name ruby-numpy`, don't rename it
# ruby-ruby-numpy
if not name.startswith('ruby-'):
# Make it more obvious that we are renaming the package
tty.msg("Changing package name from {0} to ruby-{0}".format(name))
name = 'ruby-{0}'.format(name)
super(RubyPackageTemplate, self).__init__(name, *args, **kwargs)
class MakefilePackageTemplate(PackageTemplate):
"""Provides appropriate overrides for Makefile packages"""
base_class_name = 'MakefilePackage'
body_def = """\
def edit(self, spec, prefix):
# FIXME: Edit the Makefile if necessary
# FIXME: If not needed delete this function
# makefile = FileFilter('Makefile')
# makefile.filter('CC = .*', 'CC = cc')"""
class IntelPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for licensed Intel software"""
base_class_name = 'IntelPackage'
body_def = """\
# FIXME: Override `setup_environment` if necessary."""
class SIPPackageTemplate(PackageTemplate):
"""Provides appropriate overrides for SIP packages."""
base_class_name = 'SIPPackage'
body_def = """\
def configure_args(self, spec, prefix):
# FIXME: Add arguments other than --bindir and --destdir
# FIXME: If not needed delete this function
args = []
return args"""
def __init__(self, name, *args, **kwargs):
# If the user provided `--name py-pyqt4`, don't rename it py-py-pyqt4
if not name.startswith('py-'):
# Make it more obvious that we are renaming the package
tty.msg("Changing package name from {0} to py-{0}".format(name))
name = 'py-{0}'.format(name)
super(SIPPackageTemplate, self).__init__(name, *args, **kwargs)
templates = {
'autotools': AutotoolsPackageTemplate,
'autoreconf': AutoreconfPackageTemplate,
'cmake': CMakePackageTemplate,
'bundle': BundlePackageTemplate,
'qmake': QMakePackageTemplate,
'maven': MavenPackageTemplate,
'scons': SconsPackageTemplate,
'waf': WafPackageTemplate,
'bazel': BazelPackageTemplate,
'python': PythonPackageTemplate,
'r': RPackageTemplate,
'perlmake': PerlmakePackageTemplate,
'perlbuild': PerlbuildPackageTemplate,
'octave': OctavePackageTemplate,
'ruby': RubyPackageTemplate,
'makefile': MakefilePackageTemplate,
'intel': IntelPackageTemplate,
'meson': MesonPackageTemplate,
'sip': SIPPackageTemplate,
'generic': PackageTemplate,
}
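# Hedged dispatch sketch: the build-system name chosen by get_build_system()
# is used as the key into this dict, e.g.
#   package_class = templates['cmake']
#   package = package_class(name='example', url=url, versions=versions)
#   package.write(pkg_path)
# BundlePackageTemplate is the one entry whose constructor takes no URL, which
# is why create() below only passes 'url' for the non-bundle case.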
def setup_parser(subparser):
subparser.add_argument(
'url', nargs='?',
help="url of package archive")
subparser.add_argument(
'--keep-stage', action='store_true',
help="don't clean up staging area when command completes")
subparser.add_argument(
'-n', '--name',
help="name of the package to create")
subparser.add_argument(
'-t', '--template', metavar='TEMPLATE',
choices=sorted(templates.keys()),
help="build system template to use. options: %(choices)s")
subparser.add_argument(
'-r', '--repo',
help="path to a repository where the package should be created")
subparser.add_argument(
'-N', '--namespace',
help="specify a namespace for the package. must be the namespace of "
"a repository registered with Spack")
subparser.add_argument(
'-f', '--force', action='store_true',
help="overwrite any existing package file with the same name")
subparser.add_argument(
'--skip-editor', action='store_true',
help="skip the edit session for the package (e.g., automation)")
subparser.add_argument(
'-b', '--batch', action='store_true',
help="don't ask which versions to checksum")
class BuildSystemGuesser:
"""An instance of BuildSystemGuesser provides a callable object to be used
during ``spack create``. By passing this object to ``spack checksum``, we
can take a peek at the fetched tarball and discern the build system it uses
"""
def __init__(self):
"""Sets the default build system."""
self.build_system = 'generic'
def __call__(self, stage, url):
"""Try to guess the type of build system used by a project based on
the contents of its archive or the URL it was downloaded from."""
if url is not None:
# Most octave extensions are hosted on Octave-Forge:
# https://octave.sourceforge.net/index.html
# They all have the same base URL.
if 'downloads.sourceforge.net/octave/' in url:
self.build_system = 'octave'
return
if url.endswith('.gem'):
self.build_system = 'ruby'
return
if url.endswith('.whl') or '.whl#' in url:
self.build_system = 'python'
return
# A list of clues that give us an idea of the build system a package
# uses. If the regular expression matches a file contained in the
# archive, the corresponding build system is assumed.
# NOTE: Order is important here. If a package supports multiple
# build systems, we choose the first match in this list.
clues = [
(r'/CMakeLists\.txt$', 'cmake'),
(r'/NAMESPACE$', 'r'),
(r'/configure$', 'autotools'),
(r'/configure\.(in|ac)$', 'autoreconf'),
(r'/Makefile\.am$', 'autoreconf'),
(r'/pom\.xml$', 'maven'),
(r'/SConstruct$', 'scons'),
(r'/waf$', 'waf'),
(r'/pyproject\.toml$', 'python'),
(r'/setup\.(py|cfg)$', 'python'),
(r'/WORKSPACE$', 'bazel'),
(r'/Build\.PL$', 'perlbuild'),
(r'/Makefile\.PL$', 'perlmake'),
(r'/.*\.gemspec$', 'ruby'),
(r'/Rakefile$', 'ruby'),
(r'/setup\.rb$', 'ruby'),
(r'/.*\.pro$', 'qmake'),
(r'/(GNU)?[Mm]akefile$', 'makefile'),
(r'/DESCRIPTION$', 'octave'),
(r'/meson\.build$', 'meson'),
(r'/configure\.py$', 'sip'),
]
# Peek inside the compressed file.
if (stage.archive_file.endswith('.zip') or
'.zip#' in stage.archive_file):
try:
unzip = which('unzip')
output = unzip('-lq', stage.archive_file, output=str)
except ProcessError:
output = ''
else:
try:
tar = which('tar')
output = tar('--exclude=*/*/*', '-tf',
stage.archive_file, output=str)
except ProcessError:
output = ''
lines = output.split('\n')
# Determine the build system based on the files contained
# in the archive.
for pattern, bs in clues:
if any(re.search(pattern, line) for line in lines):
self.build_system = bs
break
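# Hedged usage sketch: spack's checksum machinery calls the instance once per
# fetched archive, e.g.
#   guesser = BuildSystemGuesser()
#   guesser(stage, url)       # peeks inside the staged archive
#   guesser.build_system      # e.g. 'cmake', or 'generic' if nothing matched
# get_versions() below wires this up by passing the instance as the
# first_stage_function of spack.stage.get_checksums_for_versions().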
def get_name(args):
"""Get the name of the package based on the supplied arguments.
If a name was provided, always use that. Otherwise, if a URL was
provided, extract the name from that. Otherwise, use a default.
Args:
args (argparse.Namespace): The arguments given to
``spack create``
Returns:
str: The name of the package
"""
# Default package name
name = 'example'
if args.name is not None:
# Use a user-supplied name if one is present
name = args.name
if len(args.name.strip()) > 0:
tty.msg("Using specified package name: '{0}'".format(name))
else:
tty.die("A package name must be provided when using the option.")
elif args.url is not None:
# Try to guess the package name based on the URL
try:
name = parse_name(args.url)
if name != args.url:
desc = 'URL'
else:
desc = 'package name'
tty.msg("This looks like a {0} for {1}".format(desc, name))
except UndetectableNameError:
tty.die("Couldn't guess a name for this package.",
" Please report this bug. In the meantime, try running:",
" `spack create --name <name> <url>`")
name = simplify_name(name)
if not valid_fully_qualified_module_name(name):
tty.die("Package name can only contain a-z, 0-9, and '-'")
return name
def get_url(args):
"""Get the URL to use.
Use a default URL if none is provided.
Args:
args (argparse.Namespace): The arguments given to ``spack create``
Returns:
str: The URL of the package
"""
# Default URL
url = 'https://www.example.com/example-1.2.3.tar.gz'
if args.url:
# Use a user-supplied URL if one is present
url = args.url
return url
def get_versions(args, name):
"""Returns a list of versions and hashes for a package.
Also returns a BuildSystemGuesser object.
Returns default values if no URL is provided.
Args:
args (argparse.Namespace): The arguments given to ``spack create``
name (str): The name of the package
Returns:
tuple: versions and hashes, and a BuildSystemGuesser object
"""
# Default version with hash
hashed_versions = """\
# FIXME: Add proper versions and checksums here.
# version('1.2.3', '0123456789abcdef0123456789abcdef')"""
# Default version without hash
unhashed_versions = """\
# FIXME: Add proper versions here.
# version('1.2.4')"""
# Default guesser
guesser = BuildSystemGuesser()
if args.url is not None and args.template != 'bundle':
# Find available versions
try:
url_dict = spack.util.web.find_versions_of_archive(args.url)
except UndetectableVersionError:
# Use fake versions
tty.warn("Couldn't detect version in: {0}".format(args.url))
return hashed_versions, guesser
if not url_dict:
# If no versions were found, revert to what the user provided
version = parse_version(args.url)
url_dict = {version: args.url}
versions = spack.stage.get_checksums_for_versions(
url_dict, name, first_stage_function=guesser,
keep_stage=args.keep_stage,
batch=(args.batch or len(url_dict) == 1))
else:
versions = unhashed_versions
return versions, guesser
def get_build_system(args, guesser):
"""Determine the build system template.
If a template is specified, always use that. Otherwise, if a URL
is provided, download the tarball and peek inside to guess what
build system it uses. Otherwise, use a generic template by default.
Args:
args (argparse.Namespace): The arguments given to ``spack create``
guesser (BuildSystemGuesser): The first_stage_function given to
``spack checksum`` which records the build system it detects
Returns:
str: The name of the build system template to use
"""
# Default template
template = 'generic'
if args.template is not None:
# Use a user-supplied template if one is present
template = args.template
tty.msg("Using specified package template: '{0}'".format(template))
elif args.url is not None:
# Use whatever build system the guesser detected
template = guesser.build_system
if template == 'generic':
tty.warn("Unable to detect a build system. "
"Using a generic package template.")
else:
msg = "This package looks like it uses the {0} build system"
tty.msg(msg.format(template))
return template
def get_repository(args, name):
"""Returns a Repo object that will allow us to determine the path where
the new package file should be created.
Args:
args (argparse.Namespace): The arguments given to ``spack create``
name (str): The name of the package to create
Returns:
spack.repo.Repo: A Repo object capable of determining the path to the
package file
"""
spec = Spec(name)
# Figure out namespace for spec
if spec.namespace and args.namespace and spec.namespace != args.namespace:
tty.die("Namespaces '{0}' and '{1}' do not match.".format(
spec.namespace, args.namespace))
if not spec.namespace and args.namespace:
spec.namespace = args.namespace
# Figure out where the new package should live
repo_path = args.repo
if repo_path is not None:
repo = spack.repo.Repo(repo_path)
if spec.namespace and spec.namespace != repo.namespace:
tty.die("Can't create package with namespace {0} in repo with "
"namespace {1}".format(spec.namespace, repo.namespace))
else:
if spec.namespace:
repo = spack.repo.path.get_repo(spec.namespace, None)
if not repo:
tty.die("Unknown namespace: '{0}'".format(spec.namespace))
else:
repo = spack.repo.path.first_repo()
# Set the namespace on the spec if it's not there already
if not spec.namespace:
spec.namespace = repo.namespace
return repo
def create(parser, args):
# Gather information about the package to be created
name = get_name(args)
url = get_url(args)
versions, guesser = get_versions(args, name)
build_system = get_build_system(args, guesser)
# Create the package template object
constr_args = {'name': name, 'versions': versions}
package_class = templates[build_system]
if package_class != BundlePackageTemplate:
constr_args['url'] = url
package = package_class(**constr_args)
tty.msg("Created template for {0} package".format(package.name))
# Create a directory for the new package
repo = get_repository(args, name)
pkg_path = repo.filename_for_package_name(package.name)
if os.path.exists(pkg_path) and not args.force:
tty.die('{0} already exists.'.format(pkg_path),
' Try running `spack create --force` to overwrite it.')
else:
mkdirp(os.path.dirname(pkg_path))
# Write the new package file
package.write(pkg_path)
tty.msg("Created package file: {0}".format(pkg_path))
# Optionally open up the new package file in your $EDITOR
if not args.skip_editor:
editor(pkg_path)
|
py | 1a496b335035dc7a06071acb3dc2394c6e28ddc9 | # Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Placeholder docstring"""
from __future__ import absolute_import
import os
import subprocess
import tempfile
import warnings
import six
from six.moves import urllib
def git_clone_repo(git_config, entry_point, source_dir=None, dependencies=None):
"""Git clone repo containing the training code and serving code. This method
also validate ``git_config``, and set ``entry_point``, ``source_dir`` and
``dependencies`` to the right file or directory in the repo cloned.
Args:
git_config (dict[str, str]): Git configurations used for cloning files,
including ``repo``, ``branch``, ``commit``, ``2FA_enabled``,
``username``, ``password`` and ``token``. The ``repo`` field is
required. All other fields are optional. ``repo`` specifies the Git
repository where your training script is stored. If you don't
provide ``branch``, the default value 'master' is used. If you don't
provide ``commit``, the latest commit in the specified branch is
used. ``2FA_enabled``, ``username``, ``password`` and ``token`` are
for authentication purpose. If ``2FA_enabled`` is not provided, we
consider 2FA as disabled.
For GitHub and GitHub-like repos, when SSH URLs are provided, it
doesn't matter whether 2FA is enabled or disabled; you should either
have no passphrase for the SSH key pairs, or have the ssh-agent
configured so that you will not be prompted for SSH passphrase when
you do 'git clone' command with SSH URLs. When https URLs are
provided: if 2FA is disabled, then either token or username+password
will be used for authentication if provided (token prioritized); if
2FA is enabled, only token will be used for authentication if
provided. If required authentication info is not provided, python
SDK will try to use local credentials storage to authenticate. If
that fails either, an error message will be thrown.
For CodeCommit repos, 2FA is not supported, so '2FA_enabled' should
not be provided. There is no token in CodeCommit, so 'token' should
not be provided either. When 'repo' is an SSH URL, the requirements are
the same as GitHub-like repos. When 'repo' is an https URL,
username+password will be used for authentication if they are
provided; otherwise, python SDK will try to use either CodeCommit
credential helper or local credential storage for authentication.
entry_point (str): A relative location to the Python source file which
should be executed as the entry point to training or model hosting
in the Git repo.
source_dir (str): A relative location to a directory with other training
or model hosting source code dependencies aside from the entry point
file in the Git repo (default: None). Structure within this
directory are preserved when training on Amazon SageMaker.
dependencies (list[str]): A list of relative locations to directories
with any additional libraries that will be exported to the container
in the Git repo (default: []).
Returns:
dict: A dict that contains the updated values of entry_point, source_dir
and dependencies.
Raises:
CalledProcessError: If 1. failed to clone git repo
2. failed to checkout the required branch
3. failed to checkout the required commit
ValueError: If 1. entry point specified does not exist in the repo
2. source dir specified does not exist in the repo
3. dependencies specified do not exist in the repo
4. wrong format is provided for git_config
"""
if entry_point is None:
raise ValueError("Please provide an entry point.")
_validate_git_config(git_config)
dest_dir = tempfile.mkdtemp()
_generate_and_run_clone_command(git_config, dest_dir)
_checkout_branch_and_commit(git_config, dest_dir)
updated_paths = {
"entry_point": entry_point,
"source_dir": source_dir,
"dependencies": dependencies,
}
# check if the cloned repo contains entry point, source directory and dependencies
if source_dir:
if not os.path.isdir(os.path.join(dest_dir, source_dir)):
raise ValueError("Source directory does not exist in the repo.")
if not os.path.isfile(os.path.join(dest_dir, source_dir, entry_point)):
raise ValueError("Entry point does not exist in the repo.")
updated_paths["source_dir"] = os.path.join(dest_dir, source_dir)
else:
if os.path.isfile(os.path.join(dest_dir, entry_point)):
updated_paths["entry_point"] = os.path.join(dest_dir, entry_point)
else:
raise ValueError("Entry point does not exist in the repo.")
if dependencies is not None:
updated_paths["dependencies"] = []
for path in dependencies:
if os.path.exists(os.path.join(dest_dir, path)):
updated_paths["dependencies"].append(os.path.join(dest_dir, path))
else:
raise ValueError("Dependency {} does not exist in the repo.".format(path))
return updated_paths
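# Hedged usage sketch (all values are placeholders, not SDK defaults):
#   git_config = {"repo": "https://github.com/user/repo.git",
#                 "branch": "main", "2FA_enabled": False,
#                 "username": "user", "password": "secret"}
#   paths = git_clone_repo(git_config, entry_point="train.py", source_dir="src")
# paths["source_dir"] then points at the checkout under a tempfile.mkdtemp()
# directory, and the caller uses those resolved paths in place of the original
# relative ones.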
def _validate_git_config(git_config):
"""
Args:
git_config:
"""
if "repo" not in git_config:
raise ValueError("Please provide a repo for git_config.")
for key in git_config:
if key == "2FA_enabled":
if not isinstance(git_config["2FA_enabled"], bool):
raise ValueError("Please enter a bool type for 2FA_enabled'.")
elif not isinstance(git_config[key], six.string_types):
raise ValueError("'{}' must be a string.".format(key))
def _generate_and_run_clone_command(git_config, dest_dir):
"""check if a git_config param is valid, if it is, create the command to git
clone the repo, and run it.
Args:
git_config ((dict[str, str]): Git configurations used for cloning files,
including ``repo``, ``branch`` and ``commit``.
dest_dir (str): The local directory to clone the Git repo into.
Raises:
CalledProcessError: If failed to clone git repo.
"""
if git_config["repo"].startswith("https://git-codecommit") or git_config["repo"].startswith(
"ssh://git-codecommit"
):
_clone_command_for_codecommit(git_config, dest_dir)
else:
_clone_command_for_github_like(git_config, dest_dir)
def _clone_command_for_github_like(git_config, dest_dir):
"""check if a git_config param representing a GitHub (or like) repo is
valid, if it is, create the command to git clone the repo, and run it.
Args:
git_config ((dict[str, str]): Git configurations used for cloning files,
including ``repo``, ``branch`` and ``commit``.
dest_dir (str): The local directory to clone the Git repo into.
Raises:
ValueError: If git_config['repo'] is in the wrong format.
CalledProcessError: If failed to clone git repo.
"""
is_https = git_config["repo"].startswith("https://")
is_ssh = git_config["repo"].startswith("git@")
if not is_https and not is_ssh:
raise ValueError("Invalid Git url provided.")
if is_ssh:
_clone_command_for_ssh(git_config, dest_dir)
elif "2FA_enabled" in git_config and git_config["2FA_enabled"] is True:
_clone_command_for_github_like_https_2fa_enabled(git_config, dest_dir)
else:
_clone_command_for_github_like_https_2fa_disabled(git_config, dest_dir)
def _clone_command_for_ssh(git_config, dest_dir):
"""
Args:
git_config:
dest_dir:
"""
if "username" in git_config or "password" in git_config or "token" in git_config:
warnings.warn("SSH cloning, authentication information in git config will be ignored.")
_run_clone_command(git_config["repo"], dest_dir)
def _clone_command_for_github_like_https_2fa_disabled(git_config, dest_dir):
"""
Args:
git_config:
dest_dir:
"""
updated_url = git_config["repo"]
if "token" in git_config:
if "username" in git_config or "password" in git_config:
warnings.warn("Using token for authentication, " "other credentials will be ignored.")
updated_url = _insert_token_to_repo_url(url=git_config["repo"], token=git_config["token"])
elif "username" in git_config and "password" in git_config:
updated_url = _insert_username_and_password_to_repo_url(
url=git_config["repo"], username=git_config["username"], password=git_config["password"]
)
elif "username" in git_config or "password" in git_config:
warnings.warn("Credentials provided in git config will be ignored.")
_run_clone_command(updated_url, dest_dir)
def _clone_command_for_github_like_https_2fa_enabled(git_config, dest_dir):
"""
Args:
git_config:
dest_dir:
"""
updated_url = git_config["repo"]
if "token" in git_config:
if "username" in git_config or "password" in git_config:
warnings.warn("Using token for authentication, " "other credentials will be ignored.")
updated_url = _insert_token_to_repo_url(url=git_config["repo"], token=git_config["token"])
_run_clone_command(updated_url, dest_dir)
def _clone_command_for_codecommit(git_config, dest_dir):
"""check if a git_config param representing a CodeCommit repo is valid, if
it is, create the command to git clone the repo, and run it.
Args:
git_config ((dict[str, str]): Git configurations used for cloning files,
including ``repo``, ``branch`` and ``commit``.
dest_dir (str): The local directory to clone the Git repo into.
Raises:
ValueError: If git_config['repo'] is in the wrong format.
CalledProcessError: If failed to clone git repo.
"""
is_https = git_config["repo"].startswith("https://git-codecommit")
is_ssh = git_config["repo"].startswith("ssh://git-codecommit")
if not is_https and not is_ssh:
raise ValueError("Invalid Git url provided.")
if "2FA_enabled" in git_config:
warnings.warn("CodeCommit does not support 2FA, '2FA_enabled' will be ignored.")
if "token" in git_config:
warnings.warn("There are no tokens in CodeCommit, the token provided will be ignored.")
if is_ssh:
_clone_command_for_ssh(git_config, dest_dir)
else:
_clone_command_for_codecommit_https(git_config, dest_dir)
def _clone_command_for_codecommit_https(git_config, dest_dir):
"""
Args:
git_config:
dest_dir:
"""
updated_url = git_config["repo"]
if "username" in git_config and "password" in git_config:
updated_url = _insert_username_and_password_to_repo_url(
url=git_config["repo"], username=git_config["username"], password=git_config["password"]
)
elif "username" in git_config or "password" in git_config:
warnings.warn("Credentials provided in git config will be ignored.")
_run_clone_command(updated_url, dest_dir)
def _run_clone_command(repo_url, dest_dir):
"""Run the 'git clone' command with the repo url and the directory to clone
the repo into.
Args:
repo_url (str): Git repo url to be cloned.
dest_dir: (str): Local path where the repo should be cloned into.
Raises:
CalledProcessError: If failed to clone git repo.
"""
my_env = os.environ.copy()
if repo_url.startswith("https://"):
my_env["GIT_TERMINAL_PROMPT"] = "0"
subprocess.check_call(["git", "clone", repo_url, dest_dir], env=my_env)
elif repo_url.startswith("git@"):
with tempfile.NamedTemporaryFile() as sshnoprompt:
write_pipe = open(sshnoprompt.name, "w")
write_pipe.write("ssh -oBatchMode=yes $@")
write_pipe.close()
# 511 in decimal is same as 777 in octal
os.chmod(sshnoprompt.name, 511)
my_env["GIT_SSH"] = sshnoprompt.name
subprocess.check_call(["git", "clone", repo_url, dest_dir], env=my_env)
def _insert_token_to_repo_url(url, token):
"""Insert the token to the Git repo url, to make a component of the git
clone command. This method can only be called when repo_url is an https url.
Args:
url (str): Git repo url where the token should be inserted into.
token (str): Token to be inserted.
Returns:
str: the component needed for the git clone command.
"""
index = len("https://")
if url.find(token) == index:
return url
return url.replace("https://", "https://" + token + "@")
def _insert_username_and_password_to_repo_url(url, username, password):
"""Insert the username and the password to the Git repo url, to make a
component of the git clone command. This method can only be called when
repo_url is an https url.
Args:
url (str): Git repo url where the token should be inserted into.
username (str): Username to be inserted.
password (str): Password to be inserted.
Returns:
str: the component needed for the git clone command.
"""
password = urllib.parse.quote_plus(password)
# urllib parses ' ' as '+', but what we need is '%20' here
password = password.replace("+", "%20")
index = len("https://")
return url[:index] + username + ":" + password + "@" + url[index:]
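# Hedged example (hypothetical values): username "user" and password "p@ss word"
# yield "https://user:p%40ss%[email protected]/user/repo.git" -- quote_plus()
# turns "@" into "%40" and the space into "+", which the replace() above then
# rewrites to "%20".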
def _checkout_branch_and_commit(git_config, dest_dir):
"""Checkout the required branch and commit.
Args:
git_config (dict[str, str]): Git configurations used for cloning files,
including ``repo``, ``branch`` and ``commit``.
dest_dir (str): the directory where the repo is cloned
Raises:
CalledProcessError: If 1. failed to checkout the required branch 2.
failed to checkout the required commit
"""
if "branch" in git_config:
subprocess.check_call(args=["git", "checkout", git_config["branch"]], cwd=str(dest_dir))
if "commit" in git_config:
subprocess.check_call(args=["git", "checkout", git_config["commit"]], cwd=str(dest_dir))
|
py | 1a496c41ce73346cb25b238e809d37754dbbad29 | #! /usr/bin/env python
#
# Example program using irc.client.
#
# Copyright (C) 1999-2002 Joel Rosdahl
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Joel Rosdahl <[email protected]>
#
# servermap connects to an IRC server and finds out what other IRC
# servers there are in the net and prints a tree-like map of their
# interconnections.
#
# Example:
#
# % ./servermap irc.dal.net somenickname
# Connecting to server...
# Getting links...
#
# 26 servers (18 leaves and 8 hubs)
#
# splitrock.tx.us.dal.net
# `-vader.ny.us.dal.net
# |-twisted.ma.us.dal.net
# |-sodre.nj.us.dal.net
# |-glass.oh.us.dal.net
# |-distant.ny.us.dal.net
# | |-algo.se.eu.dal.net
# | | |-borg.se.eu.dal.net
# | | | `-ced.se.eu.dal.net
# | | |-viking.no.eu.dal.net
# | | |-inco.fr.eu.dal.net
# | | |-paranoia.se.eu.dal.net
# | | |-gaston.se.eu.dal.net
# | | | `-powertech.no.eu.dal.net
# | | `-algo-u.se.eu.dal.net
# | |-philly.pa.us.dal.net
# | |-liberty.nj.us.dal.net
# | `-jade.va.us.dal.net
# `-journey.ca.us.dal.net
# |-ion.va.us.dal.net
# |-dragons.ca.us.dal.net
# |-toronto.on.ca.dal.net
# | `-netropolis-r.uk.eu.dal.net
# | |-traced.de.eu.dal.net
# | `-lineone.uk.eu.dal.net
# `-omega.ca.us.dal.net
import irc.client
import sys
def on_connect(connection, event):
sys.stdout.write("\nGetting links...")
sys.stdout.flush()
connection.links()
def on_passwdmismatch(connection, event):
print("Password required.")
sys.exit(1)
def on_links(connection, event):
global links
links.append((event.arguments[0],
event.arguments[1],
event.arguments[2]))
def on_endoflinks(connection, event):
global links
print("\n")
m = {}
for (to_node, from_node, desc) in links:
if from_node != to_node:
m[from_node] = m.get(from_node, []) + [to_node]
if connection.get_server_name() in m:
if len(m[connection.get_server_name()]) == 1:
hubs = len(m) - 1
else:
hubs = len(m)
else:
hubs = 0
print("%d servers (%d leaves and %d hubs)\n" % (len(links), len(links)-hubs, hubs))
print_tree(0, [], connection.get_server_name(), m)
connection.quit("Using irc.client.py")
def on_disconnect(connection, event):
sys.exit(0)
def indent_string(level, active_levels, last):
if level == 0:
return ""
s = ""
for i in range(level-1):
if i in active_levels:
s = s + "| "
else:
s = s + " "
if last:
s = s + "`-"
else:
s = s + "|-"
return s
def print_tree(level, active_levels, root, map, last=0):
sys.stdout.write(indent_string(level, active_levels, last)
+ root + "\n")
if root in map:
list = map[root]
for r in list[:-1]:
print_tree(level+1, active_levels[:]+[level], r, map)
print_tree(level+1, active_levels[:], list[-1], map, 1)
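# Rough rendering sketch: for map = {'a': ['b', 'c']},
# print_tree(0, [], 'a', map) prints
#   a
#   |-b
#   `-c
# active_levels records which ancestor levels still have siblings below, so
# indent_string() keeps drawing their '| ' continuation bars.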
def main():
global links
if len(sys.argv) != 3:
print("Usage: servermap <server[:port]> <nickname>")
sys.exit(1)
links = []
s = sys.argv[1].split(":", 1)
server = s[0]
if len(s) == 2:
try:
port = int(s[1])
except ValueError:
print("Error: Erroneous port.")
sys.exit(1)
else:
port = 6667
nickname = sys.argv[2]
client = irc.client.IRC()
sys.stdout.write("Connecting to server...")
sys.stdout.flush()
try:
c = client.server().connect(server, port, nickname)
except irc.client.ServerConnectionError as x:
print(x)
sys.exit(1)
c.add_global_handler("welcome", on_connect)
c.add_global_handler("passwdmismatch", on_passwdmismatch)
c.add_global_handler("links", on_links)
c.add_global_handler("endoflinks", on_endoflinks)
c.add_global_handler("disconnect", on_disconnect)
client.process_forever()
if __name__ == '__main__':
main()
|
py | 1a496c94715d1fc9a25a1e1fa5ca35ff65efddde | # 5550
# <!*[^<>]*>
# POLYNOMIAL
# nums:4
# POLYNOMIAL AttackString:"<"+"!"*10000+"! _1_POA(i)"
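# Rough intuition for the slowdown: the attack string never contains a closing
# ">", so every match attempt fails, and before failing the engine retries many
# split points -- "!*" and "[^<>]*" can each absorb any share of the long "!"
# run -- which is roughly O(n^2) work in the length of that run.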
import re
from time import perf_counter
regex = """<!*[^<>]*>"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "<" + "!" * i * 10000 + "! _1_POA(i)"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *10000}: took {DURATION} seconds!") |
py | 1a496cbe6f8dfc1c978481489d5579f107bf1efb | #!/usr/bin/env python3
import os
import re
import sys
import urllib.request
from time import strptime
# Regular Expressions
CVE_RE = re.compile('CVE-[0-9]{4}-[0-9]{4,}')
HTML_RE = re.compile('<[^<]+?>')
BOUNTY_RE = re.compile(r'\[\$([0-9\.]|TBD|N/A)+\]')
BUG_RE = re.compile(r'\[[0-9]+\]')
DESCRIPTION_RE = re.compile(r'[:-]{0,1} [^\.]*(\.|\s)')
CLEAN_RE = re.compile(r'(\]|\[|\: |\- |\$)')
ANNOUNCED_RE = re.compile(r'[0-9]{4}-[0-9]{2}-[0-9]{2}')
DATE_RE = re.compile(r'\w+, \w+ ([0-9]{1}|[0-9]{2}), [0-9]{4}')
def format_date(publish_date):
fields = publish_date.split()
sub = fields[1][:3]
month = str(strptime(sub,'%b').tm_mon)
if len(month) == 1:
month = '0' + month
year = fields[3]
day = fields[2][:-1]
if len(day) == 1:
day = '0' + day
return "{}-{}-{}".format(year, month, day)
SKELETON = list()
with open("../spec/data/cve-skeleton.yml", "r") as f:
SKELETON = f.readlines()
def get_skeleton(cve, description, bounty, bug, announced):
""" Return the skeleton of a CVE with the given fields filled. """
global SKELETON
skeleton = SKELETON.copy()
for i in range(len(skeleton)):
if skeleton[i] == "CVE:\n":
skeleton[i] = "CVE: {:s}\n".format(cve)
elif skeleton[i] == "description: |\n":
skeleton[i] = "description: |\n {:s}\n".format(description)
elif skeleton[i] == "bugs: []\n":
skeleton[i]= "bugs: [{:s}]\n".format(bug)
elif skeleton[i] == " amt:\n":
if bounty == "N/A":
skeleton[i] = " amt: 0\n"
elif bounty == "TBD":
skeleton[i+1] = " announced: TBD\n"
else:
skeleton[i] = " amt: {:s}\n".format(bounty)
elif skeleton[i] == "announced:\n":
skeleton[i] = "announced: {:s}\n".format(announced)
return "".join(skeleton)
def clean_line(line):
""" Decode bytestrings and string newlines. """
return line.decode().strip("\n")
def clean_match(text):
""" Clean up the text by removing matches in CLEAN_RE. """
return CLEAN_RE.sub('', text)
def get_page(url):
""" Return the raw HTML of the given URL. """
return urllib.request.urlopen(url)
if __name__ == "__main__":
url = sys.argv[1]
page = get_page(url)
contents = page.readlines()
matches = list()
publish_date = ""
for line in contents:
line = HTML_RE.sub('', clean_line(line))
if CVE_RE.search(line):
matches.append(line)
if DATE_RE.search(line) and not publish_date:
publish_date = line
matches = list(set(matches))
# For each CVE...
for cve in matches:
# Parse out the fields we care about...
try:
bounty = clean_match(BOUNTY_RE.search(cve).group(0))
except:
bounty = ""
bug_id = clean_match(BUG_RE.search(cve).group(0))
cve_id = clean_match(CVE_RE.search(cve).group(0))
try:
description = clean_match(DESCRIPTION_RE.search(cve).group(0))
except:
print("ERROR: Regex failed for Description in " + str(cve_id))
try:
announced = clean_match(ANNOUNCED_RE.search(cve).group(0))
except:
announced = format_date(publish_date)
# And write the new CVE to disk.
cve_path = "../cves/{:s}.yml".format(cve_id)
if os.path.exists(cve_path):
print("Skipping CVE: {:s}.".format(cve_id))
else:
skeleton = get_skeleton(cve_id, description, bounty, bug_id, announced)
with open("../cves/" + cve_id + ".yml", "w") as f:
f.write(skeleton)
print(" Created CVE: {:s}".format(cve_path))
|
py | 1a496d14843086be8097fd427cb3474ac594df0c | import unittest
from argparse import ArgumentTypeError
from streamlink.utils.args import (
boolean, comma_list, comma_list_filter, filesize, keyvalue, num
)
class TestUtilsArgs(unittest.TestCase):
def test_boolean_true(self):
self.assertEqual(boolean('1'), True)
self.assertEqual(boolean('on'), True)
self.assertEqual(boolean('true'), True)
self.assertEqual(boolean('yes'), True)
self.assertEqual(boolean('Yes'), True)
def test_boolean_false(self):
self.assertEqual(boolean('0'), False)
self.assertEqual(boolean('false'), False)
self.assertEqual(boolean('no'), False)
self.assertEqual(boolean('No'), False)
self.assertEqual(boolean('off'), False)
def test_boolean_error(self):
with self.assertRaises(ArgumentTypeError):
boolean('yesno')
with self.assertRaises(ArgumentTypeError):
boolean('FOO')
with self.assertRaises(ArgumentTypeError):
boolean('2')
def test_comma_list(self):
# (values, result)
test_data = [
('foo.bar,example.com', ['foo.bar', 'example.com']),
('/var/run/foo,/var/run/bar', ['/var/run/foo', '/var/run/bar']),
('foo bar,24', ['foo bar', '24']),
('hls', ['hls']),
]
for _v, _r in test_data:
self.assertEqual(comma_list(_v), _r)
def test_comma_list_filter(self):
# (acceptable, values, result)
test_data = [
(['foo', 'bar', 'com'], 'foo,bar,example.com', ['foo', 'bar']),
(['/var/run/foo', 'FO'], '/var/run/foo,/var/run/bar',
['/var/run/foo']),
(['hls', 'hls5', 'dash'], 'hls,hls5', ['hls', 'hls5']),
(['EU', 'RU'], 'DE,FR,RU,US', ['RU']),
]
for _a, _v, _r in test_data:
func = comma_list_filter(_a)
self.assertEqual(func(_v), _r)
def test_filesize(self):
self.assertEqual(filesize('2000'), 2000)
self.assertEqual(filesize('11KB'), 1024 * 11)
self.assertEqual(filesize('12MB'), 1024 * 1024 * 12)
self.assertEqual(filesize('1KB'), 1024)
self.assertEqual(filesize('1MB'), 1024 * 1024)
self.assertEqual(filesize('2KB'), 1024 * 2)
def test_filesize_error(self):
with self.assertRaises(ValueError):
filesize('FOO')
with self.assertRaises(ValueError):
filesize('0.00000')
def test_keyvalue(self):
# (value, result)
test_data = [
('X-Forwarded-For=127.0.0.1', ('X-Forwarded-For', '127.0.0.1')),
('Referer=https://foo.bar', ('Referer', 'https://foo.bar')),
(
'User-Agent=Mozilla/5.0 (X11; Linux x86_64; rv:60.0)'
' Gecko/20100101 Firefox/60.0',
('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) '
'Gecko/20100101 Firefox/60.0')
),
('domain=example.com', ('domain', 'example.com')),
]
for _v, _r in test_data:
self.assertEqual(keyvalue(_v), _r)
def test_keyvalue_error(self):
with self.assertRaises(ValueError):
keyvalue('127.0.0.1')
def test_num(self):
# (value, func, result)
test_data = [
('33', num(int, 5, 120), 33),
('234', num(int, min=10), 234),
('50.222', num(float, 10, 120), 50.222),
]
for _v, _f, _r in test_data:
self.assertEqual(_f(_v), _r)
def test_num_error(self):
with self.assertRaises(ArgumentTypeError):
func = num(int, 5, 10)
func('3')
with self.assertRaises(ArgumentTypeError):
func = num(int, max=11)
func('12')
with self.assertRaises(ArgumentTypeError):
func = num(int, min=15)
func('8')
with self.assertRaises(ArgumentTypeError):
func = num(float, 10, 20)
func('40.222')
|
py | 1a496d18ea61a1f2b83cc477b4b29a27a83e207e | #!/usr/bin/env vpython
#
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import collections
import json
import logging
import os
import pipes
import re
import shutil
import signal
import socket
import sys
import tempfile
# The following non-std imports are fetched via vpython. See the list at
# //.vpython
import dateutil.parser # pylint: disable=import-error
import jsonlines # pylint: disable=import-error
import psutil # pylint: disable=import-error
import six
CHROMIUM_SRC_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
# Use the android test-runner's gtest results support library for generating
# output json ourselves.
sys.path.insert(0, os.path.join(CHROMIUM_SRC_PATH, 'build', 'android'))
from pylib.base import base_test_result # pylint: disable=import-error
from pylib.base import result_sink # pylint: disable=import-error
from pylib.results import json_results # pylint: disable=import-error
if six.PY2:
import subprocess32 as subprocess # pylint: disable=import-error
else:
import subprocess # pylint: disable=import-error,wrong-import-order
DEFAULT_CROS_CACHE = os.path.abspath(
os.path.join(CHROMIUM_SRC_PATH, 'build', 'cros_cache'))
CHROMITE_PATH = os.path.abspath(
os.path.join(CHROMIUM_SRC_PATH, 'third_party', 'chromite'))
CROS_RUN_TEST_PATH = os.path.abspath(
os.path.join(CHROMITE_PATH, 'bin', 'cros_run_test'))
# This is a special hostname that resolves to a different DUT in the lab
# depending on which lab machine you're on.
LAB_DUT_HOSTNAME = 'variable_chromeos_device_hostname'
SYSTEM_LOG_LOCATIONS = [
'/var/log/chrome/',
'/var/log/messages',
'/var/log/ui/',
]
TAST_DEBUG_DOC = 'https://bit.ly/2LgvIXz'
class TestFormatError(Exception):
pass
class RemoteTest(object):
# This is a basic shell script that can be appended to in order to invoke the
# test on the device.
BASIC_SHELL_SCRIPT = [
'#!/bin/sh',
# /home and /tmp are mounted with "noexec" in the device, but some of our
# tools and tests use those dirs as a workspace (eg: vpython downloads
# python binaries to ~/.vpython-root and /tmp/vpython_bootstrap).
# /usr/local/tmp doesn't have this restriction, so change the location of
# the home and temp dirs for the duration of the test.
'export HOME=/usr/local/tmp',
'export TMPDIR=/usr/local/tmp',
]
def __init__(self, args, unknown_args):
self._additional_args = unknown_args
self._path_to_outdir = args.path_to_outdir
self._test_launcher_summary_output = args.test_launcher_summary_output
self._logs_dir = args.logs_dir
self._use_vm = args.use_vm
self._rdb_client = result_sink.TryInitClient()
self._retries = 0
self._timeout = None
self._test_launcher_shard_index = args.test_launcher_shard_index
self._test_launcher_total_shards = args.test_launcher_total_shards
# The location on disk of a shell script that can be optionally used to
# invoke the test on the device. If it's not set, we assume self._test_cmd
# contains the test invocation.
self._on_device_script = None
self._test_cmd = [
CROS_RUN_TEST_PATH,
'--board',
args.board,
'--cache-dir',
args.cros_cache,
]
if args.use_vm:
self._test_cmd += [
'--start',
          # Don't persist any filesystem changes after the VM shuts down.
'--copy-on-write',
]
else:
self._test_cmd += [
'--device', args.device if args.device else LAB_DUT_HOSTNAME
]
if args.logs_dir:
for log in SYSTEM_LOG_LOCATIONS:
self._test_cmd += ['--results-src', log]
self._test_cmd += [
'--results-dest-dir',
os.path.join(args.logs_dir, 'system_logs')
]
if args.flash:
self._test_cmd += ['--flash']
if args.public_image:
self._test_cmd += ['--public-image']
# This environment variable is set for tests that have been instrumented
# for code coverage. Its incoming value is expected to be a location
# inside a subdirectory of result_dir above. This is converted to an
# absolute path that the vm is able to write to, and passed in the
# --results-src flag to cros_run_vm_test for copying out of the vm before
# its termination.
self._llvm_profile_var = None
if os.environ.get('LLVM_PROFILE_FILE'):
_, llvm_profile_file = os.path.split(os.environ['LLVM_PROFILE_FILE'])
self._llvm_profile_var = '/tmp/profraw/%s' % llvm_profile_file
# This should make the vm test runner exfil the profiling data.
self._test_cmd += ['--results-src', '/tmp/profraw']
self._test_env = setup_env()
@property
def suite_name(self):
raise NotImplementedError('Child classes need to define suite name.')
@property
def test_cmd(self):
return self._test_cmd
def write_test_script_to_disk(self, script_contents):
# Since we're using an on_device_script to invoke the test, we'll need to
# set cwd.
self._test_cmd += [
'--remote-cmd',
'--cwd',
os.path.relpath(self._path_to_outdir, CHROMIUM_SRC_PATH),
]
logging.info('Running the following command on the device:')
logging.info('\n' + '\n'.join(script_contents))
fd, tmp_path = tempfile.mkstemp(suffix='.sh', dir=self._path_to_outdir)
os.fchmod(fd, 0o755)
with os.fdopen(fd, 'wb') as f:
f.write('\n'.join(script_contents) + '\n')
return tmp_path
def run_test(self):
# Traps SIGTERM and kills all child processes of cros_run_test when it's
# caught. This will allow us to capture logs from the device if a test hangs
# and gets timeout-killed by swarming. See also:
# https://chromium.googlesource.com/infra/luci/luci-py/+/master/appengine/swarming/doc/Bot.md#graceful-termination_aka-the-sigterm-and-sigkill-dance
test_proc = None
def _kill_child_procs(trapped_signal, _):
logging.warning('Received signal %d. Killing child processes of test.',
trapped_signal)
if not test_proc or not test_proc.pid:
# This shouldn't happen?
logging.error('Test process not running.')
return
for child in psutil.Process(test_proc.pid).children():
logging.warning('Killing process %s', child)
child.kill()
signal.signal(signal.SIGTERM, _kill_child_procs)
for i in range(self._retries + 1):
logging.info('########################################')
logging.info('Test attempt #%d', i)
logging.info('########################################')
test_proc = subprocess.Popen(
self._test_cmd,
stdout=sys.stdout,
stderr=sys.stderr,
env=self._test_env)
try:
test_proc.wait(timeout=self._timeout)
except subprocess.TimeoutExpired: # pylint: disable=no-member
logging.error('Test timed out. Sending SIGTERM.')
# SIGTERM the proc and wait 10s for it to close.
test_proc.terminate()
try:
test_proc.wait(timeout=10)
except subprocess.TimeoutExpired: # pylint: disable=no-member
# If it hasn't closed in 10s, SIGKILL it.
logging.error('Test did not exit in time. Sending SIGKILL.')
test_proc.kill()
test_proc.wait()
      logging.info('Test exited with %d.', test_proc.returncode)
if test_proc.returncode == 0:
break
ret = self.post_run(test_proc.returncode)
# Allow post_run to override test proc return code. (Useful when the host
# side Tast bin returns 0 even for failed tests.)
if ret is not None:
return ret
return test_proc.returncode
def post_run(self, return_code):
if self._on_device_script:
os.remove(self._on_device_script)
# Create a simple json results file for a test run. The results will contain
# only one test (suite_name), and will either be a PASS or FAIL depending on
# return_code.
if self._test_launcher_summary_output:
result = (
base_test_result.ResultType.FAIL
if return_code else base_test_result.ResultType.PASS)
suite_result = base_test_result.BaseTestResult(self.suite_name, result)
run_results = base_test_result.TestRunResults()
run_results.AddResult(suite_result)
with open(self._test_launcher_summary_output, 'w') as f:
json.dump(json_results.GenerateResultsDict([run_results]), f)
@staticmethod
def get_artifacts(path):
"""Crawls a given directory for file artifacts to attach to a test.
Args:
path: Path to a directory to search for artifacts.
Returns:
A dict mapping name of the artifact to its absolute filepath.
"""
artifacts = {}
for dirpath, _, filenames in os.walk(path):
for f in filenames:
artifact_path = os.path.join(dirpath, f)
artifacts[os.path.relpath(artifact_path, path)] = {
'filePath': artifact_path,
}
return artifacts
class TastTest(RemoteTest):
def __init__(self, args, unknown_args):
super(TastTest, self).__init__(args, unknown_args)
self._suite_name = args.suite_name
self._tast_vars = args.tast_vars
self._tests = args.tests
# The CQ passes in '--gtest_filter' when specifying tests to skip. Store it
# here and parse it later to integrate it into Tast executions.
self._gtest_style_filter = args.gtest_filter
self._conditional = args.conditional
self._should_strip = args.strip_chrome
self._deploy_lacros = args.deploy_lacros
if self._deploy_lacros and self._should_strip:
raise TestFormatError(
'--strip-chrome is only applicable to ash-chrome because '
'lacros-chrome deployment uses --nostrip by default, so it cannot '
          'be specified with --deploy-lacros.')
if not self._llvm_profile_var and not self._logs_dir:
# The host-side Tast bin returns 0 when tests fail, so we need to capture
# and parse its json results to reliably determine if tests fail.
raise TestFormatError(
'When using the host-side Tast bin, "--logs-dir" must be passed in '
'order to parse its results.')
# If the first test filter is negative, it should be safe to assume all of
# them are, so just test the first filter.
if self._gtest_style_filter and self._gtest_style_filter[0] == '-':
raise TestFormatError('Negative test filters not supported for Tast.')
@property
def suite_name(self):
return self._suite_name
def build_test_command(self):
unsupported_args = [
'--test-launcher-retry-limit',
'--test-launcher-batch-limit',
'--gtest_repeat',
]
for unsupported_arg in unsupported_args:
if any(arg.startswith(unsupported_arg) for arg in self._additional_args):
logging.info(
'%s not supported for Tast tests. The arg will be ignored.',
unsupported_arg)
self._additional_args = [
arg for arg in self._additional_args
if not arg.startswith(unsupported_arg)
]
# Lacros deployment mounts itself by default.
self._test_cmd.extend(
['--deploy-lacros'] if self._deploy_lacros else ['--deploy', '--mount'])
self._test_cmd += [
'--build-dir',
os.path.relpath(self._path_to_outdir, CHROMIUM_SRC_PATH)
] + self._additional_args
# Coverage tests require some special pre-test setup, so use an
# on_device_script in that case. For all other tests, use cros_run_test's
# built-in '--tast' option. This gives us much better results reporting.
if self._llvm_profile_var:
# Build the shell script that will be used on the device to invoke the
# test.
device_test_script_contents = self.BASIC_SHELL_SCRIPT[:]
device_test_script_contents += [
'echo "LLVM_PROFILE_FILE=%s" >> /etc/chrome_dev.conf' %
(self._llvm_profile_var)
]
local_test_runner_cmd = ['local_test_runner', '-waituntilready']
if self._use_vm:
# If we're running tests in VMs, tell the test runner to skip tests that
# aren't compatible.
local_test_runner_cmd.append('-extrauseflags=tast_vm')
if self._conditional:
local_test_runner_cmd.append(pipes.quote(self._conditional))
else:
local_test_runner_cmd.extend(self._tests)
device_test_script_contents.append(' '.join(local_test_runner_cmd))
self._on_device_script = self.write_test_script_to_disk(
device_test_script_contents)
self._test_cmd += [
'--files',
os.path.relpath(self._on_device_script), '--',
'./' + os.path.relpath(self._on_device_script, self._path_to_outdir)
]
else:
# Capture tast's results in the logs dir as well.
if self._logs_dir:
self._test_cmd += [
'--results-dir',
self._logs_dir,
]
self._test_cmd += [
'--tast-total-shards=%d' % self._test_launcher_total_shards,
'--tast-shard-index=%d' % self._test_launcher_shard_index,
]
# If we're using a test filter, replace the contents of the Tast
# conditional with a long list of "name:test" expressions, one for each
# test in the filter.
if self._gtest_style_filter:
if self._conditional or self._tests:
logging.warning(
'Presence of --gtest_filter will cause the specified Tast '
'conditional or test list to be ignored.')
names = []
for test in self._gtest_style_filter.split(':'):
names.append('"name:%s"' % test)
self._conditional = '(' + ' || '.join(names) + ')'
if self._conditional:
# Don't use pipes.quote() here. Something funky happens with the arg
# as it gets passed down from cros_run_test to tast. (Tast picks up the
# escaping single quotes and complains that the conditional "must be
# within parentheses".)
self._test_cmd.append('--tast=%s' % self._conditional)
else:
self._test_cmd.append('--tast')
self._test_cmd.extend(self._tests)
for v in self._tast_vars or []:
self._test_cmd.extend(['--tast-var', v])
# Mounting ash-chrome gives it enough disk space to not need stripping,
# but only for one not instrumented with code coverage.
# Lacros uses --nostrip by default, so there is no need to specify.
if not self._deploy_lacros and not self._should_strip:
self._test_cmd.append('--nostrip')
def post_run(self, return_code):
# If we don't need to parse the host-side Tast tool's results, fall back to
# the parent method's default behavior.
if self._llvm_profile_var:
return super(TastTest, self).post_run(return_code)
tast_results_path = os.path.join(self._logs_dir, 'streamed_results.jsonl')
if not os.path.exists(tast_results_path):
logging.error(
'Tast results not found at %s. Falling back to generic result '
'reporting.', tast_results_path)
return super(TastTest, self).post_run(return_code)
# See the link below for the format of the results:
# https://godoc.org/chromium.googlesource.com/chromiumos/platform/tast.git/src/chromiumos/cmd/tast/run#TestResult
with jsonlines.open(tast_results_path) as reader:
tast_results = collections.deque(reader)
suite_results = base_test_result.TestRunResults()
for test in tast_results:
errors = test['errors']
start, end = test['start'], test['end']
# Use dateutil to parse the timestamps since datetime can't handle
# nanosecond precision.
duration = dateutil.parser.parse(end) - dateutil.parser.parse(start)
duration_ms = duration.total_seconds() * 1000
if bool(test['skipReason']):
result = base_test_result.ResultType.SKIP
elif errors:
result = base_test_result.ResultType.FAIL
else:
result = base_test_result.ResultType.PASS
error_log = ''
if errors:
# See the link below for the format of these errors:
# https://godoc.org/chromium.googlesource.com/chromiumos/platform/tast.git/src/chromiumos/tast/testing#Error
for err in errors:
error_log += err['stack'].encode('utf-8') + '\n'
error_log += (
"\nIf you're unsure why this test failed, consult the steps "
'outlined in\n%s\n' % TAST_DEBUG_DOC)
base_result = base_test_result.BaseTestResult(
test['name'], result, duration=duration_ms, log=error_log)
suite_results.AddResult(base_result)
self._maybe_handle_perf_results(test['name'])
if self._rdb_client:
        # Walk the contents of the test's "outDir" and attach any file found
# inside as an RDB 'artifact'. (This could include system logs, screen
# shots, etc.)
artifacts = self.get_artifacts(test['outDir'])
self._rdb_client.Post(test['name'], result, error_log, artifacts)
if self._rdb_client and self._logs_dir:
# Attach artifacts from the device that don't apply to a single test.
artifacts = self.get_artifacts(
os.path.join(self._logs_dir, 'system_logs'))
artifacts.update(
self.get_artifacts(os.path.join(self._logs_dir, 'crashes')))
self._rdb_client.ReportInvocationLevelArtifacts(artifacts)
if self._test_launcher_summary_output:
with open(self._test_launcher_summary_output, 'w') as f:
json.dump(json_results.GenerateResultsDict([suite_results]), f)
if not suite_results.DidRunPass():
return 1
elif return_code:
logging.warning(
'No failed tests found, but exit code of %d was returned from '
'cros_run_test.', return_code)
return return_code
return 0
def _maybe_handle_perf_results(self, test_name):
"""Prepares any perf results from |test_name| for process_perf_results.
- process_perf_results looks for top level directories containing a
perf_results.json file and a test_results.json file. The directory names
are used as the benchmark names.
- If a perf_results.json or results-chart.json file exists in the
|test_name| results directory, a top level directory is created and the
perf results file is copied to perf_results.json.
- A trivial test_results.json file is also created to indicate that the test
succeeded (this function would not be called otherwise).
- When process_perf_results is run, it will find the expected files in the
named directory and upload the benchmark results.
"""
perf_results = os.path.join(self._logs_dir, 'tests', test_name,
'perf_results.json')
# TODO(stevenjb): Remove check for crosbolt results-chart.json file.
if not os.path.exists(perf_results):
perf_results = os.path.join(self._logs_dir, 'tests', test_name,
'results-chart.json')
if os.path.exists(perf_results):
benchmark_dir = os.path.join(self._logs_dir, test_name)
if not os.path.isdir(benchmark_dir):
os.makedirs(benchmark_dir)
shutil.copyfile(perf_results,
os.path.join(benchmark_dir, 'perf_results.json'))
# process_perf_results.py expects a test_results.json file.
test_results = {'valid': True, 'failures': []}
with open(os.path.join(benchmark_dir, 'test_results.json'), 'w') as out:
json.dump(test_results, out)
class GTestTest(RemoteTest):
# The following list corresponds to paths that should not be copied over to
# the device during tests. In other words, these files are only ever used on
# the host.
_FILE_IGNORELIST = [
re.compile(r'.*build/android.*'),
re.compile(r'.*build/chromeos.*'),
re.compile(r'.*build/cros_cache.*'),
# The following matches anything under //testing/ that isn't under
# //testing/buildbot/filters/.
re.compile(r'.*testing/(?!buildbot/filters).*'),
re.compile(r'.*third_party/chromite.*'),
]
def __init__(self, args, unknown_args):
super(GTestTest, self).__init__(args, unknown_args)
self._test_exe = args.test_exe
self._runtime_deps_path = args.runtime_deps_path
self._vpython_dir = args.vpython_dir
self._on_device_script = None
self._env_vars = args.env_var
self._stop_ui = args.stop_ui
self._trace_dir = args.trace_dir
@property
def suite_name(self):
return self._test_exe
def build_test_command(self):
# To keep things easy for us, ensure both types of output locations are
# the same.
if self._test_launcher_summary_output and self._logs_dir:
json_out_dir = os.path.dirname(self._test_launcher_summary_output) or '.'
if os.path.abspath(json_out_dir) != os.path.abspath(self._logs_dir):
raise TestFormatError(
'--test-launcher-summary-output and --logs-dir must point to '
'the same directory.')
if self._test_launcher_summary_output:
result_dir, result_file = os.path.split(
self._test_launcher_summary_output)
# If args.test_launcher_summary_output is a file in cwd, result_dir will
# be an empty string, so replace it with '.' when this is the case so
# cros_run_test can correctly handle it.
if not result_dir:
result_dir = '.'
device_result_file = '/tmp/%s' % result_file
self._test_cmd += [
'--results-src',
device_result_file,
'--results-dest-dir',
result_dir,
]
if self._trace_dir and self._logs_dir:
trace_path = os.path.dirname(self._trace_dir) or '.'
if os.path.abspath(trace_path) != os.path.abspath(self._logs_dir):
raise TestFormatError(
'--trace-dir and --logs-dir must point to the same directory.')
if self._trace_dir:
trace_path, trace_dirname = os.path.split(self._trace_dir)
device_trace_dir = '/tmp/%s' % trace_dirname
self._test_cmd += [
'--results-src',
device_trace_dir,
'--results-dest-dir',
trace_path,
]
# Build the shell script that will be used on the device to invoke the test.
# Stored here as a list of lines.
device_test_script_contents = self.BASIC_SHELL_SCRIPT[:]
if self._llvm_profile_var:
device_test_script_contents += [
'export LLVM_PROFILE_FILE=%s' % self._llvm_profile_var,
]
for var_name, var_val in self._env_vars:
device_test_script_contents += ['export %s=%s' % (var_name, var_val)]
if self._vpython_dir:
vpython_path = os.path.join(self._path_to_outdir, self._vpython_dir,
'vpython')
cpython_path = os.path.join(self._path_to_outdir, self._vpython_dir,
'bin', 'python')
if not os.path.exists(vpython_path) or not os.path.exists(cpython_path):
raise TestFormatError(
'--vpython-dir must point to a dir with both infra/python/cpython '
'and infra/tools/luci/vpython installed.')
vpython_spec_path = os.path.relpath(
os.path.join(CHROMIUM_SRC_PATH, '.vpython'), self._path_to_outdir)
# Initialize the vpython cache. This can take 10-20s, and some tests
# can't afford to wait that long on the first invocation.
device_test_script_contents.extend([
'export PATH=$PWD/%s:$PWD/%s/bin/:$PATH' %
(self._vpython_dir, self._vpython_dir),
'vpython -vpython-spec %s -vpython-tool install' %
(vpython_spec_path),
])
test_invocation = ('LD_LIBRARY_PATH=./ ./%s --test-launcher-shard-index=%d '
'--test-launcher-total-shards=%d' %
(self._test_exe, self._test_launcher_shard_index,
self._test_launcher_total_shards))
if self._test_launcher_summary_output:
test_invocation += ' --test-launcher-summary-output=%s' % (
device_result_file)
if self._trace_dir:
device_test_script_contents.extend([
'rm -rf %s' % device_trace_dir,
'su chronos -c -- "mkdir -p %s"' % device_trace_dir,
])
test_invocation += ' --trace-dir=%s' % device_trace_dir
if self._additional_args:
test_invocation += ' %s' % ' '.join(self._additional_args)
if self._stop_ui:
device_test_script_contents += [
'stop ui',
]
# The UI service on the device owns the chronos user session, so shutting
# it down as chronos kills the entire execution of the test. So we'll have
# to run as root up until the test invocation.
test_invocation = 'su chronos -c -- "%s"' % test_invocation
# And we'll need to chown everything since cros_run_test's "--as-chronos"
# option normally does that for us.
device_test_script_contents.append('chown -R chronos: ../..')
else:
self._test_cmd += [
# Some tests fail as root, so run as the less privileged user
# 'chronos'.
'--as-chronos',
]
device_test_script_contents.append(test_invocation)
self._on_device_script = self.write_test_script_to_disk(
device_test_script_contents)
runtime_files = [os.path.relpath(self._on_device_script)]
runtime_files += self._read_runtime_files()
if self._vpython_dir:
# --vpython-dir is relative to the out dir, but --files expects paths
# relative to src dir, so fix the path up a bit.
runtime_files.append(
os.path.relpath(
os.path.abspath(
os.path.join(self._path_to_outdir, self._vpython_dir)),
CHROMIUM_SRC_PATH))
# TODO(bpastene): Add the vpython spec to the test's runtime deps instead
# of handling it here.
runtime_files.append('.vpython')
for f in runtime_files:
self._test_cmd.extend(['--files', f])
self._test_cmd += [
'--',
'./' + os.path.relpath(self._on_device_script, self._path_to_outdir)
]
def _read_runtime_files(self):
if not self._runtime_deps_path:
return []
abs_runtime_deps_path = os.path.abspath(
os.path.join(self._path_to_outdir, self._runtime_deps_path))
with open(abs_runtime_deps_path) as runtime_deps_file:
files = [l.strip() for l in runtime_deps_file if l]
rel_file_paths = []
for f in files:
rel_file_path = os.path.relpath(
os.path.abspath(os.path.join(self._path_to_outdir, f)))
if not any(regex.match(rel_file_path) for regex in self._FILE_IGNORELIST):
rel_file_paths.append(rel_file_path)
return rel_file_paths
def post_run(self, _):
if self._on_device_script:
os.remove(self._on_device_script)
def device_test(args, unknown_args):
# cros_run_test has trouble with relative paths that go up directories,
# so cd to src/, which should be the root of all data deps.
os.chdir(CHROMIUM_SRC_PATH)
# pylint: disable=redefined-variable-type
# TODO: Remove the above when depot_tool's pylint is updated to include the
# fix to https://github.com/PyCQA/pylint/issues/710.
if args.test_type == 'tast':
test = TastTest(args, unknown_args)
else:
test = GTestTest(args, unknown_args)
test.build_test_command()
logging.info('Running the following command on the device:')
logging.info(' '.join(test.test_cmd))
return test.run_test()
def host_cmd(args, cmd_args):
if not cmd_args:
raise TestFormatError('Must specify command to run on the host.')
elif args.deploy_chrome and not args.path_to_outdir:
raise TestFormatError(
'--path-to-outdir must be specified if --deploy-chrome is passed.')
cros_run_test_cmd = [
CROS_RUN_TEST_PATH,
'--board',
args.board,
'--cache-dir',
args.cros_cache,
]
if args.use_vm:
cros_run_test_cmd += [
'--start',
        # Don't persist any filesystem changes after the VM shuts down.
'--copy-on-write',
]
else:
cros_run_test_cmd += [
'--device', args.device if args.device else LAB_DUT_HOSTNAME
]
if args.verbose:
cros_run_test_cmd.append('--debug')
if args.flash:
cros_run_test_cmd.append('--flash')
if args.public_image:
cros_run_test_cmd += ['--public-image']
if args.logs_dir:
for log in SYSTEM_LOG_LOCATIONS:
cros_run_test_cmd += ['--results-src', log]
cros_run_test_cmd += [
'--results-dest-dir',
os.path.join(args.logs_dir, 'system_logs')
]
test_env = setup_env()
if args.deploy_chrome:
# Mounting ash-chrome gives it enough disk space to not need stripping.
cros_run_test_cmd.extend(['--deploy-lacros'] if args.deploy_lacros else
['--deploy', '--mount', '--nostrip'])
cros_run_test_cmd += [
'--build-dir',
os.path.abspath(args.path_to_outdir),
]
cros_run_test_cmd += [
'--host-cmd',
'--',
] + cmd_args
logging.info('Running the following command:')
logging.info(' '.join(cros_run_test_cmd))
return subprocess.call(
cros_run_test_cmd, stdout=sys.stdout, stderr=sys.stderr, env=test_env)
def setup_env():
"""Returns a copy of the current env with some needed vars added."""
env = os.environ.copy()
# Some chromite scripts expect chromite/bin to be on PATH.
env['PATH'] = env['PATH'] + ':' + os.path.join(CHROMITE_PATH, 'bin')
# deploy_chrome needs a set of GN args used to build chrome to determine if
# certain libraries need to be pushed to the device. It looks for the args via
# an env var. To trigger the default deploying behavior, give it a dummy set
# of args.
# TODO(crbug.com/823996): Make the GN-dependent deps controllable via cmd
# line args.
if not env.get('GN_ARGS'):
env['GN_ARGS'] = 'enable_nacl = true'
if not env.get('USE'):
env['USE'] = 'highdpi'
return env
def add_common_args(*parsers):
for parser in parsers:
parser.add_argument('--verbose', '-v', action='store_true')
parser.add_argument(
'--board', type=str, required=True, help='Type of CrOS device.')
parser.add_argument(
'--cros-cache',
type=str,
default=DEFAULT_CROS_CACHE,
help='Path to cros cache.')
parser.add_argument(
'--path-to-outdir',
type=str,
required=True,
help='Path to output directory, all of whose contents will be '
'deployed to the device.')
parser.add_argument(
'--runtime-deps-path',
type=str,
help='Runtime data dependency file from GN.')
parser.add_argument(
'--vpython-dir',
type=str,
help='Location on host of a directory containing a vpython binary to '
'deploy to the device before the test starts. The location of '
'this dir will be added onto PATH in the device. WARNING: The '
'arch of the device might not match the arch of the host, so '
'avoid using "${platform}" when downloading vpython via CIPD.')
parser.add_argument(
'--logs-dir',
type=str,
dest='logs_dir',
help='Will copy everything under /var/log/ from the device after the '
'test into the specified dir.')
# Shard args are parsed here since we might also specify them via env vars.
parser.add_argument(
'--test-launcher-shard-index',
type=int,
default=os.environ.get('GTEST_SHARD_INDEX', 0),
help='Index of the external shard to run.')
parser.add_argument(
'--test-launcher-total-shards',
type=int,
default=os.environ.get('GTEST_TOTAL_SHARDS', 1),
help='Total number of external shards.')
parser.add_argument(
'--flash',
action='store_true',
help='Will flash the device to the current SDK version before running '
'the test.')
parser.add_argument(
'--public-image',
action='store_true',
help='Will flash a public "full" image to the device.')
vm_or_device_group = parser.add_mutually_exclusive_group()
vm_or_device_group.add_argument(
'--use-vm',
action='store_true',
help='Will run the test in the VM instead of a device.')
vm_or_device_group.add_argument(
'--device',
type=str,
help='Hostname (or IP) of device to run the test on. This arg is not '
'required if --use-vm is set.')
def main():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='test_type')
# Host-side test args.
host_cmd_parser = subparsers.add_parser(
'host-cmd',
help='Runs a host-side test. Pass the host-side command to run after '
'"--". If --use-vm is passed, hostname and port for the device '
'will be 127.0.0.1:9222.')
host_cmd_parser.set_defaults(func=host_cmd)
host_cmd_parser.add_argument(
'--deploy-chrome',
action='store_true',
help='Will deploy a locally built ash-chrome binary to the device before '
'running the host-cmd.')
host_cmd_parser.add_argument(
'--deploy-lacros',
action='store_true',
help='Deploy a lacros-chrome instead of ash-chrome.')
# GTest args.
# TODO(bpastene): Rename 'vm-test' arg to 'gtest'.
gtest_parser = subparsers.add_parser(
'vm-test', help='Runs a device-side gtest.')
gtest_parser.set_defaults(func=device_test)
gtest_parser.add_argument(
'--test-exe',
type=str,
required=True,
help='Path to test executable to run inside the device.')
# GTest args. Some are passed down to the test binary in the device. Others
# are parsed here since they might need tweaking or special handling.
gtest_parser.add_argument(
'--test-launcher-summary-output',
type=str,
help='When set, will pass the same option down to the test and retrieve '
'its result file at the specified location.')
gtest_parser.add_argument(
'--stop-ui',
action='store_true',
help='Will stop the UI service in the device before running the test.')
gtest_parser.add_argument(
'--trace-dir',
type=str,
help='When set, will pass down to the test to generate the trace and '
'retrieve the trace files to the specified location.')
gtest_parser.add_argument(
'--env-var',
nargs=2,
action='append',
default=[],
help='Env var to set on the device for the duration of the test. '
'Expected format is "--env-var SOME_VAR_NAME some_var_value". Specify '
'multiple times for more than one var.')
# Tast test args.
# pylint: disable=line-too-long
tast_test_parser = subparsers.add_parser(
'tast',
help='Runs a device-side set of Tast tests. For more details, see: '
'https://chromium.googlesource.com/chromiumos/platform/tast/+/master/docs/running_tests.md'
)
tast_test_parser.set_defaults(func=device_test)
tast_test_parser.add_argument(
'--suite-name',
type=str,
required=True,
help='Name to apply to the set of Tast tests to run. This has no effect '
'on what is executed, but is used mainly for test results reporting '
'and tracking (eg: flakiness dashboard).')
tast_test_parser.add_argument(
'--test-launcher-summary-output',
type=str,
help='Generates a simple GTest-style JSON result file for the test run.')
# TODO(bpastene): Change all uses of "--conditional" to use "--attr-expr".
tast_test_parser.add_argument(
'--conditional',
'--attr-expr',
type=str,
dest='conditional',
help='A boolean expression whose matching tests will run '
'(eg: ("dep:chrome")).')
tast_test_parser.add_argument(
'--strip-chrome',
action='store_true',
help='Strips symbols from ash-chrome before deploying to the device.')
tast_test_parser.add_argument(
'--deploy-lacros',
action='store_true',
help='Deploy a lacros-chrome instead of ash-chrome.')
tast_test_parser.add_argument(
'--tast-var',
action='append',
dest='tast_vars',
      help='Runtime variables for Tast tests, expected to be formatted as '
      '"key=value" pairs.')
tast_test_parser.add_argument(
'--test',
'-t',
action='append',
dest='tests',
help='A Tast test to run in the device (eg: "ui.ChromeLogin").')
tast_test_parser.add_argument(
'--gtest_filter',
type=str,
help="Similar to GTest's arg of the same name, this will filter out the "
"specified tests from the Tast run. However, due to the nature of Tast's "
'cmd-line API, this will overwrite the value(s) of "--test" above.')
add_common_args(gtest_parser, tast_test_parser, host_cmd_parser)
args = sys.argv[1:]
unknown_args = []
# If a '--' is present in the args, treat everything to the right of it as
# args to the test and everything to the left as args to this test runner.
# Otherwise treat all known args as args to this test runner and all unknown
# args as test args.
if '--' in args:
unknown_args = args[args.index('--') + 1:]
args = args[0:args.index('--')]
if unknown_args:
args = parser.parse_args(args=args)
else:
args, unknown_args = parser.parse_known_args()
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.WARN)
if not args.use_vm and not args.device:
# If we're not running on a VM, but haven't specified a hostname, assume
# we're on a lab bot and are trying to run a test on a lab DUT. See if the
# magic lab DUT hostname resolves to anything. (It will in the lab and will
# not on dev machines.)
try:
socket.getaddrinfo(LAB_DUT_HOSTNAME, None)
except socket.gaierror:
logging.error('The default DUT hostname of %s is unreachable.',
LAB_DUT_HOSTNAME)
return 1
args.cros_cache = os.path.abspath(args.cros_cache)
return args.func(args, unknown_args)
if __name__ == '__main__':
sys.exit(main())
|
py | 1a496d7fd9f5b303d04280046d522e164758f50b | # import the SQLAlchemy column field types used to define the table
from sqlalchemy import Column, Float, String, Integer
# import the module that initializes the SQLAlchemy declarative base class
from DB.dbcore import Base
class Client(Base):
__tablename__ = 'clients'
id = Column(Integer, primary_key = True)
address = Column(String)
chat_id = Column(Integer)
email = Column(String)
latitude = Column(Float)
longitude = Column(Float)
phone = Column(String)
title = Column(String)
user_name = Column(String)
def __init__(self, address, chat_id, email, latitude, longitude, phone, title, user_name):
self.address = address
self.chat_id = chat_id
self.email = email
self.latitude = latitude
self.longitude = longitude
self.phone = phone
self.title = title
self.user_name = user_name
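    # Usage sketch (assumes a session factory exposed by DB.dbcore; the name
    # "Session" below is illustrative, not confirmed by this project):
    #   from DB.dbcore import Session
    #   session = Session()
    #   session.add(Client('addr', 1, 'a@b.c', 0.0, 0.0, '+100', 'Title', 'user'))
    #   session.commit()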
def __repr__(self):
return f'<Client: {self.id}, {self.title}, {self.address}>' |
py | 1a496de9df052f530a5df18f6fbae17dbb61b931 | from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Outputs naive fen from input chess boards.',
author='Varun Rao',
license='MIT',
)
|
py | 1a496f75d16303d8ddf7ec7728e5a6702eaed0e5 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# TODO(mikal): move eventlet imports to nova.__init__ once we move to PBR
import os
import sys
# NOTE(mikal): All of this is because if dnspython is present in your
# environment then eventlet monkeypatches socket.getaddrinfo() with an
# implementation which doesn't work for IPv6. What we're checking here is
# that the magic environment variable was set when the import happened.
if ('eventlet' in sys.modules and
os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'):
raise ImportError('eventlet imported before nova/cmd/__init__ '
'(env var set to %s)'
% os.environ.get('EVENTLET_NO_GREENDNS'))
os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
import eventlet
eventlet.monkey_patch(os=False)
|
py | 1a496f8fdb1bda7f0df288b45b8ed041cc387892 | import cv2
import numpy as np
from PIL import Image
def anahtarOlustur(gorsel, gelen):
    # Build a key with the same shape as the image by XORing a random uint8
    # matrix with values sampled from the supplied byte list.
    r, c, t = gorsel.shape
    keyGen = np.random.randint(0, 256, size=(r, c, t), dtype=np.uint8)
    key = np.random.choice(gelen, size=(r, c, t))
    mylist = []
    for i in key:
        arr = np.array(i, dtype=np.uint8)
        mylist.append(arr)
    fth = np.array(mylist)
    return cv2.bitwise_xor(fth, keyGen)
def xor(gorsel, anahtar):
    # XOR the image with the key; applying the same key again restores the image.
    r, c, t = gorsel.shape
    return cv2.bitwise_xor(gorsel, anahtar)
def hexToUint8(hash):
    # Convert a hex digest string into a list of byte values (0-255).
    return [int(hash[i:i+2], 16) for i in range(0, len(hash), 2)]
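# Minimal usage sketch (assumed workflow; the file name and passphrase are
# illustrative): derive a byte list from a hex digest, build a key, then XOR
# once to encrypt and XOR again with the same key to decrypt.
if __name__ == "__main__":
    import hashlib
    img = cv2.imread("ornek.png")                      # hypothetical input image
    digest = hashlib.sha256(b"parola").hexdigest()     # hypothetical passphrase
    anahtar = anahtarOlustur(img, hexToUint8(digest))  # key with the image's shape
    sifreli = xor(img, anahtar)                        # encrypted image
    cozulmus = xor(sifreli, anahtar)                   # XOR with the same key decrypts
    print((cozulmus == img).all())                     # True: the round trip is lossless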
|
py | 1a497039fdc58baad3edfc2f087a084f06ded0f4 | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from sqlalchemy import Column, Integer, String
from programy.storage.stores.sql.base import Base
from programy.storage.stores.utils import DAOUtils
class Node(object):
id = Column(Integer, primary_key=True)
name = Column(String(48))
node_class = Column(String(512))
class PatternNode(Base, Node):
__tablename__ = 'pattern_nodes'
def __repr__(self):
return "<Pattern Node(id='%s', name='%s', node_class='%s')>" % (DAOUtils.valid_id(self.id), self.name, self.node_class)
class TemplateNode(Base, Node):
__tablename__ = 'template_nodes'
def __repr__(self):
return "<Template Node(id='%s', name='%s', node_class='%s')>" % (DAOUtils.valid_id(self.id), self.name, self.node_class)
|
py | 1a49714814ff2ba671f90c361da7bd027484abc2 | _base_ = [
'../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
'../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
# actual epoch = 3 * 3 = 9
lr_config = dict(policy='step', step=[3])
# runtime settings
total_epochs = 4 # actual epoch = 4 * 3 = 12
|
py | 1a4972a86daef71b00754ae5e395aa248406608e | import torch
from torch import Tensor
from torch.nn import Parameter as Param
from torch_geometric.nn.conv import MessagePassing
from ..inits import uniform
class GatedGraphConv(MessagePassing):
r"""The gated graph convolution operator from the `"Gated Graph Sequence
Neural Networks" <https://arxiv.org/abs/1511.05493>`_ paper
.. math::
\mathbf{h}_i^{(0)} &= \mathbf{x}_i \, \Vert \, \mathbf{0}
\mathbf{m}_i^{(l+1)} &= \sum_{j \in \mathcal{N}(i)} \mathbf{\Theta}
\cdot \mathbf{h}_j^{(l)}
\mathbf{h}_i^{(l+1)} &= \textrm{GRU} (\mathbf{m}_i^{(l+1)},
\mathbf{h}_i^{(l)})
up to representation :math:`\mathbf{h}_i^{(L)}`.
The number of input channels of :math:`\mathbf{x}_i` needs to be less or
equal than :obj:`out_channels`.
Args:
out_channels (int): Size of each input sample.
num_layers (int): The sequence length :math:`L`.
aggr (string, optional): The aggregation scheme to use
(:obj:`"add"`, :obj:`"mean"`, :obj:`"max"`).
(default: :obj:`"add"`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
"""
def __init__(self,
out_channels,
num_layers,
aggr='add',
bias=True,
**kwargs):
super(GatedGraphConv, self).__init__(aggr=aggr, **kwargs)
self.out_channels = out_channels
self.num_layers = num_layers
self.weight = Param(Tensor(num_layers, out_channels, out_channels))
self.rnn = torch.nn.GRUCell(out_channels, out_channels, bias=bias)
self.reset_parameters()
def reset_parameters(self):
uniform(self.out_channels, self.weight)
self.rnn.reset_parameters()
def forward(self, x, edge_index, edge_weight=None):
""""""
h = x if x.dim() == 2 else x.unsqueeze(-1)
if h.size(1) > self.out_channels:
raise ValueError('The number of input channels is not allowed to '
'be larger than the number of output channels')
if h.size(1) < self.out_channels:
zero = h.new_zeros(h.size(0), self.out_channels - h.size(1))
h = torch.cat([h, zero], dim=1)
for i in range(self.num_layers):
m = torch.matmul(h, self.weight[i])
m = self.propagate(edge_index, x=m, edge_weight=edge_weight)
h = self.rnn(m, h)
return h
def message(self, x_j, edge_weight):
if edge_weight is not None:
return edge_weight.view(-1, 1) * x_j
return x_j
def __repr__(self):
return '{}({}, num_layers={})'.format(
self.__class__.__name__, self.out_channels, self.num_layers)
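# Minimal usage sketch (illustrative only, not part of the original module):
# a 4-node, 3-feature graph; inputs are zero-padded internally to out_channels.
if __name__ == '__main__':
    x = torch.randn(4, 3)
    edge_index = torch.tensor([[0, 1, 2, 3],
                               [1, 0, 3, 2]], dtype=torch.long)
    conv = GatedGraphConv(out_channels=8, num_layers=2)
    out = conv(x, edge_index)
    print(out.shape)  # expected: torch.Size([4, 8])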
|
py | 1a49741ca31a5134d2605b4a0fac9c472234dc44 | import os
from pathlib import Path
from allennlp.data.iterators import BasicIterator
from allennlp.nn.util import move_to_device
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertAdam
import config
from bert_model_variances.bert_multilayer_output import BertMultiLayerSeqClassification
from data_utils.exvocab import ExVocabulary
from data_utils.readers.bert_reader_fever_sent_selection import BertContentSelectionReader
# from evaluation import ext_hotpot_eval
from evaluation import fever_scorer
from fever_sampler.ss_sampler import build_full_wiki_document_forward_item, down_sample_neg
from fever_utils import fever_db
from flint import torch_util
# from hotpot_data_analysis.fullwiki_provided_upperbound import append_gt_downstream_to_get_upperbound_from_doc_retri
from utils import common, list_dict_data_tool
import torch
from tqdm import tqdm
import numpy as np
import copy
import allennlp
from utils import save_tool
import torch.nn.functional as F
def eval_model(model, data_iter, device_num, with_probs=False, make_int=False, show_progress=False):
print("Evaluating ...")
tqdm_disable = not show_progress
with torch.no_grad():
model.eval()
        total_size = 0
y_pred_list = []
y_fid_list = []
y_pid_list = []
y_element_list = []
y_logits_list = []
y_probs_list = []
for batch_idx, batch in tqdm(enumerate(data_iter), disable=tqdm_disable):
batch = move_to_device(batch, device_num)
eval_paired_sequence = batch['paired_sequence']
eval_paired_segments_ids = batch['paired_segments_ids']
eval_labels_ids = batch['label']
eval_att_mask, _ = torch_util.get_length_and_mask(eval_paired_sequence)
s1_span = batch['bert_s1_span']
s2_span = batch['bert_s2_span']
out = model(eval_paired_sequence, token_type_ids=eval_paired_segments_ids, attention_mask=eval_att_mask,
mode=BertMultiLayerSeqClassification.ForwardMode.EVAL,
labels=eval_labels_ids)
y_pid_list.extend(list(batch['oid']))
y_fid_list.extend(list(batch['fid']))
y_element_list.extend(list(batch['item']))
y_pred_list.extend(torch.max(out, 1)[1].view(out.size(0)).tolist())
y_logits_list.extend(out.view(out.size(0)).tolist())
if with_probs:
y_probs_list.extend(torch.sigmoid(out).view(out.size(0)).tolist())
            total_size += out.size(0)
result_items_list = []
assert len(y_pred_list) == len(y_fid_list)
assert len(y_pred_list) == len(y_pid_list)
assert len(y_pred_list) == len(y_element_list)
assert len(y_pred_list) == len(y_logits_list)
if with_probs:
assert len(y_pred_list) == len(y_probs_list)
for i in range(len(y_pred_list)):
r_item = dict()
r_item['fid'] = y_fid_list[i]
r_item['oid'] = y_pid_list[i] if not make_int else int(y_pid_list[i])
r_item['score'] = y_logits_list[i]
r_item['element'] = y_element_list[i]
if with_probs:
r_item['prob'] = y_probs_list[i]
result_items_list.append(r_item)
return result_items_list
def select_top_k_and_to_results_dict(scored_dict, merged_field_name='merged_field',
score_field_name='prob', item_field_name='element',
top_k=5, threshold=None):
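    # For each claim, flatten the scored sentence candidates, sort them by score,
    # keep at most |top_k| entries (optionally above |threshold|), and convert each
    # sentence id of the form "<doc_id>(-.-)<line_number>" into a [doc_id, line]
    # evidence pair for the FEVER scorer.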
results_dict = dict()
for key, value in scored_dict.items():
if key not in results_dict:
results_dict[key] = dict()
# if merged_field_name not in value:
# results_dict[key]['scored_results'] = []
# results_dict[key]['predicated_evidence'] = []
# continue
fitems_dict = value[merged_field_name]
scored_element_list = []
for item in fitems_dict.values():
score = item[score_field_name]
element = item[item_field_name]
scored_element_list.append((score, element)) # score is index 0.
results_dict[key]['scored_results'] = scored_element_list
sorted_e_list = sorted(scored_element_list, key=lambda x: x[0], reverse=True)
evidence_sid = []
scored_evidence_sid = []
for s, e in sorted_e_list:
if threshold is not None:
if s >= threshold:
evidence_sid.append(e)
scored_evidence_sid.append([s, e])
else:
evidence_sid.append(e)
scored_evidence_sid.append([s, e])
evidence_sid = evidence_sid[:top_k]
scored_evidence_sid = scored_evidence_sid[:top_k]
assert len(evidence_sid) == len(scored_evidence_sid)
results_dict[key]['predicted_evidence'] = []
for sid in evidence_sid:
doc_id, ln = sid.split('(-.-)')[0], int(sid.split('(-.-)')[1])
results_dict[key]['predicted_evidence'].append([doc_id, ln])
results_dict[key]['predicted_scored_evidence'] = []
for score, sid in scored_evidence_sid:
doc_id, ln = sid.split('(-.-)')[0], int(sid.split('(-.-)')[1])
results_dict[key]['predicted_scored_evidence'].append((score, [doc_id, ln]))
# predicted_sentids
# results_dict[key]['predicted_sentids'] = results_dict[key]['predicted_evidence']
return results_dict
def get_sentences(tag, is_training, debug=False):
if tag == 'dev':
d_list = common.load_jsonl(config.FEVER_DEV)
elif tag == 'train':
d_list = common.load_jsonl(config.FEVER_TRAIN)
elif tag == 'test':
d_list = common.load_jsonl(config.FEVER_TEST)
else:
raise ValueError(f"Tag:{tag} not supported.")
if debug:
# d_list = d_list[:10]
d_list = d_list[:50]
# d_list = d_list[:200]
doc_results = common.load_jsonl(
config.RESULT_PATH / f"doc_retri_results/fever_results/merged_doc_results/m_doc_{tag}.jsonl")
doc_results_dict = list_dict_data_tool.list_to_dict(doc_results, 'id')
fever_db_cursor = fever_db.get_cursor(config.FEVER_DB)
forward_items = build_full_wiki_document_forward_item(doc_results_dict, d_list, is_training=is_training,
db_cursor=fever_db_cursor)
return forward_items
def set_gt_nli_label(d_list, delete_label=False):
for item in d_list:
item['predicted_label'] = item['label']
if delete_label:
del item['label']
return d_list
def model_go():
seed = 12
torch.manual_seed(seed)
# bert_model_name = 'bert-large-uncased'
bert_model_name = 'bert-base-uncased'
experiment_name = 'fever_v0_cs_ratio_001'
# lazy = False
lazy = True
forward_size = 128
# batch_size = 64
# batch_size = 192
batch_size = 128
gradient_accumulate_step = int(batch_size / forward_size)
warmup_proportion = 0.1
learning_rate = 5e-5
num_train_epochs = 5
eval_frequency = 20000
pos_ratio = 0.01
do_lower_case = True
# debug_mode = True
debug_mode = False
# est_datasize = 900_000
num_class = 1
# num_train_optimization_steps
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device_num = 0 if torch.cuda.is_available() else -1
n_gpu = torch.cuda.device_count()
    unk_token_num = {'tokens': 1}  # workaround for initializing the vocabulary.
vocab = ExVocabulary(unk_token_num=unk_token_num)
vocab.add_token_to_namespace("false", namespace="labels") # 0
vocab.add_token_to_namespace("true", namespace="labels") # 1
vocab.add_token_to_namespace("hidden", namespace="labels")
vocab.change_token_with_index_to_namespace("hidden", -2, namespace='labels')
# Load Dataset
# train_list = common.load_jsonl(config.FEVER_TRAIN)
dev_list = common.load_jsonl(config.FEVER_DEV)
set_gt_nli_label(dev_list)
# dev_fitems_list = common.load_jsonl(
# config.PDATA_ROOT / "content_selection_forward" / "hotpot_dev_p_level_unlabeled.jsonl")
# train_fitems_list = common.load_jsonl(
# config.PDATA_ROOT / "content_selection_forward" / "hotpot_train_p_level_labeled.jsonl")
dev_fitems_list = get_sentences('dev', is_training=False, debug=debug_mode)
train_fitems_list = get_sentences('train', is_training=True, debug=debug_mode)
if debug_mode:
dev_list = dev_list[:50]
eval_frequency = 1
# print(dev_list[-1]['_id'])
# exit(0)
sampled_train_list = down_sample_neg(train_fitems_list, ratio=pos_ratio)
est_datasize = len(sampled_train_list)
dev_o_dict = list_dict_data_tool.list_to_dict(dev_list, 'id')
# print(dev_o_dict)
bert_tokenizer = BertTokenizer.from_pretrained(bert_model_name, do_lower_case=do_lower_case)
bert_cs_reader = BertContentSelectionReader(bert_tokenizer, lazy, is_paired=True,
example_filter=lambda x: len(x['context']) == 0, max_l=128)
bert_encoder = BertModel.from_pretrained(bert_model_name)
model = BertMultiLayerSeqClassification(bert_encoder, num_labels=num_class, num_of_pooling_layer=1,
act_type='tanh', use_pretrained_pooler=True, use_sigmoid=True)
#
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
num_train_optimization_steps = int(est_datasize / forward_size / gradient_accumulate_step) * \
num_train_epochs
if debug_mode:
num_train_optimization_steps = 100
print("Estimated training size", est_datasize)
print("Number of optimization steps:", num_train_optimization_steps)
optimizer = BertAdam(optimizer_grouped_parameters,
lr=learning_rate,
warmup=warmup_proportion,
t_total=num_train_optimization_steps)
dev_instances = bert_cs_reader.read(dev_fitems_list)
biterator = BasicIterator(batch_size=forward_size)
biterator.index_with(vocab)
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
forbackward_step = 0
update_step = 0
logging_agent = save_tool.ScoreLogger({})
file_path_prefix = '.'
if not debug_mode:
file_path_prefix, date = save_tool.gen_file_prefix(f"{experiment_name}")
# # # Create Log File
# Save the source code.
script_name = os.path.basename(__file__)
with open(os.path.join(file_path_prefix, script_name), 'w') as out_f, open(__file__, 'r') as it:
out_f.write(it.read())
out_f.flush()
# # # Log File end
for epoch_i in range(num_train_epochs):
print("Epoch:", epoch_i)
sampled_train_list = down_sample_neg(train_fitems_list, ratio=pos_ratio)
train_instance = bert_cs_reader.read(sampled_train_list)
train_iter = biterator(train_instance, num_epochs=1, shuffle=True)
for batch in tqdm(train_iter):
model.train()
batch = move_to_device(batch, device_num)
paired_sequence = batch['paired_sequence']
paired_segments_ids = batch['paired_segments_ids']
labels_ids = batch['label']
att_mask, _ = torch_util.get_length_and_mask(paired_sequence)
s1_span = batch['bert_s1_span']
s2_span = batch['bert_s2_span']
loss = model(paired_sequence, token_type_ids=paired_segments_ids, attention_mask=att_mask,
mode=BertMultiLayerSeqClassification.ForwardMode.TRAIN,
labels=labels_ids)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if gradient_accumulate_step > 1:
loss = loss / gradient_accumulate_step
loss.backward()
forbackward_step += 1
if forbackward_step % gradient_accumulate_step == 0:
optimizer.step()
optimizer.zero_grad()
update_step += 1
if update_step % eval_frequency == 0:
print("Update steps:", update_step)
dev_iter = biterator(dev_instances, num_epochs=1, shuffle=False)
cur_eval_results_list = eval_model(model, dev_iter, device_num, with_probs=True, make_int=True)
copied_dev_o_dict = copy.deepcopy(dev_o_dict)
copied_dev_d_list = copy.deepcopy(dev_list)
list_dict_data_tool.append_subfield_from_list_to_dict(cur_eval_results_list, copied_dev_o_dict,
'oid', 'fid', check=True)
print("Threshold 0.5:")
cur_results_dict_th0_5 = select_top_k_and_to_results_dict(copied_dev_o_dict,
top_k=5, threshold=0.5)
list_dict_data_tool.append_item_from_dict_to_list(copied_dev_d_list, cur_results_dict_th0_5,
'id', 'predicted_evidence')
mode = {'standard': True, 'check_sent_id_correct': True}
strict_score, acc_score, pr, rec, f1 = fever_scorer.fever_score(copied_dev_d_list, dev_list,
mode=mode, max_evidence=5)
score_05 = {
'ss': strict_score, 'as': acc_score,
'pr': pr, 'rec': rec, 'f1': f1,
}
print("Threshold 0.1:")
cur_results_dict_th0_1 = select_top_k_and_to_results_dict(copied_dev_o_dict,
top_k=5, threshold=0.1)
list_dict_data_tool.append_item_from_dict_to_list(copied_dev_d_list, cur_results_dict_th0_1,
'id', 'predicted_evidence')
mode = {'standard': True, 'check_sent_id_correct': True}
strict_score, acc_score, pr, rec, f1 = fever_scorer.fever_score(copied_dev_d_list, dev_list,
mode=mode, max_evidence=5)
score_01 = {
'ss': strict_score, 'as': acc_score,
'pr': pr, 'rec': rec, 'f1': f1,
}
logging_item = {
'score_01': score_01,
'score_05': score_05,
}
print(logging_item)
s01_ss_score = score_01['ss']
s05_ss_score = score_05['ss']
#
# exit(0)
# print(logging_item)
save_file_name = f'i({update_step})|e({epoch_i})' \
f'|s01({s01_ss_score})|s05({s05_ss_score})' \
f'|seed({seed})'
common.save_jsonl(cur_eval_results_list, Path(file_path_prefix) /
f"{save_file_name}_dev_sent_results.json")
# print(save_file_name)
logging_agent.incorporate_results({}, save_file_name, logging_item)
logging_agent.logging_to_file(Path(file_path_prefix) / "log.json")
model_to_save = model.module if hasattr(model, 'module') else model
output_model_file = Path(file_path_prefix) / save_file_name
torch.save(model_to_save.state_dict(), str(output_model_file))
# print(logging_agent.logging_item_list)
# Epoch eval:
print("Update steps:", update_step)
dev_iter = biterator(dev_instances, num_epochs=1, shuffle=False)
cur_eval_results_list = eval_model(model, dev_iter, device_num, with_probs=True, make_int=True)
copied_dev_o_dict = copy.deepcopy(dev_o_dict)
copied_dev_d_list = copy.deepcopy(dev_list)
list_dict_data_tool.append_subfield_from_list_to_dict(cur_eval_results_list, copied_dev_o_dict,
'oid', 'fid', check=True)
print("Threshold 0.5:")
cur_results_dict_th0_5 = select_top_k_and_to_results_dict(copied_dev_o_dict,
top_k=5, threshold=0.5)
list_dict_data_tool.append_item_from_dict_to_list(copied_dev_d_list, cur_results_dict_th0_5,
'id', 'predicted_evidence')
mode = {'standard': True, 'check_sent_id_correct': True}
strict_score, acc_score, pr, rec, f1 = fever_scorer.fever_score(copied_dev_d_list, dev_list,
mode=mode, max_evidence=5)
score_05 = {
'ss': strict_score, 'as': acc_score,
'pr': pr, 'rec': rec, 'f1': f1,
}
print("Threshold 0.1:")
cur_results_dict_th0_1 = select_top_k_and_to_results_dict(copied_dev_o_dict,
top_k=5, threshold=0.1)
list_dict_data_tool.append_item_from_dict_to_list(copied_dev_d_list, cur_results_dict_th0_1,
'id', 'predicted_evidence')
mode = {'standard': True, 'check_sent_id_correct': True}
strict_score, acc_score, pr, rec, f1 = fever_scorer.fever_score(copied_dev_d_list, dev_list,
mode=mode, max_evidence=5)
score_01 = {
'ss': strict_score, 'as': acc_score,
'pr': pr, 'rec': rec, 'f1': f1,
}
logging_item = {
'score_01': score_01,
'score_05': score_05,
}
print(logging_item)
s01_ss_score = score_01['ss']
s05_ss_score = score_05['ss']
#
# exit(0)
# print(logging_item)
save_file_name = f'i({update_step})|e({epoch_i})' \
f'|s01({s01_ss_score})|s05({s05_ss_score})' \
f'|seed({seed})'
common.save_jsonl(cur_eval_results_list, Path(file_path_prefix) /
f"{save_file_name}_dev_sent_results.jsonl")
# print(save_file_name)
logging_agent.incorporate_results({}, save_file_name, logging_item)
logging_agent.logging_to_file(Path(file_path_prefix) / "log.json")
model_to_save = model.module if hasattr(model, 'module') else model
output_model_file = Path(file_path_prefix) / save_file_name
torch.save(model_to_save.state_dict(), str(output_model_file))
def eval_trainset_for_train_nli(model_path):
tag = 'test'
is_training = False
seed = 12
torch.manual_seed(seed)
bert_model_name = 'bert-base-uncased'
lazy = False
# lazy = True
forward_size = 128
# batch_size = 64
# batch_size = 192
batch_size = 128
do_lower_case = True
debug_mode = False
# debug_mode = True
num_class = 1
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device_num = 0 if torch.cuda.is_available() else -1
n_gpu = torch.cuda.device_count()
    unk_token_num = {'tokens': 1}  # workaround for initializing the vocabulary.
vocab = ExVocabulary(unk_token_num=unk_token_num)
vocab.add_token_to_namespace("false", namespace="labels") # 0
vocab.add_token_to_namespace("true", namespace="labels") # 1
vocab.add_token_to_namespace("hidden", namespace="labels")
vocab.change_token_with_index_to_namespace("hidden", -2, namespace='labels')
# Load Dataset
train_fitems_list = get_sentences(tag, is_training=is_training, debug=debug_mode)
est_datasize = len(train_fitems_list)
bert_tokenizer = BertTokenizer.from_pretrained(bert_model_name, do_lower_case=do_lower_case)
bert_cs_reader = BertContentSelectionReader(bert_tokenizer, lazy, is_paired=True,
example_filter=lambda x: len(x['context']) == 0, max_l=128)
bert_encoder = BertModel.from_pretrained(bert_model_name)
model = BertMultiLayerSeqClassification(bert_encoder, num_labels=num_class, num_of_pooling_layer=1,
act_type='tanh', use_pretrained_pooler=True, use_sigmoid=True)
model.load_state_dict(torch.load(model_path))
print("Estimated training size", est_datasize)
print("Estimated forward steps:", est_datasize / forward_size)
biterator = BasicIterator(batch_size=forward_size)
biterator.index_with(vocab)
model.to(device)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
train_instance = bert_cs_reader.read(train_fitems_list)
train_iter = biterator(train_instance, num_epochs=1, shuffle=False)
cur_eval_results_list = eval_model(model, train_iter, device_num, with_probs=True, make_int=True, show_progress=True)
if debug_mode:
train_list = common.load_jsonl(config.FEVER_TRAIN)
train_list = train_list[:50]
set_gt_nli_label(train_list)
train_o_dict = list_dict_data_tool.list_to_dict(train_list, 'id')
copied_dev_o_dict = copy.deepcopy(train_o_dict)
copied_dev_d_list = copy.deepcopy(train_list)
list_dict_data_tool.append_subfield_from_list_to_dict(cur_eval_results_list, copied_dev_o_dict,
'oid', 'fid', check=True)
print("Threshold 0.5:")
cur_results_dict_th0_5 = select_top_k_and_to_results_dict(copied_dev_o_dict,
top_k=5, threshold=0.1)
list_dict_data_tool.append_item_from_dict_to_list(copied_dev_d_list, cur_results_dict_th0_5,
'id', 'predicted_evidence')
mode = {'standard': True, 'check_sent_id_correct': True}
strict_score, acc_score, pr, rec, f1 = fever_scorer.fever_score(copied_dev_d_list, train_list,
mode=mode, max_evidence=5)
print(strict_score, acc_score, pr, rec, f1)
common.save_jsonl(cur_eval_results_list, f'{tag}_sent_results_labeled:{is_training}.jsonl')
if __name__ == '__main__':
model_go()
# model_path = config.PRO_ROOT / "saved_models/04-13-16:37:29_fever_v0_cs/i(5000)|e(0)|s01(0.9170917091709171)|s05(0.8842384238423843)|seed(12)"
#
# model_path = config.PRO_ROOT / "saved_models/04-13-16:37:29_fever_v0_cs/i(15000)|e(1)|s01(0.9013901390139014)|s05(0.8517851785178517)|seed(12)"
# eval_trainset_for_train_nli(model_path)
# dev_sent_list = get_sentences('dev', is_training=False)
# print(len(dev_sent_list))
#
# train_sent_list = get_sentences('dev', is_training=True)
# sampled_sent_list = down_sample_neg(train_sent_list, ratio=0.2)
# print(len(sampled_sent_list))
|
py | 1a4974671f8b6123005f510e262956d08d3f79d6 | import ibm_boto3
from ibm_botocore.client import Config, ClientError
class CloudObjectStore:
    '''
    Interface to IBM Cloud Object Store, typical values:
    bucket_name: name of your storage bucket
    api_key: your API key
    (go to Cloud Storage dashboard -> Service credentials)
    resource_crn: your bucket CRN
    (go to your bucket -> Configuration: Bucket instance CRN)
    endpoint: for external access,
    "https://s3.us-east.cloud-object-storage.appdomain.cloud"
    endpoint: for internal access,
    "https://s3.private.us-east.cloud-object-storage.appdomain.cloud"
    auth_endpoint: "https://iam.cloud.ibm.com/identity/token"
    '''
    DEFAULT_ENDPOINT = \
        'https://s3.us-east.cloud-object-storage.appdomain.cloud'
    DEFAULT_AUTH_ENDPOINT = \
        'https://iam.cloud.ibm.com/identity/token'
def __init__(self, bucket_name,
api_key,
resource_crn,
endpoint=DEFAULT_ENDPOINT,
auth_endpoint=DEFAULT_AUTH_ENDPOINT,
):
self.bucket_name = bucket_name
self.COS_API_KEY_ID = api_key
self.COS_RESOURCE_CRN = resource_crn
self.COS_ENDPOINT = endpoint
self.COS_AUTH_ENDPOINT = auth_endpoint
self.cos = ibm_boto3.resource(
"s3",
ibm_api_key_id=self.COS_API_KEY_ID,
ibm_service_instance_id=self.COS_RESOURCE_CRN,
ibm_auth_endpoint=self.COS_AUTH_ENDPOINT,
config=Config(signature_version="oauth"),
endpoint_url=self.COS_ENDPOINT
)
def get_bucket_contents(self):
try:
files = self.cos.Bucket(self.bucket_name).objects.all()
return [file.key for file in files]
except ClientError as be:
print("CLIENT ERROR: {0}\n".format(be))
except Exception as e:
print("Unable to retrieve bucket contents: {0}".format(e))
def get_item(self, item_name):
try:
file = self.cos.Object(self.bucket_name, item_name).get()
return file["Body"].read()
except ClientError as be:
print("CLIENT ERROR: {0}\n".format(be))
except Exception as e:
print("Unable to retrieve file contents: {0}".format(e))
def create_item(self, item_name, file_text):
print("Creating new item: {0}".format(item_name))
try:
self.cos.Object(self.bucket_name, item_name).put(
Body=file_text
)
except ClientError as be:
print("CLIENT ERROR: {0}\n".format(be))
except Exception as e:
print("Unable to create text file: {0}".format(e))
|
py | 1a49746ca08f0f2344e0fca0b347af54dc6cb1ce | #!/usr/bin/env python
import os
import pandas as pd
import numpy as np
from sklearn.externals import joblib
from math import (
log,
exp
)
from matplotlib import pyplot as plt
import time
import util
import ipdb
def plot_log_prob_of_all_trials(
list_of_log_prob_mat,
log_prob_owner,
state_no,
figure_save_path):
trial_amount = len(list_of_log_prob_mat)
hidden_state_amount = list_of_log_prob_mat[0].shape[1]
fig, ax_list = plt.subplots(nrows=hidden_state_amount)
if hidden_state_amount == 1:
ax_list = [ax_list]
from matplotlib.pyplot import cm
import numpy as np
color=iter(cm.rainbow(np.linspace(0, 1, trial_amount)))
for i in range(trial_amount):
c=next(color)
log_prob_mat = list_of_log_prob_mat[i][:, :].transpose()
hidden_state_amount = log_prob_mat.shape[0]
for row_no in range(hidden_state_amount):
if i == 0:
ax_list[row_no].plot(log_prob_mat[row_no].tolist(), linestyle="solid", color=c)
title = 'state %s trial hidden state %s log_prob plot'%(state_no, row_no)
ax_list[row_no].set_title(title)
else:
ax_list[row_no].plot(log_prob_mat[row_no].tolist(), linestyle="solid", color=c)
if not os.path.isdir(figure_save_path+'/hidden_state_log_prob_plot'):
os.makedirs(figure_save_path+'/hidden_state_log_prob_plot')
title = 'state %s trial hidden state log_prob plot'%(state_no,)
fig.savefig(os.path.join(figure_save_path, 'hidden_state_log_prob_plot', title+".eps"), format="eps")
plt.close(1)
def run(model_save_path,
figure_save_path,
threshold_c_value,
trials_group_by_folder_name):
trials_group_by_folder_name = util.make_trials_of_each_state_the_same_length(trials_group_by_folder_name)
one_trial_data_group_by_state = trials_group_by_folder_name.itervalues().next()
state_amount = len(one_trial_data_group_by_state)
threshold_constant = 10
threshold_offset = 10
model_group_by_state = {}
for state_no in range(1, state_amount+1):
try:
model_group_by_state[state_no] = joblib.load(model_save_path+"/model_s%s.pkl"%(state_no,))
except IOError:
print 'model of state %s not found'%(state_no,)
continue
expected_log = []
std_of_log = []
deri_threshold = []
for state_no in model_group_by_state:
list_of_log_prob_mat = []
log_prob_owner = []
for trial_name in trials_group_by_folder_name:
log_prob_owner.append(trial_name)
hidden_state_log_prob = util.get_hidden_state_log_prob_matrix(
trials_group_by_folder_name[trial_name][state_no],
model_group_by_state[state_no]
)
list_of_log_prob_mat.append(hidden_state_log_prob)
# use np matrix to facilitate the computation of mean curve and std
plot_log_prob_of_all_trials(
list_of_log_prob_mat,
log_prob_owner,
state_no,
figure_save_path)
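# Hedged demo helper, not part of the original script: it feeds synthetic
# (time steps x hidden states) matrices into plot_log_prob_of_all_trials so
# the plotting path can be exercised without trained models or recorded
# trials. The temporary output directory and the array sizes are arbitrary
# illustrative choices; real input comes from
# util.get_hidden_state_log_prob_matrix as in run() above.
def _demo_plot_synthetic_trials(n_trials=3, n_steps=50, n_hidden=4):
    import tempfile
    figure_save_path = tempfile.mkdtemp()
    fake_log_prob_mats = [np.random.randn(n_steps, n_hidden).cumsum(axis=0)
                          for _ in range(n_trials)]
    owners = ['synthetic_trial_%d' % i for i in range(n_trials)]
    plot_log_prob_of_all_trials(fake_log_prob_mats, owners,
                                state_no=1, figure_save_path=figure_save_path)
    return figure_save_path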
|
py | 1a49759fc57560f3f006d5e673601359d5c2a924 | #
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD.
# See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np # pylint: disable=unused-import
import typing # pylint: disable=unused-import
from nomad.metainfo import ( # pylint: disable=unused-import
MSection, MCategory, Category, Package, Quantity, Section, SubSection, SectionProxy,
Reference
)
from nomad.metainfo.legacy import LegacyDefinition
from nomad.datamodel.metainfo import public
m_package = Package(
name='cpmd_general_nomadmetainfo_json',
description='None',
a_legacy=LegacyDefinition(name='cpmd.general.nomadmetainfo.json'))
class x_cpmd_section_start_information(MSection):
'''
Contains information about the starting conditions for this run
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='x_cpmd_section_start_information'))
x_cpmd_start_datetime = Quantity(
type=str,
shape=[],
description='''
CPMD run start time and date
''',
a_legacy=LegacyDefinition(name='x_cpmd_start_datetime'))
x_cpmd_input_filename = Quantity(
type=str,
shape=[],
description='''
CPMD input file name.
''',
a_legacy=LegacyDefinition(name='x_cpmd_input_filename'))
x_cpmd_compilation_date = Quantity(
type=str,
shape=[],
description='''
CPMD compilation date.
''',
a_legacy=LegacyDefinition(name='x_cpmd_compilation_date'))
x_cpmd_process_id = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
The process id for this calculation.
''',
a_legacy=LegacyDefinition(name='x_cpmd_process_id'))
x_cpmd_run_user_name = Quantity(
type=str,
shape=[],
description='''
The user who launched this calculation.
''',
a_legacy=LegacyDefinition(name='x_cpmd_run_user_name'))
x_cpmd_run_host_name = Quantity(
type=str,
shape=[],
description='''
The host on which this calculation was made on.
''',
a_legacy=LegacyDefinition(name='x_cpmd_run_host_name'))
class x_cpmd_section_run_type_information(MSection):
'''
Contains information about the run type.
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='x_cpmd_section_run_type_information'))
x_cpmd_time_step_ions = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The time step for ions.
''',
a_legacy=LegacyDefinition(name='x_cpmd_time_step_ions'))
x_cpmd_time_step_electrons = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The time step for electrons.
''',
a_legacy=LegacyDefinition(name='x_cpmd_time_step_electrons'))
x_cpmd_geo_opt_method = Quantity(
type=str,
shape=[],
description='''
The geometry optimization method.
''',
a_legacy=LegacyDefinition(name='x_cpmd_geo_opt_method'))
x_cpmd_max_steps = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
The maximum number of steps requested. In MD, this is the number of MD steps, in
single point calculations this is the number of scf cycles, in geometry
optimization this is the number of optimization steps.
''',
a_legacy=LegacyDefinition(name='x_cpmd_max_steps'))
x_cpmd_ion_temperature_control = Quantity(
type=str,
shape=[],
description='''
The temperature control method for ion dynamics.
''',
a_legacy=LegacyDefinition(name='x_cpmd_ion_temperature_control'))
class x_cpmd_section_xc_information(MSection):
'''
Contains information about the exchange-correlation functional.
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='x_cpmd_section_xc_information'))
class x_cpmd_section_system_information(MSection):
'''
Contains information about the system.
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='x_cpmd_section_system_information'))
class x_cpmd_section_pseudopotential_information(MSection):
'''
Contains information about the pseudopotentials.
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='x_cpmd_section_pseudopotential_information'))
class x_cpmd_section_atom_kinds(MSection):
'''
Contains information about the atomic kinds present in the calculation.
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='x_cpmd_section_atom_kinds'))
x_cpmd_section_atom_kind = SubSection(
sub_section=SectionProxy('x_cpmd_section_atom_kind'),
repeats=True,
a_legacy=LegacyDefinition(name='x_cpmd_section_atom_kind'))
class x_cpmd_section_atom_kind(MSection):
'''
Contains information about one atomic kind.
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='x_cpmd_section_atom_kind'))
x_cpmd_atom_kind_label = Quantity(
type=str,
shape=[],
description='''
The label of the atomic kind.
''',
a_legacy=LegacyDefinition(name='x_cpmd_atom_kind_label'))
x_cpmd_atom_kind_mass = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The mass of the atomic kind.
''',
a_legacy=LegacyDefinition(name='x_cpmd_atom_kind_mass'))
x_cpmd_atom_kind_raggio = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The width of the ionic charge distribution (RAGGIO) of the atomic kind.
''',
a_legacy=LegacyDefinition(name='x_cpmd_atom_kind_raggio'))
x_cpmd_atom_kind_nlcc = Quantity(
type=str,
shape=[],
description='''
The nonlinear core correction (NLCC) of the atomic kind.
''',
a_legacy=LegacyDefinition(name='x_cpmd_atom_kind_nlcc'))
x_cpmd_atom_kind_pseudopotential_l = Quantity(
type=str,
shape=[],
description='''
The angular part of the pseudopotential for the atomic kind.
''',
a_legacy=LegacyDefinition(name='x_cpmd_atom_kind_pseudopotential_l'))
x_cpmd_atom_kind_pseudopotential_type = Quantity(
type=str,
shape=[],
description='''
The type of the pseudopotential for the atomic kind.
''',
a_legacy=LegacyDefinition(name='x_cpmd_atom_kind_pseudopotential_type'))
class x_cpmd_section_supercell(MSection):
'''
Contains information about the supercell.
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='x_cpmd_section_supercell'))
x_cpmd_cell_symmetry = Quantity(
type=str,
shape=[],
description='''
The symmetry of the cell.
''',
a_legacy=LegacyDefinition(name='x_cpmd_cell_symmetry'))
x_cpmd_cell_lattice_constant = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The cell lattice constant.
''',
a_legacy=LegacyDefinition(name='x_cpmd_cell_lattice_constant'))
x_cpmd_cell_volume = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The cell volume.
''',
a_legacy=LegacyDefinition(name='x_cpmd_cell_volume'))
x_cpmd_cell_dimension = Quantity(
type=str,
shape=[],
description='''
The cell dimension.
''',
a_legacy=LegacyDefinition(name='x_cpmd_cell_dimension'))
x_cpmd_lattice_vector_A1 = Quantity(
type=str,
shape=[],
description='''
Lattice vector A1
''',
a_legacy=LegacyDefinition(name='x_cpmd_lattice_vector_A1'))
x_cpmd_lattice_vector_A2 = Quantity(
type=str,
shape=[],
description='''
Lattice vector A2
''',
a_legacy=LegacyDefinition(name='x_cpmd_lattice_vector_A2'))
x_cpmd_lattice_vector_A3 = Quantity(
type=str,
shape=[],
description='''
Lattice vector A3
''',
a_legacy=LegacyDefinition(name='x_cpmd_lattice_vector_A3'))
x_cpmd_reciprocal_lattice_vector_B1 = Quantity(
type=str,
shape=[],
description='''
Reciprocal lattice vector B1
''',
a_legacy=LegacyDefinition(name='x_cpmd_reciprocal_lattice_vector_B1'))
x_cpmd_reciprocal_lattice_vector_B2 = Quantity(
type=str,
shape=[],
description='''
Reciprocal lattice vector B2
''',
a_legacy=LegacyDefinition(name='x_cpmd_reciprocal_lattice_vector_B2'))
x_cpmd_reciprocal_lattice_vector_B3 = Quantity(
type=str,
shape=[],
description='''
Reciprocal lattice vector B3
''',
a_legacy=LegacyDefinition(name='x_cpmd_reciprocal_lattice_vector_B3'))
x_cpmd_real_space_mesh = Quantity(
type=str,
shape=[],
description='''
Number of points in the real space mesh.
''',
a_legacy=LegacyDefinition(name='x_cpmd_real_space_mesh'))
x_cpmd_wave_function_cutoff = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
        Plane wave cutoff energy for the wave function.
''',
a_legacy=LegacyDefinition(name='x_cpmd_wave_function_cutoff'))
x_cpmd_density_cutoff = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
        Plane wave cutoff energy for the density.
''',
a_legacy=LegacyDefinition(name='x_cpmd_density_cutoff'))
x_cpmd_number_of_planewaves_density = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
Number of plane waves for density cutoff.
''',
a_legacy=LegacyDefinition(name='x_cpmd_number_of_planewaves_density'))
x_cpmd_number_of_planewaves_wave_function = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
Number of plane waves for wave_function cutoff.
''',
a_legacy=LegacyDefinition(name='x_cpmd_number_of_planewaves_wave_function'))
class x_cpmd_section_wave_function_initialization(MSection):
'''
Contains information about the wave function initialization
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='x_cpmd_section_wave_function_initialization'))
class x_cpmd_section_scf(MSection):
'''
Contains information about self-consistent field calculation
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='x_cpmd_section_scf'))
x_cpmd_section_scf_iteration = SubSection(
sub_section=SectionProxy('x_cpmd_section_scf_iteration'),
repeats=True,
a_legacy=LegacyDefinition(name='x_cpmd_section_scf_iteration'))
class x_cpmd_section_scf_iteration(MSection):
'''
Contains information about the self-consistent field iteration within a wavefunction
optimization.
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='x_cpmd_section_scf_iteration'))
x_cpmd_scf_nfi = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
The scf step number (NFI).
''',
a_legacy=LegacyDefinition(name='x_cpmd_scf_nfi'))
x_cpmd_scf_gemax = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Largest off-diagonal component (GEMAX) during SCF step.
''',
a_legacy=LegacyDefinition(name='x_cpmd_scf_gemax'))
x_cpmd_scf_cnorm = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Average of the off-diagonal components (CNORM) during SCF step.
''',
a_legacy=LegacyDefinition(name='x_cpmd_scf_cnorm'))
x_cpmd_scf_etot = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The total energy (ETOT) during SCF step.
''',
a_legacy=LegacyDefinition(name='x_cpmd_scf_etot'))
x_cpmd_scf_detot = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The difference in total energy to the previous SCF energy (DETOT).
''',
a_legacy=LegacyDefinition(name='x_cpmd_scf_detot'))
x_cpmd_scf_tcpu = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The CPU time used during SCF step (TCPU).
''',
a_legacy=LegacyDefinition(name='x_cpmd_scf_tcpu'))
class x_cpmd_section_final_results(MSection):
'''
The final results after a single point calculation.
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='x_cpmd_section_final_results'))
class x_cpmd_section_geo_opt_initialization(MSection):
'''
Geometry optimization initialization information.
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='x_cpmd_section_geo_opt_initialization'))
x_cpmd_total_number_of_molecular_structures = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
Total number of molecular structures.
''',
a_legacy=LegacyDefinition(name='x_cpmd_total_number_of_molecular_structures'))
x_cpmd_initialized_positions = Quantity(
type=np.dtype(np.float64),
shape=['number_of_atoms', 3],
description='''
The initialized positions for geometry optimization. The ith row corresponds to
the position for atom number i.
''',
a_legacy=LegacyDefinition(name='x_cpmd_initialized_positions'))
x_cpmd_initialized_forces = Quantity(
type=np.dtype(np.float64),
shape=['number_of_atoms', 3],
description='''
The initialized forces for geometry optimization. The ith row corresponds to the
force for atom number i.
''',
a_legacy=LegacyDefinition(name='x_cpmd_initialized_forces'))
x_cpmd_geo_opt_initialization_time = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Time for initialization.
''',
a_legacy=LegacyDefinition(name='x_cpmd_geo_opt_initialization_time'))
class x_cpmd_section_geo_opt_step(MSection):
'''
Contains information for a single geometry optimization step.
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='x_cpmd_section_geo_opt_step'))
x_cpmd_geo_opt_step_positions = Quantity(
type=np.dtype(np.float64),
shape=['number_of_atoms', 3],
description='''
The positions from a geometry optimization step. The ith row corresponds to the
position for atom number i.
''',
a_legacy=LegacyDefinition(name='x_cpmd_geo_opt_step_positions'))
x_cpmd_geo_opt_step_forces = Quantity(
type=np.dtype(np.float64),
shape=['number_of_atoms', 3],
description='''
The forces from a geometry optimization step. The ith row corresponds to the force
for atom number i.
''',
a_legacy=LegacyDefinition(name='x_cpmd_geo_opt_step_forces'))
x_cpmd_geo_opt_step_total_number_of_scf_steps = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
Total number of SCF steps at the end of this geometry optimization step.
''',
a_legacy=LegacyDefinition(name='x_cpmd_geo_opt_step_total_number_of_scf_steps'))
x_cpmd_geo_opt_step_number = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
Geometry optimization step number.
''',
a_legacy=LegacyDefinition(name='x_cpmd_geo_opt_step_number'))
x_cpmd_geo_opt_step_gnmax = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The largest absolute component of the force on any atom (GNMAX).
''',
a_legacy=LegacyDefinition(name='x_cpmd_geo_opt_step_gnmax'))
x_cpmd_geo_opt_step_gnorm = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Average force on the atoms (GNORM).
''',
a_legacy=LegacyDefinition(name='x_cpmd_geo_opt_step_gnorm'))
x_cpmd_geo_opt_step_cnstr = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The largest absolute component of a constraint force on the atoms (CNSTR).
''',
a_legacy=LegacyDefinition(name='x_cpmd_geo_opt_step_cnstr'))
x_cpmd_geo_opt_step_etot = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The total energy at the end of a geometry optimization step (ETOT).
''',
a_legacy=LegacyDefinition(name='x_cpmd_geo_opt_step_etot'))
x_cpmd_geo_opt_step_detot = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The difference in total energy to the previous geometry optimization step (DETOT).
''',
a_legacy=LegacyDefinition(name='x_cpmd_geo_opt_step_detot'))
x_cpmd_geo_opt_step_tcpu = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The CPU time used during geometry optimization step (TCPU).
''',
a_legacy=LegacyDefinition(name='x_cpmd_geo_opt_step_tcpu'))
x_cpmd_section_geo_opt_scf_iteration = SubSection(
sub_section=SectionProxy('x_cpmd_section_geo_opt_scf_iteration'),
repeats=True,
a_legacy=LegacyDefinition(name='x_cpmd_section_geo_opt_scf_iteration'))
class x_cpmd_section_geo_opt_scf_iteration(MSection):
'''
Contains information about the self-consistent field iteration within a geometry
optimization step.
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='x_cpmd_section_geo_opt_scf_iteration'))
x_cpmd_geo_opt_scf_nfi = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
The scf step number (NFI) within geometry optimization step.
''',
a_legacy=LegacyDefinition(name='x_cpmd_geo_opt_scf_nfi'))
x_cpmd_geo_opt_scf_gemax = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Largest off-diagonal component (GEMAX) during SCF step within geometry
optimization step.
''',
a_legacy=LegacyDefinition(name='x_cpmd_geo_opt_scf_gemax'))
x_cpmd_geo_opt_scf_cnorm = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
Average of the off-diagonal components (CNORM) during SCF step within geometry
optimization step.
''',
a_legacy=LegacyDefinition(name='x_cpmd_geo_opt_scf_cnorm'))
x_cpmd_geo_opt_scf_etot = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The total energy (ETOT) during SCF step within geometry optimization step.
''',
a_legacy=LegacyDefinition(name='x_cpmd_geo_opt_scf_etot'))
x_cpmd_geo_opt_scf_detot = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The difference in total energy to the previous SCF energy (DETOT) within geometry
optimization step.
''',
a_legacy=LegacyDefinition(name='x_cpmd_geo_opt_scf_detot'))
x_cpmd_geo_opt_scf_tcpu = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The CPU time used during SCF step (TCPU) within geometry optimization step.
''',
a_legacy=LegacyDefinition(name='x_cpmd_geo_opt_scf_tcpu'))
class x_cpmd_section_md_initialization(MSection):
'''
Molecular dynamics initialization information.
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='x_cpmd_section_md_initialization'))
class x_cpmd_section_md_averaged_quantities(MSection):
'''
    Averaged quantities from an MD calculation.
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='x_cpmd_section_md_averaged_quantities'))
x_cpmd_electron_kinetic_energy_mean = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The mean electron kinetic energy.
''',
a_legacy=LegacyDefinition(name='x_cpmd_electron_kinetic_energy_mean'))
x_cpmd_electron_kinetic_energy_std = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The standard deviation of electron kinetic energy.
''',
a_legacy=LegacyDefinition(name='x_cpmd_electron_kinetic_energy_std'))
x_cpmd_ionic_temperature_mean = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The mean ionic temperature.
''',
a_legacy=LegacyDefinition(name='x_cpmd_ionic_temperature_mean'))
x_cpmd_ionic_temperature_std = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The standard deviation of ionic temperature.
''',
a_legacy=LegacyDefinition(name='x_cpmd_ionic_temperature_std'))
x_cpmd_density_functional_energy_mean = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The mean density functional energy.
''',
a_legacy=LegacyDefinition(name='x_cpmd_density_functional_energy_mean'))
x_cpmd_density_functional_energy_std = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The standard deviation of density functional energy.
''',
a_legacy=LegacyDefinition(name='x_cpmd_density_functional_energy_std'))
x_cpmd_classical_energy_mean = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The mean classical energy.
''',
a_legacy=LegacyDefinition(name='x_cpmd_classical_energy_mean'))
x_cpmd_classical_energy_std = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The standard deviation of classical energy.
''',
a_legacy=LegacyDefinition(name='x_cpmd_classical_energy_std'))
x_cpmd_conserved_energy_mean = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The mean conserved energy.
''',
a_legacy=LegacyDefinition(name='x_cpmd_conserved_energy_mean'))
x_cpmd_conserved_energy_std = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The standard deviation of conserved energy.
''',
a_legacy=LegacyDefinition(name='x_cpmd_conserved_energy_std'))
x_cpmd_nose_energy_electrons_mean = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The mean Nosé energy for electrons.
''',
a_legacy=LegacyDefinition(name='x_cpmd_nose_energy_electrons_mean'))
x_cpmd_nose_energy_electrons_std = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
        The standard deviation of Nosé energy for electrons.
''',
a_legacy=LegacyDefinition(name='x_cpmd_nose_energy_electrons_std'))
x_cpmd_nose_energy_ions_mean = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The mean Nosé energy for ions.
''',
a_legacy=LegacyDefinition(name='x_cpmd_nose_energy_ions_mean'))
x_cpmd_nose_energy_ions_std = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The standard deviation of Nosé energy for ions.
''',
a_legacy=LegacyDefinition(name='x_cpmd_nose_energy_ions_std'))
x_cpmd_constraints_energy_mean = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
        The mean constraints energy.
''',
a_legacy=LegacyDefinition(name='x_cpmd_constraints_energy_mean'))
x_cpmd_constraints_energy_std = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The standard deviation of constraints energy.
''',
a_legacy=LegacyDefinition(name='x_cpmd_constraints_energy_std'))
x_cpmd_restraints_energy_mean = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The mean restraints energy.
''',
a_legacy=LegacyDefinition(name='x_cpmd_restraints_energy_mean'))
x_cpmd_restraints_energy_std = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The standard deviation of restraints energy.
''',
a_legacy=LegacyDefinition(name='x_cpmd_restraints_energy_std'))
x_cpmd_ion_displacement_mean = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The mean ion displacement.
''',
a_legacy=LegacyDefinition(name='x_cpmd_ion_displacement_mean'))
x_cpmd_ion_displacement_std = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The standard deviation of ion displacement.
''',
a_legacy=LegacyDefinition(name='x_cpmd_ion_displacement_std'))
x_cpmd_cpu_time_mean = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
The mean cpu time.
''',
a_legacy=LegacyDefinition(name='x_cpmd_cpu_time_mean'))
class x_cpmd_section_timing(MSection):
'''
Contains information about the timings.
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='x_cpmd_section_timing'))
class x_cpmd_section_end_information(MSection):
'''
Contains information printed at the end of a calculation.
'''
m_def = Section(validate=False, a_legacy=LegacyDefinition(name='x_cpmd_section_end_information'))
class section_run(public.section_run):
m_def = Section(validate=False, extends_base_section=True, a_legacy=LegacyDefinition(name='section_run'))
x_cpmd_section_start_information = SubSection(
sub_section=SectionProxy('x_cpmd_section_start_information'),
repeats=True,
a_legacy=LegacyDefinition(name='x_cpmd_section_start_information'))
x_cpmd_section_run_type_information = SubSection(
sub_section=SectionProxy('x_cpmd_section_run_type_information'),
repeats=True,
a_legacy=LegacyDefinition(name='x_cpmd_section_run_type_information'))
x_cpmd_section_system_information = SubSection(
sub_section=SectionProxy('x_cpmd_section_system_information'),
repeats=True,
a_legacy=LegacyDefinition(name='x_cpmd_section_system_information'))
x_cpmd_section_supercell = SubSection(
sub_section=SectionProxy('x_cpmd_section_supercell'),
repeats=True,
a_legacy=LegacyDefinition(name='x_cpmd_section_supercell'))
x_cpmd_section_wave_function_initialization = SubSection(
sub_section=SectionProxy('x_cpmd_section_wave_function_initialization'),
repeats=True,
a_legacy=LegacyDefinition(name='x_cpmd_section_wave_function_initialization'))
x_cpmd_section_md_initialization = SubSection(
sub_section=SectionProxy('x_cpmd_section_md_initialization'),
repeats=True,
a_legacy=LegacyDefinition(name='x_cpmd_section_md_initialization'))
x_cpmd_section_md_averaged_quantities = SubSection(
sub_section=SectionProxy('x_cpmd_section_md_averaged_quantities'),
repeats=True,
a_legacy=LegacyDefinition(name='x_cpmd_section_md_averaged_quantities'))
x_cpmd_section_timing = SubSection(
sub_section=SectionProxy('x_cpmd_section_timing'),
repeats=True,
a_legacy=LegacyDefinition(name='x_cpmd_section_timing'))
x_cpmd_section_end_information = SubSection(
sub_section=SectionProxy('x_cpmd_section_end_information'),
repeats=True,
a_legacy=LegacyDefinition(name='x_cpmd_section_end_information'))
class section_method(public.section_method):
m_def = Section(validate=False, extends_base_section=True, a_legacy=LegacyDefinition(name='section_method'))
x_cpmd_section_xc_information = SubSection(
sub_section=SectionProxy('x_cpmd_section_xc_information'),
repeats=True,
a_legacy=LegacyDefinition(name='x_cpmd_section_xc_information'))
x_cpmd_section_pseudopotential_information = SubSection(
sub_section=SectionProxy('x_cpmd_section_pseudopotential_information'),
repeats=True,
a_legacy=LegacyDefinition(name='x_cpmd_section_pseudopotential_information'))
x_cpmd_section_atom_kinds = SubSection(
sub_section=SectionProxy('x_cpmd_section_atom_kinds'),
repeats=True,
a_legacy=LegacyDefinition(name='x_cpmd_section_atom_kinds'))
class section_single_configuration_calculation(public.section_single_configuration_calculation):
m_def = Section(validate=False, extends_base_section=True, a_legacy=LegacyDefinition(name='section_single_configuration_calculation'))
x_cpmd_section_scf = SubSection(
sub_section=SectionProxy('x_cpmd_section_scf'),
repeats=True,
a_legacy=LegacyDefinition(name='x_cpmd_section_scf'))
x_cpmd_section_final_results = SubSection(
sub_section=SectionProxy('x_cpmd_section_final_results'),
repeats=True,
a_legacy=LegacyDefinition(name='x_cpmd_section_final_results'))
class section_frame_sequence(public.section_frame_sequence):
m_def = Section(validate=False, extends_base_section=True, a_legacy=LegacyDefinition(name='section_frame_sequence'))
x_cpmd_section_geo_opt_initialization = SubSection(
sub_section=SectionProxy('x_cpmd_section_geo_opt_initialization'),
repeats=True,
a_legacy=LegacyDefinition(name='x_cpmd_section_geo_opt_initialization'))
x_cpmd_section_geo_opt_step = SubSection(
sub_section=SectionProxy('x_cpmd_section_geo_opt_step'),
repeats=True,
a_legacy=LegacyDefinition(name='x_cpmd_section_geo_opt_step'))
m_package.__init_metainfo__()
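# Hedged illustration, not part of the metainfo definitions above: it sketches
# how these sections are typically populated, assuming the usual NOMAD
# metainfo behaviour that MSection subclasses can be instantiated directly,
# that m_create() appends a repeating subsection, and that m_to_dict()
# serialises the result. The numeric values are arbitrary.
def _example_build_scf_trace():
    scf = x_cpmd_section_scf()
    for step, (etot, detot) in enumerate([(-17.105, -1.2e-3), (-17.106, -2.5e-4)]):
        iteration = scf.m_create(x_cpmd_section_scf_iteration)
        iteration.x_cpmd_scf_nfi = step + 1
        iteration.x_cpmd_scf_etot = etot
        iteration.x_cpmd_scf_detot = detot
    return scf.m_to_dict()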
|
py | 1a49760c710a7d3276218315bf0c2f1f53761782 | # -*- coding: utf-8 -*-
"""Model unit tests."""
import datetime as dt
import pytest
from MorseFlask.user.models import Role, User
from .factories import UserFactory
@pytest.mark.usefixtures('db')
class TestUser:
"""User tests."""
def test_get_by_id(self):
"""Get user by ID."""
user = User('foo', '[email protected]')
user.save()
retrieved = User.get_by_id(user.id)
assert retrieved == user
def test_created_at_defaults_to_datetime(self):
"""Test creation date."""
user = User(username='foo', email='[email protected]')
user.save()
assert bool(user.created_at)
assert isinstance(user.created_at, dt.datetime)
def test_password_is_nullable(self):
"""Test null password."""
user = User(username='foo', email='[email protected]')
user.save()
assert user.password is None
def test_factory(self, db):
"""Test user factory."""
user = UserFactory(password='myprecious')
db.session.commit()
assert bool(user.username)
assert bool(user.email)
assert bool(user.created_at)
assert user.is_admin is False
assert user.active is True
assert user.check_password('myprecious')
def test_check_password(self):
"""Check password."""
user = User.create(username='foo', email='[email protected]',
password='foobarbaz123')
assert user.check_password('foobarbaz123') is True
assert user.check_password('barfoobaz') is False
def test_full_name(self):
"""User full name."""
user = UserFactory(first_name='Foo', last_name='Bar')
assert user.full_name == 'Foo Bar'
def test_roles(self):
"""Add a role to a user."""
role = Role(name='admin')
role.save()
user = UserFactory()
user.roles.append(role)
user.save()
assert role in user.roles
|
py | 1a49776217ba45d97cc2858ea01f83f38a9a55ea | from __future__ import absolute_import
from __future__ import division
import os.path
import datetime
import csv
import copy
import socket
from itertools import dropwhile
import numpy as np
import scipy.interpolate as interpolate
from sunpy.net import hek
from sunpy.time import parse_time
from sunpy import config
from sunpy import lightcurve
from sunpy.util.net import check_download_file
__all__ = ['get_goes_event_list', 'temp_em', 'goes_chianti_tem']
try:
# Check required data files are present in user's default download dir
# Define location where GOES data files are stored.
# Manually resolve the hostname
HOST = socket.gethostbyname_ex('hesperia.gsfc.nasa.gov')[-1][0]
except socket.gaierror:
HOST = ''
GOES_REMOTE_PATH = "http://{0}/ssw/gen/idl/synoptic/goes/".format(HOST)
# Define location where data files should be downloaded to.
DATA_PATH = config.get("downloads", "download_dir")
# Define variables for file names
FILE_TEMP_COR = "goes_chianti_temp_cor.csv"
FILE_TEMP_PHO = "goes_chianti_temp_pho.csv"
FILE_EM_COR = "goes_chianti_em_cor.csv"
FILE_EM_PHO = "goes_chianti_em_pho.csv"
def get_goes_event_list(timerange, goes_class_filter=None):
"""
Retrieve list of flares detected by GOES within a given time range.
Parameters
----------
timerange: sunpy.time.TimeRange
The time range to download the event list for.
goes_class_filter: (optional) string
A string specifying a minimum GOES class for inclusion in the list,
e.g. 'M1', 'X2'.
"""
# use HEK module to search for GOES events
client = hek.HEKClient()
event_type = 'FL'
tstart = timerange.start
tend = timerange.end
# query the HEK for a list of events detected by the GOES instrument
# between tstart and tend (using a GOES-class filter)
if goes_class_filter:
result = client.query(hek.attrs.Time(tstart, tend),
hek.attrs.EventType(event_type),
hek.attrs.FL.GOESCls > goes_class_filter,
hek.attrs.OBS.Observatory == 'GOES')
else:
result = client.query(hek.attrs.Time(tstart, tend),
hek.attrs.EventType(event_type),
hek.attrs.OBS.Observatory == 'GOES')
# want to condense the results of the query into a more manageable
# dictionary
# keep event data, start time, peak time, end time, GOES-class,
# location, active region source (as per GOES list standard)
# make this into a list of dictionaries
goes_event_list = []
for r in result:
goes_event = {
'event_date': parse_time(r['event_starttime']).date().strftime('%Y-%m-%d'),
'start_time': parse_time(r['event_starttime']),
'peak_time': parse_time(r['event_peaktime']),
'end_time': parse_time(r['event_endtime']),
'goes_class': str(r['fl_goescls']),
'goes_location': (r['event_coord1'], r['event_coord2']),
'noaa_active_region': r['ar_noaanum']
}
goes_event_list.append(goes_event)
return goes_event_list
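# Hedged usage sketch, not part of the original module: queries the HEK for
# GOES flares of class M1 or greater over a half-day window. TimeRange is
# assumed to be importable from sunpy.time; the dates are arbitrary and the
# query needs network access to the HEK service, so the helper is defined
# here but never invoked at import time.
def _example_goes_event_query():
    from sunpy.time import TimeRange
    timerange = TimeRange('2011-06-07 00:00', '2011-06-07 12:00')
    events = get_goes_event_list(timerange, goes_class_filter='M1')
    for event in events:
        print("{0} at {1}, peak {2}".format(event['goes_class'],
                                            event['goes_location'],
                                            event['peak_time']))
    return events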
def temp_em(goeslc, abundances="coronal", download=False, download_dir=DATA_PATH):
"""
Calculates and adds temperature and EM to a GOESLightCurve.
This function calculates the isothermal temperature and volume
emission measure of the solar soft X-ray emitting plasma observed by
the GOES/XRS. This is done using the function goes_chianti_tem().
See that function for more details. Once the temperature and
emission measure are found, they are added to a copy of the
original GOESLightCurve object as goeslc.data.temperature and
goeslc.data.em where goeslc is the GOESLightCurve object.
Parameters
----------
goeslc : GOESLightCurve object
abundances : (optional) string equalling either 'coronal' or 'photospheric'.
States whether photospheric or coronal abundances should be assumed.
Default='coronal'
download : (optional) bool
If True, the GOES temperature and emission measure data files are downloaded.
It is important to do this if a new version of the files has been
generated due to a new CHIANTI version being released or the launch of
new GOES satellites since these files were originally downloaded.
Default=False
download_dir : (optional) string
The directory to download the GOES temperature and emission measure
data files to.
Default=SunPy default download directory
Returns
-------
goeslc.data.temperature : pandas.core.series.Series
Array of temperature values [MK]
goeslc.data.em : pandas.core.series.Series
Array of volume emission measure values [10**49 cm**-3]
Examples
--------
    >>> from sunpy import lightcurve as lc
>>> goeslc = lc.GOESLightCurve.create(time1, time2)
>>> goeslc.data
xrsa xrsb
2014-01-01 00:00:00 7e-07 7e-06
2014-01-01 00:00:02 7e-07 7e-06
2014-01-01 00:00:04 7e-07 7e-06
2014-01-01 00:00:06 7e-07 7e-06
>>> goeslc_new = temp_em(goeslc)
>>> goeslc_new.data
xrsa xrsb temperature em
2014-01-01 00:00:00 7e-07 7e-06 11.28295376 4.78577516e+48
2014-01-01 00:00:02 7e-07 7e-06 11.28295376 4.78577516e+48
2014-01-01 00:00:04 7e-07 7e-06 11.28295376 4.78577516e+48
2014-01-01 00:00:06 7e-07 7e-06 11.28295376 4.78577516e+48
"""
# Check that input argument is of correct type
if not isinstance(goeslc, lightcurve.GOESLightCurve):
raise TypeError("goeslc must be a GOESLightCurve object.")
# Find temperature and emission measure with goes_chianti_tem
temp, em = goes_chianti_tem(goeslc.data.xrsb, goeslc.data.xrsa,
satellite=goeslc.meta["TELESCOP"].split()[1],
date=goeslc.data.index[0],
abundances=abundances, download=download,
download_dir=download_dir)
# Enter results into new version of GOES LightCurve Object
goeslc_new = copy.deepcopy(goeslc)
goeslc_new.data["temperature"] = temp
goeslc_new.data["em"] = em
return goeslc_new
def goes_chianti_tem(longflux, shortflux, satellite=8,
                     date=None, abundances="coronal",
download=False, download_dir=DATA_PATH):
"""
Calculates temperature and emission measure from GOES/XRS data.
This function calculates the isothermal temperature and volume
emission measure of the solar soft X-ray emitting plasma observed by
the GOES/XRS. This is done using the observed flux ratio of the
short (0.5-4 angstrom) to long (1-8 angstrom) channels.
Parameters
----------
longflux, shortflux : ndarray or array-like which can be converted to float64 type, such as an np.array, tuple, list.
Arrays containing the long and short GOES/XRS flux measurements
respectively as a function of time. Must be of same length. [W/m**2].
satellite : int (optional)
Number of GOES satellite used to make observations, important for
correct calibration of data.
Default=8
date : datetime object or str
        Date when observations were made. Important for correct calibration.
Default=today
abundances : (optional) string equalling either 'coronal' or 'photospheric'.
States whether photospheric or coronal abundances should be assumed.
Default='coronal'
download : (optional) bool
If True, the GOES temperature and emission measure data files are
downloaded. It is important to do this if a new version of the files
has been generated due to a new CHIANTI version being released or the
launch of new GOES satellites since these files were originally downloaded.
Default=False
download_dir : (optional) string
The directory to download the GOES temperature and emission measure
data files to.
Default=SunPy default download directory
Returns
-------
temp : numpy array
Array of temperature values of same length as longflux and shortflux. [MK]
em : numpy array
Array of volume emission measure values of same length as longflux
and shortflux. [10**49 cm**-3]
Notes
-----
The temperature and volume emission measure are calculated here
using the methods of White et al. (2005) who used the
CHIANTI atomic physics database to model the response of the ratio
of the short (0.5-4 angstrom) to long (1-8 angstrom) channels of the
XRSs onboard various GOES satellites. This method assumes an
isothermal plasma, the ionisation equilibria of
Mazzotta et al. (1998), and a constant density of 10**10 cm**-3.
(See White et al. 2005 for justification of this last assumption.)
This function is based on goes_chianti_tem.pro in SolarSoftWare
written in IDL by Stephen White.
Recent fluxes released to the public are scaled to be consistent
with GOES-7. In fact these recent fluxes are correct and so this
correction must be removed before proceeding to use transfer
functions.
Email Rodney Viereck (NOAA) for more information.
Measurements of short channel flux of less than 1e-10 W/m**2 or
long channel flux less than 3e-8 W/m**2 are not considered good.
    Ratio values corresponding to such fluxes are set to 0.003.
References
----------
.. [1] White, S. M., Thomas, R. J., & Schwartz, R. A. 2005, Sol. Phys.,
227, 231
.. [2] Mazzotta, P., Mazzitelli, G., Colafrancesco, S., & Vittorio, N.
1998, A&AS, 133, 339
Examples
--------
>>> longflux = np.array([7e-6, 7e-6])
>>> shortflux = np.array([7e-7, 7e-7])
>>> temp, em = goes_chianti_tem(longflux, shortflux, satellite=15,
date='2014-04-16', abundances="coronal")
>>> temp
array([11.28295376, 11.28295376])
>>> em
array([ 4.78577516e+48, 4.78577516e+48])
"""
# ENSURE INPUTS ARE OF CORRECT TYPE AND VALID VALUES
longflux = np.asanyarray(longflux, dtype=np.float64)
shortflux = np.asanyarray(shortflux, dtype=np.float64)
int(satellite)
if satellite < 1:
raise ValueError("satellite must be the number of a "
"valid GOES satellite (>1).")
    if date is None:
        date = datetime.datetime.today()
    date = parse_time(date)
# Check flux arrays are of same length.
if len(longflux) != len(shortflux):
raise ValueError(
"longflux and shortflux must have same number of elements.")
# PREPARE DATA
# GOES 6 long channel flux before 1983-Jun-28 must be corrected by a
# factor of 4.43/5.32
    if date < datetime.datetime(1983, 6, 28) and satellite == 6:
longflux_corrected = longflux * (4.43/5.32)
else:
longflux_corrected = longflux
# Un-scale fluxes if GOES satellite is after 7. See 2nd paragraph
# in Notes section of docstring above.
if satellite > 7:
longflux_corrected = longflux_corrected / 0.7
shortflux_corrected = shortflux / 0.85
else:
shortflux_corrected = shortflux
# Calculate short to long channel ratio.
# Data which is not good have their ratio value set to 0.003.
# See Notes section in docstring above.
index = np.logical_or(shortflux_corrected < 1e-10,
longflux_corrected < 3e-8)
fluxratio = shortflux_corrected / longflux_corrected
fluxratio[index] = 0.003
# FIND TEMPERATURE AND EMISSION MEASURE FROM FUNCTIONS BELOW
temp = _goes_get_chianti_temp(fluxratio, satellite=satellite,
abundances=abundances, download=download,
download_dir=download_dir)
em = _goes_get_chianti_em(longflux_corrected, temp, satellite=satellite,
abundances=abundances, download=download,
download_dir=download_dir)
return temp, em
def _goes_get_chianti_temp(fluxratio, satellite=8, abundances="coronal",
download=False, download_dir=DATA_PATH):
"""
Calculates temperature from GOES flux ratio.
This function calculates the isothermal temperature of the solar
soft X-ray emitting plasma observed by the GOES/XRS from the
observed flux ratio of the short (0.5-4 angstrom) to
long (1-8 angstrom) channels. This function is not intended to be
called directly but by goes_chianti_tem(), although it can be used
independently. However, if used independently data preparation,
such as correctly rescaling fluxes for some satellites etc. will
not be carried out. This is done in goes_chianti_tem().
Parameters
----------
fluxratio : ndarray or array-like which can be converted to float64 type, such as an np.array, tuple, list.
Array containing the ratio of short channel to long channel GOES/XRS
flux measurements.
satellite : int (optional)
Number of GOES satellite used to make observations. Important for
correct calibration of data.
Default=8
abundances : (optional) string equalling either 'coronal' or 'photospheric'.
States whether photospheric or coronal abundances should be assumed.
Default='coronal'
download : (optional) bool
If True, the GOES temperature data files are downloaded.
It is important to do this if a new version of the files has been
generated due to a new CHIANTI version being released or the launch
of new GOES satellites since these files were originally downloaded.
Default=False
download_dir : (optional) string
The directory to download the GOES temperature and emission measure
data files to.
Default=SunPy default download directory
Returns
-------
temp : numpy array
Array of temperature values of same length as longflux and shortflux. [MK]
Notes
-----
This function uses csv files representing the modelled relationship
between temperature of the soft X-ray emitting plasma and the
short to long channel GOES flux ratio. goes_chianti_temp_cor.csv
is used when coronal abundances are assumed while
goes_chianti_temp_pho.csv is used when photospheric abundances are
assumed. (See make_goes_chianti_temp.py for more detail.)
These files were calculated using the methods of White et al. (2005)
who used the CHIANTI atomic physics database to model the response
of the ratio of the short (0.5-4 angstrom) to long (1-8 angstrom)
channels of the XRSs onboard various GOES satellites. This method
assumes an isothermal plasma, the ionisation equilibria of
Mazzotta et al. (1998), and a constant density of 10**10 cm**-3.
(See White et al. 2005 for justification of this last assumption.)
This function is based on goes_get_chianti_temp.pro in
SolarSoftWare written in IDL by Stephen White.
For correct preparation of GOES data before calculating temperature
see goes_chianti_tem() (Notes section of docstring).
References
----------
.. [1] White, S. M., Thomas, R. J., & Schwartz, R. A. 2005, Sol. Phys.,
227, 231
.. [2] Mazzotta, P., Mazzitelli, G., Colafrancesco, S., & Vittorio, N.
1998, A&AS, 133, 339
Examples
--------
>>> fluxratio = np.array([0.1,0.1])
>>> temp = _goes_get_chianti_temp(fluxratio, satellite=15,
abundances="coronal")
>>> temp
array([11.28295376, 11.28295376])
"""
# If download kwarg is True, or required data files cannot be
# found locally, download required data files.
check_download_file(FILE_TEMP_COR, GOES_REMOTE_PATH, download_dir,
replace=download)
check_download_file(FILE_TEMP_PHO, GOES_REMOTE_PATH, download_dir,
replace=download)
# check inputs are correct
fluxratio = np.asanyarray(fluxratio, dtype=np.float64)
int(satellite)
if satellite < 1:
raise ValueError("satellite must be the number of a "
"valid GOES satellite (>1).")
# if abundance input is valid create file suffix, abund, equalling
# of 'cor' or 'pho'.
if abundances == "coronal":
data_file = FILE_TEMP_COR
elif abundances == "photospheric":
data_file = FILE_TEMP_PHO
else:
raise ValueError("abundances must be a string equalling "
"'coronal' or 'photospheric'.")
# Initialize lists to hold model data of flux ratio - temperature
# relationship read in from csv file
modeltemp = [] # modelled temperature is in log_10 space in units of MK
modelratio = []
# Determine name of column in csv file containing model ratio values
# for relevant GOES satellite
label = "ratioGOES{0}".format(satellite)
# Read data representing appropriate temperature--flux ratio
# relationship depending on satellite number and assumed abundances.
with open(os.path.join(DATA_PATH, data_file), "r") as csvfile:
startline = dropwhile(lambda l: l.startswith("#"), csvfile)
csvreader = csv.DictReader(startline, delimiter=";")
for row in csvreader:
modeltemp.append(float(row["log10temp_MK"]))
modelratio.append(float(row[label]))
modeltemp = np.asarray(modeltemp)
modelratio = np.asarray(modelratio)
# Ensure input values of flux ratio are within limits of model table
if np.min(fluxratio) < np.min(modelratio) or \
np.max(fluxratio) > np.max(modelratio):
raise ValueError(
"For GOES {0}, all values in fluxratio input must be within " +
"the range {1} - {2}.".format(satellite, np.min(modelratio),
np.max(modelratio)))
# Perform spline fit to model data to get temperatures for input
# values of flux ratio
spline = interpolate.splrep(modelratio, modeltemp, s=0)
temp = 10.**interpolate.splev(fluxratio, spline, der=0)
return temp
def _goes_get_chianti_em(longflux, temp, satellite=8, abundances="coronal",
download=False, download_dir=DATA_PATH):
"""
Calculates emission measure from GOES 1-8A flux and temperature.
This function calculates the emission measure of the solar
soft X-ray emitting plasma observed by the GOES/XRS from the
the ratio of the isothermal temperature and observed long channel
(1-8 angstrom) flux which scales with the emission measure.
This function is not intended to be called directly but by
goes_chianti_tem(), although it can be used independently.
However, if used independently data preparation, such as correctly
rescaling fluxes for some satellites etc. will not be carried out.
This is done in goes_chianti_tem().
Parameters
----------
longflux : ndarray or array-like which can be converted to float64 type, such as an np.array, tuple, list.
Array containing the observed GOES/XRS long channel flux
temp : ndarray or array-like which can be converted to float64 type, such as an np.array, tuple, list.
Array containing the GOES temperature
satellite : int (optional)
Number of GOES satellite used to make observations.
Important for correct calibration of data.
Default=8
abundances : (optional) string equalling either 'coronal' or 'photospheric'.
States whether photospheric or coronal abundances should be assumed.
Default='coronal'
download : (optional) bool
If True, the GOES emission measure data files are downloaded.
It is important to do this if a new version of the files has been
generated due to a new CHIANTI version being released or the launch of
new GOES satellites since these files were originally downloaded.
Default=False
download_dir : (optional) string
The directory to download the GOES temperature and emission measure
data files to.
Default=SunPy default download directory
Returns
-------
em : numpy array
Array of emission measure values of same length as longflux
and temp. [cm**-3]
Notes
-----
This function uses csv files representing the modelled relationship
between the temperature of the solar soft X-ray emitting plasma
and the resulting observed flux in the GOES/XRS long channel
(1-8 angstroms). goes_chianti_em_cor.csv is used when coronal
abundances are assumed while goes_chianti_em_pho.csv is used when
photospheric abundances are assumed.
(See make_goes_chianti_temp.py for more detail.)
These files were calculated using the methods of White et al. (2005)
who used the CHIANTI atomic physics database and GOES transfer
functions to model the response of the long channel to the
temperture of the emitting plasma for XRSs onboard various GOES
satellites. The emission measure can then be found by scaling the
ratio of these two properties. This method assumes an isothermal
plasma, the ionisation equilibria of Mazzotta et al. (1998), and
a constant density of 10**10 cm**-3.
(See White et al. 2005 for justification of this last assumption.)
This function is based on goes_get_chianti_temp.pro in
SolarSoftWare written in IDL by Stephen White.
For correct preparation of GOES data before calculating temperature
see goes_chianti_tem() (Notes section of docstring).
References
----------
.. [1] White, S. M., Thomas, R. J., & Schwartz, R. A. 2005, Sol. Phys.,
227, 231
.. [2] Mazzotta, P., Mazzitelli, G., Colafrancesco, S., & Vittorio, N.
1998, A&AS, 133, 339
Examples
--------
>>> longflux = np.array([7e-6,7e-6])
>>> temp = np.array([11,11])
>>> em = _goes_get_chianti_em(longflux, temp, satellite=15,
abundances="coronal")
>>> em
array([ 3.45200672e+48, 3.45200672e+48])
"""
# If download kwarg is True, or required data files cannot be
# found locally, download required data files.
check_download_file(FILE_EM_COR, GOES_REMOTE_PATH, download_dir,
replace=download)
check_download_file(FILE_EM_PHO, GOES_REMOTE_PATH, download_dir,
replace=download)
# Check inputs are of correct type
longflux = np.asanyarray(longflux, dtype=np.float64)
temp = np.asanyarray(temp, dtype=np.float64)
int(satellite)
if satellite < 1:
raise ValueError("satellite must be the number of a "
"valid GOES satellite (>1).")
# if abundance input is valid create file suffix, abund, equalling
# of 'cor' or 'pho'.
if abundances == "coronal":
data_file = FILE_EM_COR
elif abundances == "photospheric":
data_file = FILE_EM_PHO
else:
raise ValueError("abundances must be a string equalling "
"'coronal' or 'photospheric'.")
# check input arrays are of same length
if len(longflux) != len(temp):
raise ValueError("longflux and temp must have same number of "
"elements.")
# Initialize lists to hold model data of temperature - long channel
# flux relationship read in from csv file.
    modeltemp = []  # modelled temperature is in log_10 space in units of MK
modelflux = []
# Determine name of column in csv file containing model ratio values
# for relevant GOES satellite
label = "longfluxGOES{0}".format(satellite)
# Read data representing appropriate temperature--long flux
# relationship depending on satellite number and assumed abundances.
with open(os.path.join(DATA_PATH, data_file), "r") as csvfile:
startline = dropwhile(lambda l: l.startswith("#"), csvfile)
csvreader = csv.DictReader(startline, delimiter=";")
for row in csvreader:
modeltemp.append(float(row["log10temp_MK"]))
modelflux.append(float(row[label]))
modeltemp = np.asarray(modeltemp)
modelflux = np.asarray(modelflux)
# Ensure input values of flux ratio are within limits of model table
if np.min(np.log10(temp)) < np.min(modeltemp) or \
np.max(np.log10(temp)) > np.max(modeltemp) or \
np.isnan(np.min(np.log10(temp))):
raise ValueError("All values in temp must be within the range "
"{0} - {1} MK.".format(np.min(10**modeltemp),
np.max(10**modeltemp)))
# Perform spline fit to model data
spline = interpolate.splrep(modeltemp, modelflux, s=0)
denom = interpolate.splev(np.log10(temp), spline, der=0)
em = longflux/denom * 1e55
return em
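# Hedged end-to-end sketch, not part of the original module: it reruns the
# flux values used in the docstring examples above. On a fresh installation
# the call may download the CHIANTI response tables into DATA_PATH, so the
# demo is kept behind the __main__ guard and needs network access in that
# case.
if __name__ == "__main__":
    example_longflux = np.array([7e-6, 7e-6])
    example_shortflux = np.array([7e-7, 7e-7])
    example_temp, example_em = goes_chianti_tem(
        example_longflux, example_shortflux, satellite=15,
        date='2014-04-16', abundances="coronal")
    print("temperature [MK]: {0}".format(example_temp))
    print("emission measure [1e49 cm**-3]: {0}".format(example_em))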
|
py | 1a49780c5a1f5d2744aae1d3a8c742a9c1dcdfc4 | # Copyright 2015 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import random
import netaddr
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from oslo_db import exception as db_exc
from oslo_log import log
from oslo_utils import uuidutils
from neutron._i18n import _, _LE
from neutron.ipam import driver as ipam_base
from neutron.ipam.drivers.neutrondb_ipam import db_api as ipam_db_api
from neutron.ipam import exceptions as ipam_exc
from neutron.ipam import requests as ipam_req
from neutron.ipam import subnet_alloc
from neutron.ipam import utils as ipam_utils
LOG = log.getLogger(__name__)
class NeutronDbSubnet(ipam_base.Subnet):
"""Manage IP addresses for Neutron DB IPAM driver.
This class implements the strategy for IP address allocation and
deallocation for the Neutron DB IPAM driver.
"""
@classmethod
def create_allocation_pools(cls, subnet_manager, context, pools, cidr):
for pool in pools:
# IPv6 addresses that start '::1', '::2', etc cause IP version
# ambiguity when converted to integers by pool.first and pool.last.
# Infer the IP version from the subnet cidr.
ip_version = cidr.version
subnet_manager.create_pool(
context,
netaddr.IPAddress(pool.first, ip_version).format(),
netaddr.IPAddress(pool.last, ip_version).format())
@classmethod
def create_from_subnet_request(cls, subnet_request, ctx):
ipam_subnet_id = uuidutils.generate_uuid()
subnet_manager = ipam_db_api.IpamSubnetManager(
ipam_subnet_id,
subnet_request.subnet_id)
# Create subnet resource
subnet_manager.create(ctx)
# If allocation pools are not specified, define them around
# the subnet's gateway IP
if not subnet_request.allocation_pools:
pools = ipam_utils.generate_pools(subnet_request.subnet_cidr,
subnet_request.gateway_ip)
else:
pools = subnet_request.allocation_pools
# Create IPAM allocation pools
cls.create_allocation_pools(subnet_manager, ctx, pools,
subnet_request.subnet_cidr)
return cls(ipam_subnet_id,
ctx,
cidr=subnet_request.subnet_cidr,
allocation_pools=pools,
gateway_ip=subnet_request.gateway_ip,
tenant_id=subnet_request.tenant_id,
subnet_id=subnet_request.subnet_id)
@classmethod
def load(cls, neutron_subnet_id, ctx):
"""Load an IPAM subnet from the database given its neutron ID.
:param neutron_subnet_id: neutron subnet identifier.
"""
ipam_subnet = ipam_db_api.IpamSubnetManager.load_by_neutron_subnet_id(
ctx, neutron_subnet_id)
if not ipam_subnet:
LOG.error(_LE("IPAM subnet referenced to "
"Neutron subnet %s does not exist"),
neutron_subnet_id)
raise n_exc.SubnetNotFound(subnet_id=neutron_subnet_id)
pools = []
for pool in ipam_subnet.allocation_pools:
pools.append(netaddr.IPRange(pool['first_ip'], pool['last_ip']))
neutron_subnet = cls._fetch_subnet(ctx, neutron_subnet_id)
return cls(ipam_subnet['id'],
ctx,
cidr=neutron_subnet['cidr'],
allocation_pools=pools,
gateway_ip=neutron_subnet['gateway_ip'],
tenant_id=neutron_subnet['tenant_id'],
subnet_id=neutron_subnet_id)
@classmethod
def _fetch_subnet(cls, context, id):
plugin = directory.get_plugin()
return plugin._get_subnet(context, id)
def __init__(self, internal_id, ctx, cidr=None,
allocation_pools=None, gateway_ip=None, tenant_id=None,
subnet_id=None):
# NOTE: In theory it could have been possible to grant the IPAM
# driver direct access to the database. While this is possible,
# it would have led to duplicate code and/or non-trivial
# refactorings in neutron.db.db_base_plugin_v2.
# This is because in the Neutron V2 plugin logic DB management is
# encapsulated within the plugin.
self._cidr = cidr
self._pools = allocation_pools
self._gateway_ip = gateway_ip
self._tenant_id = tenant_id
self._subnet_id = subnet_id
self.subnet_manager = ipam_db_api.IpamSubnetManager(internal_id,
self._subnet_id)
self._context = ctx
def _verify_ip(self, context, ip_address):
"""Verify whether IP address can be allocated on subnet.
:param context: neutron api request context
:param ip_address: String representing the IP address to verify
:raises: InvalidInput, IpAddressAlreadyAllocated
"""
        # Ensure that the requested IP address has not already been allocated
if not self.subnet_manager.check_unique_allocation(context,
ip_address):
raise ipam_exc.IpAddressAlreadyAllocated(
subnet_id=self.subnet_manager.neutron_id,
ip=ip_address)
# Ensure that the IP is valid on the subnet
if not ipam_utils.check_subnet_ip(self._cidr, ip_address):
raise ipam_exc.InvalidIpForSubnet(
subnet_id=self.subnet_manager.neutron_id,
ip=ip_address)
def _generate_ip(self, context, prefer_next=False):
"""Generate an IP address from the set of available addresses."""
ip_allocations = netaddr.IPSet()
for ipallocation in self.subnet_manager.list_allocations(context):
ip_allocations.add(ipallocation.ip_address)
for ip_pool in self.subnet_manager.list_pools(context):
ip_set = netaddr.IPSet()
ip_set.add(netaddr.IPRange(ip_pool.first_ip, ip_pool.last_ip))
av_set = ip_set.difference(ip_allocations)
if av_set.size == 0:
continue
if prefer_next:
window = 1
else:
# Compute a value for the selection window
window = min(av_set.size, 30)
ip_index = random.randint(1, window)
candidate_ips = list(itertools.islice(av_set, ip_index))
allocated_ip = candidate_ips[
random.randint(0, len(candidate_ips) - 1)]
return str(allocated_ip), ip_pool.id
raise ipam_exc.IpAddressGenerationFailure(
subnet_id=self.subnet_manager.neutron_id)
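    # Illustrative walk-through (not part of the upstream driver) of the
    # selection window used in _generate_ip above, assuming a pool with 100
    # free addresses and prefer_next=False:
    #
    #   window        = min(100, 30)               # -> 30
    #   ip_index      = random.randint(1, 30)      # e.g. 7
    #   candidate_ips = first 7 free addresses of av_set
    #   allocated_ip  = random member of candidate_ips
    #
    # Randomising within a small window makes concurrent allocation requests
    # less likely to pick the same address.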
def allocate(self, address_request):
# NOTE(pbondar): Ipam driver is always called in context of already
# running transaction, which is started on create_port or upper level.
# To be able to do rollback/retry actions correctly ipam driver
# should not create new nested transaction blocks.
all_pool_id = None
# NOTE(salv-orlando): It would probably better to have a simpler
# model for address requests and just check whether there is a
# specific IP address specified in address_request
if isinstance(address_request, ipam_req.SpecificAddressRequest):
# This handles both specific and automatic address requests
# Check availability of requested IP
ip_address = str(address_request.address)
self._verify_ip(self._context, ip_address)
else:
prefer_next = isinstance(address_request,
ipam_req.PreferNextAddressRequest)
ip_address, all_pool_id = self._generate_ip(self._context,
prefer_next)
# Create IP allocation request object
# The only defined status at this stage is 'ALLOCATED'.
# More states will be available in the future - e.g.: RECYCLABLE
try:
with self._context.session.begin(subtransactions=True):
# NOTE(kevinbenton): we use a subtransaction to force
# a flush here so we can capture DBReferenceErrors due
# to concurrent subnet deletions. (galera would deadlock
# later on final commit)
self.subnet_manager.create_allocation(self._context,
ip_address)
except db_exc.DBReferenceError:
raise n_exc.SubnetNotFound(
subnet_id=self.subnet_manager.neutron_id)
return ip_address
def deallocate(self, address):
# This is almost a no-op because the Neutron DB IPAM driver does not
# delete IPAllocation objects at every deallocation. The only
# operation it performs is to delete an IPRequest entry.
count = self.subnet_manager.delete_allocation(
self._context, address)
# count can hardly be greater than 1, but it can be 0...
if not count:
raise ipam_exc.IpAddressAllocationNotFound(
subnet_id=self.subnet_manager.neutron_id,
ip_address=address)
def _no_pool_changes(self, context, pools):
"""Check if pool updates in db are required."""
db_pools = self.subnet_manager.list_pools(context)
iprange_pools = [netaddr.IPRange(pool.first_ip, pool.last_ip)
for pool in db_pools]
return pools == iprange_pools
def update_allocation_pools(self, pools, cidr):
# Pools have already been validated in the subnet request object which
# was sent to the subnet pool driver. Further validation should not be
# required.
if self._no_pool_changes(self._context, pools):
return
self.subnet_manager.delete_allocation_pools(self._context)
self.create_allocation_pools(self.subnet_manager, self._context, pools,
cidr)
self._pools = pools
def get_details(self):
"""Return subnet data as a SpecificSubnetRequest"""
return ipam_req.SpecificSubnetRequest(
self._tenant_id, self.subnet_manager.neutron_id,
self._cidr, self._gateway_ip, self._pools)
class NeutronDbPool(subnet_alloc.SubnetAllocator):
"""Subnet pools backed by Neutron Database.
As this driver does not implement yet the subnet pool concept, most
operations are either trivial or no-ops.
"""
def get_subnet(self, subnet_id):
"""Retrieve an IPAM subnet.
:param subnet_id: Neutron subnet identifier
:returns: a NeutronDbSubnet instance
"""
return NeutronDbSubnet.load(subnet_id, self._context)
def allocate_subnet(self, subnet_request):
"""Create an IPAMSubnet object for the provided cidr.
This method does not actually do any operation in the driver, given
its simplified nature.
:param cidr: subnet's CIDR
:returns: a NeutronDbSubnet instance
"""
if self._subnetpool:
subnet = super(NeutronDbPool, self).allocate_subnet(subnet_request)
subnet_request = subnet.get_details()
# SubnetRequest must be an instance of SpecificSubnet
if not isinstance(subnet_request, ipam_req.SpecificSubnetRequest):
raise ipam_exc.InvalidSubnetRequestType(
subnet_type=type(subnet_request))
return NeutronDbSubnet.create_from_subnet_request(subnet_request,
self._context)
def update_subnet(self, subnet_request):
"""Update subnet info the in the IPAM driver.
The only update subnet information the driver needs to be aware of
are allocation pools.
"""
if not subnet_request.subnet_id:
raise ipam_exc.InvalidSubnetRequest(
reason=_("An identifier must be specified when updating "
"a subnet"))
if subnet_request.allocation_pools is None:
LOG.debug("Update subnet request for subnet %s did not specify "
"new allocation pools, there is nothing to do",
subnet_request.subnet_id)
return
subnet = NeutronDbSubnet.load(subnet_request.subnet_id, self._context)
cidr = netaddr.IPNetwork(subnet._cidr)
subnet.update_allocation_pools(subnet_request.allocation_pools, cidr)
return subnet
def remove_subnet(self, subnet_id):
"""Remove data structures for a given subnet.
IPAM-related data has no foreign key relationships to neutron subnet,
so removing ipam subnet manually
"""
count = ipam_db_api.IpamSubnetManager.delete(self._context,
subnet_id)
if count < 1:
LOG.error(_LE("IPAM subnet referenced to "
"Neutron subnet %s does not exist"),
subnet_id)
raise n_exc.SubnetNotFound(subnet_id=subnet_id)
def needs_rollback(self):
return False
|
py | 1a497820e62e1206a0162bf33aac93e5ae0e604c | import warnings
import numpy as np
from vispy.color import Colormap as VispyColormap
from vispy.scene.node import Node
from ..utils.translations import trans
from .image import Image as ImageNode
from .utils_gl import fix_data_dtype
from .vispy_base_layer import VispyBaseLayer
from .volume import Volume as VolumeNode
class ImageLayerNode:
def __init__(self, custom_node: Node = None):
self._custom_node = custom_node
self._image_node = ImageNode(None, method='auto')
self._volume_node = VolumeNode(
np.zeros((1, 1, 1), dtype=np.float32), clim=[0, 1]
)
def get_node(self, ndisplay: int) -> Node:
# Return custom node if we have one.
if self._custom_node is not None:
return self._custom_node
# Return Image or Volume node based on 2D or 3D.
if ndisplay == 2:
return self._image_node
return self._volume_node
class VispyImageLayer(VispyBaseLayer):
def __init__(self, layer, node=None):
# Use custom node from caller, or our standard image/volume nodes.
self._layer_node = ImageLayerNode(node)
# Default to 2D (image) node.
super().__init__(layer, self._layer_node.get_node(2))
self._array_like = True
self.layer.events.rendering.connect(self._on_rendering_change)
self.layer.events.interpolation.connect(self._on_interpolation_change)
self.layer.events.colormap.connect(self._on_colormap_change)
self.layer.events.contrast_limits.connect(
self._on_contrast_limits_change
)
self.layer.events.gamma.connect(self._on_gamma_change)
self.layer.events.iso_threshold.connect(self._on_iso_threshold_change)
self.layer.events.attenuation.connect(self._on_attenuation_change)
self.layer.experimental_slicing_plane.events.enabled.connect(
self._on_experimental_slicing_plane_enabled_change
)
self.layer.experimental_slicing_plane.events.position.connect(
self._on_experimental_slicing_plane_position_change
)
self.layer.experimental_slicing_plane.events.thickness.connect(
self._on_experimental_slicing_plane_thickness_change
)
self.layer.experimental_slicing_plane.events.normal.connect(
self._on_experimental_slicing_plane_normal_change
)
self._on_display_change()
self._on_data_change()
def _on_display_change(self, data=None):
parent = self.node.parent
self.node.parent = None
self.node = self._layer_node.get_node(self.layer._ndisplay)
if data is None:
data = np.zeros((1,) * self.layer._ndisplay)
if self.layer._empty:
self.node.visible = False
else:
self.node.visible = self.layer.visible
if self.layer.loaded:
self.node.set_data(data)
self.node.parent = parent
self.node.order = self.order
self.reset()
def _on_data_change(self, event=None):
if not self.layer.loaded:
# Do nothing if we are not yet loaded. Calling astype below could
# be very expensive. Lets not do it until our data has been loaded.
return
self._set_node_data(self.node, self.layer._data_view)
def _set_node_data(self, node, data):
"""Our self.layer._data_view has been updated, update our node."""
data = fix_data_dtype(data)
if self.layer._ndisplay == 3 and self.layer.ndim == 2:
data = np.expand_dims(data, axis=0)
# Check if data exceeds MAX_TEXTURE_SIZE and downsample
if self.MAX_TEXTURE_SIZE_2D is not None and self.layer._ndisplay == 2:
data = self.downsample_texture(data, self.MAX_TEXTURE_SIZE_2D)
elif (
self.MAX_TEXTURE_SIZE_3D is not None and self.layer._ndisplay == 3
):
data = self.downsample_texture(data, self.MAX_TEXTURE_SIZE_3D)
        # Check if ndisplay has changed and the current node type needs updating
if (
self.layer._ndisplay == 3 and not isinstance(node, VolumeNode)
) or (self.layer._ndisplay == 2 and not isinstance(node, ImageNode)):
self._on_display_change(data)
else:
node.set_data(data)
if self.layer._empty:
node.visible = False
else:
node.visible = self.layer.visible
# Call to update order of translation values with new dims:
self._on_matrix_change()
node.update()
def _on_interpolation_change(self, event=None):
self.node.interpolation = self.layer.interpolation
def _on_rendering_change(self, event=None):
if isinstance(self.node, VolumeNode):
self.node.method = self.layer.rendering
self._on_attenuation_change()
self._on_iso_threshold_change()
def _on_colormap_change(self, event=None):
self.node.cmap = VispyColormap(*self.layer.colormap)
def _on_contrast_limits_change(self, event=None):
self.node.clim = self.layer.contrast_limits
def _on_gamma_change(self, event=None):
if len(self.node.shared_program.frag._set_items) > 0:
self.node.gamma = self.layer.gamma
def _on_iso_threshold_change(self, event=None):
if isinstance(self.node, VolumeNode):
self.node.threshold = self.layer.iso_threshold
def _on_attenuation_change(self, event=None):
if isinstance(self.node, VolumeNode):
self.node.attenuation = self.layer.attenuation
def _on_experimental_slicing_plane_enabled_change(self, event=None):
if isinstance(self.node, VolumeNode):
if self.layer.experimental_slicing_plane.enabled is True:
raycasting_mode = 'plane'
else:
raycasting_mode = 'volume'
self.node.raycasting_mode = raycasting_mode
def _on_experimental_slicing_plane_thickness_change(self, event=None):
if isinstance(self.node, VolumeNode):
self.node.plane_thickness = (
self.layer.experimental_slicing_plane.thickness
)
def _on_experimental_slicing_plane_position_change(self, event=None):
if isinstance(self.node, VolumeNode):
self.node.plane_position = (
self.layer.experimental_slicing_plane.position
)
def _on_experimental_slicing_plane_normal_change(self, event=None):
if isinstance(self.node, VolumeNode):
self.node.plane_normal = (
self.layer.experimental_slicing_plane.normal
)
def reset(self, event=None):
self._reset_base()
self._on_interpolation_change()
self._on_colormap_change()
self._on_contrast_limits_change()
self._on_gamma_change()
self._on_rendering_change()
self._on_experimental_slicing_plane_enabled_change()
self._on_experimental_slicing_plane_position_change()
self._on_experimental_slicing_plane_normal_change()
self._on_experimental_slicing_plane_thickness_change()
def downsample_texture(self, data, MAX_TEXTURE_SIZE):
"""Downsample data based on maximum allowed texture size.
Parameters
----------
data : array
Data to be downsampled if needed.
MAX_TEXTURE_SIZE : int
Maximum allowed texture size.
Returns
-------
data : array
Data that now fits inside texture.
"""
if np.any(np.greater(data.shape, MAX_TEXTURE_SIZE)):
if self.layer.multiscale:
raise ValueError(
trans._(
"Shape of in dividual tiles in multiscale {shape} cannot exceed GL_MAX_TEXTURE_SIZE {texture_size}. Rendering is currently in {ndisplay}D mode.",
deferred=True,
shape=data.shape,
texture_size=MAX_TEXTURE_SIZE,
ndisplay=self.layer._ndisplay,
)
)
warnings.warn(
trans._(
"data shape {shape} exceeds GL_MAX_TEXTURE_SIZE {texture_size} in at least one axis and will be downsampled. Rendering is currently in {ndisplay}D mode.",
deferred=True,
shape=data.shape,
texture_size=MAX_TEXTURE_SIZE,
ndisplay=self.layer._ndisplay,
)
)
downsample = np.ceil(
np.divide(data.shape, MAX_TEXTURE_SIZE)
).astype(int)
scale = np.ones(self.layer.ndim)
for i, d in enumerate(self.layer._dims_displayed):
scale[d] = downsample[i]
self.layer._transforms['tile2data'].scale = scale
self._on_matrix_change()
slices = tuple(slice(None, None, ds) for ds in downsample)
data = data[slices]
return data
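# Illustrative worked example (not part of the napari module) for
# VispyImageLayer.downsample_texture above, assuming a 2D layer with
# data.shape == (3000, 5000) and MAX_TEXTURE_SIZE == 2048:
#
#   downsample = ceil([3000 / 2048, 5000 / 2048]) = [2, 3]
#   slices     = (slice(None, None, 2), slice(None, None, 3))
#   data[slices].shape == (1500, 1667)
#
# and the displayed dimensions receive a compensating scale of (2, 3) via the
# layer's 'tile2data' transform.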
|
py | 1a497880aa37fea68930e48886032dc1d49a4ff8 | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
class bitz (Exchange):
def describe(self):
return self.deep_extend(super(bitz, self).describe(), {
'id': 'bitz',
'name': 'Bit-Z',
'countries': ['HK'],
'rateLimit': 2000,
'version': 'v2',
'userAgent': self.userAgents['chrome'],
'has': {
'fetchTickers': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchOrders': True,
'fetchOrder': True,
'createMarketOrder': False,
},
'timeframes': {
'1m': '1min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '60min',
'4h': '4hour',
'1d': '1day',
'5d': '5day',
'1w': '1week',
'1M': '1mon',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/35862606-4f554f14-0b5d-11e8-957d-35058c504b6f.jpg',
'api': {
'market': 'https://apiv2.bitz.com',
'trade': 'https://apiv2.bitz.com',
'assets': 'https://apiv2.bitz.com',
},
'www': 'https://www.bit-z.com',
'doc': 'https://apidoc.bit-z.com/en',
'fees': 'https://www.bit-z.com/about/fee',
'referral': 'https://u.bit-z.com/register?invite_code=1429193',
},
'api': {
'market': {
'get': [
'ticker',
'depth',
'order', # trades
'tickerall',
'kline',
'symbolList',
'currencyRate',
'currencyCoinRate',
'coinRate',
],
},
'trade': {
'post': [
'addEntrustSheet',
'cancelEntrustSheet',
'cancelAllEntrustSheet',
'getUserHistoryEntrustSheet', # closed orders
'getUserNowEntrustSheet', # open orders
'getEntrustSheetInfo', # order
],
},
'assets': {
'post': [
'getUserAssets',
],
},
},
'fees': {
'trading': {
'maker': 0.001,
'taker': 0.001,
},
'funding': {
'withdraw': {
'BTC': '0.5%',
'DKKT': '0.5%',
'ETH': 0.01,
'USDT': '0.5%',
'LTC': '0.5%',
'FCT': '0.5%',
'LSK': '0.5%',
'HXI': '0.8%',
'ZEC': '0.5%',
'DOGE': '0.5%',
'MZC': '0.5%',
'ETC': '0.5%',
'GXS': '0.5%',
'XPM': '0.5%',
'PPC': '0.5%',
'BLK': '0.5%',
'XAS': '0.5%',
'HSR': '0.5%',
'NULS': 5.0,
'VOISE': 350.0,
'PAY': 1.5,
'EOS': 0.6,
'YBCT': 35.0,
'OMG': 0.3,
'OTN': 0.4,
'BTX': '0.5%',
'QTUM': '0.5%',
'DASH': '0.5%',
'GAME': '0.5%',
'BCH': '0.5%',
'GNT': 9.0,
'SSS': 1500.0,
'ARK': '0.5%',
'PART': '0.5%',
'LEO': '0.5%',
'DGB': '0.5%',
'ZSC': 130.0,
'VIU': 350.0,
'BTG': '0.5%',
'ARN': 10.0,
'VTC': '0.5%',
'BCD': '0.5%',
'TRX': 200.0,
'HWC': '0.5%',
'UNIT': '0.5%',
'OXY': '0.5%',
'MCO': 0.3500,
'SBTC': '0.5%',
'BCX': '0.5%',
'ETF': '0.5%',
'PYLNT': 0.4000,
'XRB': '0.5%',
'ETP': '0.5%',
},
},
},
'precision': {
'amount': 8,
'price': 8,
},
'options': {
'fetchOHLCVVolume': True,
'fetchOHLCVWarning': True,
'lastNonceTimestamp': 0,
},
'commonCurrencies': {
'XRB': 'NANO',
'PXC': 'Pixiecoin',
},
'exceptions': {
# '200': Success
'-102': ExchangeError, # Invalid parameter
'-103': AuthenticationError, # Verification failed
'-104': ExchangeNotAvailable, # Network Error-1
'-105': AuthenticationError, # Invalid api signature
'-106': ExchangeNotAvailable, # Network Error-2
'-109': AuthenticationError, # Invalid scretKey
'-110': DDoSProtection, # The number of access requests exceeded
'-111': PermissionDenied, # Current IP is not in the range of trusted IP
'-112': ExchangeNotAvailable, # Service is under maintenance
'-100015': AuthenticationError, # Trade password error
'-100044': ExchangeError, # Fail to request data
'-100101': ExchangeError, # Invalid symbol
'-100201': ExchangeError, # Invalid symbol
'-100301': ExchangeError, # Invalid symbol
'-100401': ExchangeError, # Invalid symbol
'-100302': ExchangeError, # Type of K-line error
'-100303': ExchangeError, # Size of K-line error
'-200003': AuthenticationError, # Please set trade password
'-200005': PermissionDenied, # This account can not trade
'-200025': ExchangeNotAvailable, # Temporary trading halt
'-200027': InvalidOrder, # Price Error
'-200028': InvalidOrder, # Amount must be greater than 0
'-200029': InvalidOrder, # Number must be between %s and %d
'-200030': InvalidOrder, # Over price range
'-200031': InsufficientFunds, # Insufficient assets
'-200032': ExchangeError, # System error. Please contact customer service
'-200033': ExchangeError, # Fail to trade
'-200034': OrderNotFound, # The order does not exist
'-200035': OrderNotFound, # Cancellation error, order filled
'-200037': InvalidOrder, # Trade direction error
'-200038': ExchangeError, # Trading Market Error
'-200055': OrderNotFound, # Order record does not exist
'-300069': AuthenticationError, # api_key is illegal
'-300101': ExchangeError, # Transaction type error
'-300102': InvalidOrder, # Price or number cannot be less than 0
'-300103': AuthenticationError, # Trade password error
'-301001': ExchangeNotAvailable, # Network Error-3
},
})
async def fetch_markets(self):
response = await self.marketGetSymbolList()
#
# { status: 200,
# msg: "",
# data: { ltc_btc: { id: "1",
# name: "ltc_btc",
# coinFrom: "ltc",
# coinTo: "btc",
# numberFloat: "4",
# priceFloat: "8",
# status: "1",
# minTrade: "0.010",
# maxTrade: "500000000.000"},
# qtum_usdt: { id: "196",
# name: "qtum_usdt",
# coinFrom: "qtum",
# coinTo: "usdt",
# numberFloat: "4",
# priceFloat: "2",
# status: "1",
# minTrade: "0.100",
# maxTrade: "500000000.000"}, },
# time: 1535969146,
# microtime: "0.66955600 1535969146",
# source: "api" }
#
markets = response['data']
ids = list(markets.keys())
result = []
for i in range(0, len(ids)):
id = ids[i]
market = markets[id]
numericId = self.safe_string(market, 'id')
baseId = self.safe_string(market, 'coinFrom')
quoteId = self.safe_string(market, 'coinTo')
base = baseId.upper()
quote = quoteId.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
precision = {
'amount': self.safe_integer(market, 'numberFloat'),
'price': self.safe_integer(market, 'priceFloat'),
}
result.append({
'info': market,
'id': id,
'numericId': numericId,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_float(market, 'minTrade'),
'max': self.safe_float(market, 'maxTrade'),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.assetsPostGetUserAssets(params)
#
# {
# status: 200,
# msg: "",
# data: {
# cny: 0,
# usd: 0,
# btc_total: 0,
# info: [{
# "name": "zpr",
# "num": "37.49067275",
# "over": "37.49067275",
# "lock": "0.00000000",
# "btc": "0.00000000",
# "usd": "0.00000000",
# "cny": "0.00000000",
# }],
# },
# time: 1535983966,
# microtime: "0.70400500 1535983966",
# source: "api",
# }
#
balances = response['data']['info']
result = {'info': response}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'name')
code = currencyId.upper()
            if currencyId in self.currencies_by_id:
code = self.currencies_by_id[currencyId]['code']
else:
code = self.common_currency_code(code)
account = self.account()
account['used'] = self.safe_float(balance, 'lock')
account['total'] = self.safe_float(balance, 'num')
account['free'] = self.safe_float(balance, 'over')
result[code] = account
return self.parse_balance(result)
def parse_ticker(self, ticker, market=None):
#
# { symbol: "eth_btc",
# quoteVolume: "3905.72",
# volume: "97058.21",
# priceChange: "-1.72",
# priceChange24h: "-1.65",
# askPrice: "0.03971272",
# askQty: "0.0663",
# bidPrice: "0.03961469",
# bidQty: "19.5451",
# open: "0.04036769",
# high: "0.04062988",
# low: "0.03956123",
# now: "0.03970100",
# firstId: 115567767,
# lastId: 115795316,
# dealCount: 14078,
# numberPrecision: 4,
# pricePrecision: 8,
# cny: "1959.05",
# usd: "287.10",
# krw: "318655.82" }
#
timestamp = None
symbol = None
if market is None:
marketId = self.safe_string(ticker, 'symbol')
market = self.safe_value(self.markets_by_id, marketId)
if market is not None:
symbol = market['symbol']
last = self.safe_float(ticker, 'now')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'bidPrice'),
'bidVolume': self.safe_float(ticker, 'bidQty'),
'ask': self.safe_float(ticker, 'askPrice'),
'askVolume': self.safe_float(ticker, 'askQty'),
'vwap': None,
'open': self.safe_float(ticker, 'open'),
'close': last,
'last': last,
'previousClose': None,
'change': self.safe_float(ticker, 'priceChange24h'),
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': self.safe_float(ticker, 'quoteVolume'),
'info': ticker,
}
def parse_microtime(self, microtime):
if microtime is None:
return microtime
parts = microtime.split(' ')
milliseconds = float(parts[0])
seconds = int(parts[1])
total = seconds + milliseconds
return int(total * 1000)
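    # Illustrative example (not part of the exchange class): Bit-Z reports
    # times as "<fractional seconds> <unix seconds>", so
    #
    #   parse_microtime("0.76341900 1535970397")
    #   == int((1535970397 + 0.763419) * 1000) == 1535970397763
    #
    # i.e. a millisecond timestamp.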
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.marketGetTicker(self.extend({
'symbol': market['id'],
}, params))
#
# { status: 200,
# msg: "",
# data: { symbol: "eth_btc",
# quoteVolume: "3905.72",
# volume: "97058.21",
# priceChange: "-1.72",
# priceChange24h: "-1.65",
# askPrice: "0.03971272",
# askQty: "0.0663",
# bidPrice: "0.03961469",
# bidQty: "19.5451",
# open: "0.04036769",
# high: "0.04062988",
# low: "0.03956123",
# now: "0.03970100",
# firstId: 115567767,
# lastId: 115795316,
# dealCount: 14078,
# numberPrecision: 4,
# pricePrecision: 8,
# cny: "1959.05",
# usd: "287.10",
# krw: "318655.82" },
# time: 1535970397,
# microtime: "0.76341900 1535970397",
# source: "api" }
#
ticker = self.parse_ticker(response['data'], market)
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
return self.extend(ticker, {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
request = {}
if symbols is not None:
ids = self.market_ids(symbols)
request['symbols'] = ','.join(ids)
response = await self.marketGetTickerall(self.extend(request, params))
#
# { status: 200,
# msg: "",
# data: { ela_btc: { symbol: "ela_btc",
# quoteVolume: "0.00",
# volume: "3.28",
# priceChange: "0.00",
# priceChange24h: "0.00",
# askPrice: "0.00147984",
# askQty: "5.4580",
# bidPrice: "0.00120230",
# bidQty: "12.5384",
# open: "0.00149078",
# high: "0.00149078",
# low: "0.00149078",
# now: "0.00149078",
# firstId: 115581219,
# lastId: 115581219,
# dealCount: 1,
# numberPrecision: 4,
# pricePrecision: 8,
# cny: "73.66",
# usd: "10.79",
# krw: "11995.03" } },
# time: 1535971578,
# microtime: "0.39854200 1535971578",
# source: "api" }
#
tickers = response['data']
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
iso8601 = self.iso8601(timestamp)
result = {}
ids = list(tickers.keys())
for i in range(0, len(ids)):
id = ids[i]
ticker = tickers[id]
market = None
if id in self.markets_by_id:
market = self.markets_by_id[id]
ticker = self.parse_ticker(tickers[id], market)
symbol = ticker['symbol']
if symbol is None:
if market is not None:
symbol = market['symbol']
else:
baseId, quoteId = id.split('_')
base = baseId.upper()
quote = quoteId.upper()
                    base = self.common_currency_code(base)
                    quote = self.common_currency_code(quote)
symbol = base + '/' + quote
if symbol is not None:
result[symbol] = self.extend(ticker, {
'timestamp': timestamp,
'datetime': iso8601,
})
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
response = await self.marketGetDepth(self.extend({
'symbol': self.market_id(symbol),
}, params))
#
# { status: 200,
# msg: "",
# data: { asks: [["10.00000000", "0.4426", "4.4260"],
# ["1.00000000", "0.8339", "0.8339"],
# ["0.91700000", "0.0500", "0.0458"],
# ["0.20000000", "0.1000", "0.0200"],
# ["0.03987120", "16.1262", "0.6429"],
# ["0.03986120", "9.7523", "0.3887"] ],
# bids: [["0.03976145", "0.0359", "0.0014"],
# ["0.03973401", "20.9493", "0.8323"],
# ["0.03967970", "0.0328", "0.0013"],
# ["0.00000002", "10000.0000", "0.0002"],
# ["0.00000001", "231840.7500", "0.0023"]],
# coinPair: "eth_btc" },
# time: 1535974778,
# microtime: "0.04017400 1535974778",
# source: "api" }
#
orderbook = response['data']
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
return self.parse_order_book(orderbook, timestamp)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {id: 115807453,
# t: "19:36:24",
# T: 1535974584,
# p: "0.03983296",
# n: "0.1000",
# s: "buy" },
#
id = self.safe_string(trade, 'id')
timestamp = self.safe_integer(trade, 'T')
if timestamp is not None:
timestamp = timestamp * 1000
price = self.safe_float(trade, 'p')
amount = self.safe_float(trade, 'n')
symbol = None
if market is not None:
symbol = market['symbol']
cost = self.price_to_precision(symbol, amount * price)
side = self.safe_string(trade, 's')
return {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': None,
'type': 'limit',
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
'info': trade,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
response = await self.marketGetOrder(self.extend({
'symbol': market['id'],
}, params))
#
# { status: 200,
# msg: "",
# data: [{id: 115807453,
# t: "19:36:24",
# T: 1535974584,
# p: "0.03983296",
# n: "0.1000",
# s: "buy" },
# {id: 115806811,
# t: "19:33:19",
# T: 1535974399,
# p: "0.03981135",
# n: "9.4612",
# s: "sell" } ],
# time: 1535974583,
# microtime: "0.57118100 1535974583",
# source: "api" }
#
return self.parse_trades(response['data'], market, since, limit)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
#
# { time: "1535973420000",
# open: "0.03975084",
# high: "0.03975084",
# low: "0.03967700",
# close: "0.03967700",
# volume: "12.4733",
# datetime: "2018-09-03 19:17:00"}
#
return [
self.safe_integer(ohlcv, 'time'),
self.safe_float(ohlcv, 'open'),
self.safe_float(ohlcv, 'high'),
self.safe_float(ohlcv, 'low'),
self.safe_float(ohlcv, 'close'),
self.safe_float(ohlcv, 'volume'),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
duration = self.parse_timeframe(timeframe) * 1000
market = self.market(symbol)
request = {
'symbol': market['id'],
'resolution': self.timeframes[timeframe],
}
if limit is not None:
request['size'] = min(limit, 300) # 1-300
if since is not None:
request['to'] = since + limit * duration * 1000
else:
if since is not None:
                raise ExchangeError(self.id + ' fetchOHLCV requires a limit argument to be supplied along with the since argument')
response = await self.marketGetKline(self.extend(request, params))
#
# { status: 200,
# msg: "",
# data: { bars: [{ time: "1535973420000",
# open: "0.03975084",
# high: "0.03975084",
# low: "0.03967700",
# close: "0.03967700",
# volume: "12.4733",
# datetime: "2018-09-03 19:17:00"},
# { time: "1535955480000",
# open: "0.04009900",
# high: "0.04016745",
# low: "0.04009900",
# close: "0.04012074",
# volume: "74.4803",
# datetime: "2018-09-03 14:18:00"} ],
# resolution: "1min",
# symbol: "eth_btc",
# from: "1535973420000",
# to: "1535955480000",
# size: 300 },
# time: 1535973435,
# microtime: "0.56462100 1535973435",
# source: "api" }
#
return self.parse_ohlcvs(response['data']['bars'], market, timeframe, since, limit)
def parse_order_status(self, status):
statuses = {
'0': 'open',
'1': 'open', # partially filled
'2': 'closed', # filled
'3': 'canceled',
}
if status in statuses:
return statuses[status]
return status
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "id": "693248739", # order id
# "uId": "2074056", # uid
# "price": "100", # price
# "number": "10", # number
# "numberOver": "10", # undealed
# "flag": "sale", # flag
# "status": "0", # unfilled
# "coinFrom": "vtc",
# "coinTo": "dkkt",
# "numberDeal": "0" # dealed
# }
#
id = self.safe_string(order, 'id')
symbol = None
if market is None:
baseId = self.safe_string(order, 'coinFrom')
quoteId = self.safe_string(order, 'coinTo')
if (baseId is not None) and(quoteId is not None):
marketId = baseId + '_' + quoteId
if marketId in self.markets_by_id:
market = self.safe_value(self.markets_by_id, marketId)
else:
base = baseId.upper()
quote = quoteId.upper()
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
if market is not None:
symbol = market['symbol']
side = self.safe_string(order, 'flag')
if side is not None:
side = 'sell' if (side == 'sale') else 'buy'
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'number')
remaining = self.safe_float(order, 'numberOver')
filled = self.safe_float(order, 'numberDeal')
timestamp = self.safe_integer(order, 'timestamp')
if timestamp is None:
timestamp = self.safe_integer(order, 'created')
if timestamp is not None:
timestamp = timestamp * 1000
cost = self.safe_float(order, 'orderTotalPrice')
if price is not None:
if filled is not None:
cost = filled * price
status = self.parse_order_status(self.safe_string(order, 'status'))
return {
'id': id,
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': 'limit',
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'trades': None,
'fee': None,
'info': order,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
if type != 'limit':
raise ExchangeError(self.id + ' createOrder allows limit orders only')
market = self.market(symbol)
orderType = '1' if (side == 'buy') else '2'
if not self.password:
            raise ExchangeError(self.id + ' createOrder() requires you to set exchange.password = "YOUR_TRADING_PASSWORD" (a trade password is NOT THE SAME as your login password)')
request = {
'symbol': market['id'],
'type': orderType,
'price': self.price_to_precision(symbol, price),
'number': self.amount_to_string(symbol, amount),
'tradePwd': self.password,
}
response = await self.tradePostAddEntrustSheet(self.extend(request, params))
#
# {
# "status": 200,
# "msg": "",
# "data": {
# "id": "693248739", # order id
# "uId": "2074056", # uid
# "price": "100", # price
# "number": "10", # number
# "numberOver": "10", # undealed
# "flag": "sale", # flag
# "status": "0", # unfilled
# "coinFrom": "vtc",
# "coinTo": "dkkt",
# "numberDeal": "0" # dealed
# },
# "time": "1533035297",
# "microtime": "0.41892000 1533035297",
# "source": "api",
# }
#
timestamp = self.parse_microtime(self.safe_string(response, 'microtime'))
order = self.extend({
'timestamp': timestamp,
}, response['data'])
return self.parse_order(order, market)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
response = await self.tradePostCancelEntrustSheet(self.extend({
'entrustSheetId': id,
}, params))
#
# {
# "status":200,
# "msg":"",
# "data":{
# "updateAssetsData":{
# "coin":"bz",
# "over":"1000.00000000",
# "lock":"-1000.00000000"
# },
# "assetsInfo":{
# "coin":"bz",
# "over":"9999.99999999",
# "lock":"9999.99999999"
# }
# },
# "time":"1535464383",
# "microtime":"0.91558000 1535464383",
# "source":"api"
# }
#
return response
async def cancel_orders(self, ids, symbol=None, params={}):
await self.load_markets()
response = await self.tradePostCancelEntrustSheet(self.extend({
'ids': ','.join(ids),
}, params))
#
# {
# "status":200,
# "msg":"",
# "data":{
# "744173808":{
# "updateAssetsData":{
# "coin":"bz",
# "over":"100.00000000",
# "lock":"-100.00000000"
# },
# "assetsInfo":{
# "coin":"bz",
# "over":"899.99999999",
# "lock":"19099.99999999"
# }
# },
# "744173809":{
# "updateAssetsData":{
# "coin":"bz",
# "over":"100.00000000",
# "lock":"-100.00000000"
# },
# "assetsInfo":{
# "coin":"bz",
# "over":"999.99999999",
# "lock":"18999.99999999"
# }
# }
# },
# "time":"1535525649",
# "microtime":"0.05009400 1535525649",
# "source":"api"
# }
#
return response
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'entrustSheetId': id,
}
response = await self.tradePostGetEntrustSheetInfo(self.extend(request, params))
#
# {
# "status":200,
# "msg":"",
# "data":{
# "id":"708279852",
# "uId":"2074056",
# "price":"100.00000000",
# "number":"10.0000",
# "total":"0.00000000",
# "numberOver":"10.0000",
# "numberDeal":"0.0000",
# "flag":"sale",
# "status":"0", #0:unfilled, 1:partial deal, 2:all transactions, 3:already cancelled
# "coinFrom":"bz",
# "coinTo":"usdt",
# "orderTotalPrice":"0",
# "created":"1533279876"
# },
# "time":"1533280294",
# "microtime":"0.36859200 1533280294",
# "source":"api"
# }
#
return self.parse_order(response['data'])
async def fetch_orders_with_method(self, method, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
if symbol is None:
raise ExchangeError(self.id + ' fetchOpenOrders requires a symbol argument')
market = self.market(symbol)
request = {
'coinFrom': market['baseId'],
'coinTo': market['quoteId'],
# 'type': 1, # optional integer, 1 = buy, 2 = sell
# 'page': 1, # optional integer
# 'pageSize': 100, # optional integer, max 100
# 'startTime': 1510235730, # optional integer timestamp in seconds
# 'endTime': 1510235730, # optional integer timestamp in seconds
}
if limit is not None:
request['page'] = 1
request['pageSize'] = limit
if since is not None:
request['startTime'] = int(since / 1000)
# request['endTime'] = int(since / 1000)
response = await getattr(self, method)(self.extend(request, params))
#
# {
# "status": 200,
# "msg": "",
# "data": {
# "data": [
# {
# "id": "693248739",
# "uid": "2074056",
# "price": "100.00000000",
# "number": "10.0000",
# "total": "0.00000000",
# "numberOver": "0.0000",
# "numberDeal": "0.0000",
# "flag": "sale",
# "status": "3", # 0:unfilled, 1:partial deal, 2:all transactions, 3:already cancelled
# "isNew": "N",
# "coinFrom": "vtc",
# "coinTo": "dkkt",
# "created": "1533035300",
# },
# {
# "id": "723086996",
# "uid": "2074056",
# "price": "100.00000000",
# "number": "10.0000",
# "total": "0.00000000",
# "numberOver": "0.0000",
# "numberDeal": "0.0000",
# "flag": "sale",
# "status": "3",
# "isNew": "N",
# "coinFrom": "bz",
# "coinTo": "usdt",
# "created": "1533523568",
# },
# ],
# "pageInfo": {
# "limit": "10",
# "offest": "0",
# "current_page": "1",
# "page_size": "10",
# "total_count": "17",
# "page_count": "2",
# }
# },
# "time": "1533279329",
# "microtime": "0.15305300 1533279329",
# "source": "api"
# }
#
return self.parse_orders(response['data']['data'], None, since, limit)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_with_method('tradePostGetUserHistoryEntrustSheet', symbol, since, limit, params)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_with_method('tradePostGetUserNowEntrustSheet', symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_with_method('tradePostGetUserHistoryEntrustSheet', symbol, since, limit, params)
def nonce(self):
currentTimestamp = self.seconds()
if currentTimestamp > self.options['lastNonceTimestamp']:
self.options['lastNonceTimestamp'] = currentTimestamp
self.options['lastNonce'] = 100000
self.options['lastNonce'] = self.sum(self.options['lastNonce'], 1)
return self.options['lastNonce']
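    # Illustrative trace (not part of the exchange class) of nonce() above:
    # the counter restarts at 100000 whenever a new second begins, so two
    # calls within the same second return 100001 and 100002, and the first
    # call of the next second returns 100001 again. Combined with the
    # 'timeStamp' field added in sign(), each signed request is unique.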
def sign(self, path, api='market', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/' + self.capitalize(api) + '/' + path
query = None
if api == 'market':
query = self.urlencode(params)
if len(query):
url += '?' + query
else:
self.check_required_credentials()
body = self.rawencode(self.keysort(self.extend({
'apiKey': self.apiKey,
'timeStamp': self.seconds(),
'nonce': self.nonce(),
}, params)))
body += '&sign=' + self.hash(self.encode(body + self.secret))
headers = {'Content-type': 'application/x-www-form-urlencoded'}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
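    # Illustrative example (not part of the exchange class) of the URL that
    # sign() above builds for a public market call such as
    # marketGetTicker({'symbol': 'eth_btc'}):
    #
    #   'https://apiv2.bitz.com/Market/ticker?symbol=eth_btc'
    #
    # Private calls instead POST a keysorted, rawencoded body whose final
    # 'sign' field is the hash of (body + secret).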
def handle_errors(self, httpCode, reason, url, method, headers, body):
if not isinstance(body, basestring):
return # fallback to default error handler
if len(body) < 2:
return # fallback to default error handler
if (body[0] == '{') or (body[0] == '['):
response = json.loads(body)
status = self.safe_string(response, 'status')
if status is not None:
feedback = self.id + ' ' + body
exceptions = self.exceptions
#
# {"status":-107,"msg":"","data":"","time":1535968848,"microtime":"0.89092200 1535968848","source":"api"}
#
if status == '200':
#
# {"status":200,"msg":"","data":-200031,"time":1535999806,"microtime":"0.85476800 1535999806","source":"api"}
#
code = self.safe_integer(response, 'data')
if code is not None:
if code in exceptions:
raise exceptions[code](feedback)
else:
raise ExchangeError(feedback)
else:
return # no error
if status in exceptions:
raise exceptions[status](feedback)
else:
raise ExchangeError(feedback)
|
py | 1a4978ca08cf4e19f6f6087f8d071be8fc725edc | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import print_function
import random
from os import environ
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
"""
An application component that publishes events with no payload
"""
@inlineCallbacks
def onJoin(self, details):
print("session attached")
counter = 0
while True:
print("publish: com.myapp.heartbeat")
self.publish(u'com.myapp.heartbeat')
yield sleep(1)
if __name__ == '__main__':
runner = ApplicationRunner(
environ.get("AUTOBAHN_DEMO_ROUTER", u"ws://127.0.0.1:8080/ws"),
u"realm1",
)
runner.run(Component)
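# Illustrative counterpart sketch (not part of this example): a subscribing
# component could consume the heartbeat published above from its own onJoin(),
# e.g.
#
#   def on_heartbeat():
#       print("got heartbeat")
#   yield self.subscribe(on_heartbeat, u'com.myapp.heartbeat')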
|
py | 1a4978f42835e8326b5313a93dbe135b958315b3 | import unittest
from tri.delaunay.helpers import ToPointsAndSegments
from grassfire import calc_skel, calc_offsets
from grassfire.events import at_same_location
from grassfire.test.intersection import segments_intersecting
from grassfire.vectorops import dist
import fixtures
def all_tests():
"""Find all functions inside the *fixtures* module
and returns a list with all function objects"""
import inspect
all_functions = inspect.getmembers(fixtures, inspect.isfunction)
return [fn for fn_nm, fn in sorted(all_functions)]
def make_test_cases(fixtures):
"""For all functions in the list, make
an entry in the cases dictionary, by
invoking the function.
"""
cases = {}
for i, f in enumerate(fixtures):
data, total, node, infinite, = f()
assert f.__name__ not in cases, "duplicate test name ({}) found".format(
f.__name__
)
cases[f.__name__] = (
"* {:>2d}: ".format(i) + str(f.__doc__),
data,
total,
node,
infinite,
)
return cases
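# Illustrative shape (not part of the test module) of the CASES dictionary
# built above: each fixture function contributes one entry of the form
#
#   CASES["some_fixture"] = (description, data, total, node, infinite)
#
# where total/node/infinite are the expected counts of skeleton segments,
# skeleton nodes and unstopped kinetic vertices checked by the generated tests.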
EXPENSIVE_POST_CONDITION = True
CASES = make_test_cases(all_tests())
INTERACTIVE = False
# CASES = make_test_cases([all_tests()[48]])
# INTERACTIVE = True
# After: https://stackoverflow.com/a/20870875
class TestSequenceMeta(type):
"""A meta class for all our TestCases"""
def __new__(mcs, name, bases, dict):
def gen_test(description, data, total, node, infinite):
def test(self):
if INTERACTIVE:
skel = calc_skel(
data, pause=True, output=True, internal_only=False, shrink=True
# data, pause=False, output=False, internal_only=False, shrink=True
)
else:
skel = calc_skel(data)
# check the amount of segments in the skeleton
self.assertEqual(len(skel.segments()), total)
# check the amount of skeleton nodes
self.assertEqual(len(skel.sk_nodes), node)
# # check the amount of kinetic vertices that are (not) stopped
not_stopped = [v for v in skel.vertices if v.stops_at is None]
stopped = [v for v in skel.vertices if v.stops_at is not None and v.start_node is not v.stop_node]
self.assertEqual(len(not_stopped), infinite)
self.assertEqual(len(stopped), total - infinite)
# check cross relationship between kinetic vertices and skeleton nodes
for v in skel.vertices:
# exact same starting location
if abs(v.velocity[0]) < 100 and abs(v.velocity[1]) < 100: # check only 'slow' moving vertices
self.assertTrue(at_same_location([v.start_node, v], v.starts_at), "{} [{}] {} does not have correct start_node(!) position".format(id(v), v.info, v.velocity))
# quite close at the stop node (given the vertex + its direction/speed)
if True and v.stops_at is not None and not v.inf_fast and (abs(v.velocity[0]) < 100 and abs(v.velocity[1]) < 100):
d = dist(
v.stop_node.position_at(v.stops_at),
v.position_at(v.stops_at),
)
self.assertAlmostEqual(
d,
0.0,
2,
"{} [{}] velocity '{}' does not have correct stop_node position -- dist: {}".format(id(v), v.info, v.velocity, d)
)
# self.assertTrue(at_same_location([v.stop_node, v], v.stops_at),
# '{} != {}; {}'.format(v.stop_node.position_at(v.stops_at), v.position_at(v.stops_at),
# dist(v.stop_node.position_at(v.stops_at), v.position_at(v.stops_at)))
# )
if EXPENSIVE_POST_CONDITION == True:
# check that we do not have any self intersections between segments
self.assertFalse(
segments_intersecting(skel.segments()),
"intersection between straight skeleton segments found",
)
# offset segments should not intersect
# (FIXME: these use left_at of kinetic vertices, also check right_at)
last_evt_time = max(v.stops_at for v in skel.vertices if v.stops_at is not None)
offset_segments = [
(line[0], line[1]) for line in calc_offsets(skel, last_evt_time, 25)
]
self.assertFalse(
segments_intersecting(offset_segments),
"Intersection in offsets found",
)
# with open("/tmp/offsets.wkt", "w") as fh:
# for segment in offset_segments:
# s = "LINESTRING({0[0]} {0[1]}, {1[0]} {1[1]})".format(segment[0], segment[1])
# fh.write(s)
# fh.write("\n")
# set the docstring of the test function
test.__doc__ = description
return test
for tname in CASES:
test_name = "test_%s" % tname
dict[test_name] = gen_test(*CASES[tname])
return type.__new__(mcs, name, bases, dict)
class GrassfireTestCase(unittest.TestCase, metaclass=TestSequenceMeta):
pass
if __name__ == "__main__":
if INTERACTIVE:
import logging
import sys
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(message)s")
ch.setFormatter(formatter)
root.addHandler(ch)
# import cProfile
# command = """unittest.main(verbosity=10)"""
# cProfile.runctx( command, globals(), locals(), filename="/tmp/gf.profile" )
unittest.main()
|
py | 1a49795d9b64b91a102395be85ec4754edc44d94 | from typing import Callable, List, Tuple
from outdated_item_selection_strategy.no_update import *
from outdated_item_selection_strategy.oldest_chunks_update import *
from outdated_item_selection_strategy.last_n_chunks_update import *
from outdated_item_selection_strategy.regular_interval_update import *
from outdated_item_selection_strategy.binned_update import *
UPDATE_STRATEGY_LABELS = [
"no update",
"oldest n chunks",
"last n chunks",
"regular intervals",
"outdated bins"
]
def get_update_strategies(n_dims: int, n_chunks: int, max_age: int, n_bins: int) -> List[Tuple[str, Callable[[], OutdatedItemSelectionStrategy]]]:
return list(
map(
lambda label: (label, lambda: get_update_strategy(label, n_dims, n_chunks, max_age, n_bins)),
UPDATE_STRATEGY_LABELS
)
)
def get_update_strategy(label: str, n_dims: int, n_chunks: int, max_age: int, n_bins: int) -> OutdatedItemSelectionStrategy:
if label == "no update":
return NoUpdate(n_dims=n_dims, storage=None)
elif label == "oldest n chunks":
return OldestChunksUpdate(n_dims=n_dims, storage=None, max_age=max_age)
elif label == "last n chunks":
return LastNChunksUpdate(n_dims=n_dims, n_chunks=n_chunks, storage=None)
elif label == "regular intervals":
return RegularIntervalUpdate(n_dims=n_dims, n_chunks=n_chunks, storage=None, max_age=max_age)
elif label == "outdated bins":
return BinnedUpdate(n_dims=n_dims, storage=None, n_bins=n_bins)
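# Illustrative usage sketch (the parameter values are assumptions, not part of
# the original module):
#
#   label, factory = get_update_strategies(n_dims=2, n_chunks=10,
#                                          max_age=5, n_bins=4)[1]
#   strategy = factory()   # "oldest n chunks" -> OldestChunksUpdate(n_dims=2,
#                          #                       storage=None, max_age=5)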
|
py | 1a4979c159b0d6aa2b7e3058615b9eb43d991002 | # coding: utf-8
"""
Talend Management Console Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class PromotionsExecutionsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def execute(self, body, **kwargs): # noqa: E501
"""Execute Promotion # noqa: E501
Execute Promotion # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.execute(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PromotionExecutableTask body: ExecutableTask (required)
:return: ExecutionIdentifier
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.execute_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.execute_with_http_info(body, **kwargs) # noqa: E501
return data
def execute_with_http_info(self, body, **kwargs): # noqa: E501
"""Execute Promotion # noqa: E501
Execute Promotion # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.execute_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PromotionExecutableTask body: ExecutableTask (required)
:return: ExecutionIdentifier
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method execute" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `execute`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Access Token', 'Basic Authentication'] # noqa: E501
return self.api_client.call_api(
'/executions/promotions', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ExecutionIdentifier', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_execution_status(self, id, **kwargs): # noqa: E501
"""Get Promotion execution status # noqa: E501
Get Promotion execution status # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_execution_status(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: execution ID (required)
:return: PromotionExecutionInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_execution_status_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_execution_status_with_http_info(id, **kwargs) # noqa: E501
return data
def get_execution_status_with_http_info(self, id, **kwargs): # noqa: E501
"""Get Promotion execution status # noqa: E501
Get Promotion execution status # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_execution_status_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: execution ID (required)
:return: PromotionExecutionInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_execution_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_execution_status`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Access Token', 'Basic Authentication'] # noqa: E501
return self.api_client.call_api(
'/executions/promotions/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PromotionExecutionInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
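# Illustrative usage sketch (the client configuration, promotion task and
# execution id are assumptions, not part of the generated code):
#
#   api = PromotionsExecutionsApi(ApiClient())     # host/credentials set elsewhere
#   identifier = api.execute(body=promotion_task)          # ExecutionIdentifier
#   info = api.get_execution_status(id=some_execution_id)  # PromotionExecutionInfo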
|
py | 1a497a2b88eb4612a613b5d3fd6eb7eac58d661e | import pytest
from src.project.risks import Risk
from src.project.risks.helpers import RiskCounterMeasure, RiskImpact, RiskProbabilty, RiskScore
from tests.faker import faker
@pytest.fixture
def risk():
yield Risk(risk_name="Fake Risk Name", probability=50, impact=100)
def test_create_Risk_object_directly(monkeypatch, stakeholder, risk):
assert risk.risk_name == "Fake Risk Name"
assert risk.impact == 100
assert risk.probability == 50
assert risk.risk_owner is None
assert risk.description is None
assert risk.counter_measure is None
assert risk.get_risk_score() == RiskScore.HIGH
assert isinstance(risk.get_risk_score(), RiskScore)
monkeypatch.setattr(risk, "risk_owner", stakeholder, raising=True)
assert risk.risk_owner == stakeholder
monkeypatch.setattr(risk, "description", "Fake Description", raising=True)
assert risk.description == "Fake Description"
def test_cannot_set_unallowed_counter_measure(risk):
with pytest.raises(AssertionError):
setattr(risk, "counter_measure", "Fake counter measure")
assert risk.counter_measure is None
@pytest.mark.parametrize("counter_measure", ["ReDuce", "prevenT", "aCCepT", "transfer"])
def test_can_set_allowed_counter_measures_and_value_is_case_insensitive(counter_measure, risk):
setattr(risk, "counter_measure", counter_measure)
assert isinstance(risk.counter_measure, RiskCounterMeasure)
assert risk.counter_measure == RiskCounterMeasure(counter_measure.upper())
@pytest.mark.parametrize("wrong_classmethod", ["not_a_real_classmethod", "maybe_probability_high_impact"])
def test_access_to_dynamic_classmethod_not_matched_by_regex_raises(wrong_classmethod):
with pytest.raises(AttributeError):
getattr(Risk, wrong_classmethod)
@pytest.mark.parametrize(
"class_method",
[
"rare_probability_high_impact",
"rare_probability_medium_impact",
"rare_probability_low_impact",
"unlikely_probability_high_impact",
"unlikely_probability_medium_impact",
"unlikely_probability_low_impact",
"moderate_probability_high_impact",
"moderate_probability_medium_impact",
"moderate_probability_low_impact",
"likely_probability_high_impact",
"likely_probability_medium_impact",
"likely_probability_low_impact",
"certain_probability_high_impact",
"certain_probability_medium_impact",
"certain_probability_low_impact",
],
)
def test_access_to_dynamic_classmethod_matched_by_regex_will_not_raise(class_method):
name = faker.unique.name()
instance = getattr(Risk, class_method)(risk_name=name)
assert instance.risk_name == name
assert isinstance(instance.get_risk_score(), RiskScore)
probability, _, impact, _ = class_method.split("_")
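    # e.g. "rare_probability_high_impact" splits into probability "rare" and impact "high"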
probability_value = RiskProbabilty[probability.upper()].value
impact_value = RiskImpact[impact.upper()].value
assert instance.get_risk_score() == RiskScore.get_risk_score(int(probability_value / 100 * impact_value))
|
py | 1a497af7dd68b85faa784212a05bacfed138c275 | # Copyright (c) 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import munch
from openstack import exceptions as os_exc
from oslo_config import cfg as oslo_cfg
from kuryr_kubernetes.controller.drivers import namespace_subnet as subnet_drv
from kuryr_kubernetes import exceptions as k_exc
from kuryr_kubernetes.tests import base as test_base
from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix
def get_pod_obj():
return {
'status': {
'qosClass': 'BestEffort',
'hostIP': '192.168.1.2',
},
'kind': 'Pod',
'spec': {
'schedulerName': 'default-scheduler',
'containers': [{
'name': 'busybox',
'image': 'busybox',
'resources': {}
}],
'nodeName': 'kuryr-devstack'
},
'metadata': {
'name': 'busybox-sleep1',
'namespace': 'default',
'resourceVersion': '53808',
'selfLink': '/api/v1/namespaces/default/pods/busybox-sleep1',
'uid': '452176db-4a85-11e7-80bd-fa163e29dbbb',
'annotations': {
'openstack.org/kuryr-vif': {}
}
}}
class TestNamespacePodSubnetDriver(test_base.TestCase):
@mock.patch('kuryr_kubernetes.utils.get_subnet')
def test_get_subnets(self, m_get_subnet):
pod = get_pod_obj()
pod_namespace = pod['metadata']['namespace']
subnet_id = mock.sentinel.subnet_id
subnet = mock.sentinel.subnet
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
m_driver._get_namespace_subnet_id.return_value = subnet_id
m_get_subnet.return_value = subnet
subnets = cls.get_namespace_subnet(m_driver, pod_namespace)
self.assertEqual({subnet_id: subnet}, subnets)
m_driver._get_namespace_subnet_id.assert_called_once_with(
pod_namespace)
m_get_subnet.assert_called_once_with(subnet_id)
@mock.patch('kuryr_kubernetes.utils.get_subnet')
def test_get_subnets_namespace_not_ready(self, m_get_subnet):
pod = get_pod_obj()
pod_namespace = pod['metadata']['namespace']
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
m_driver._get_namespace_subnet_id.side_effect = (
k_exc.ResourceNotReady(pod_namespace))
self.assertRaises(k_exc.ResourceNotReady, cls.get_namespace_subnet,
m_driver, pod_namespace)
m_driver._get_namespace_subnet_id.assert_called_once_with(
pod_namespace)
m_get_subnet.assert_not_called()
def test__get_namespace_subnet_id(self):
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
namespace = mock.sentinel.namespace
subnet_id = mock.sentinel.subnet_id
crd = {
'status': {
'subnetId': subnet_id
}
}
kubernetes = self.useFixture(k_fix.MockK8sClient()).client
kubernetes.get.return_value = crd
subnet_id_resp = cls._get_namespace_subnet_id(m_driver, namespace)
kubernetes.get.assert_called()
self.assertEqual(subnet_id, subnet_id_resp)
def test__get_namespace_subnet_id_get_crd_exception(self):
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
namespace = mock.sentinel.namespace
kubernetes = self.useFixture(k_fix.MockK8sClient()).client
kubernetes.get.side_effect = k_exc.K8sClientException
self.assertRaises(k_exc.K8sClientException,
cls._get_namespace_subnet_id, m_driver, namespace)
kubernetes.get.assert_called()
def test_delete_namespace_subnet(self):
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
net_id = mock.sentinel.net_id
subnet_id = mock.sentinel.subnet_id
os_net = self.useFixture(k_fix.MockNetworkClient()).client
os_net.ports.return_value = []
os_net.remove_interface_from_router.return_value = {}
cls._delete_namespace_network_resources(m_driver, subnet_id, net_id)
os_net.remove_interface_from_router.assert_called_once()
os_net.delete_network.assert_called_once_with(net_id)
def test_delete_namespace_subnet_openstacksdk_error(self):
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
net_id = mock.sentinel.net_id
subnet_id = mock.sentinel.subnet_id
os_net = self.useFixture(k_fix.MockNetworkClient()).client
os_net.delete_network.side_effect = os_exc.ConflictException
os_net.ports.return_value = []
os_net.remove_interface_from_router.return_value = {}
self.assertRaises(k_exc.ResourceNotReady,
cls._delete_namespace_network_resources, m_driver,
subnet_id, net_id)
os_net.remove_interface_from_router.assert_called_once()
os_net.delete_network.assert_called_once_with(net_id)
os_net.ports.assert_called_with(status='DOWN', network_id=net_id)
def test_create_network(self):
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
namespace = 'test'
project_id = mock.sentinel.project_id
os_net = self.useFixture(k_fix.MockNetworkClient()).client
os_net.networks.return_value = iter([])
net = munch.Munch({'id': mock.sentinel.net})
os_net.create_network.return_value = net
net_id_resp = cls.create_network(m_driver, namespace, project_id)
self.assertEqual(net_id_resp, net['id'])
os_net.create_network.assert_called_once()
os_net.networks.assert_called_once()
def test_create_network_existing(self):
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
namespace = 'test'
project_id = mock.sentinel.project_id
os_net = self.useFixture(k_fix.MockNetworkClient()).client
net = munch.Munch({'id': mock.sentinel.net})
os_net.networks.return_value = iter([net])
net_id_resp = cls.create_network(m_driver, namespace, project_id)
self.assertEqual(net_id_resp, net['id'])
os_net.create_network.assert_not_called()
os_net.networks.assert_called_once()
def test_create_subnet(self):
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
namespace = 'test'
project_id = mock.sentinel.project_id
net_id = mock.sentinel.net_id
subnet = munch.Munch({'id': mock.sentinel.subnet,
'cidr': mock.sentinel.cidr})
os_net = self.useFixture(k_fix.MockNetworkClient()).client
os_net.subnets.return_value = iter([])
os_net.create_subnet.return_value = subnet
subnet_id, subnet_cidr = cls.create_subnet(m_driver, namespace,
project_id, net_id)
self.assertEqual(subnet_id, subnet['id'])
self.assertEqual(subnet_cidr, subnet['cidr'])
os_net.create_subnet.assert_called_once()
os_net.subnets.assert_called_once()
def test_create_subnet_existing(self):
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
namespace = 'test'
project_id = mock.sentinel.project_id
net_id = mock.sentinel.net_id
subnet = munch.Munch({'id': mock.sentinel.subnet,
'cidr': mock.sentinel.cidr})
os_net = self.useFixture(k_fix.MockNetworkClient()).client
os_net.subnets.return_value = iter([subnet])
subnet_id, subnet_cidr = cls.create_subnet(m_driver, namespace,
project_id, net_id)
self.assertEqual(subnet_id, subnet['id'])
self.assertEqual(subnet_cidr, subnet['cidr'])
os_net.create_subnet.assert_not_called()
os_net.subnets.assert_called_once()
def test_add_subnet_to_router(self):
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
subnet_id = mock.sentinel.subnet_id
os_net = self.useFixture(k_fix.MockNetworkClient()).client
os_net.add_interface_to_router.return_value = {}
router_id = 'router1'
oslo_cfg.CONF.set_override('pod_router',
router_id,
group='namespace_subnet')
router_id_resp = cls.add_subnet_to_router(m_driver, subnet_id)
self.assertEqual(router_id_resp, router_id)
os_net.add_interface_to_router.assert_called_once()
def test_add_subnet_to_router_already_connected(self):
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
subnet_id = mock.sentinel.subnet_id
os_net = self.useFixture(k_fix.MockNetworkClient()).client
os_net.add_interface_to_router.side_effect = (
os_exc.BadRequestException)
router_id = 'router1'
oslo_cfg.CONF.set_override('pod_router',
router_id,
group='namespace_subnet')
router_id_resp = cls.add_subnet_to_router(m_driver, subnet_id)
self.assertEqual(router_id_resp, router_id)
os_net.add_interface_to_router.assert_called_once()
def test_add_subnet_to_router_exception(self):
cls = subnet_drv.NamespacePodSubnetDriver
m_driver = mock.MagicMock(spec=cls)
subnet_id = mock.sentinel.subnet_id
os_net = self.useFixture(k_fix.MockNetworkClient()).client
os_net.add_interface_to_router.side_effect = (
os_exc.SDKException)
router_id = 'router1'
oslo_cfg.CONF.set_override('pod_router',
router_id,
group='namespace_subnet')
self.assertRaises(os_exc.SDKException,
cls.add_subnet_to_router, m_driver, subnet_id)
os_net.add_interface_to_router.assert_called_once()
|
py | 1a497c4cf7effe1e46de76d8888b0102039c030e | # DONOT COMMIT CONFIGURATION FILES TO VCS - THIS IS JUST FOR SAMPLE
class DatabaseConfig:
HOST_URL = "127.0.0.1"
USERNAME = "user"
PASSWORD = "password"
DATABASE_NAME = "database"
PORT = 27017
|
py | 1a497c5b3e46b765e08787c4d6ffa90528813295 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class CateItem(scrapy.Item):
title = scrapy.Field()
link = scrapy.Field()
level = scrapy.Field()
pid = scrapy.Field()
pass
class AsinBestItem(scrapy.Item):
asin = scrapy.Field()
cid = scrapy.Field()
rank = scrapy.Field()
pass
class DetailItem(scrapy.Item):
asin = scrapy.Field()
image = scrapy.Field()
title = scrapy.Field()
star = scrapy.Field()
reviews = scrapy.Field()
seller_price = scrapy.Field()
amazon_price = scrapy.Field()
pass
class ReviewProfileItem(scrapy.Item):
asin = scrapy.Field()
product = scrapy.Field()
brand = scrapy.Field()
seller = scrapy.Field()
image = scrapy.Field()
review_total = scrapy.Field()
review_rate = scrapy.Field()
pct_five = scrapy.Field()
pct_four = scrapy.Field()
pct_three = scrapy.Field()
pct_two = scrapy.Field()
pct_one = scrapy.Field()
pass
class ReviewDetailItem(scrapy.Item):
asin = scrapy.Field()
review_id = scrapy.Field()
reviewer = scrapy.Field()
review_url = scrapy.Field()
star = scrapy.Field()
date = scrapy.Field()
title = scrapy.Field()
content = scrapy.Field()
pass
class KeywordRankingItem(scrapy.Item):
skwd_id = scrapy.Field()
rank = scrapy.Field()
date = scrapy.Field()
class SalesRankingItem(scrapy.Item):
rank = scrapy.Field()
classify = scrapy.Field()
asin = scrapy.Field()
|
py | 1a497c5d35ec1c21ffc1add012e07ee7c1ca61c3 | from importlib import import_module
def import_attribute(attribute_string):
module_string, attribute_name = attribute_string.rsplit('.', maxsplit=1)
return getattr(import_module(module_string), attribute_name)
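# Example: import_attribute("os.path.join") returns the os.path.join callable.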
|
py | 1a497c94ec4ceca18ecb43cec7e838fbb72d4aff | # Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
import array
from . import check_random_state
from ._random import sample_without_replacement
__all__ = ["sample_without_replacement"]
def _random_choice_csc(n_samples, classes, class_probability=None, random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of \
shape (n_classes,), default=None
Class distribution of each column. If None, uniform distribution is
assumed.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the sampled classes.
See :term:`Glossary <random_state>`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array("i")
indices = array.array("i")
indptr = array.array("i", [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != "i":
raise ValueError("class dtype %s is not supported" % classes[j].dtype)
classes[j] = classes[j].astype(np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if not np.isclose(np.sum(class_prob_j), 1.0):
raise ValueError(
"Probability array at index {0} does not sum to one".format(j)
)
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError(
"classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(
j, classes[j].shape[0], class_prob_j.shape[0]
)
)
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(
n_population=n_samples, n_samples=nnz, random_state=random_state
)
indices.extend(ind_sample)
# Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = class_probability_nz / np.sum(
class_probability_nz
)
classes_ind = np.searchsorted(
class_probability_nz_norm.cumsum(), rng.rand(nnz)
)
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr), (n_samples, len(classes)), dtype=int)
|
py | 1a497cc6c94a5cb93c04c34dfc8d10b66044bd36 | # Development specific settings
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'HOST': 'db',
'PORT': 5432,
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
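# The console backend writes outgoing e-mail to stdout instead of sending it.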
ALLOWED_HOSTS = ['0.0.0.0']
|
py | 1a497d1adf2ddcaa75a06c3ea3f780e4092b0ec9 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-09-19 15:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('cms', '0016_remove_programpage_contact_us'),
('wagtailimages', '0013_make_rendition_upload_callable'),
]
operations = [
migrations.CreateModel(
name='ProgramFaculty',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('name', models.CharField(help_text='Full name of the faculty member', max_length=255)),
('title', models.CharField(blank=True, max_length=20)),
('short_bio', models.CharField(blank=True, max_length=200)),
('image', models.ForeignKey(blank=True, help_text='Image for the faculty member', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.AddField(
model_name='programpage',
name='faculty_description',
field=models.CharField(blank=True, help_text='The text to be shown as an introduction in the Faculty section', max_length=500, null=True),
),
migrations.AddField(
model_name='programfaculty',
name='program_page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='faculty_members', to='cms.ProgramPage'),
),
]
|
py | 1a497d770486c81483aae613b0dfbfdf52537422 | from wtforms import StringField, PasswordField, BooleanField
from wtforms.validators import DataRequired, Email, ValidationError, EqualTo
from wtforms import TextAreaField, SubmitField, DateField, FieldList, SelectField
from wtforms.fields.html5 import EmailField
from flask_wtf import FlaskForm, RecaptchaField, Recaptcha
import datetime
from data import User, Game, create_session
from config import config
DATE_FORMAT = config.DATE_FORMAT
class NullableDateField(DateField):
"""Native WTForms DateField throws error for empty dates.
Let fix this so that we could have DateField nullable."""
def process_formdata(self, valuelist):
if valuelist:
date_str = ' '.join(valuelist).strip()
if date_str == '':
self.data = None
return
try:
self.data = datetime.datetime.strptime(
date_str, self.format).date()
except ValueError:
self.data = None
                raise ValueError(self.gettext('Неправильный формат даты'))
class RuDataRequired(DataRequired):
"""DataRequired but with russian message"""
def __init__(self, message="Это поле обязательно"):
super().__init__(message)
class RuDateField(DateField):
"""DateField but with russian message"""
def process_formdata(self, valuelist):
if valuelist:
date_str = ' '.join(valuelist)
try:
self.data = datetime.datetime.strptime(
date_str, self.format).date()
except ValueError:
self.data = None
                raise ValueError(self.gettext('Неправильный формат даты'))
class FillWith:
"""
Check that other field isn't empty if this field isn't empty
:param fieldname:
The name of the other field to compare to.
:param message:
Error message to raise in case of a validation error. Can be
interpolated with `%(other_label)s` and `%(other_name)s` to provide a
more helpful error.
"""
def __init__(self, fieldname, other_msg=None, this_msg=None):
self.fieldname = fieldname
self.other_msg = other_msg
self.this_msg = this_msg
def __call__(self, form, field):
if not field.data:
return
try: # Get other form field
other = form[self.fieldname]
except KeyError:
raise ValidationError(field.gettext(
"Invalid field name '%s'.") % self.fieldname)
if not other.data:
d = {
'other_label': hasattr(other, 'label') and other.label.text or self.fieldname,
'other_name': self.fieldname,
'this_label': hasattr(field, 'label') and field.label.text or field.name,
'this_name': field.name,
}
# Add error to other field
other_msg = self.other_msg
if other_msg is None:
other_msg = field.gettext(
'Поле "%(this_label)s" заполнено и это должно')
            other.errors.append(other_msg % d)
# Raise error
this_msg = self.this_msg
if this_msg is None:
this_msg = field.gettext(
'Поле "%(other_label)s" должно быть тоже заполнено')
raise ValidationError(this_msg % d)
def field_data_lower(form, field):
"""Turns field.data to a lower case"""
field.data = field.data.lower()
def field_data_capitalizer(form, field):
"""Capitalize field.data"""
field.data = field.data.capitalize()
def unique_email_validator(form, field):
"""Check if user with same e-mail exist"""
email = field.data.lower()
session = create_session()
if session.query(User).filter(User.email == email).first():
raise ValidationError(
"Пользователь с таким e-mail уже зарегестрирован")
def exist_email_validator(form, field):
"""Check if user with the e-mail exist"""
email = field.data.lower()
session = create_session()
if not session.query(User).filter(User.email == email).first():
raise ValidationError(
"Пользователь не найден")
def password_secure_validator(form, field):
password = field.data
if len(password) > 50:
raise ValidationError("Пароль должен быть меньше 50 символов")
elif len(password) < 8:
raise ValidationError("Пароль должен быть не меньше 8 символов")
class BaseForm(FlaskForm):
class Meta:
locales = ['ru_RU', 'ru']
class RegisterForm(BaseForm):
email = EmailField(
'E-mail *', validators=[field_data_lower,
Email(message="Неправильный формат"),
RuDataRequired(),
unique_email_validator])
password = PasswordField(
'Пароль *', validators=[password_secure_validator])
password_again = PasswordField(
'Повторите пароль *', validators=[EqualTo("password", message="Пароли должны совпадать")])
email_notifications = BooleanField('Уведомления по почте')
surname = StringField('Фамилия *', validators=[
field_data_capitalizer, RuDataRequired()])
name = StringField('Имя *', validators=[
field_data_capitalizer, RuDataRequired()])
patronymic = StringField("Отчество (если есть)", validators=[
field_data_capitalizer])
city = StringField(
"Город *", validators=[field_data_capitalizer, RuDataRequired()])
birthday = RuDateField("Дата рождения *", format=DATE_FORMAT, )
recaptcha = RecaptchaField(
validators=[Recaptcha(message='Это поле обязательно')])
submit = SubmitField('Зарегистрироваться')
class LoginForm(BaseForm):
email = EmailField(
"E-mail", validators=[field_data_lower, Email(message="Неправильный формат"),
RuDataRequired()])
password = PasswordField("Пароль", validators=[RuDataRequired()])
submit = SubmitField("Войти")
class TournamentInfoForm(BaseForm):
title = StringField("Название *", validators=[RuDataRequired()])
description = TextAreaField("Дополнительная информация")
place = StringField("Место проведения")
start = NullableDateField("Начало турнира", format=DATE_FORMAT)
end = NullableDateField("Конец турнира",
format=DATE_FORMAT,
validators=[FillWith('start', other_msg='Без начала нет конца')])
submit = SubmitField("Подтвердить")
class TeamForm(BaseForm):
"""Form for team request"""
name = StringField("Название команды *", validators=[RuDataRequired()])
motto = TextAreaField("Девиз команды")
players = FieldList(EmailField(label="E-mail участника *",
validators=[RuDataRequired(),
field_data_lower,
exist_email_validator]
),
"E-mail yчастников",
min_entries=4,
max_entries=8, )
submit = SubmitField("Подтвердить")
class ResetPasswordStep1(BaseForm):
email = EmailField(
"E-mail", validators=[field_data_lower, Email(message="Неправильный формат"),
RuDataRequired(), exist_email_validator])
submit = SubmitField("Восстановить")
class EditPassword(BaseForm):
password = PasswordField(
'Новый пароль *', validators=[RuDataRequired(), password_secure_validator])
password_again = PasswordField(
'Повторите пароль *', validators=[RuDataRequired(),
EqualTo("password", message="Пароли должны совпадать")])
submit = SubmitField("Изменить пароль")
class EditEmail(BaseForm):
email = EmailField(
"E-mail *", validators=[field_data_lower, Email(message="Неправильный формат"),
RuDataRequired(), unique_email_validator])
submit = SubmitField("Изменить почту")
class PlayerBooleanField(BooleanField):
def __init__(self, *args, player_id, **kwargs):
super(PlayerBooleanField, self).__init__(*args, **kwargs)
self.player_id = player_id
def PrepareToGameForm(game: Game):
"""Generate FlaskForm for game"""
class PrepareToGameForm(BaseForm):
def __init__(self, *args, **kwargs):
"""Add fields to lists"""
super().__init__(*args, **kwargs)
for team in self.teams.values(): # Add initialized fields to dict
team['players'] = []
for field_name in team['_players']:
team['players'].append(getattr(self, field_name))
team['captain'] = getattr(self, team['_captain'])
team['deputy'] = getattr(self, team['_deputy'])
# All operations with the protocol assume that it may be empty or incomplete
teams_json = (game.protocol or {}).get('teams', [{}, {}])
teams = {}
for i, team in enumerate((game.team1, game.team2,), 1):
teams[i] = dict()
teams[i]['team'] = team
teams[i]['_players'] = []
choices = []
selected_players = teams_json[i - 1].get('players', None)
if selected_players is None:
selected_ids = None
else:
selected_ids = [p['id'] for p in selected_players]
for player in team.players:
            # Only previously selected players are checked
# If it's the first time all players are checked
checked = ""
if selected_ids is None or player.id in selected_ids:
checked = "checked"
# Add attr to class
field = PlayerBooleanField(player.fullname,
default=checked,
player_id=player.id)
field_name = f"team{i}_player-{player.id}"
teams[i]['_players'].append(field_name)
setattr(PrepareToGameForm, field_name, field)
choices.append((player.id, player.fullname))
selected_cap = teams_json[i - 1].get('captain', {}).get('id', -1)
selected_deputy = teams_json[i - 1].get('deputy', {}).get('id', -1)
        # Put the previously chosen option in first place
cap_choices = sorted(choices, reverse=True,
key=lambda p: p[0] == selected_cap)
deputy_choices = sorted(choices, reverse=True,
key=lambda p: p[0] == selected_deputy)
cap = SelectField(u"Капитан", coerce=int, choices=cap_choices)
deputy = SelectField(u"Заместитель капитана",
coerce=int, choices=deputy_choices)
teams[i]['_captain'] = f'team{i}_captain'
teams[i]['_deputy'] = f'team{i}_deputy'
# Add attr to class
setattr(PrepareToGameForm, teams[i]['_captain'], cap)
setattr(PrepareToGameForm, teams[i]['_deputy'], deputy)
PrepareToGameForm.teams = teams
PrepareToGameForm.submit = SubmitField("Перейти к протоколу")
return PrepareToGameForm()
|
py | 1a497dd574121c1a91b0c52786d0a50b23258641 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
post_ongoing_update.py
Script to post an ongoing books update on tumblr.
"""
import datetime
import json
import random
import sys
import traceback
from optparse import OptionParser
from twitter import TwitterHTTPError
from gluon import *
from applications.zcomx.modules.creators import Creator
from applications.zcomx.modules.stickon.dal import RecordGenerator
from applications.zcomx.modules.facebook import \
Authenticator as FbAuthenticator, \
FacebookAPIError, \
Poster as FbPoster, \
TextDataPreparer as FbTextDataPreparer
from applications.zcomx.modules.social_media import OngoingPost
from applications.zcomx.modules.tumblr import \
Authenticator, \
Poster, \
TextDataPreparer, \
postable_activity_log_ids
from applications.zcomx.modules.tweeter import \
Authenticator as TwAuthenticator, \
Poster as TwPoster, \
TextDataPreparer as TwTextDataPreparer, \
creators_in_ongoing_post
from applications.zcomx.modules.zco import \
IN_PROGRESS, \
SITE_NAME
from applications.zcomx.modules.logger import set_cli_logging
VERSION = 'Version 0.1'
def post_on_facebook(ongoing_post):
"""Post on facebook
Args:
ongoing_post: OngoingPost instance
Returns:
str, facebook post id
"""
LOG.debug(
'Creating facebook posting for date: %s', str(ongoing_post.post_date))
settings = current.app.local_settings
credentials = {
'email': settings.facebook_email,
'password': settings.facebook_password,
'client_id': settings.facebook_client_id,
'redirect_uri': settings.facebook_redirect_uri,
'page_name': settings.facebook_page_name
}
client = FbAuthenticator(credentials).authenticate()
poster = FbPoster(client)
facebook_data = {'tumblr_post_id': ongoing_post.tumblr_post_id}
text_data = FbTextDataPreparer(facebook_data).data()
error = None
try:
result = poster.post_text(text_data)
except FacebookAPIError as err:
error = err
result = {}
if 'id' not in result:
LOG.error(
'Facebook post failed for ongoing_post: %s', ongoing_post.id
)
LOG.error(
            'Fix: post_ongoing_update.py --facebook %s', str(ongoing_post.post_date)
)
if error:
            LOG.error(error)
return
post_id = result['id']
LOG.debug('post_id: %s', post_id)
return post_id
def post_on_tumblr(ongoing_post):
"""Post on tumblr
Args:
ongoing_post: OngoingPost instance
Returns:
str, tumblr posting id
"""
LOG.debug(
'Creating tumblr posting for date: %s', str(ongoing_post.post_date))
settings = current.app.local_settings
credentials = {
'consumer_key': settings.tumblr_consumer_key,
'consumer_secret': settings.tumblr_consumer_secret,
'oauth_token': settings.tumblr_oauth_token,
'oauth_secret': settings.tumblr_oauth_secret,
}
client = Authenticator(credentials).authenticate()
poster = Poster(client)
query = (db.activity_log.ongoing_post_id == ongoing_post.id)
generator = RecordGenerator(query)
text_data = TextDataPreparer(ongoing_post.post_date, generator).data()
if settings.tumblr_post_state:
text_data['state'] = settings.tumblr_post_state
result = poster.post_text(settings.tumblr_username, text_data)
if 'id' not in result:
LOG.error(
'Tumblr ongoing post failed for date: %s',
str(ongoing_post.post_date)
)
# Try to get an error message.
if 'meta' in result:
if 'status' in result['meta'] and 'msg' in result['meta']:
LOG.error(
'Status: %s, msg: %s',
result['meta']['status'],
result['meta']['msg']
)
if 'response' in result and 'errors' in result['response']:
for error in result['response']['errors']:
LOG.error(error)
return
post_id = result['id']
LOG.debug('post_id: %s', post_id)
return post_id
def post_on_twitter(ongoing_post):
"""Post on twitter
Args:
ongoing_post: OngoingPost instance
Returns:
str, twitter posting id
"""
LOG.debug(
'Creating twitter posting for date: %s', str(ongoing_post.post_date))
settings = current.app.local_settings
credentials = {
'consumer_key': settings.twitter_consumer_key,
'consumer_secret': settings.twitter_consumer_secret,
'oauth_token': settings.twitter_oauth_token,
'oauth_secret': settings.twitter_oauth_secret,
}
client = TwAuthenticator(credentials).authenticate()
poster = TwPoster(client)
creators = [] # [{'name': 'Joe Smoe', 'twitter': '@joesmoe'},...]
for creator_id in creators_in_ongoing_post(ongoing_post):
try:
creator = Creator.from_id(creator_id)
except LookupError:
LOG.error('Creator not found, id: %s', creator_id)
continue
creators.append({
'name': creator.name,
'twitter': creator.twitter,
})
# Shuffle creators so there is no alphabetical bias
random.shuffle(creators)
twitter_data = {
'ongoing_post': {
'creators': creators,
'tumblr_post_id': ongoing_post.tumblr_post_id,
},
'site': {'name': SITE_NAME},
}
text_data = TwTextDataPreparer(twitter_data).data()
error = None
try:
result = poster.post_text(text_data)
except TwitterHTTPError as err:
error = err
result = {}
if 'id' not in result:
LOG.error(
'Twitter post failed for ongoing_post: %s', ongoing_post.id
)
if error:
response_data = json.loads(error.response_data)
if 'errors' in response_data and response_data['errors']:
code = response_data['errors'][0]['code']
msg = response_data['errors'][0]['message']
LOG.error('Code: %s, msg: %s', code, msg)
return
post_id = result['id']
LOG.debug('post_id: %s', post_id)
return post_id
def get_ongoing_post(date, create=True):
"""Get the ongoing_post record for the given date.
Args:
date: datetime.date instance
create: If true, create an ongoing_post record if not found.
Returns:
OngoingPost instance
"""
key = dict(post_date=date)
try:
ongoing_post = OngoingPost.from_key(key)
except LookupError:
ongoing_post = None
if not ongoing_post and create:
ongoing_post = OngoingPost.from_add(key)
return ongoing_post
def man_page():
"""Print manual page-like help"""
print("""
USAGE
post_ongoing_update.py [OPTIONS] yyyy-mm-dd
OPTIONS
-f, --force
        Post regardless of whether the ongoing_post record indicates a post
        has already been made (i.e. ongoing_post.tumblr_post_id and
ongoing_post.twitter_post_id are set)
--facebook
Post only on facebook.
-h, --help
Print a brief help.
--man
Print man page-like help.
    -p, --process-activity-logs
By default posts are made for existing ongoing_post records only
(matched on date) and no activity_log records are processed.
With this option an ongoing_post is created for the date if necessary,
and all activity_log records not yet associated with an ongoing_post
are associated with the new ongoing_post.
--tumblr
Post only on tumblr.
--twitter
Post only on twitter.
-v, --verbose
Print information messages to stdout.
--vv,
More verbose. Print debug messages to stdout.
""")
def main():
"""Main processing."""
usage = '%prog [options] YYYY-MM-DD'
parser = OptionParser(usage=usage, version=VERSION)
parser.add_option(
'-f', '--force',
action='store_true', dest='force', default=False,
        help='Post regardless of whether ongoing post_ids exist.',
)
parser.add_option(
'--facebook',
action='store_true', dest='facebook', default=False,
help='Post only on facebook.',
)
parser.add_option(
'--man',
action='store_true', dest='man', default=False,
help='Display manual page-like help and exit.',
)
parser.add_option(
'-p', '--process-activity-logs',
action='store_true', dest='process_activity_logs', default=False,
help='Process activity_log records.',
)
parser.add_option(
'--tumblr',
action='store_true', dest='tumblr', default=False,
help='Post only on tumblr.',
)
parser.add_option(
'--twitter',
action='store_true', dest='twitter', default=False,
help='Post only on twitter.',
)
parser.add_option(
'-v', '--verbose',
action='store_true', dest='verbose', default=False,
help='Print messages to stdout.',
)
parser.add_option(
'--vv',
action='store_true', dest='vv', default=False,
help='More verbose.',
)
(options, args) = parser.parse_args()
if options.man:
man_page()
quit(0)
set_cli_logging(LOG, options.verbose, options.vv)
if len(args) != 1:
parser.print_help()
exit(1)
LOG.debug('Starting')
try:
date = datetime.datetime.strptime(args[0], '%Y-%m-%d').date()
except ValueError as err:
LOG.error('Invalid date: %s, %s', args[0], err)
exit(1)
if options.process_activity_logs:
activity_log_ids = postable_activity_log_ids()
if not activity_log_ids:
LOG.info('There are no postable activity_log records')
LOG.info('Nothing to do. Aborting')
exit(0)
ongoing_post = get_ongoing_post(date)
for activity_log_id in activity_log_ids:
query = (db.activity_log.id == activity_log_id)
db(query).update(ongoing_post_id=ongoing_post.id)
else:
ongoing_post = get_ongoing_post(date, create=False)
if not ongoing_post:
LOG.error('Ongoing post not found, date: %s', str(date))
exit(1)
services = []
if options.facebook:
services.append('facebook')
if options.tumblr:
services.append('tumblr')
if options.twitter:
services.append('twitter')
if not options.facebook and not options.tumblr and not options.twitter:
services = ['facebook', 'tumblr', 'twitter']
if 'tumblr' in services:
if ongoing_post.tumblr_post_id \
and ongoing_post.tumblr_post_id != IN_PROGRESS \
and not options.force:
LOG.warn(
'Ongoing_post has tumblr_post_id: %s',
ongoing_post.tumblr_post_id
)
LOG.warn('Refusing to post to tumblr without --force')
else:
tumblr_post_id = post_on_tumblr(ongoing_post)
if tumblr_post_id:
ongoing_post = OngoingPost.from_updated(
ongoing_post, dict(tumblr_post_id=tumblr_post_id))
if 'twitter' in services:
if ongoing_post.twitter_post_id \
and ongoing_post.twitter_post_id != IN_PROGRESS \
and not options.force:
LOG.warn(
'Ongoing_post has twitter_post_id: %s',
ongoing_post.twitter_post_id
)
LOG.warn('Refusing to post to twitter without --force')
else:
twitter_post_id = post_on_twitter(ongoing_post)
if twitter_post_id:
ongoing_post = OngoingPost.from_updated(
ongoing_post, dict(twitter_post_id=twitter_post_id))
if 'facebook' in services:
if not ongoing_post.tumblr_post_id \
or ongoing_post.tumblr_post_id == IN_PROGRESS:
LOG.error('Unable to post to facebook without a tumblr_post_id')
elif ongoing_post.facebook_post_id \
and ongoing_post.facebook_post_id != IN_PROGRESS \
and not options.force:
LOG.warn(
'Ongoing_post has facebook_post_id: %s',
ongoing_post.facebook_post_id
)
LOG.warn('Refusing to post to facebook without --force')
else:
facebook_post_id = post_on_facebook(ongoing_post)
if facebook_post_id:
ongoing_post = OngoingPost.from_updated(
ongoing_post, dict(facebook_post_id=facebook_post_id))
LOG.debug('Done')
if __name__ == '__main__':
# pylint: disable=broad-except
try:
main()
except SystemExit:
pass
except Exception:
traceback.print_exc(file=sys.stderr)
exit(1)
|
py | 1a497eef99b016c103e2e2223abdb8cb8aff85c0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import date
from workalendar.core import WesternCalendar, ChristianMixin
from ..registry_tools import iso_register
@iso_register('NL')
class Netherlands(WesternCalendar, ChristianMixin):
'Netherlands'
include_good_friday = True
include_easter_sunday = True
include_easter_monday = True
include_ascension = True
include_whit_sunday = True
include_whit_monday = True
include_boxing_day = True
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(5, 5, "Liberation Day"),
)
def get_king_queen_day(self, year):
"""27 April unless this is a Sunday in which case it is the 26th
Before 2013 it was called Queensday, falling on
30 April, unless this is a Sunday in which case it is the 29th.
"""
if year > 2013:
if date(year, 4, 27).weekday() != 6:
return date(year, 4, 27), "King's day"
else:
return date(year, 4, 26), "King's day"
else:
if date(year, 4, 30).weekday() != 6:
return date(year, 4, 30), "Queen's day"
else:
return date(year, 4, 29), "Queen's day"
def get_variable_days(self, year):
days = super(Netherlands, self).get_variable_days(year)
days.append(self.get_king_queen_day(year))
return days
|
py | 1a497f05ad57810805cd2ed8bbd37fabc5450ab7 | """I/O format for MongoDB
This plugin is designed with data monitoring in mind, to put smaller
amounts of extracted data into a database for quick access. However
it should work with any plugin.
Note that there is no check to make sure the 16MB document size
limit is respected!
"""
import strax
import numpy as np
from pymongo import MongoClient, DESCENDING
from strax import StorageFrontend, StorageBackend, Saver
from datetime import datetime
from pytz import utc as py_utc
from warnings import warn
from sys import getsizeof
export, __all__ = strax.exporter()
# Some data is stored in the buffer. Delete when either of these values
# are exceeded
DEFAULT_MONGO_BACKEND_BUFFER_MB = 200
DEFAULT_MONGO_BACKEND_BUFFER_NRUNS = 5
@export
class MongoBackend(StorageBackend):
"""Mongo storage backend"""
def __init__(self, uri, database, col_name=None):
"""
Backend for reading/writing data from Mongo
:param uri: Mongo url (with pw and username)
:param database: name of database (str)
:param col_name: collection name (str) to look for data
"""
self.client = MongoClient(uri)
self.db = self.client[database]
self.col_name = col_name
# Attributes for the chunks-buffer
self.chunks_registry = {}
self._buffered_backend_keys = []
self._buff_mb = DEFAULT_MONGO_BACKEND_BUFFER_MB
self._buff_nruns = DEFAULT_MONGO_BACKEND_BUFFER_NRUNS
def _read_chunk(self, backend_key, chunk_info, dtype, compressor):
"""See strax.Backend"""
chunk_i = chunk_info["chunk_i"]
registry_key = backend_key + str(chunk_i)
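        # Chunk data is cached in self.chunks_registry keyed on
        # backend_key + chunk_i (as a string).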
# Build the chunk-registry if not done already, also rebuild if
# the key is not in the registry (will fail below if also not
# there on rebuild).
if registry_key not in self.chunks_registry.keys():
self._build_chunk_registry(backend_key)
# Unpack info about this chunk from the query. Return empty if
# not available. Use a *string* in the registry to lookup the
# chunk-data (like we do in _build_chunk_registry).
doc = self.chunks_registry.get(registry_key, None)
if doc is None:
# Did not find the data. NB: can be that the query is off in
# the _build_chunk_registry. In case you end up here but did
# not expect that, double check that self.chunks_registry is
# not an empty dict!
raise ValueError(
f'Metadata claims chunk{chunk_i} exists but it is unknown to '
f'the chunks_registry')
else:
chunk_doc = doc.get('data', None)
if chunk_doc is None:
raise ValueError(
f'Doc for chunk_{chunk_i} in wrong format:\n{doc}')
# Convert JSON to numpy
chunk_len = len(chunk_doc)
result = np.zeros(chunk_len, dtype=dtype)
for i in range(chunk_len):
for key in np.dtype(dtype).names:
result[i][key] = chunk_doc[i][key]
return result
def _saver(self, key, metadata):
"""See strax.Backend"""
        # Use the configured collection name if given; otherwise use the backend key as the collection name
col = self.db[self.col_name if self.col_name is not None else str(key)]
return MongoSaver(key, metadata, col)
def get_metadata(self, key):
"""See strax.Backend"""
query = backend_key_to_query(key)
# Make sure to get the last of the meta-data docs. Otherwise we
# might be getting a previously failed document. Sort argument
# should be obsolete (due to the self.col.delete_many in the
# MongoSaver) but rather safe than sorry.
doc = self.db[self.col_name].find_one({
**query, 'metadata': {"$exists": True}},
# **query, 'provides_meta': True}, <-change to this after TTL has flushed
sort=[('write_time', DESCENDING)])
if doc and 'metadata' in doc:
return doc['metadata']
raise strax.DataNotAvailable
def _build_chunk_registry(self, backend_key):
"""
Build chunk info in a single registry using only one query to
the database. This is much faster as one does not have to do
n-chunk queries to the database. Just one will do. As the
documents-size is limited to 16 MB, it's unlikely that we will
run into memory issues (that we otherwise would not run into).
:param backend_key: strax.DataKey to query the collection for
"""
query = backend_key_to_query(backend_key)
chunks_registry = self.db[self.col_name].find(
{**query, 'chunk_i': {'$exists': True}},
# {**query, 'provides_meta': False}, <-change to this after TTL has flushed
{"chunk_i": 1, "data": 1})
# We are going to convert this to a dictionary as that is
# easier to lookup
for doc in chunks_registry:
chunk_key = doc.get('chunk_i', None)
if chunk_key is None:
# Should not happen because of the projection in find
# but let's double check:
raise ValueError(
f'Projection failed, got doc with no "chunk_i":\n{doc}')
# Update our registry with this chunks info. Use chunk_i as
# chunk_key. Make it a *string* to avoid potential key-error
# issues or json-encoding headaches.
self.chunks_registry[backend_key + str(chunk_key)] = doc.copy()
# Some bookkeeping to make sure we don't buffer too much in this
# backend. We still need to return at least one hence the 'and'.
# See: https://github.com/AxFoundation/strax/issues/346
if backend_key not in self._buffered_backend_keys:
self._buffered_backend_keys.append(backend_key)
while ((getsizeof(self.chunks_registry) / 1e6 > self._buff_mb
and len(self._buffered_backend_keys) > 1)
or len(self._buffered_backend_keys) > self._buff_nruns):
self._clean_first_key_from_registry()
def _clean_first_key_from_registry(self):
"""
Remove the first item in the self.buffered_keys and all the
associated keys in the self.chunks_registry to limit RAM-usage
"""
# only clean the first entry from the list
to_clean = self._buffered_backend_keys[0]
for registry_key in list(self.chunks_registry.keys()):
if to_clean in registry_key:
del self.chunks_registry[registry_key]
del self._buffered_backend_keys[0]
@export
class MongoFrontend(StorageFrontend):
"""MongoDB storage frontend"""
def __init__(self, uri, database, col_name=None, *args, **kwargs):
"""
MongoFrontend for reading/writing data from Mongo
:param uri: Mongo url (with pw and username)
:param database: name of database (str)
:param col_name: collection name (str) to look for data
:param args: init for StorageFrontend
:param kwargs: init for StorageFrontend
"""
super().__init__(*args, **kwargs)
self.client = MongoClient(uri)
self.db = self.client[database]
self.backends = [MongoBackend(uri, database, col_name=col_name)]
self.col_name = col_name
def _find(self, key, write, allow_incomplete, fuzzy_for,
fuzzy_for_options):
"""See strax.Frontend"""
if write:
return self.backends[0].__class__.__name__, str(key)
query = backend_key_to_query(str(key))
if self.db[self.col_name].count_documents(query):
self.log.debug(f"{key} is in cache.")
return self.backends[0].__class__.__name__, str(key)
self.log.debug(f"{key} is NOT in cache.")
raise strax.DataNotAvailable
@export
class MongoSaver(Saver):
allow_rechunk = False
def __init__(self, key, metadata, col):
"""
Mongo saver
:param key: strax.Datakey
:param metadata: metadata to save belonging to data
:param col: collection (NB! pymongo collection object) of mongo
instance to write to
"""
super().__init__(metadata)
self.col = col
# All meta_documents should have the key to query against
basic_meta = backend_key_to_query(key).copy()
# Start with a clean sheet, we are just going to overwrite
self.col.delete_many(basic_meta)
# Add datetime objects as candidates for TTL collections. Either
# can be used according to the preference of the user to index.
# Two entries can be used:
# 1. The time of writing.
# 2. The time of data taking.
basic_meta['write_time'] = datetime.now(py_utc)
# The run_start_time below is a placeholder and will be updated
# in the _save_chunk_metadata for the first chunk. Nevertheless
# we need an object in case there e.g. is no chunk.
basic_meta['run_start_time'] = datetime.now(py_utc)
# Add flag to doc that we are providing the metadata
basic_meta['provides_meta'] = True
# If available later update with this value:
self.run_start = None
# This info should be added to all of the associated documents
self.basic_md = basic_meta
# For the metadata copy this too:
meta_data = basic_meta.copy()
meta_data['metadata'] = self.md
# Save object_ids for fast querying and updates
self.id_md = self.col.insert_one(meta_data).inserted_id
# Also save all the chunks
self.ids_chunk = {}
def _save_chunk(self, data, chunk_info, executor=None):
"""see strax.Saver"""
chunk_i = chunk_info['chunk_i']
if getattr(data, 'nbytes') > 10_000_000:
warn('Inserting documents of size > 10 MB, this is getting '
'close to the 16 MB document size in mongo',
UserWarning)
aggregate_data = []
# Remove the numpy structures and parse the data. The dtype
# information is saved with the metadata so don't worry
for row in data:
ins = {}
for key in list(data.dtype.names):
ins[key] = row[key]
ins = remove_np(ins)
aggregate_data.append(ins)
# Get the document to update, if none available start a new one
# for this chunk
chunk_id = self.ids_chunk.get(chunk_i, None)
# We can fail here if the document is too large to be written
# out to mongo. One could do a try: except
# pymongo.errors.WriteError: pass, but that potentially leads to
# abuse of a Mongo instance going unnoticed.
if chunk_id is not None:
            # In principle we should not end up here, as each chunk
            # should be its own document unless you re-chunk
self.col.update_one({'_id': chunk_id},
{'$push': {f'data': aggregate_data}})
else:
# Start a new document, update it with the proper information
doc = self.basic_md.copy()
doc['write_time'] = datetime.now(py_utc)
doc['chunk_i'] = chunk_i
doc["data"] = aggregate_data
doc['provides_meta'] = False
chunk_id = self.col.insert_one(doc).inserted_id
self.ids_chunk[chunk_i] = chunk_id
return dict(), None
def _save_chunk_metadata(self, chunk_info):
"""see strax.Saver"""
# For the first chunk we get the run_start_time and update the
# run-metadata file
if int(chunk_info['chunk_i']) == 0:
self.run_start = datetime.fromtimestamp(
chunk_info['start']/1e9).replace(tzinfo=py_utc)
self.col.update_one({'_id': self.id_md},
{'$addToSet': {'metadata.chunks': chunk_info}})
def _close(self):
"""see strax.Saver"""
# First update the run-starts of all of the chunk-documents as
# this is a TTL index-candidate
if self.run_start is not None:
update = {'run_start_time': self.run_start}
query = {k: v for k, v in self.basic_md.items()
if k in ('number', 'data_type', 'lineage_hash')}
self.col.update_many(query, {'$set': update})
# Update the metadata
update = {f'metadata.{k}': v
for k, v in self.md.items()
if k in ('writing_ended', 'exception')}
# Also update all of the chunk-documents with the run_start_time
self.col.update_one({'_id': self.id_md}, {'$set': update})
def backend_key_to_query(backend_key):
"""Convert backend key to queryable dictionary"""
n, d, l = backend_key.split('-')
return {'number': int(n), 'data_type': d, 'lineage_hash': l}
def remove_np(dictin):
"""Remove numpy types from a dict so it can be inserted into
mongo."""
if isinstance(dictin, dict):
result = {}
for k in dictin.keys():
result[k] = remove_np(dictin[k])
elif isinstance(dictin, (np.ndarray, list)):
result = []
for k in dictin:
result.append(remove_np(k))
elif isinstance(dictin, np.integer):
return int(dictin)
elif isinstance(dictin, np.floating):
return float(dictin)
else:
return dictin
return result
|
py | 1a497f55cedad8e37b3810f33dacd5e973531409 | import numpy as np
import matplotlib.pyplot as plt
print("Running plot_runtime script..")
maindir = 'res/runtime/'
filename = 'runtime_results.csv'
file = maindir + filename
print("Reading input data from " + file + "..")
data = np.genfromtxt(file, delimiter=',', skip_header=1)
print("Input completed..")
kind = ['static', 'dynamic', 'guided']
chunksize=[1, 2, 4, 8, 16, 32, 64]
avg_time_loop1 = np.zeros((len(chunksize),len(kind)))
avg_time_loop2 = np.zeros((len(chunksize),len(kind)))
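# Column layout assumed by the indexing below: column 1 = schedule kind
# (1-based index into `kind`), column 2 = chunksize, columns 5 and 7 =
# runtimes for loop1 and loop2.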
for num, label in enumerate(kind):
# read kind type column
blocks = data[:,1]
# get data that only match the current kind type
kind_data = data[blocks==num+1,:]
for idx in range(len(chunksize)):
# get data that only match the current chunksize
blocks = kind_data[:,2]
chunk_data = kind_data[blocks==chunksize[idx],:]
# take the average time for each chunksize
avg_time_loop1[idx,num] = np.mean(chunk_data[:,5])
avg_time_loop2[idx,num] = np.mean(chunk_data[:,7])
#time vs chunksize
plt.figure()
plt.plot(chunksize, avg_time_loop1[:,0], '-*', label='Static')
plt.plot(chunksize, avg_time_loop1[:,1], '-^', label='Dynamic')
plt.plot(chunksize, avg_time_loop1[:,2], '-<', label='Guided')
plt.xlabel('Chunksize')
plt.ylabel('Time (s)')
# plt.legend(loc=2)
plt.legend()
plt.grid(True)
plt.savefig(maindir + 'runtime_loop1.eps', format='eps', dpi=1000)
plt.close()
print("Execution time for plot for loop1 completed..")
#time vs chunksize
plt.figure()
plt.plot(chunksize, avg_time_loop2[:,0], '-*', label='Static')
plt.plot(chunksize, avg_time_loop2[:,1], '-^', label='Dynamic')
plt.plot(chunksize, avg_time_loop2[:,2], '-<', label='Guided')
plt.xlabel('Chunksize')
plt.ylabel('Time (s)')
# plt.legend(loc=2)
plt.legend()
plt.grid(True)
plt.savefig(maindir + 'runtime_loop2.eps', format='eps', dpi=1000)
plt.close()
print("Execution time for plot for loop2 completed..")
print("plot_runtime script completed..")
|
py | 1a497faac433b214fd6a71f5c329ef46146c9f7d | """Gunicorn configuration file."""
import multiprocessing
import environ
from koku.feature_flags import UNLEASH_CLIENT
from koku.probe_server import BasicProbeServer
from koku.probe_server import start_probe_server
ENVIRONMENT = environ.Env()
SOURCES = ENVIRONMENT.bool("SOURCES", default=False)
CLOWDER_PORT = "8000"
if ENVIRONMENT.bool("CLOWDER_ENABLED", default=False):
from app_common_python import LoadedConfig
CLOWDER_PORT = LoadedConfig.publicPort
if ENVIRONMENT.bool("MASU", default=False) or ENVIRONMENT.bool("SOURCES", default=False):
CLOWDER_PORT = LoadedConfig.privatePort
bind = f"0.0.0.0:{CLOWDER_PORT}"
cpu_resources = ENVIRONMENT.int("POD_CPU_LIMIT", default=multiprocessing.cpu_count())
workers = 1 if SOURCES else cpu_resources * 2 + 1
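# Gunicorn's suggested sizing heuristic (2 * CPUs + 1); the sources API runs a single worker.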
timeout = ENVIRONMENT.int("TIMEOUT", default=90)
loglevel = ENVIRONMENT.get_value("GUNICORN_LOG_LEVEL", default="INFO")
graceful_timeout = ENVIRONMENT.int("GRACEFUL_TIMEOUT", default=180)
gunicorn_threads = ENVIRONMENT.bool("GUNICORN_THREADS", default=False)
if gunicorn_threads:
threads = cpu_resources * 2 + 1
# Server Hooks
def on_starting(server):
"""Called just before the main process is initialized."""
httpd = start_probe_server(BasicProbeServer, server.log)
httpd.RequestHandlerClass.ready = True
def post_fork(server, worker):
"""Called just after a worker has been forked."""
UNLEASH_CLIENT.unleash_instance_id += f"_pid_{worker.pid}"
worker.log.info("Initializing UNLEASH_CLIENT for gunicorn worker.")
UNLEASH_CLIENT.initialize_client()
def worker_exit(server, worker):
"""Called just after a worker has been exited, in the worker process."""
worker.log.info("Shutting down UNLEASH_CLIENT for gunicorn worker.")
UNLEASH_CLIENT.destroy()
|
py | 1a498210a44245ce22d49d76e9a2387d88d3b5a5 | import requests
import time
class Facebook:
def __init__(self, config, permutations_list):
        # convert the configured rate limit from milliseconds to seconds
self.delay = config['plateform']['facebook']['rate_limit'] / 1000
# https://facebook.com/{username}
self.format = config['plateform']['facebook']['format']
# facebook usernames are not case sensitive
self.permutations_list = [perm.lower() for perm in permutations_list]
# social
self.type = config['plateform']['facebook']['type']
# Generate all potential facebook usernames
def possible_usernames(self):
possible_usernames = []
for permutation in self.permutations_list:
possible_usernames.append(self.format.format(
permutation = permutation,
))
return possible_usernames
def search(self):
facebook_usernames = {
"type": self.type,
"accounts": []
}
possible_usernames_list = self.possible_usernames()
for username in possible_usernames_list:
try:
r = requests.get(username, timeout=5)
except requests.ConnectionError:
print("failed to connect to facebook")
# If the account exists
if r.status_code == 200:
facebook_usernames["accounts"].append({"value": username})
time.sleep(self.delay)
return facebook_usernames |
py | 1a4982ead9a3e49a8e3c8748b92dcf85a12fcd7f | # TODO nits:
# Get rid of asserts that are the caller's fault.
# Docstrings (e.g. ABCs).
import abc
from abc import abstractmethod, abstractproperty
import collections
import functools
import re as stdlib_re # Avoid confusion with the re we export.
import sys
import types
try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc # Fallback for PY3.2.
# Please keep __all__ alphabetized within each category.
__all__ = [
# Super-special typing primitives.
'Any',
'Callable',
'Generic',
'Optional',
'TypeVar',
'Union',
'Tuple',
# ABCs (from collections.abc).
'AbstractSet', # collections.abc.Set.
'ByteString',
'Container',
'Hashable',
'ItemsView',
'Iterable',
'Iterator',
'KeysView',
'Mapping',
'MappingView',
'MutableMapping',
'MutableSequence',
'MutableSet',
'Sequence',
'Sized',
'ValuesView',
# Structural checks, a.k.a. protocols.
'Reversible',
'SupportsAbs',
'SupportsFloat',
'SupportsInt',
'SupportsRound',
# Concrete collection types.
'Dict',
'List',
'Set',
'NamedTuple', # Not really a type.
'Generator',
# One-off things.
'AnyStr',
'cast',
'get_type_hints',
'no_type_check',
'no_type_check_decorator',
'overload',
# Submodules.
'io',
're',
]
def _qualname(x):
if sys.version_info[:2] >= (3, 3):
return x.__qualname__
else:
# Fall back to just name.
return x.__name__
class TypingMeta(type):
"""Metaclass for every type defined below.
This overrides __new__() to require an extra keyword parameter
'_root', which serves as a guard against naive subclassing of the
typing classes. Any legitimate class defined using a metaclass
derived from TypingMeta (including internal subclasses created by
e.g. Union[X, Y]) must pass _root=True.
This also defines a dummy constructor (all the work is done in
__new__) and a nicer repr().
"""
_is_protocol = False
def __new__(cls, name, bases, namespace, *, _root=False):
if not _root:
raise TypeError("Cannot subclass %s" %
(', '.join(map(_type_repr, bases)) or '()'))
return super().__new__(cls, name, bases, namespace)
def __init__(self, *args, **kwds):
pass
def _eval_type(self, globalns, localns):
"""Override this in subclasses to interpret forward references.
For example, Union['C'] is internally stored as
Union[_ForwardRef('C')], which should evaluate to _Union[C],
where C is an object found in globalns or localns (searching
localns first, of course).
"""
return self
def _has_type_var(self):
return False
def __repr__(self):
return '%s.%s' % (self.__module__, _qualname(self))
class Final:
"""Mix-in class to prevent instantiation."""
__slots__ = ()
def __new__(self, *args, **kwds):
raise TypeError("Cannot instantiate %r" % self.__class__)
class _ForwardRef(TypingMeta):
"""Wrapper to hold a forward reference."""
def __new__(cls, arg):
if not isinstance(arg, str):
raise TypeError('ForwardRef must be a string -- got %r' % (arg,))
try:
code = compile(arg, '<string>', 'eval')
except SyntaxError:
raise SyntaxError('ForwardRef must be an expression -- got %r' %
(arg,))
self = super().__new__(cls, arg, (), {}, _root=True)
self.__forward_arg__ = arg
self.__forward_code__ = code
self.__forward_evaluated__ = False
self.__forward_value__ = None
typing_globals = globals()
frame = sys._getframe(1)
while frame is not None and frame.f_globals is typing_globals:
frame = frame.f_back
assert frame is not None
self.__forward_frame__ = frame
return self
def _eval_type(self, globalns, localns):
if not isinstance(localns, dict):
raise TypeError('ForwardRef localns must be a dict -- got %r' %
(localns,))
if not isinstance(globalns, dict):
raise TypeError('ForwardRef globalns must be a dict -- got %r' %
(globalns,))
if not self.__forward_evaluated__:
if globalns is None and localns is None:
globalns = localns = {}
elif globalns is None:
globalns = localns
elif localns is None:
localns = globalns
self.__forward_value__ = _type_check(
eval(self.__forward_code__, globalns, localns),
"Forward references must evaluate to types.")
self.__forward_evaluated__ = True
return self.__forward_value__
def __instancecheck__(self, obj):
raise TypeError("Forward references cannot be used with isinstance().")
def __subclasscheck__(self, cls):
if not self.__forward_evaluated__:
globalns = self.__forward_frame__.f_globals
localns = self.__forward_frame__.f_locals
try:
self._eval_type(globalns, localns)
except NameError:
return False # Too early.
return issubclass(cls, self.__forward_value__)
def __repr__(self):
return '_ForwardRef(%r)' % (self.__forward_arg__,)
class _TypeAlias:
"""Internal helper class for defining generic variants of concrete types.
Note that this is not a type; let's call it a pseudo-type. It can
be used in instance and subclass checks, e.g. isinstance(m, Match)
or issubclass(type(m), Match). However, it cannot be itself the
target of an issubclass() call; e.g. issubclass(Match, C) (for
some arbitrary class C) raises TypeError rather than returning
False.
"""
__slots__ = ('name', 'type_var', 'impl_type', 'type_checker')
def __new__(cls, *args, **kwds):
"""Constructor.
This only exists to give a better error message in case
someone tries to subclass a type alias (not a good idea).
"""
if (len(args) == 3 and
isinstance(args[0], str) and
isinstance(args[1], tuple)):
# Close enough.
raise TypeError("A type alias cannot be subclassed")
return object.__new__(cls)
def __init__(self, name, type_var, impl_type, type_checker):
"""Initializer.
Args:
name: The name, e.g. 'Pattern'.
type_var: The type parameter, e.g. AnyStr, or the
specific type, e.g. str.
impl_type: The implementation type.
type_checker: Function that takes an impl_type instance.
and returns a value that should be a type_var instance.
"""
assert isinstance(name, str), repr(name)
assert isinstance(type_var, type), repr(type_var)
assert isinstance(impl_type, type), repr(impl_type)
assert not isinstance(impl_type, TypingMeta), repr(impl_type)
self.name = name
self.type_var = type_var
self.impl_type = impl_type
self.type_checker = type_checker
def __repr__(self):
return "%s[%s]" % (self.name, _type_repr(self.type_var))
def __getitem__(self, parameter):
assert isinstance(parameter, type), repr(parameter)
if not isinstance(self.type_var, TypeVar):
raise TypeError("%s cannot be further parameterized." % self)
if self.type_var.__constraints__:
if not issubclass(parameter, Union[self.type_var.__constraints__]):
raise TypeError("%s is not a valid substitution for %s." %
(parameter, self.type_var))
return self.__class__(self.name, parameter,
self.impl_type, self.type_checker)
def __instancecheck__(self, obj):
raise TypeError("Type aliases cannot be used with isinstance().")
def __subclasscheck__(self, cls):
if cls is Any:
return True
if isinstance(cls, _TypeAlias):
# Covariance. For now, we compare by name.
return (cls.name == self.name and
issubclass(cls.type_var, self.type_var))
else:
# Note that this is too lenient, because the
# implementation type doesn't carry information about
# whether it is about bytes or str (for example).
return issubclass(cls, self.impl_type)
def _has_type_var(t):
return t is not None and isinstance(t, TypingMeta) and t._has_type_var()
def _eval_type(t, globalns, localns):
if isinstance(t, TypingMeta):
return t._eval_type(globalns, localns)
else:
return t
def _type_check(arg, msg):
"""Check that the argument is a type, and return it.
As a special case, accept None and return type(None) instead.
Also, _TypeAlias instances (e.g. Match, Pattern) are acceptable.
The msg argument is a human-readable error message, e.g.
"Union[arg, ...]: arg should be a type."
We append the repr() of the actual value (truncated to 100 chars).
"""
if arg is None:
return type(None)
if isinstance(arg, str):
arg = _ForwardRef(arg)
if not isinstance(arg, (type, _TypeAlias)):
raise TypeError(msg + " Got %.100r." % (arg,))
return arg
def _type_repr(obj):
"""Return the repr() of an object, special-casing types.
If obj is a type, we return a shorter version than the default
type.__repr__, based on the module and qualified name, which is
typically enough to uniquely identify a type. For everything
else, we fall back on repr(obj).
"""
if isinstance(obj, type) and not isinstance(obj, TypingMeta):
if obj.__module__ == 'builtins':
return _qualname(obj)
else:
return '%s.%s' % (obj.__module__, _qualname(obj))
else:
return repr(obj)
class AnyMeta(TypingMeta):
"""Metaclass for Any."""
def __new__(cls, name, bases, namespace, _root=False):
self = super().__new__(cls, name, bases, namespace, _root=_root)
return self
def __instancecheck__(self, obj):
raise TypeError("Any cannot be used with isinstance().")
def __subclasscheck__(self, cls):
if not isinstance(cls, type):
return super().__subclasscheck__(cls) # To TypeError.
return True
class Any(Final, metaclass=AnyMeta, _root=True):
"""Special type indicating an unconstrained type.
- Any object is an instance of Any.
- Any class is a subclass of Any.
- As a special case, Any and object are subclasses of each other.
"""
__slots__ = ()
class TypeVar(TypingMeta, metaclass=TypingMeta, _root=True):
"""Type variable.
Usage::
T = TypeVar('T') # Can be anything
A = TypeVar('A', str, bytes) # Must be str or bytes
Type variables exist primarily for the benefit of static type
checkers. They serve as the parameters for generic types as well
as for generic function definitions. See class Generic for more
information on generic types. Generic functions work as follows:
def repeat(x: T, n: int) -> Sequence[T]:
'''Return a list containing n references to x.'''
return [x]*n
def longest(x: A, y: A) -> A:
'''Return the longest of two strings.'''
return x if len(x) >= len(y) else y
The latter example's signature is essentially the overloading
of (str, str) -> str and (bytes, bytes) -> bytes. Also note
that if the arguments are instances of some subclass of str,
the return type is still plain str.
At runtime, isinstance(x, T) will raise TypeError. However,
issubclass(C, T) is true for any class C, and issubclass(str, A)
and issubclass(bytes, A) are true, and issubclass(int, A) is
false.
Type variables may be marked covariant or contravariant by passing
covariant=True or contravariant=True. See PEP 484 for more
details. By default type variables are invariant.
Type variables can be introspected. e.g.:
T.__name__ == 'T'
T.__constraints__ == ()
T.__covariant__ == False
      T.__contravariant__ == False
A.__constraints__ == (str, bytes)
"""
def __new__(cls, name, *constraints, bound=None,
covariant=False, contravariant=False):
self = super().__new__(cls, name, (Final,), {}, _root=True)
if covariant and contravariant:
raise ValueError("Bivariant type variables are not supported.")
self.__covariant__ = bool(covariant)
self.__contravariant__ = bool(contravariant)
if constraints and bound is not None:
raise TypeError("Constraints cannot be combined with bound=...")
if constraints and len(constraints) == 1:
raise TypeError("A single constraint is not allowed")
msg = "TypeVar(name, constraint, ...): constraints must be types."
self.__constraints__ = tuple(_type_check(t, msg) for t in constraints)
if bound:
self.__bound__ = _type_check(bound, "Bound must be a type.")
else:
self.__bound__ = None
return self
def _has_type_var(self):
return True
def __repr__(self):
if self.__covariant__:
prefix = '+'
elif self.__contravariant__:
prefix = '-'
else:
prefix = '~'
return prefix + self.__name__
def __instancecheck__(self, instance):
raise TypeError("Type variables cannot be used with isinstance().")
def __subclasscheck__(self, cls):
# TODO: Make this raise TypeError too?
if cls is self:
return True
if cls is Any:
return True
if self.__bound__ is not None:
return issubclass(cls, self.__bound__)
if self.__constraints__:
return any(issubclass(cls, c) for c in self.__constraints__)
return True
# Some unconstrained type variables. These are used by the container types.
T = TypeVar('T') # Any type.
KT = TypeVar('KT') # Key type.
VT = TypeVar('VT') # Value type.
T_co = TypeVar('T_co', covariant=True) # Any type covariant containers.
V_co = TypeVar('V_co', covariant=True) # Any type covariant containers.
VT_co = TypeVar('VT_co', covariant=True) # Value type covariant containers.
T_contra = TypeVar('T_contra', contravariant=True) # Ditto contravariant.
# A useful type variable with constraints. This represents string types.
# TODO: What about bytearray, memoryview?
AnyStr = TypeVar('AnyStr', bytes, str)
class UnionMeta(TypingMeta):
"""Metaclass for Union."""
def __new__(cls, name, bases, namespace, parameters=None, _root=False):
if parameters is None:
return super().__new__(cls, name, bases, namespace, _root=_root)
if not isinstance(parameters, tuple):
raise TypeError("Expected parameters=<tuple>")
# Flatten out Union[Union[...], ...] and type-check non-Union args.
params = []
msg = "Union[arg, ...]: each arg must be a type."
for p in parameters:
if isinstance(p, UnionMeta):
params.extend(p.__union_params__)
else:
params.append(_type_check(p, msg))
# Weed out strict duplicates, preserving the first of each occurrence.
all_params = set(params)
if len(all_params) < len(params):
new_params = []
for t in params:
if t in all_params:
new_params.append(t)
all_params.remove(t)
params = new_params
assert not all_params, all_params
# Weed out subclasses.
# E.g. Union[int, Employee, Manager] == Union[int, Employee].
# If Any or object is present it will be the sole survivor.
# If both Any and object are present, Any wins.
# Never discard type variables, except against Any.
# (In particular, Union[str, AnyStr] != AnyStr.)
all_params = set(params)
for t1 in params:
if t1 is Any:
return Any
if isinstance(t1, TypeVar):
continue
if any(issubclass(t1, t2)
for t2 in all_params - {t1} if not isinstance(t2, TypeVar)):
all_params.remove(t1)
# It's not a union if there's only one type left.
if len(all_params) == 1:
return all_params.pop()
# Create a new class with these params.
self = super().__new__(cls, name, bases, {}, _root=True)
self.__union_params__ = tuple(t for t in params if t in all_params)
self.__union_set_params__ = frozenset(self.__union_params__)
return self
def _eval_type(self, globalns, localns):
p = tuple(_eval_type(t, globalns, localns)
for t in self.__union_params__)
if p == self.__union_params__:
return self
else:
return self.__class__(self.__name__, self.__bases__, {},
p, _root=True)
def _has_type_var(self):
if self.__union_params__:
for t in self.__union_params__:
if _has_type_var(t):
return True
return False
def __repr__(self):
r = super().__repr__()
if self.__union_params__:
r += '[%s]' % (', '.join(_type_repr(t)
for t in self.__union_params__))
return r
def __getitem__(self, parameters):
if self.__union_params__ is not None:
raise TypeError(
"Cannot subscript an existing Union. Use Union[u, t] instead.")
if parameters == ():
raise TypeError("Cannot take a Union of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
return self.__class__(self.__name__, self.__bases__,
dict(self.__dict__), parameters, _root=True)
def __eq__(self, other):
if not isinstance(other, UnionMeta):
return NotImplemented
return self.__union_set_params__ == other.__union_set_params__
def __hash__(self):
return hash(self.__union_set_params__)
def __instancecheck__(self, obj):
raise TypeError("Unions cannot be used with isinstance().")
def __subclasscheck__(self, cls):
if cls is Any:
return True
if self.__union_params__ is None:
return isinstance(cls, UnionMeta)
elif isinstance(cls, UnionMeta):
if cls.__union_params__ is None:
return False
return all(issubclass(c, self) for c in (cls.__union_params__))
elif isinstance(cls, TypeVar):
if cls in self.__union_params__:
return True
if cls.__constraints__:
return issubclass(Union[cls.__constraints__], self)
return False
else:
return any(issubclass(cls, t) for t in self.__union_params__)
class Union(Final, metaclass=UnionMeta, _root=True):
"""Union type; Union[X, Y] means either X or Y.
To define a union, use e.g. Union[int, str]. Details:
- The arguments must be types and there must be at least one.
- None as an argument is a special case and is replaced by
type(None).
- Unions of unions are flattened, e.g.::
Union[Union[int, str], float] == Union[int, str, float]
- Unions of a single argument vanish, e.g.::
Union[int] == int # The constructor actually returns int
- Redundant arguments are skipped, e.g.::
Union[int, str, int] == Union[int, str]
- When comparing unions, the argument order is ignored, e.g.::
Union[int, str] == Union[str, int]
- When two arguments have a subclass relationship, the least
derived argument is kept, e.g.::
class Employee: pass
class Manager(Employee): pass
Union[int, Employee, Manager] == Union[int, Employee]
Union[Manager, int, Employee] == Union[int, Employee]
Union[Employee, Manager] == Employee
- Corollary: if Any is present it is the sole survivor, e.g.::
Union[int, Any] == Any
- Similar for object::
Union[int, object] == object
- To cut a tie: Union[object, Any] == Union[Any, object] == Any.
- You cannot subclass or instantiate a union.
- You cannot write Union[X][Y] (what would it mean?).
- You can use Optional[X] as a shorthand for Union[X, None].
"""
# Unsubscripted Union type has params set to None.
__union_params__ = None
__union_set_params__ = None
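# Editor's illustration (not part of the original module): the simplification rules
# documented in the Union docstring above, written as expressions that hold at runtime:
#     Union[Union[int, str], float] == Union[int, str, float]   # nested unions are flattened
#     Union[int, str, int] == Union[int, str]                   # duplicates are skipped
#     Union[int] is int                                         # a single argument vanishes
#     Union[int, Any] is Any                                    # Any absorbs everything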
class OptionalMeta(TypingMeta):
"""Metaclass for Optional."""
def __new__(cls, name, bases, namespace, _root=False):
return super().__new__(cls, name, bases, namespace, _root=_root)
def __getitem__(self, arg):
arg = _type_check(arg, "Optional[t] requires a single type.")
return Union[arg, type(None)]
class Optional(Final, metaclass=OptionalMeta, _root=True):
"""Optional type.
Optional[X] is equivalent to Union[X, type(None)].
"""
__slots__ = ()
class TupleMeta(TypingMeta):
"""Metaclass for Tuple."""
def __new__(cls, name, bases, namespace, parameters=None,
use_ellipsis=False, _root=False):
self = super().__new__(cls, name, bases, namespace, _root=_root)
self.__tuple_params__ = parameters
self.__tuple_use_ellipsis__ = use_ellipsis
return self
def _has_type_var(self):
if self.__tuple_params__:
for t in self.__tuple_params__:
if _has_type_var(t):
return True
return False
def _eval_type(self, globalns, localns):
tp = self.__tuple_params__
if tp is None:
return self
p = tuple(_eval_type(t, globalns, localns) for t in tp)
if p == self.__tuple_params__:
return self
else:
return self.__class__(self.__name__, self.__bases__, {},
p, _root=True)
def __repr__(self):
r = super().__repr__()
if self.__tuple_params__ is not None:
params = [_type_repr(p) for p in self.__tuple_params__]
if self.__tuple_use_ellipsis__:
params.append('...')
r += '[%s]' % (
', '.join(params))
return r
def __getitem__(self, parameters):
if self.__tuple_params__ is not None:
raise TypeError("Cannot re-parameterize %r" % (self,))
if not isinstance(parameters, tuple):
parameters = (parameters,)
if len(parameters) == 2 and parameters[1] == Ellipsis:
parameters = parameters[:1]
use_ellipsis = True
msg = "Tuple[t, ...]: t must be a type."
else:
use_ellipsis = False
msg = "Tuple[t0, t1, ...]: each t must be a type."
parameters = tuple(_type_check(p, msg) for p in parameters)
return self.__class__(self.__name__, self.__bases__,
dict(self.__dict__), parameters,
use_ellipsis=use_ellipsis, _root=True)
def __eq__(self, other):
if not isinstance(other, TupleMeta):
return NotImplemented
return self.__tuple_params__ == other.__tuple_params__
def __hash__(self):
return hash(self.__tuple_params__)
def __instancecheck__(self, obj):
raise TypeError("Tuples cannot be used with isinstance().")
def __subclasscheck__(self, cls):
if cls is Any:
return True
if not isinstance(cls, type):
return super().__subclasscheck__(cls) # To TypeError.
if issubclass(cls, tuple):
return True # Special case.
if not isinstance(cls, TupleMeta):
return super().__subclasscheck__(cls) # False.
if self.__tuple_params__ is None:
return True
if cls.__tuple_params__ is None:
return False # ???
if cls.__tuple_use_ellipsis__ != self.__tuple_use_ellipsis__:
return False
# Covariance.
return (len(self.__tuple_params__) == len(cls.__tuple_params__) and
all(issubclass(x, p)
for x, p in zip(cls.__tuple_params__,
self.__tuple_params__)))
class Tuple(Final, metaclass=TupleMeta, _root=True):
"""Tuple type; Tuple[X, Y] is the cross-product type of X and Y.
Example: Tuple[T1, T2] is a tuple of two elements corresponding
to type variables T1 and T2. Tuple[int, float, str] is a tuple
of an int, a float and a string.
To specify a variable-length tuple of homogeneous type, use Sequence[T].
"""
__slots__ = ()
class CallableMeta(TypingMeta):
"""Metaclass for Callable."""
def __new__(cls, name, bases, namespace, _root=False,
args=None, result=None):
if args is None and result is None:
pass # Must be 'class Callable'.
else:
if args is not Ellipsis:
if not isinstance(args, list):
raise TypeError("Callable[args, result]: "
"args must be a list."
" Got %.100r." % (args,))
msg = "Callable[[arg, ...], result]: each arg must be a type."
args = tuple(_type_check(arg, msg) for arg in args)
msg = "Callable[args, result]: result must be a type."
result = _type_check(result, msg)
self = super().__new__(cls, name, bases, namespace, _root=_root)
self.__args__ = args
self.__result__ = result
return self
def _has_type_var(self):
if self.__args__:
for t in self.__args__:
if _has_type_var(t):
return True
return _has_type_var(self.__result__)
def _eval_type(self, globalns, localns):
if self.__args__ is None and self.__result__ is None:
return self
if self.__args__ is Ellipsis:
args = self.__args__
else:
args = [_eval_type(t, globalns, localns) for t in self.__args__]
result = _eval_type(self.__result__, globalns, localns)
if args == self.__args__ and result == self.__result__:
return self
else:
return self.__class__(self.__name__, self.__bases__, {},
args=args, result=result, _root=True)
def __repr__(self):
r = super().__repr__()
if self.__args__ is not None or self.__result__ is not None:
if self.__args__ is Ellipsis:
args_r = '...'
else:
args_r = '[%s]' % ', '.join(_type_repr(t)
for t in self.__args__)
r += '[%s, %s]' % (args_r, _type_repr(self.__result__))
return r
def __getitem__(self, parameters):
if self.__args__ is not None or self.__result__ is not None:
raise TypeError("This Callable type is already parameterized.")
if not isinstance(parameters, tuple) or len(parameters) != 2:
raise TypeError(
"Callable must be used as Callable[[arg, ...], result].")
args, result = parameters
return self.__class__(self.__name__, self.__bases__,
dict(self.__dict__), _root=True,
args=args, result=result)
def __eq__(self, other):
if not isinstance(other, CallableMeta):
return NotImplemented
return (self.__args__ == other.__args__ and
self.__result__ == other.__result__)
def __hash__(self):
return hash(self.__args__) ^ hash(self.__result__)
def __instancecheck__(self, obj):
# For unparametrized Callable we allow this, because
# typing.Callable should be equivalent to
# collections.abc.Callable.
if self.__args__ is None and self.__result__ is None:
return isinstance(obj, collections_abc.Callable)
else:
raise TypeError("Callable[] cannot be used with isinstance().")
def __subclasscheck__(self, cls):
if cls is Any:
return True
if not isinstance(cls, CallableMeta):
return super().__subclasscheck__(cls)
if self.__args__ is None and self.__result__ is None:
return True
# We're not doing covariance or contravariance -- this is *invariance*.
return self == cls
class Callable(Final, metaclass=CallableMeta, _root=True):
"""Callable type; Callable[[int], str] is a function of (int) -> str.
The subscription syntax must always be used with exactly two
values: the argument list and the return type. The argument list
must be a list of types; the return type must be a single type.
    There is no syntax to indicate optional or keyword arguments;
such function types are rarely used as callback types.
"""
__slots__ = ()
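# Editor's illustration (not part of the original module): Callable is subscripted with
# an argument list and a return type, e.g.
#     Callable[[int, str], bool]   # a callable accepting (int, str) and returning bool
#     Callable[..., int]           # Ellipsis leaves the argument list unspecified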
def _gorg(a):
"""Return the farthest origin of a generic class."""
assert isinstance(a, GenericMeta)
while a.__origin__ is not None:
a = a.__origin__
return a
def _geqv(a, b):
"""Return whether two generic classes are equivalent.
The intention is to consider generic class X and any of its
parameterized forms (X[T], X[int], etc.) as equivalent.
However, X is not equivalent to a subclass of X.
The relation is reflexive, symmetric and transitive.
"""
assert isinstance(a, GenericMeta) and isinstance(b, GenericMeta)
# Reduce each to its origin.
return _gorg(a) is _gorg(b)
class GenericMeta(TypingMeta, abc.ABCMeta):
"""Metaclass for generic types."""
# TODO: Constrain more how Generic is used; only a few
# standard patterns should be allowed.
# TODO: Use a more precise rule than matching __name__ to decide
# whether two classes are the same. Also, save the formal
# parameters. (These things are related! A solution lies in
# using origin.)
__extra__ = None
def __new__(cls, name, bases, namespace,
parameters=None, origin=None, extra=None):
if parameters is None:
# Extract parameters from direct base classes. Only
# direct bases are considered and only those that are
# themselves generic, and parameterized with type
# variables. Don't use bases like Any, Union, Tuple,
# Callable or type variables.
params = None
for base in bases:
if isinstance(base, TypingMeta):
if not isinstance(base, GenericMeta):
raise TypeError(
"You cannot inherit from magic class %s" %
repr(base))
if base.__parameters__ is None:
continue # The base is unparameterized.
for bp in base.__parameters__:
if _has_type_var(bp) and not isinstance(bp, TypeVar):
raise TypeError(
"Cannot inherit from a generic class "
"parameterized with "
"non-type-variable %s" % bp)
if params is None:
params = []
if bp not in params:
params.append(bp)
if params is not None:
parameters = tuple(params)
self = super().__new__(cls, name, bases, namespace, _root=True)
self.__parameters__ = parameters
if extra is not None:
self.__extra__ = extra
# Else __extra__ is inherited, eventually from the
# (meta-)class default above.
self.__origin__ = origin
return self
def _has_type_var(self):
if self.__parameters__:
for t in self.__parameters__:
if _has_type_var(t):
return True
return False
def __repr__(self):
r = super().__repr__()
if self.__parameters__ is not None:
r += '[%s]' % (
', '.join(_type_repr(p) for p in self.__parameters__))
return r
def __eq__(self, other):
if not isinstance(other, GenericMeta):
return NotImplemented
return (_geqv(self, other) and
self.__parameters__ == other.__parameters__)
def __hash__(self):
return hash((self.__name__, self.__parameters__))
def __getitem__(self, params):
if not isinstance(params, tuple):
params = (params,)
if not params:
raise TypeError("Cannot have empty parameter list")
msg = "Parameters to generic types must be types."
params = tuple(_type_check(p, msg) for p in params)
if self.__parameters__ is None:
for p in params:
if not isinstance(p, TypeVar):
raise TypeError("Initial parameters must be "
"type variables; got %s" % p)
if len(set(params)) != len(params):
raise TypeError(
"All type variables in Generic[...] must be distinct.")
else:
if len(params) != len(self.__parameters__):
raise TypeError("Cannot change parameter count from %d to %d" %
(len(self.__parameters__), len(params)))
for new, old in zip(params, self.__parameters__):
if isinstance(old, TypeVar):
if not old.__constraints__:
# Substituting for an unconstrained TypeVar is OK.
continue
if issubclass(new, Union[old.__constraints__]):
# Specializing a constrained type variable is OK.
continue
if not issubclass(new, old):
raise TypeError(
"Cannot substitute %s for %s in %s" %
(_type_repr(new), _type_repr(old), self))
return self.__class__(self.__name__, self.__bases__,
dict(self.__dict__),
parameters=params,
origin=self,
extra=self.__extra__)
def __instancecheck__(self, instance):
# Since we extend ABC.__subclasscheck__ and
# ABC.__instancecheck__ inlines the cache checking done by the
# latter, we must extend __instancecheck__ too. For simplicity
# we just skip the cache check -- instance checks for generic
# classes are supposed to be rare anyways.
return self.__subclasscheck__(instance.__class__)
def __subclasscheck__(self, cls):
if cls is Any:
return True
if isinstance(cls, GenericMeta):
# For a class C(Generic[T]) where T is co-variant,
# C[X] is a subclass of C[Y] iff X is a subclass of Y.
origin = self.__origin__
if origin is not None and origin is cls.__origin__:
assert len(self.__parameters__) == len(origin.__parameters__)
assert len(cls.__parameters__) == len(origin.__parameters__)
for p_self, p_cls, p_origin in zip(self.__parameters__,
cls.__parameters__,
origin.__parameters__):
if isinstance(p_origin, TypeVar):
if p_origin.__covariant__:
# Covariant -- p_cls must be a subclass of p_self.
if not issubclass(p_cls, p_self):
break
elif p_origin.__contravariant__:
# Contravariant. I think it's the opposite. :-)
if not issubclass(p_self, p_cls):
break
else:
# Invariant -- p_cls and p_self must equal.
if p_self != p_cls:
break
else:
# If the origin's parameter is not a typevar,
# insist on invariance.
if p_self != p_cls:
break
else:
return True
# If we break out of the loop, the superclass gets a chance.
if super().__subclasscheck__(cls):
return True
if self.__extra__ is None or isinstance(cls, GenericMeta):
return False
return issubclass(cls, self.__extra__)
class Generic(metaclass=GenericMeta):
"""Abstract base class for generic types.
A generic type is typically declared by inheriting from an
instantiation of this class with one or more type variables.
For example, a generic mapping type might be defined as::
class Mapping(Generic[KT, VT]):
def __getitem__(self, key: KT) -> VT:
...
# Etc.
This class can then be used as follows::
def lookup_name(mapping: Mapping, key: KT, default: VT) -> VT:
try:
return mapping[key]
except KeyError:
return default
For clarity the type variables may be redefined, e.g.::
X = TypeVar('X')
Y = TypeVar('Y')
def lookup_name(mapping: Mapping[X, Y], key: X, default: Y) -> Y:
# Same body as above.
"""
__slots__ = ()
def __new__(cls, *args, **kwds):
next_in_mro = object
# Look for the last occurrence of Generic or Generic[...].
for i, c in enumerate(cls.__mro__[:-1]):
if isinstance(c, GenericMeta) and _gorg(c) is Generic:
next_in_mro = cls.__mro__[i+1]
return next_in_mro.__new__(_gorg(cls))
def cast(typ, val):
"""Cast a value to a type.
This returns the value unchanged. To the type checker this
signals that the return value has the designated type, but at
runtime we intentionally don't check anything (we want this
to be as fast as possible).
"""
return val
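# Editor's illustration (not part of the original module): cast() only informs the
# static type checker; the value passes through unchanged at runtime, e.g.
#     cast(int, "abc") == "abc"    # no conversion and no runtime check is performed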
def _get_defaults(func):
"""Internal helper to extract the default arguments, by name."""
code = func.__code__
pos_count = code.co_argcount
kw_count = code.co_kwonlyargcount
arg_names = code.co_varnames
kwarg_names = arg_names[pos_count:pos_count + kw_count]
arg_names = arg_names[:pos_count]
defaults = func.__defaults__ or ()
kwdefaults = func.__kwdefaults__
res = dict(kwdefaults) if kwdefaults else {}
pos_offset = pos_count - len(defaults)
for name, value in zip(arg_names[pos_offset:], defaults):
assert name not in res
res[name] = value
return res
def get_type_hints(obj, globalns=None, localns=None):
"""Return type hints for a function or method object.
This is often the same as obj.__annotations__, but it handles
forward references encoded as string literals, and if necessary
adds Optional[t] if a default value equal to None is set.
BEWARE -- the behavior of globalns and localns is counterintuitive
(unless you are familiar with how eval() and exec() work). The
search order is locals first, then globals.
- If no dict arguments are passed, an attempt is made to use the
globals from obj, and these are also used as the locals. If the
object does not appear to have globals, an exception is raised.
- If one dict argument is passed, it is used for both globals and
locals.
- If two dict arguments are passed, they specify globals and
locals, respectively.
"""
if getattr(obj, '__no_type_check__', None):
return {}
if globalns is None:
globalns = getattr(obj, '__globals__', {})
if localns is None:
localns = globalns
elif localns is None:
localns = globalns
defaults = _get_defaults(obj)
hints = dict(obj.__annotations__)
for name, value in hints.items():
if isinstance(value, str):
value = _ForwardRef(value)
value = _eval_type(value, globalns, localns)
if name in defaults and defaults[name] is None:
value = Optional[value]
hints[name] = value
return hints
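# Editor's illustration (not part of the original module) of the Optional[t] insertion
# described in the docstring above:
#     def greet(name: str, times: int = None) -> str: ...
#     get_type_hints(greet) == {'name': str, 'times': Optional[int], 'return': str}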
# TODO: Also support this as a class decorator.
def no_type_check(arg):
"""Decorator to indicate that annotations are not type hints.
The argument must be a class or function; if it is a class, it
applies recursively to all methods defined in that class (but not
to methods defined in its superclasses or subclasses).
This mutates the function(s) in place.
"""
if isinstance(arg, type):
for obj in arg.__dict__.values():
if isinstance(obj, types.FunctionType):
obj.__no_type_check__ = True
else:
arg.__no_type_check__ = True
return arg
def no_type_check_decorator(decorator):
"""Decorator to give another decorator the @no_type_check effect.
This wraps the decorator with something that wraps the decorated
function in @no_type_check.
"""
@functools.wraps(decorator)
def wrapped_decorator(*args, **kwds):
func = decorator(*args, **kwds)
func = no_type_check(func)
return func
return wrapped_decorator
def overload(func):
raise RuntimeError("Overloading is only supported in library stubs")
class _ProtocolMeta(GenericMeta):
"""Internal metaclass for _Protocol.
This exists so _Protocol classes can be generic without deriving
from Generic.
"""
def __instancecheck__(self, obj):
raise TypeError("Protocols cannot be used with isinstance().")
def __subclasscheck__(self, cls):
if not self._is_protocol:
# No structural checks since this isn't a protocol.
return NotImplemented
if self is _Protocol:
# Every class is a subclass of the empty protocol.
return True
# Find all attributes defined in the protocol.
attrs = self._get_protocol_attrs()
for attr in attrs:
if not any(attr in d.__dict__ for d in cls.__mro__):
return False
return True
def _get_protocol_attrs(self):
# Get all Protocol base classes.
protocol_bases = []
for c in self.__mro__:
if getattr(c, '_is_protocol', False) and c.__name__ != '_Protocol':
protocol_bases.append(c)
# Get attributes included in protocol.
attrs = set()
for base in protocol_bases:
for attr in base.__dict__.keys():
# Include attributes not defined in any non-protocol bases.
for c in self.__mro__:
if (c is not base and attr in c.__dict__ and
not getattr(c, '_is_protocol', False)):
break
else:
if (not attr.startswith('_abc_') and
attr != '__abstractmethods__' and
attr != '_is_protocol' and
attr != '__dict__' and
attr != '__slots__' and
attr != '_get_protocol_attrs' and
attr != '__parameters__' and
attr != '__origin__' and
attr != '__module__'):
attrs.add(attr)
return attrs
class _Protocol(metaclass=_ProtocolMeta):
"""Internal base class for protocol classes.
This implements a simple-minded structural isinstance check
(similar but more general than the one-offs in collections.abc
such as Hashable).
"""
__slots__ = ()
_is_protocol = True
# Various ABCs mimicking those in collections.abc.
# A few are simply re-exported for completeness.
Hashable = collections_abc.Hashable # Not generic.
class Iterable(Generic[T_co], extra=collections_abc.Iterable):
__slots__ = ()
class Iterator(Iterable[T_co], extra=collections_abc.Iterator):
__slots__ = ()
class SupportsInt(_Protocol):
__slots__ = ()
@abstractmethod
def __int__(self) -> int:
pass
class SupportsFloat(_Protocol):
__slots__ = ()
@abstractmethod
def __float__(self) -> float:
pass
class SupportsComplex(_Protocol):
__slots__ = ()
@abstractmethod
def __complex__(self) -> complex:
pass
class SupportsBytes(_Protocol):
__slots__ = ()
@abstractmethod
def __bytes__(self) -> bytes:
pass
class SupportsAbs(_Protocol[T_co]):
__slots__ = ()
@abstractmethod
def __abs__(self) -> T_co:
pass
class SupportsRound(_Protocol[T_co]):
__slots__ = ()
@abstractmethod
def __round__(self, ndigits: int = 0) -> T_co:
pass
class Reversible(_Protocol[T_co]):
__slots__ = ()
@abstractmethod
def __reversed__(self) -> 'Iterator[T_co]':
pass
Sized = collections_abc.Sized # Not generic.
class Container(Generic[T_co], extra=collections_abc.Container):
__slots__ = ()
# Callable was defined earlier.
class AbstractSet(Sized, Iterable[T_co], Container[T_co],
extra=collections_abc.Set):
pass
class MutableSet(AbstractSet[T], extra=collections_abc.MutableSet):
pass
# NOTE: Only the value type is covariant.
class Mapping(Sized, Iterable[KT], Container[KT], Generic[VT_co],
extra=collections_abc.Mapping):
pass
class MutableMapping(Mapping[KT, VT], extra=collections_abc.MutableMapping):
pass
class Sequence(Sized, Iterable[T_co], Container[T_co],
extra=collections_abc.Sequence):
pass
class MutableSequence(Sequence[T], extra=collections_abc.MutableSequence):
pass
class ByteString(Sequence[int], extra=collections_abc.ByteString):
pass
ByteString.register(type(memoryview(b'')))
class List(list, MutableSequence[T]):
def __new__(cls, *args, **kwds):
if _geqv(cls, List):
raise TypeError("Type List cannot be instantiated; "
"use list() instead")
return list.__new__(cls, *args, **kwds)
class Set(set, MutableSet[T]):
def __new__(cls, *args, **kwds):
if _geqv(cls, Set):
raise TypeError("Type Set cannot be instantiated; "
"use set() instead")
return set.__new__(cls, *args, **kwds)
class _FrozenSetMeta(GenericMeta):
"""This metaclass ensures set is not a subclass of FrozenSet.
Without this metaclass, set would be considered a subclass of
FrozenSet, because FrozenSet.__extra__ is collections.abc.Set, and
set is a subclass of that.
"""
def __subclasscheck__(self, cls):
if issubclass(cls, Set):
return False
return super().__subclasscheck__(cls)
class FrozenSet(frozenset, AbstractSet[T_co], metaclass=_FrozenSetMeta):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, FrozenSet):
raise TypeError("Type FrozenSet cannot be instantiated; "
"use frozenset() instead")
return frozenset.__new__(cls, *args, **kwds)
class MappingView(Sized, Iterable[T_co], extra=collections_abc.MappingView):
pass
class KeysView(MappingView[KT], AbstractSet[KT],
extra=collections_abc.KeysView):
pass
# TODO: Enable Set[Tuple[KT, VT_co]] instead of Generic[KT, VT_co].
class ItemsView(MappingView, Generic[KT, VT_co],
extra=collections_abc.ItemsView):
pass
class ValuesView(MappingView[VT_co], extra=collections_abc.ValuesView):
pass
class Dict(dict, MutableMapping[KT, VT]):
def __new__(cls, *args, **kwds):
if _geqv(cls, Dict):
raise TypeError("Type Dict cannot be instantiated; "
"use dict() instead")
return dict.__new__(cls, *args, **kwds)
# Determine what base class to use for Generator.
if hasattr(collections_abc, 'Generator'):
# Sufficiently recent versions of 3.5 have a Generator ABC.
_G_base = collections_abc.Generator
else:
# Fall back on the exact type.
_G_base = types.GeneratorType
class Generator(Iterator[T_co], Generic[T_co, T_contra, V_co],
extra=_G_base):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, Generator):
raise TypeError("Type Generator cannot be instantiated; "
"create a subclass instead")
return super().__new__(cls, *args, **kwds)
def NamedTuple(typename, fields):
"""Typed version of namedtuple.
Usage::
        Employee = typing.NamedTuple('Employee', [('name', str), ('id', int)])
This is equivalent to::
Employee = collections.namedtuple('Employee', ['name', 'id'])
The resulting class has one extra attribute: _field_types,
giving a dict mapping field names to types. (The field names
are in the _fields attribute, which is part of the namedtuple
API.)
"""
fields = [(n, t) for n, t in fields]
cls = collections.namedtuple(typename, [n for n, t in fields])
cls._field_types = dict(fields)
return cls
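# Editor's illustration (not part of the original module):
#     Employee = NamedTuple('Employee', [('name', str), ('id', int)])
#     Employee._fields == ('name', 'id')          # standard namedtuple attribute
#     Employee._field_types == {'name': str, 'id': int}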
class IO(Generic[AnyStr]):
"""Generic base class for TextIO and BinaryIO.
This is an abstract, generic version of the return of open().
NOTE: This does not distinguish between the different possible
classes (text vs. binary, read vs. write vs. read/write,
append-only, unbuffered). The TextIO and BinaryIO subclasses
below capture the distinctions between text vs. binary, which is
pervasive in the interface; however we currently do not offer a
way to track the other distinctions in the type system.
"""
__slots__ = ()
@abstractproperty
def mode(self) -> str:
pass
@abstractproperty
def name(self) -> str:
pass
@abstractmethod
def close(self) -> None:
pass
@abstractmethod
def closed(self) -> bool:
pass
@abstractmethod
def fileno(self) -> int:
pass
@abstractmethod
def flush(self) -> None:
pass
@abstractmethod
def isatty(self) -> bool:
pass
@abstractmethod
def read(self, n: int = -1) -> AnyStr:
pass
@abstractmethod
def readable(self) -> bool:
pass
@abstractmethod
def readline(self, limit: int = -1) -> AnyStr:
pass
@abstractmethod
def readlines(self, hint: int = -1) -> List[AnyStr]:
pass
@abstractmethod
def seek(self, offset: int, whence: int = 0) -> int:
pass
@abstractmethod
def seekable(self) -> bool:
pass
@abstractmethod
def tell(self) -> int:
pass
@abstractmethod
def truncate(self, size: int = None) -> int:
pass
@abstractmethod
def writable(self) -> bool:
pass
@abstractmethod
def write(self, s: AnyStr) -> int:
pass
@abstractmethod
def writelines(self, lines: List[AnyStr]) -> None:
pass
@abstractmethod
def __enter__(self) -> 'IO[AnyStr]':
pass
@abstractmethod
def __exit__(self, type, value, traceback) -> None:
pass
class BinaryIO(IO[bytes]):
"""Typed version of the return of open() in binary mode."""
__slots__ = ()
@abstractmethod
def write(self, s: Union[bytes, bytearray]) -> int:
pass
@abstractmethod
def __enter__(self) -> 'BinaryIO':
pass
class TextIO(IO[str]):
"""Typed version of the return of open() in text mode."""
__slots__ = ()
@abstractproperty
def buffer(self) -> BinaryIO:
pass
@abstractproperty
def encoding(self) -> str:
pass
@abstractproperty
def errors(self) -> str:
pass
@abstractproperty
def line_buffering(self) -> bool:
pass
@abstractproperty
def newlines(self) -> Any:
pass
@abstractmethod
def __enter__(self) -> 'TextIO':
pass
class io:
"""Wrapper namespace for IO generic classes."""
__all__ = ['IO', 'TextIO', 'BinaryIO']
IO = IO
TextIO = TextIO
BinaryIO = BinaryIO
io.__name__ = __name__ + '.io'
sys.modules[io.__name__] = io
Pattern = _TypeAlias('Pattern', AnyStr, type(stdlib_re.compile('')),
lambda p: p.pattern)
Match = _TypeAlias('Match', AnyStr, type(stdlib_re.match('', '')),
lambda m: m.re.pattern)
class re:
"""Wrapper namespace for re type aliases."""
__all__ = ['Pattern', 'Match']
Pattern = Pattern
Match = Match
re.__name__ = __name__ + '.re'
sys.modules[re.__name__] = re
|
py | 1a4982f80018a34973f5b742588e52de48f315d3 | # -*- coding: utf-8 -*-
#
# John C. Thomas 2021 gpSTS
import torch
import torch.nn as nn
import torch.utils.data as dataloader
import torchvision
from torchvision.datasets import DatasetFolder
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
import Config
import Config as conf
def make_predictions(model, device, test_loader):
# Set model to eval mode to notify all layers.
model.eval()
targets = []
preds = []
# Set torch.no_grad() to disable gradient computation and backpropagation
with torch.no_grad():
for sample in test_loader:
data, target = sample
data, target = data.to(device), target.to(device)
# Predict for data by doing forward pass
output = model(data)
pred = output.max(1, keepdim=True)[1]
preds.append(pred.cpu().numpy())
targets.append(target.cpu().numpy())
targets = [np.hstack(y) for y in targets]
preds = [np.hstack(y) for y in preds]
targets = np.hstack(targets)
preds = np.hstack(preds)
return targets, preds
def progbar(curr, total, full_progbar, epoch, num_epochs, loss, accuracy):
frac = curr/total
filled_progbar = round(frac*full_progbar)
print('\r',
'#'*filled_progbar + '-'*(full_progbar-filled_progbar),
f'Epoch [{epoch}/{num_epochs}]',
f'Step [{curr}/{total}]',
'Loss: {:.6f}'.format(loss),
'Accuracy: [{:>7.2%}]'.format(accuracy),
end='')
def specnorm(data):
dmin = np.min(data)
dmax = np.max(data)
out = np.zeros(data.shape[0])
for i in range(0,data.shape[0]):
out[i] = (data[i] - dmin)/(dmax-dmin)
return out
def np_loader(path):
with open(path, 'rb') as f:
data = np.load(f,allow_pickle=True)
dnp = data[0]
dnp = specnorm(dnp)
dout = torch.from_numpy(dnp).float()
return torch.reshape(dout,(1,len(dnp)))
def spec_loader(data):
dnp = data
dnp = specnorm(dnp)
dout = torch.from_numpy(dnp).float()
return torch.reshape(dout,(1,1,len(dnp)))
def dplot(imagein, title='Interpolated'):
fig, ax = plt.subplots()
z_min, z_max = imagein.min(), imagein.max()
xx, yy = np.meshgrid(np.linspace(1, imagein.shape[0], imagein.shape[0]), np.linspace(1, imagein.shape[1], imagein.shape[1]))
x = xx[::1]
y = yy[::1]
cout = ax.pcolormesh(x, y, imagein, cmap='bwr', vmin=z_min, vmax=z_max)
ax.set_title(title)
ax.axis([x.min(), x.max(), y.min(), y.max()])
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
plt.axis('scaled')
fig.tight_layout()
plt.show()
def dscplot(dx,dy,di, title='Collected Points'):
fig = plt.figure(1, clear=True)
plt.scatter(dx, dy, c=di, cmap='viridis') #0,1
plt.title(title)
plt.colorbar()
plt.axis('scaled')
fig.tight_layout()
plt.show()
# Convolutional neural network
class Conv1d(nn.Module):
def __init__(self,num_classes=4):
super(Conv1d, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv1d(1, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm1d(64),
nn.ReLU(),
nn.MaxPool1d(2, stride=1))
self.layer2 = nn.Sequential(
nn.Conv1d(64, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.MaxPool1d(2, stride=1))
self.layer3 = nn.Dropout(p=0.2)
self.fc = nn.Linear(self.getinput(), num_classes)
def size_postopt(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = self.layer3(out)
return out.size()
def getinput(self):
        size = self.size_postopt(torch.rand(1,1,conf.nanonis_config['Nanonis_Settings']['NumSpectralPoints'])) # dummy 1-D input with the configured number of spectral points
m = 1
for i in size:
m *= i
return int(m)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = self.layer3(out)
out = out.reshape(out.size(0), -1)
out = self.fc(out)
return out |
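# --- Editor's sketch (not part of the original gpSTS module) ---
# Minimal end-to-end check of the pieces above: build the 1-D CNN, normalize a
# randomly generated stand-in spectrum with spec_loader, and take the argmax class.
# Assumes Config.nanonis_config['Nanonis_Settings']['NumSpectralPoints'] is set,
# which getinput() above already requires.
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = Conv1d(num_classes=4).to(device)
    model.eval()
    n_points = conf.nanonis_config['Nanonis_Settings']['NumSpectralPoints']
    dummy_spectrum = np.random.rand(n_points)       # stand-in for a measured spectrum
    batch = spec_loader(dummy_spectrum).to(device)  # shape: (1, 1, n_points)
    with torch.no_grad():
        logits = model(batch)
    print("predicted class:", int(logits.argmax(dim=1).item()))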
py | 1a49847fef2ac3233ffda6b403251722fd718b6d | import os
import numpy as np
from matplotlib import pyplot as plt
import figlatex
import hist2d
import colormap
commands = [
'-m 100000 -L 15250 -U 15850 darksidehd/merged_000886.root:53',
'-m 100000 -L 750 -v 750 -l 8900 darksidehd/nuvhd_lf_3x_tile53_77K_64V_6VoV_1.wav',
'-m 100000 -L 750 -v 750 -l 8900 darksidehd/nuvhd_lf_3x_tile53_77K_66V_7VoV_1.wav',
]
###########################
figs = []
cmap = colormap.uniform()
for ifile, cmd in enumerate(commands):
figname = f'fighist2dtile53-{ifile}'
fig = plt.figure(num=figname, clear=True, figsize=[9, 4])
save = f'figthesis/{figname}.npz'
if not os.path.exists(save):
hist = hist2d.Hist2D(cmd.split())
print(f'save {save}...')
hist.save(save, compress=True)
print(f'load {save}...')
hist = hist2d.Hist2D.load(save)
hist.hist2d(fig, cmap=cmap)
figs.append(fig)
for fig in figs:
figlatex.save(fig)
for fig in figs:
fig.show()
|
py | 1a49853e74d6535f4c5886308c42518e85cb170c | from typing import List
UvciList = List[str]
class UvciData:
def __init__(self, new: UvciList = [], removed: UvciList = []) -> None:
self._new: UvciList = new
self._removed: UvciList = removed
def add_uvcis(self, new: UvciList = [], removed: UvciList = []) -> None:
self._new += new
self._removed += removed
def get_new(self) -> UvciList:
return self._new
def get_removed(self) -> UvciList:
return self._removed
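# Editor's sketch (not part of the original module; the UVCI strings below are made up):
if __name__ == "__main__":
    data = UvciData(new=["URN:UVCI:01:XX:EXAMPLE#1"])
    data.add_uvcis(removed=["URN:UVCI:01:XX:EXAMPLE#2"])
    print(data.get_new(), data.get_removed())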
|
py | 1a498628d9ed41f76e7ce440a4d4475348a6ba12 | ### Team6 main.py ###
### author: tanahashi, kurita, ito ###
import os
import eel
import csv
import datetime
from datetime import datetime as dt
import numpy
import random
import matplotlib.pyplot as plt
import japanize_matplotlib # needed to render Japanese labels in matplotlib plots
from typing import Counter
# import importer
# import exporter
# The initial password for P000 is 000b
print("404 Not Found エラーが出た場合、VSCodeでこのファイルを開いてから実行してみてください。")
eel.init("MainProject/view")
eel.start("login.html", size=(800, 480), block=False)
@eel.expose
def registtData():
#print(registtDatatoPy())
try:
if registtDatatoPy() == True:
return "tomato"
else:
return "onion"
except(KeyError):
return "onion"
def gettData():
tData = eel.sendtDatatoPy()()
gtID = tData[0]
gtPW = tData[1]
return gtID, gtPW
tID, tPW = "xxxx", "yyyy"
# Handling after the tID and tPW entered on main.html have been checked
def registtDatatoPy():
global tID, tPW
tID, tPW = gettData()
print("tID: {0} tPW: {1}".format(tID, tPW))
if tIDtPWverify(tID,tPW):
print("Yeeeeeeeeee")
return True
else:
print("Noooooooooo")
return False
# Read the instructor file and verify tID/tPW
def tIDtPWverify(tID,tPW):
tID, tPW = gettData()
tnamecsv = {}
with open("./data/教員・担当科目リスト.csv", "r", encoding="utf_8", errors="", newline="") as f:
reader = csv.DictReader(f)
for row in reader:
tnamecsv[row["ID"]] = row["氏名"]
print(tnamecsv[tID])
tpwcsv = {}
with open("./data/tPW.csv","r")as p:
reader = csv.DictReader(p)
for prow in reader:
tpwcsv[prow["tID"]] = prow["tPW"]
tPWoncsv = tpwcsv[tID]
#print(tPWoncsv)
if tPW == tPWoncsv:
return True
else:
return False
# Display the instructor's name in admin mode
@eel.expose
def picktName():
try:
global tID
tnamecsv = {}
with open("./data/教員・担当科目リスト.csv", "r", encoding="utf_8", errors="", newline="") as f:
reader = csv.DictReader(f)
for row in reader:
tnamecsv[row["ID"]] = row["氏名"]
#print(tnamecsv[tID])
tName = str(tnamecsv[tID])
print("user: " + tName)
eel.printtName(tName)
except(FileNotFoundError):
os.getcwd()
os.chdir("./team6/MainProject/")
picktName()
# reader = "x"
tcName = ["xx", "xx"]
tcDay = [0, 0]
tcPeriod = [0, 0]
@eel.expose
def pickcName():
global tID
global tcName
global tcDay
global tcPeriod
# tccsv = [[0] * 5 for i in range(4)]
# print(tccsv)
# tcName = [[0] * 5 for i in range(4)]
# tccsvx = []
# for i in range(5):
# with open("./data/教員・担当科目リスト.csv", "r", encoding="utf_8", errors="", newline="") as f:
# reader = csv.DictReader(f)
# for row in reader:
# print(row)
# tanto = str('担当科目' + str(i+1))
# print(tanto)
# tccsvx[row["ID"]] = row["担当科目1"]
# tcName[i] = str(tccsvx[tID])
# print("calss1: " + tcName[i])
tc1csv = {}
tc2csv = {}
tcName = ["name", "name"]
with open("./data/教員・担当科目リスト.csv", "r", encoding="utf_8", errors="", newline="") as f:
reader = csv.DictReader(f)
for row in reader:
tc1csv[row["ID"]] = row["担当科目1"]
tc2csv[row["ID"]] = row["担当科目2"]
tcName[0] = str(tc1csv[tID])
tcName[1] = str(tc2csv[tID])
print("calss1: " + tcName[0])
print("calss2: " + tcName[1])
# tcID = [[0] * 5 for i in range(4)]
# tcxID = [[0] * 5 for i in range(4)]
# for j in range(5):
# with open("./data/講義科目ルール.csv", "r", encoding="utf_8", errors="", newline="") as p:
# reader = csv.DictReader(p)
# for row in reader:
# tcxID[j][row["科目名"]] = row["講義ID"]
# tcID[j] = str(tcxID[tc1Name])
# print("classID: " + tcID[j])
tc1xID = {}
tc2xID = {}
with open("./data/講義科目ルール.csv", "r", encoding="utf_8", errors="", newline="") as p:
reader = csv.DictReader(p)
for row in reader:
tc1xID[row["科目名"]] = row["講義ID"]
tc2xID[row["科目名"]] = row["講義ID"]
tc1ID = str(tc1xID[tcName[0]])
try:
tc2ID = str(tc2xID[tcName[1]])
except(KeyError):
tc2ID = "X0_"
print("calss1ID: " + tc1ID)
print("calss2ID: " + tc2ID)
tcDay = [0, 0]
tcPeriod = [0, 0]
cID = [tc1ID, tc2ID]
for n in range(0, len(cID)):
# print(n)
# print(len(cID))
if('M' in cID[n]):
tcDay[n] = '月'
elif('Tu' in cID[n]):
tcDay[n] = '火'
elif('W' in cID[n]):
tcDay[n] = '水'
elif('Th' in cID[n]):
tcDay[n] = '木'
elif('F' in cID[n]):
tcDay[n] = '金'
else:
tcDay[n] = ''
tcName[1] = "undefined"
print('Day config error')
if('12_' in cID[n]):
tcPeriod[n] = '1,2限'
elif('23_' in cID[n]):
tcPeriod[n] = '2,3限'
elif('34_' in cID[n]):
tcPeriod[n] = '3,4限'
elif('45_' in cID[n]):
tcPeriod[n] = '4,5限'
elif('1_' in cID[n]):
tcPeriod[n] = '1限'
elif('2_' in cID[n]):
tcPeriod[n] = '2限'
elif('3_' in cID[n]):
tcPeriod[n] = '3限'
elif('4_' in cID[n]):
tcPeriod[n] = '4限'
elif('5_' in cID[n]):
tcPeriod[n] = '5限'
else:
tcPeriod[n] = ''
print('Class period config error')
try:
print(tcDay[n] + tcPeriod[n])
except(TypeError):
pass
except(IndexError):
pass
n = n+1
tc1Name = tcName[0]
tc2Name = tcName[1]
tclen = len(tcName)
tclen = 5
eel.addcData(tcName, tclen, tcDay, tcPeriod)
# Branching helper for the admin screen
@eel.expose
def clidSet(clid):
global tcName
global tcDay
global tcPeriod
print(clid)
print(tcName)
cDay = "0"
cPeriod = "0"
try:
if clid == "101":
cConfig = tcName[0]
cDay = tcDay[0]
cPeriod = tcPeriod[0]
elif clid == "102":
cConfig = tcName[1]
cDay = tcDay[1]
cPeriod = tcPeriod[1]
elif clid == "103":
cConfig = tcName[2]
cDay = tcDay[2]
cPeriod = tcPeriod[2]
elif clid == "104":
cConfig = tcName[3]
cDay = tcDay[3]
cPeriod = tcPeriod[4]
elif clid == "105":
cConfig = tcName[4]
cDay = tcDay[4]
cPeriod = tcPeriod[4]
except(IndexError):
pass
print(cConfig)
tcxID = {}
tcxCT1 = {}
tcxCT2 = {}
tcxLT1 = {}
tcxLT2 = {}
with open("./data/講義科目ルール.csv", "r", encoding="utf_8", errors="", newline="") as p:
reader = csv.DictReader(p)
for row in reader:
tcxID[row["科目名"]] = row["講義ID"]
tcxCT1[row["科目名"]] = row["開始時間"]
tcxCT2[row["科目名"]] = row["終了時間"]
tcxLT1[row["科目名"]] = row["出席限度(分)"]
tcxLT2[row["科目名"]] = row["遅刻限度(分)"]
tccID = str(tcxID[cConfig])
tccCT1 = str(tcxCT1[cConfig])
tccCT2 = str(tcxCT2[cConfig])
tccLT1 = str(tcxLT1[cConfig])
tccLT2 = str(tcxLT2[cConfig])
print("ID: " + tccID)
print("Day: " + cDay)
print("Period:" + cPeriod)
print("Start: " + tccCT1)
print("End: " + tccCT2)
print("Limit1:" + tccLT1)
print("Limit2:" + tccLT2)
tccCT1 = str(tcxCT1[cConfig])
tccCT2 = str(tcxCT2[cConfig])
tccLT1 = str(tcxCT1[cConfig][0:5])
tccLT2 = str(tcxCT1[cConfig][0:5])
tcxLT1m = int(tcxLT1[cConfig])
tcxLT2m = int(tcxLT2[cConfig])
# tcxLT1m = dt.strptime(tcxLT1m, '%H:%M:%S')
# tcxLT2m = dt.strptime(tcxLT2m, '%H:%M:%S')
tccCT1t = dt.strptime(tccCT1, '%H:%M')
tccCT2t = dt.strptime(tccCT2, '%H:%M')
tccLT1t = dt.strptime(tccLT1, '%H:%M')
tccLT2t = dt.strptime(tccLT2, '%H:%M')
tccLT1t = tccLT1t + datetime.timedelta(minutes=tcxLT1m)
tccLT2t = tccLT2t + datetime.timedelta(minutes=tcxLT2m)
tccCT1 = str(tccCT1t.time())
tccCT2 = str(tccCT2t.time())
tccLT1 = str(tccLT1t.time())
tccLT2 = str(tccLT2t.time())
tccCT1 = tccCT1[0:5]
tccCT2 = tccCT2[0:5]
tccLT1 = tccLT1[0:5]
tccLT2 = tccLT2[0:5]
print("授業開始: " + tccCT1)
print("授業終了: " + tccCT2)
print("以降遅刻: " + tccLT1)
print("以降欠席: " + tccLT2)
eel.initialID(cConfig, tccID, cDay, cPeriod, tccCT1, tccCT2, tccLT1, tccLT2)
# eel.initialCT(tccCT1, tccCT2)
# eel.initialLT(tccLT1, tccLT2)
# return tccCT1, tccCT2, tccLT1, tccLT2
datew = datetime.date.today()
datew = datew.strftime("%Y_%m_%d")
print(datew)
# Simulated attendees
# main author: ito
def stdSim(cID):
number=range(1,101)
    rnumber=random.sample(number,len(number)) # generate the student numbers in a random order
temlist=[]
for i in rnumber:
        temNo= "S{:0>3}".format(i) # zero-pad to three digits, e.g. "S001", "S012"
        temlist.append(temNo) # temlist holds the 100 elements S001 through S100
    # Open the enrollment csv that matches the lecture ID
    stdIDmx = {} # dict
    stdIDm = [] # list
stdcsvName = "./data/履修者-" + cID + ".csv"
with open(stdcsvName, "r", encoding="utf_8", errors="", newline="") as p:
reader = csv.DictReader(p)
for row in reader:
stdIDmx[row["学籍番号"]] = row["IDm"]
for i in range(len(temlist)):
try:
IDm = str(stdIDmx[temlist[i]])
stdIDm.append(IDm)
except KeyError:
pass
# print(stdcsvName)
# print(len(stdIDm))
return stdIDm
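# Editor's note (illustrative; the lecture ID below is hypothetical): stdSim("M12_1")
# would read ./data/履修者-M12_1.csv and return the IDm values of its enrolled students
# in a randomized touch order, which openIOcsv() below uses to drive the simulator.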
IOcsvName = "xx"
# Attendance-list CSV handling, doubling as an attendance simulator
@eel.expose
def openIOcsv(cID, cName):
global datew
global IOcsvName
tcxCT1 = {}
tcxCT2 = {}
tcxLT1 = {}
tcxLT2 = {}
with open("./data/講義科目ルール.csv", "r", encoding="utf_8", errors="", newline="") as p:
reader = csv.DictReader(p)
for row in reader:
tcxCT1[row["科目名"]] = row["開始時間"]
tcxCT2[row["科目名"]] = row["終了時間"]
tcxLT1[row["科目名"]] = row["出席限度(分)"]
tcxLT2[row["科目名"]] = row["遅刻限度(分)"]
tccCT1 = str(tcxCT1[cName]) + ":00"
tccCT2 = str(tcxCT2[cName]) + ":00"
tccLT1 = str(tcxCT1[cName][0:5]) + ":00"
tccLT2 = str(tcxCT1[cName][0:5]) + ":00"
tcxLT1m = int(tcxLT1[cName])
tcxLT2m = int(tcxLT2[cName])
# tcxLT1m = dt.strptime(tcxLT1m, '%H:%M:%S')
# tcxLT2m = dt.strptime(tcxLT2m, '%H:%M:%S')
tccCT1t = dt.strptime(tccCT1, '%H:%M:%S')
tccCT2t = dt.strptime(tccCT2, '%H:%M:%S')
tccLT1t = dt.strptime(tccLT1, '%H:%M:%S')
tccLT2t = dt.strptime(tccLT2, '%H:%M:%S')
tccLT1t = tccLT1t + datetime.timedelta(minutes=tcxLT1m)
tccLT2t = tccLT2t + datetime.timedelta(minutes=tcxLT2m)
tccCT1t = tccCT1t.time()
tccCT2t = tccCT2t.time()
tccLT1t = tccLT1t.time()
tccLT2t = tccLT2t.time()
print("授業開始: " + str(tccCT1t))
print("授業終了: " + str(tccCT2t))
print("以降遅刻: " + str(tccLT1t))
print("以降欠席: " + str(tccLT2t))
LimitTime = [tccCT1t, tccCT2t, tccLT1t, tccLT2t]
stdIDm = stdSim(cID)
# print(stdIDm)
stdIDx = {}
stdNamex = {}
stdID = []
stdName = []
print("Preparations are underway: " + cName)
dirName = "./Mainproject/IOList/" + cName
IOcsvName = "./Mainproject/IOList/" + cName + "/" + cName + datew + "出欠リスト.csv"
stdcsvName = "./data/履修者-" + cID + ".csv"
if(os.path.exists(dirName) == False):
os.mkdir(dirName)
    # Get the list of enrolled students
with open(stdcsvName, "r", encoding="utf_8", errors="") as stdcsv:
reader = csv.DictReader(stdcsv)
for row in reader:
stdIDx[row["IDm"]] = row["学籍番号"]
stdNamex[row["IDm"]] = row["名前"]
stdlen = len(stdIDm)
print("履修者数: " + str(stdlen))
for i in range(len(stdIDm)):
try:
try:
stdID.append(str(stdIDx[stdIDm[i]]))
stdName.append(str(stdNamex[stdIDm[i]]))
except(KeyError):
stdID.append("S000")
stdName.append("名無ノ権兵衛")
except(IndexError):
pass
    # Create the initial attendance-list csv
if(os.path.exists(IOcsvName) == False):
with open(IOcsvName, "w", encoding="utf_8", newline="") as IOcsv:
writer = csv.writer(IOcsv)
writer.writerow(["学籍番号", "名前", "IDm", "入室時刻", "出欠"])
for k in range(len(stdIDm)):
writer.writerow([stdID[k], stdName[k], stdIDm[k], "00:00:00", "欠席"])
    # Sort (by student number)
with open(IOcsvName, "r", encoding="utf_8") as IOcsvs:
reader = csv.DictReader(IOcsvs)
IOdict = []
for row in reader:
IOdict.append(row)
sortedIOdict = sorted(IOdict, key=lambda x:x["学籍番号"])
with open(IOcsvName, "w", encoding="utf_8", newline="") as IOcsvw:
writer2 = csv.writer(IOcsvw)
writer2.writerow(["学籍番号", "名前", "IDm", "入室時刻", "出欠"])
for g in range(len(stdIDm)):
dictvalues = sortedIOdict[g].values()
writer2.writerow(dictvalues)
# print(stdID)
# print(stdName)
    # Attendance is registered by passing stdID and stdName to the JS functions in the loop below,
    # leaving a reasonable interval between students.
    # Card-touch intervals
timespanx = numpy.random.normal(
        loc = 7, # mean
        scale = (len(stdIDm)/6), # standard deviation
        size = len(stdIDm) # size of the output array
)
timespan = timespanx
tmp = 0
for j in range(len(timespanx)):
timespan[j] = int(timespan[j])
tmp = tmp + timespan[j]
# print(timespan)
print(tmp/60)
    # Update the attendance list
def touchIDcard(no, stdlenx, LimitTime):
dtNow = datetime.datetime.now()
now = dtNow.time()
print(now)
status = "出席"
if now < LimitTime[2]:
status = "出席"
elif now < LimitTime[3]:
status = "遅刻"
elif now < LimitTime[1]:
status = "欠席"
else:
status = "欠席"
print(status)
eel.showIDinfo(stdID[no], stdName[no])
eel.showNo(no + 1, stdlenx)
eel.showStatus(status)
f = open(IOcsvName, "r", encoding="utf-8")
csv_data = csv.reader(f)
list = [ e for e in csv_data]
f.close()
now = str(now)
now = now[0:8]
        # Updated row data
data = [stdID[no], stdName[no], stdIDm[no], now, status]
for i in range(len(list)):
if list[i][0]==data[0]:
list[i] = data
        # Update the csv
with open(IOcsvName, "w", encoding="utf_8", newline="") as f:
writer = csv.writer(f)
writer.writerows(list)
    # Trigger the simulated card touches
eel.sleep(3)
for s in range(len(stdIDm)):
if s != (len(stdIDm)-1):
if timespan[s]<=0:
timespan[s] = (timespan[s] * -1) + 1
print(timespan[s], end=" ")
print(stdIDm[s])
touchIDcard(s, stdlen, LimitTime)
eel.sleep(timespan[s])
else:
            # Latecomers
num = random.randint(0,9)
print(num)
if num > 8:
eel.sleep(800)
elif num > 7:
eel.sleep(300)
elif num > 4:
eel.sleep(60)
else:
eel.sleep(3)
print(stdIDm[s])
touchIDcard(s, stdlen, LimitTime)
@eel.expose
def generateIOcsvName(clid):
global tcName
try:
if clid == "101":
cName = tcName[0]
elif clid == "102":
cName = tcName[1]
elif clid == "103":
cName = tcName[2]
elif clid == "104":
cName = tcName[3]
elif clid == "105":
cName = tcName[4]
except(IndexError):
pass
IOcsvName = "./Mainproject/IOList/" + cName + "/" + cName + datew + "出欠リスト.csv"
print(IOcsvName)
eel.getcName(cName)
eel.getIOcsvName(IOcsvName)
@eel.expose
def updateIOcsv(cDataPockets):
newcData = cDataPockets
print(newcData[0])
print(newcData[1])
print(newcData[2])
print(newcData[3])
print(newcData[4])
cName = newcData[0]
newcDay = newcData[1]
newcPeri = newcData[2]
newLT1 = newcData[3]
newLT2 = newcData[4]
f = open("./data/講義科目ルール.csv", "r", encoding="utf-8")
csv_data = csv.reader(f)
list = [ e for e in csv_data]
f.close()
# print(list)
# newcID
for s in range(len(list)):
if list[s][1]==cName:
basecID = list[s][0]
tID = list[s][2]
tName = list[s][3]
exam = list[s][8]
sNo = list[s][9]
newcID = newcDay + newcPeri
if basecID[-1:] == "1":
newcID = newcID + "1"
if basecID[-1:] == "2":
newcID = newcID + "2"
if basecID[-1:] == "3":
newcID = newcID + "3"
if basecID[-1:] == "4":
newcID = newcID + "4"
    # avoid duplicate cIDs
for t in range(len(list)):
if list[t][0]==newcID:
if list[t][1]!=cName:
excID = list[t][0]
if excID[-1:] == "_":
newcID = newcID + "1"
elif excID[-1:] == "1":
newcID = newcID[:-1] + "2"
elif excID[-1:] == "2":
newcID = newcID[:-1] + "1"
if excID[-1:] == "2":
newcID = newcID[:-1] + "3"
elif excID[-1:] == "3":
newcID = newcID[:-1] + "4"
    # newCT1, newCT2 (class start and end times)
if newcPeri == "1_":
newCT1 = "09:00"
newCT2 = "10:30"
if newcPeri == "2_":
newCT1 = "10:40"
newCT2 = "12:10"
if newcPeri == "3_":
newCT1 = "13:00"
newCT2 = "14:30"
if newcPeri == "4_":
newCT1 = "14:40"
newCT2 = "16:10"
if newcPeri == "5_":
newCT1 = "16:20"
newCT2 = "17:50"
if newcPeri == "12_":
newCT1 = "09:00"
newCT2 = "12:10"
if newcPeri == "23_":
newCT1 = "10:40"
newCT2 = "14:30"
if newcPeri == "34_":
newCT1 = "13:00"
newCT2 = "16:10"
if newcPeri == "45_":
newCT1 = "14:40"
newCT2 = "17:50"
    # newLT1 (attendance cut-off)
newCT1t = dt.strptime(newCT1, '%H:%M')
newCT2t = dt.strptime(newCT2, '%H:%M')
newLT1t = dt.strptime(newLT1, '%H:%M')
newLT2t = dt.strptime(newLT2, '%H:%M')
if newLT1t<newCT1t:
eel.showErrorInfo()
return
if newLT2t<newCT1t:
eel.showErrorInfo()
return
if newLT2t<newLT1t:
eel.showErrorInfo()
return
if newCT2t<newLT2t:
eel.showErrorInfo()
return
newLT1t = newLT1t - newCT1t
newLT2t = newLT2t - newCT1t
newLT1 = str(newLT1t)
newLT2 = str(newLT2t)
print(newLT1)
print(newLT2)
newLT1 = newLT1[2:4]
newLT2 = newLT2[2:4]
if newLT1 == " d":
newLT1 = "00"
if newLT2 == " d":
newLT2 = "00"
    # updated row data
data = [newcID, cName, tID, tName, newCT1, newCT2, newLT1, newLT2, exam, sNo]
print(data)
for i in range(len(list)):
if list[i][1]==cName:
list[i] = data
    # update the CSV
with open("./data/講義科目ルール.csv", "w", encoding="utf_8", newline="") as f:
writer = csv.writer(f)
writer.writerows(list)
eel.toAdmin()
# For displaying attendance lists
@eel.expose
def chooseIOList(cName, iNo):
path = "./Mainproject/IOList/" + cName + "/"
try:
IOcsvNames = os.listdir(path)
except(FileNotFoundError):
eel.showNameError()
return
csvNo = len(IOcsvNames)
listS = []
sStatusVal = []
for c in range(csvNo):
IOcsvNamepath = path + IOcsvNames[c]
print(IOcsvNamepath)
f = open(IOcsvNamepath, "r", encoding="utf-8")
csv_data = csv.reader(f)
listS = [ o for o in csv_data]
f.close()
# print(listS)
sStatusVal.append(listS)
    # latest attendance list
IOcsvNamepath = path + IOcsvNames[int(iNo)]
nIOcsvName = IOcsvNames[int(iNo)]
print(IOcsvNamepath)
f = open(IOcsvNamepath, "r", encoding="utf-8")
csv_data = csv.reader(f)
list = [ e for e in csv_data]
f.close()
sID = []
sName = []
sIDm = []
sIntime = []
sStatus = []
sStatusValApnd = 0
sStatusValLate = 0
sStatusValAbsc = 0
sStatusRates = []
sNo = len(list)-1
for i in range(sNo):
sID.append(list[i+1][0])
sName.append(list[i+1][1])
sIDm.append(list[i+1][2])
sIntime.append(list[i+1][3])
sStatus.append(list[i+1][4])
for x in range(csvNo):
if sStatusVal[x][i+1][4] == "出席":
sStatusValApnd += 1
elif sStatusVal[x][i+1][4] == "遅刻":
sStatusValLate += 1
elif sStatusVal[x][i+1][4] == "欠席":
sStatusValAbsc += 1
rate = str(sStatusValApnd) + "/" + str(sStatusValApnd + sStatusValLate + sStatusValAbsc)
# rate = round(rate)
# rate = str(rate) + "%"
sStatusRates.append(rate)
sStatusValApnd = 0
sStatusValLate = 0
sStatusValAbsc = 0
# print(sStatusRates)
# print(list)
eel.createIOTable(sID, sName, sIDm, sIntime, sStatus, sStatusRates, sNo, nIOcsvName, csvNo, IOcsvNames)
@eel.expose
def createOneClassGraph(cName, iNo):
    # build a graph for a single lecture session
# main author: kurita
path = "./Mainproject/IOList/" + cName + "/"
IOcsvNames = os.listdir(path)
print(path)
print(IOcsvNames)
    # latest attendance list
IOcsvName = path + IOcsvNames[int(iNo)]
    # load the file name to use as the graph title
file_path = IOcsvName
file_name_path=os.path.basename(file_path)
    # count present, late, and absent entries
count0 = {}
with open(IOcsvName,encoding='UTF8') as fo:
atl_reader = csv.reader(fo)
atl_header = next(atl_reader)
# data=fo
print(atl_header)
for row in atl_reader:
data0=row[4]
count0.setdefault(data0,0)
count0[data0] +=1
with open(IOcsvName,encoding='UTF8') as fc:
line_count=sum([1 for line in fc])
li_ct=line_count-1
print(li_ct)
y_list=[]
x_label=[]
    # figure object for saving the graph
fig=plt.figure()
plt.title(file_name_path)
for key0, value0 in count0.items():
att_counter='{}: {:d}'.format(key0,value0)
        # for the y-axis values
y_list.append(int(value0))
        # for the x-axis text labels
x_label.append('{}'.format(key0))
    # sort the y values in descending order here
y_list2=sorted(y_list,reverse=True)
    # handle the case where nobody is '遅刻' (late) or '欠席' (absent) (y-axis side)
if len(y_list2)==2:
y_list2.append(0)
        # the two-element case
elif len(y_list2)==1:
y_list2.append(0)
y_list2.append(0)
        # the one-element case
else:
y_list2
        # the three-element case
x=[0,1,2]
    # ex_label fixes the display order: present, late, absent
ex_label=['出席','遅刻','欠席']
    # handle the case where nobody is '遅刻' (late) or '欠席' (absent) (x-axis side)
if len(x_label)==2:
if '出席' in x_label:
if '遅刻' in x_label:
x_label.append('欠席')
            # when '欠席' is missing
else:
x_label.append('遅刻')
            # when '遅刻' is missing
else:
x_label.append('出席')
            # when '出席' is missing (probably never happens; kept to avoid an exception)
    # the two-element case
elif len(x_label)==1:
if '出席' in x_label:
x_label.append('遅刻')
x_label.append('欠席')
            # when '遅刻' and '欠席' are missing
elif '遅刻' in x_label:
x_label.append('出席')
x_label.append('欠席')
            # when '出席' and '欠席' are missing
else:
x_label.append('出席')
x_label.append('遅刻')
            # when '出席' and '遅刻' are missing (probably never happens; kept to avoid an exception)
else:
x_label
x_label2=sorted(x_label,key=ex_label.index)
    # build the bar chart
print(y_list2)
print(x_label2)
plt.ylim(0,li_ct)
graph=plt.bar(x,y_list2)
    # annotate each bar with its count
height=y_list2
for rect in graph:
height=rect.get_height()
plt.annotate('{}'.format(height),xy=(rect.get_x() + rect.get_width()/2,height),xytext=(0,3),textcoords="offset points",ha='center',va='bottom')
plt.xticks(x,x_label2)
plt.show()
    # end of the block that graphs a single attendance list
@eel.expose
def createCumulativeClassGraph(cName):
    # build a cumulative graph over all lectures
# main author: kurita
path = "./Mainproject/IOList/" + cName + "/"
csv_list3 = os.listdir(path)
os.chdir(path)
#csv_list3=glob.glob("/*.csv")
#csv_list3
#print(IOcsvNames)
print(csv_list3)
count1 = {}
# csv_list3=glob.glob(IOcsvNames)
for n in range(len(csv_list3)):
print(csv_list3[n])
with open(csv_list3[n],encoding='UTF8') as f3:
atl_reader3 = csv.reader(f3)
atl_header3 = next(atl_reader3)
#print(atl_header3)
for row in atl_reader3:
data=row[0]
data2=row[4]
count1.setdefault(data,0)
if '出席' in data2:
count1[data] +=1
#alatd_list=[]
    # count the number of '出席' (present) per student
stnumb_list=[]
atd_count_list=[]
for key, value in count1.items():
att_counter='{}: {:d}'.format(key,value)
        # list of student IDs with attendance counts
#alatd_list.append(att_counter)
        # list of student IDs
stnumb_list.append('{}'.format(key))
        # list of attendance counts
atd_count_list.append(int(value))
#print(stnumb_list)
#print(atd_count_list)
count2 = {}
for m in range(len(csv_list3)):
with open(csv_list3[m],encoding='UTF8') as f4:
atl_reader4 = csv.reader(f4)
atl_header4 = next(atl_reader4)
#print(atl_header3)
for row in atl_reader4:
data3=row[0]
data4=row[4]
count2.setdefault(data3,0)
if '遅刻' in data4:
count2[data3] +=1
#alatd_list=[]
stnumb_list2=[]
atd_count_list2=[]
for key2, value2 in count2.items():
att_counter2='{}: {:d}'.format(key2,value2)
        # list of student IDs with attendance counts
#alatd_list.append(att_counter)
        # list of student IDs
stnumb_list2.append('{}'.format(key2))
        # list of attendance counts
atd_count_list2.append(int(value2))
#print(stnumb_list)
#print(atd_count_list)
count3 = {}
for l in range(len(csv_list3)):
with open(csv_list3[l],encoding='UTF8') as f5:
atl_reader5 = csv.reader(f5)
atl_header5 = next(atl_reader5)
#print(atl_header3)
for row in atl_reader5:
data5=row[0]
data6=row[4]
count3.setdefault(data5,0)
if '欠席' in data6:
count3[data5] +=1
#alatd_list=[]
stnumb_list3=[]
atd_count_list3=[]
for key3, value3 in count3.items():
att_counter3='{}: {:d}'.format(key3,value3)
        # list of student IDs with attendance counts
#alatd_list.append(att_counter)
        # list of student IDs
stnumb_list3.append('{}'.format(key3))
        # list of attendance counts
atd_count_list3.append(int(value3))
#print(stnumb_list)
#print(atd_count_list)
    # number of students
list_length=len(stnumb_list)
print(list_length)
    # remove the leading entry ('出席' and its count) from the list
#stnumb_list.remove('出席')
#atd_count_list.remove(list_length)
#print(alatd_list)
#print(stnumb_list)
#print(atd_count_list)
#print(stnumb_list2)
#print(atd_count_list2)
    # build the bar chart from here
fig=plt.figure()
    # list of consecutive integers from 0, one per student
y_set=list(range(list_length))
graph1=plt.bar(y_set,atd_count_list,align="edge",width=-0.5,color="#44cca3",label="出席")
graph2=plt.bar(y_set,atd_count_list2,align="center",width=0.5,color="#c3cc44",label="遅刻")
graph3=plt.bar(y_set,atd_count_list3,align="edge",width=0.5,color="#cc5844",label="欠席")
plt.xticks(y_set,stnumb_list,rotation=90)
plt.legend()
plt.show()
print(os.getcwd())
os.chdir("./team6/MainProject/")
# required to keep the eel event loop running
while True:
eel.sleep(2.0) |
py | 1a49868492df41a0143980522bb6052b09d6eea0 | # -*- coding: utf-8 -*-
"""
finance_summary.py
~~~~~~~~~~~
finance summary crawler
:copyright: (c) 2015 by Lu Tianchao.
:license: Apache, see LICENSE for more details.
"""
import sys
import urllib2
from urllib2 import Request
from bs4 import BeautifulSoup
class SummaryPerSeason():
"""summary of finance status of a company"""
def __init__(self):
        self.dead_line = '' # report cut-off date
        self.net_assets_value_per_share = '' # net asset value per share
        self.earnings_per_share = '' # earnings per share
        self.cash_flow_per_share = '' # cash flow per share
        self.capital_fund_per_share = '' # capital reserve per share
        self.total_fixed_assets = '' # total fixed assets
        self.total_current_assets = '' # total current assets
        self.total_assets = '' # total assets
        self.total_long_term_liabilities = '' # total long-term liabilities
        self.main_business_revenue = '' # main business revenue
        self.financial_expenses = '' # financial expenses
        self.net_profit = '' # net profit
def set_property(self,idx,value):
"""setup the property by order in the crawed html"""
if idx==1:
self.dead_line = value
elif idx==2:
self.net_assets_value_per_share = value
elif idx==3:
self.earnings_per_share = value
elif idx==4:
self.cash_flow_per_share = value
elif idx==5:
self.capital_fund_per_share = value
elif idx==6:
self.total_fixed_assets = value
elif idx==7:
self.total_current_assets = value
elif idx==8:
self.total_assets = value
elif idx==9:
self.total_long_term_liabilities = value
elif idx==10:
self.main_business_revenue = value
elif idx==11:
self.financial_expenses = value
elif idx==12:
self.net_profit = value
def __str__(self):
return ','.join([self.dead_line, self.net_assets_value_per_share,
self.earnings_per_share, self.cash_flow_per_share,
self.capital_fund_per_share, self.total_fixed_assets,
self.total_current_assets, self.total_assets,
self.total_long_term_liabilities, self.main_business_revenue,
self.financial_expenses,self.net_profit])
class SeasonlySummaryCrawler():
"""craw the seasonly finance summary"""
base_url = 'http://vip.stock.finance.sina.com.cn/corp/go.php/vFD_FinanceSummary/stockid/%s.phtml'
def fetch_seasonly_summary(self, companyCode, latest_date):
"""fetch the seasonly finance summary of a given company"""
url = self.base_url % (companyCode)
response = urllib2.urlopen(Request(url))
html = BeautifulSoup(response.read().decode('GBK'),'html.parser')
summary_history_list = self.__analyzeHtml(html, latest_date)
return summary_history_list
def __analyzeHtml(self, data, latest_date):
"""analyze the html and setup each property"""
result_list = []
rows = data.find(id="FundHoldSharesTable")
idx = 1
summary_unit = SummaryPerSeason()
for row in rows.find_all('tr',recursive=False):
td = row.find('td', class_='tdr')
if td!=None:
propertyValue = td.text.replace(u'\xa0',u'')
summary_unit.set_property(idx,filter(lambda ch:ch in '-.0123456789', propertyValue))
idx+=1
else:
if latest_date!=None and summary_unit.dead_line<=latest_date:
return result_list
idx=1
result_list.append(summary_unit)
summary_unit = SummaryPerSeason()
return result_list
if __name__ == '__main__':
"""test for the crawed result"""
crawler = SeasonlySummaryCrawler()
result = crawler.fetch_seasonly_summary('600710', '2015-06-30')
for x in result:
print x |
py | 1a4986abc3fed1b7dbe4f6654db6d940b4ea7467 | import FWCore.ParameterSet.Config as cms
siStripBackPlaneCorrectionDummyDBWriter = cms.EDAnalyzer("SiStripBackPlaneCorrectionDummyDBWriter",
record = cms.string(""),
OpenIovAt = cms.untracked.string("beginOfTime"),
OpenIovAtTime = cms.untracked.uint32(1))
|
py | 1a498732d26a15299d6820fda938ba8cf5d5dd32 | import random
rock = """
_______
---' ____)
(_____)
(_____)
(____)
---.__(___)
"""
paper = """
_______
---' ____)____
______)
_______)
_______)
---.__________)
"""
scissors = """
_______
---' ____)____
______)
__________)
(____)
---.__(___)
"""
symbols = [rock, paper, scissors]
user_symbol = int(input("Choose between Rock, Paper or Scissors (0, 1 or 2) ==> "))
computer_symbol = random.randint(0,2)
if user_symbol > 2:
print("Value out of valid range, retry!")
    exit()
print("User selected:\n")
print(symbols[user_symbol] + "\n")
print("Computer Selected:\n")
print(symbols[computer_symbol] + "\n")
if user_symbol == 0: #rock
if computer_symbol == 0: #rock vs rock
print("Nobody Win 😶")
elif computer_symbol == 1: #rock vs paper
print("You Lose 😓 ")
else: #rock vs scissors
print("You Win 😁")
elif user_symbol == 1: #paper
if computer_symbol == 0: #paper vs rock
print("You Win 😁")
elif computer_symbol == 1: #paper vs paper
print("Nobody Win 😶")
else: #paper vs scissors
print("You Lose 😓 ")
elif user_symbol == 2: #scissors
if computer_symbol == 0: #scissors vs rock
print("You Lose 😓 ")
elif computer_symbol == 1: #scissors vs paper
print("You Win 😁")
else: #scissors vs scissors
print("Nobody Win 😶") |
py | 1a4987b57ed8c6048e8cb379161d8107e75f0805 |
# global
import pytest
import numpy as np
# local
import ivy
import ivy.backends.numpy
import ivy_tests.helpers as helpers
# cross_entropy
@pytest.mark.parametrize(
"t_n_p_n_res", [([[0., 1., 0.]], [[0.3, 0.2, 0.5]], [1.609438])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_cross_entropy(t_n_p_n_res, dtype, tensor_fn, dev, call):
# smoke test
true, pred, true_target = t_n_p_n_res
pred = tensor_fn(pred, dtype, dev)
true = tensor_fn(true, dtype, dev)
ret = ivy.cross_entropy(true, pred)
# type test
assert ivy.is_array(ret)
# cardinality test
assert list(ret.shape) == [1]
# value test
assert np.allclose(call(ivy.cross_entropy, true, pred), np.asarray(true_target))
# compilation test
if call in [helpers.torch_call]:
# cross_entropy does not have backend implementation,
# pytorch scripting requires direct bindings to work, which bypass get_framework()
return
if not ivy.wrapped_mode():
helpers.assert_compilable(ivy.cross_entropy)
# binary_cross_entropy
@pytest.mark.parametrize(
"t_n_p_n_res", [([[0., 1., 0.]], [[0.3, 0.7, 0.5]], [[0.35667494, 0.35667494, 0.69314718]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_binary_cross_entropy(t_n_p_n_res, dtype, tensor_fn, dev, call):
# smoke test
true, pred, true_target = t_n_p_n_res
pred = tensor_fn(pred, dtype, dev)
true = tensor_fn(true, dtype, dev)
ret = ivy.binary_cross_entropy(true, pred)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == pred.shape
# value test
assert np.allclose(call(ivy.binary_cross_entropy, true, pred), np.asarray(true_target))
# compilation test
if call in [helpers.torch_call]:
# binary_cross_entropy does not have backend implementation,
# pytorch scripting requires direct bindings to work, which bypass get_framework()
return
if not ivy.wrapped_mode():
helpers.assert_compilable(ivy.binary_cross_entropy)
# sparse_cross_entropy
@pytest.mark.parametrize(
"t_n_p_n_res", [([1], [[0.3, 0.2, 0.5]], [1.609438])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_sparse_cross_entropy(t_n_p_n_res, dtype, tensor_fn, dev, call):
# smoke test
true, pred, true_target = t_n_p_n_res
pred = tensor_fn(pred, dtype, dev)
true = ivy.array(true, 'int32', dev)
ret = ivy.sparse_cross_entropy(true, pred)
# type test
assert ivy.is_array(ret)
# cardinality test
assert list(ret.shape) == [1]
# value test
assert np.allclose(call(ivy.sparse_cross_entropy, true, pred), np.asarray(true_target))
# compilation test
if call in [helpers.torch_call]:
# sparse_cross_entropy does not have backend implementation,
# pytorch scripting requires direct bindings to work, which bypass get_framework()
return
if not ivy.wrapped_mode():
helpers.assert_compilable(ivy.sparse_cross_entropy)
|
py | 1a49882021449294dbdc2f1a5c6c1494ec861b2e |
from Data.parameters import Data
from reuse_func import GetData
class district_home():
def __init__(self,driver):
self.driver =driver
def test_district(self):
self.p = GetData()
self.driver.find_element_by_xpath(Data.hyper_link).click()
self.p.page_loading(self.driver)
self.driver.find_element_by_id('block').click()
self.p.page_loading(self.driver)
self.driver.find_element_by_id(Data.homeicon).click()
print("home icon is working ")
self.p.page_loading(self.driver)
|
py | 1a49892b1ffcd3cfd4fd2cece1b3602b75c46caf | #!/usr/bin/env python
# Copyright 2019 Jian Wu
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import io
import os
import subprocess
import numpy as np
import soundfile as sf
import scipy.signal as ss
from kaldi_python_io import Reader as BaseReader
from typing import Optional, IO, Union, Any, NoReturn, Tuple
def read_audio(fname: Union[str, IO[Any]],
beg: int = 0,
end: Optional[int] = None,
norm: bool = True,
sr: int = 16000) -> np.ndarray:
"""
Read audio files using soundfile (support multi-channel & chunk)
Args:
fname: file name or object
beg, end: begin and end index for chunk-level reading
norm: normalized samples between -1 and 1
sr: sample rate of the audio
Return:
samps: in shape C x N
sr: sample rate
"""
# samps: N x C or N
# N: number of samples
# C: number of channels
samps, ret_sr = sf.read(fname,
start=beg,
stop=end,
dtype="float32" if norm else "int16")
if sr != ret_sr:
raise RuntimeError(f"Expect sr={sr} of {fname}, get {ret_sr} instead")
if not norm:
samps = samps.astype("float32")
# put channel axis first
# N x C => C x N
if samps.ndim != 1:
samps = np.transpose(samps)
return samps
def write_audio(fname: Union[str, IO[Any]],
samps: np.ndarray,
sr: int = 16000,
norm: bool = True) -> NoReturn:
"""
Write audio files, support single/multi-channel
Args:
fname: IO object or str
samps: np.ndarray, C x S or S
sr: sample rate
norm: keep same as the one in read_audio
"""
samps = samps.astype("float32" if norm else "int16")
# for multi-channel, accept ndarray N x C
if samps.ndim != 1 and samps.shape[0] < samps.shape[1]:
samps = np.transpose(samps)
samps = np.squeeze(samps)
# make dirs
if isinstance(fname, str):
parent = os.path.dirname(fname)
if parent and not os.path.exists(parent):
os.makedirs(parent)
sf.write(fname, samps, sr)
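# Example (a minimal sketch, not part of the original module): round-trip a file
# with the helpers above. "in.wav" / "out.wav" are hypothetical paths; read_audio
# returns float32 samples (C x N for multi-channel, N for mono) when norm=True,
# and write_audio accepts the same layout.
#
#   samps = read_audio("in.wav", sr=16000)
#   write_audio("out.wav", samps, sr=16000)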
def add_room_response(spk: np.ndarray,
rir: np.ndarray,
early_energy: bool = False,
sr: int = 16000) -> Tuple[np.ndarray, float]:
"""
Convolute source signal with selected rirs
Args
spk: S, close talk signal
rir: N x R, single or multi-channel RIRs
early_energy: return energy of early parts
sr: sample rate of the signal
Return
revb: N x S, reverberated signals
"""
if spk.ndim != 1:
raise RuntimeError(f"Can not convolve rir with {spk.ndim}D signals")
S = spk.shape[-1]
revb = ss.convolve(spk[None, ...], rir)[..., :S]
revb = np.asarray(revb)
if early_energy:
rir_ch0 = rir[0]
rir_peak = np.argmax(rir_ch0)
rir_beg_idx = max(0, int(rir_peak - 0.001 * sr))
rir_end_idx = min(rir_ch0.size, int(rir_peak + 0.05 * sr))
early_rir = np.zeros_like(rir_ch0)
early_rir[rir_beg_idx:rir_end_idx] = rir_ch0[rir_beg_idx:rir_end_idx]
early_rev = ss.convolve(spk, early_rir)[:S]
return revb, np.mean(early_rev**2)
else:
return revb, np.mean(revb[0]**2)
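# Example (a sketch under assumed inputs): reverberate a close-talk utterance
# with a room impulse response. "speech.wav" and "rir.wav" are hypothetical
# paths; read_audio returns channel-first arrays, matching the N x R layout
# that add_room_response expects for the RIR (np.atleast_2d covers a mono RIR).
#
#   spk = read_audio("speech.wav", sr=16000)              # shape: (S,)
#   rir = read_audio("rir.wav", sr=16000)                 # shape: (N, R) or (R,)
#   revb, ref_pow = add_room_response(spk, np.atleast_2d(rir), early_energy=True)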
def run_command(command: str, wait: bool = True):
"""
Runs shell commands
"""
p = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if wait:
[stdout, stderr] = p.communicate()
if p.returncode != 0:
stderr_str = bytes.decode(stderr)
raise Exception("There was an error while running the " +
f"command \"{command}\":\n{stderr_str}\n")
return stdout, stderr
else:
return p
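# Example (illustrative only): run a shell pipeline and capture its output.
#
#   stdout, stderr = run_command("echo foo | tr a-z A-Z")   # wait=True by default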
class AudioReader(BaseReader):
"""
Sequential/Random Reader for single/multiple channel audio using soundfile as the backend
The format of wav.scp follows Kaldi's definition:
key1 /path/to/key1.wav
key2 /path/to/key2.wav
...
or
key1 sox /home/data/key1.wav -t wav - remix 1 |
key2 sox /home/data/key2.wav -t wav - remix 1 |
...
or
key1 /path/to/ark1:XXXX
key2 /path/to/ark1:XXXY
are supported
Args:
wav_scp: path of the audio script
sr: sample rate of the audio
norm: normalize audio samples between (-1, 1) if true
channel: read audio at #channel if > 0 (-1 means all)
"""
def __init__(self,
wav_scp: str,
sr: int = 16000,
norm: bool = True,
channel: int = -1) -> None:
super(AudioReader, self).__init__(wav_scp, num_tokens=2)
self.sr = sr
self.ch = channel
self.norm = norm
self.mngr = {}
def _load(self, key: str) -> Optional[np.ndarray]:
fname = self.index_dict[key]
samps = None
# return C x N or N
if ":" in fname:
tokens = fname.split(":")
if len(tokens) != 2:
raise RuntimeError(f"Value format error: {fname}")
fname, offset = tokens[0], int(tokens[1])
# get ark object
if fname not in self.mngr:
self.mngr[fname] = open(fname, "rb")
wav_ark = self.mngr[fname]
# wav_ark = open(fname, "rb")
# seek and read
wav_ark.seek(offset)
try:
samps = read_audio(wav_ark, norm=self.norm, sr=self.sr)
except RuntimeError:
print(f"Read audio {key} {fname}:{offset} failed...",
flush=True)
else:
if fname[-1] == "|":
shell, _ = run_command(fname[:-1], wait=True)
fname = io.BytesIO(shell)
try:
samps = read_audio(fname, norm=self.norm, sr=self.sr)
except RuntimeError:
print(f"Load audio {key} {fname} failed...", flush=True)
if samps is None:
raise RuntimeError("Audio IO failed ...")
if self.ch >= 0 and samps.ndim == 2:
samps = samps[self.ch]
return samps
def nsamps(self, key: str) -> int:
"""
Number of samples
"""
data = self._load(key)
return data.shape[-1]
def power(self, key: str) -> float:
"""
Power of utterance
"""
data = self._load(key)
s = data if data.ndim == 1 else data[0]
return np.linalg.norm(s, 2)**2 / data.size
def duration(self, key: str) -> float:
"""
Utterance duration
"""
N = self.nsamps(key)
return N / self.sr
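if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the original module):
    # "data/wav.scp" is a hypothetical Kaldi-style script as documented on the
    # class; only attributes/methods defined or used above are relied on here.
    demo = AudioReader("data/wav.scp", sr=16000, norm=True, channel=-1)
    for key in demo.index_dict:
        print(key, demo.duration(key), demo.power(key))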
|
py | 1a49897aba46ff16636727b34b9189798d93fb35 | ###############################################################################
#
# file: __init__.py
#
# Purpose: refer to python doc for documentation details.
#
# Note: This file is part of Termsaver application, and should not be used
# or executed separately.
#
###############################################################################
#
# Copyright 2012 Termsaver
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
###############################################################################
"""
This module holds all stuff required for termsaver application to work properly
The modules available in this package are:
* `common`: helper functions used by termsaver code
* `constants`: series of configuration constants used by termsaver code
* `exceptions`: various exceptions classes to handle termsaver errors
* `i18n`: handles internationalization for termsaver application
This also contains the following sub-packages:
* `screen`: holds all screens accessible by termsaver application. Also
holds base and helper classes to ease the implementation of new screens.
"""
|
py | 1a498a964b666d583f876ee2cba6c91a2c7e853a | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'DocStack'
copyright = '2019, d05660'
author = 'd05660'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
html_theme = 'default'
else:
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
html_show_sourcelink = False
html_favicon = 'favicon.ico'
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'DocStackdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DocStack.tex', 'DocStack Documentation',
'd05660', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'docstack', 'DocStack Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DocStack', 'DocStack Documentation',
author, 'DocStack', 'One line description of project.',
'Miscellaneous'),
]
def setup(app):
app.add_stylesheet('css/custom.css?v20190329')
|
py | 1a498b3857d5c7b9bfdb91fec8a98cba4b83cfc7 | # -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('authentication', '0008_auto_20151014_2027'),
]
operations = [
migrations.AlterField(
model_name='position',
name='committee',
field=models.CharField(default=b'hs', max_length=10, verbose_name='komite', choices=[(b'hs', 'Hovedstyret'), (b'appkom', 'Applikasjonskomiteen'), (b'arrkom', 'Arrangementskomiteen'), (b'bankom', 'Bank- og \xf8konomikomiteen'), (b'bedkom', 'Bedriftskomiteen'), (b'dotkom', 'Drifts- og utviklingskomiteen'), (b'ekskom', 'Ekskursjonskomiteen'), (b'fagkom', 'Fag- og kurskomiteen'), (b'jubkom', 'Jubileumskomiteen'), (b'pangkom', 'Pensjonistkomiteen'), (b'prokom', 'Profil-og aviskomiteen'), (b'redaksjonen', 'Redaksjonen'), (b'trikom', 'Trivselskomiteen'), (b'velkom', 'Velkomstkomiteen')]),
preserve_default=True,
),
migrations.AlterField(
model_name='position',
name='position',
field=models.CharField(default=b'medlem', max_length=10, verbose_name='stilling', choices=[(b'medlem', 'Medlem'), (b'leder', 'Leder'), (b'nestleder', 'Nestleder'), (b'redaktor', 'Redakt\xf8r'), (b'okoans', '\xd8konomiansvarlig')]),
preserve_default=True,
),
]
|
py | 1a498ea267bd0926a18f1ed85e138b27e35dbbbe | ################################################################################
# #
# GENERATE MOVIES FROM SIMULATION OUTPUT #
# #
################################################################################
import sys; sys.dont_write_bytecode = True
sys.path.insert(0, '../')
import numpy as np
import hdf5_to_dict as io
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import util
import glob
import os
import plot as bplt
FIGX = 13
FIGY = 10
SIZE = 40
if len(sys.argv) != 2:
util.warn('PATH TO DUMP FOLDER NEEDED AS ARGUMENT')
sys.exit()
path = sys.argv[1]
#files = np.sort(glob.glob(os.path.join(path, "dump*.h5")))
print 'Getting list of full dumps...'
#files = io.get_dumps_full(os.path.join(path, 'dumps/'))
files = io.get_dumps_reduced(os.path.join(path, 'dumps/'))
#FRAMEDIR = 'FRAMES'
FRAMEDIR = os.path.join(path, 'frames_xz/')
print FRAMEDIR
util.make_dir(FRAMEDIR)
hdr = io.load_hdr(files[0])
geom = io.load_geom(hdr)
print len(files)
def plot(args):
n = args
imname = 'frame_%08d.png' % n
imname = os.path.join(FRAMEDIR, imname)
print '%08d / ' % (n+1) + '%08d' % len(files)
print imname
# Ignore if frame already exists
if os.path.isfile(imname):
return
dump = io.load_dump(files[n], geom)
fig = plt.figure(figsize=(FIGX, FIGY))
fig.suptitle('t = %05.2g' % dump['t'])
ax = plt.subplot(2,2,1)
bplt.plot_xz(ax, geom, np.log10(dump['RHO']), dump,
vmin=-4, vmax = 0, label='RHO')
bplt.overlay_field(ax, geom, dump, NLEV=10)
ax.set_xlim([-SIZE, SIZE]); ax.set_ylim([-SIZE, SIZE])
ax = plt.subplot(2,2,2)
bplt.plot_xz(ax, geom, np.log10(dump['beta']), dump,
vmin=-2, vmax=2, label='beta', cmap='RdBu_r')
bplt.overlay_field(ax, geom, dump, NLEV=10)
ax.set_xlim([-SIZE, SIZE]); ax.set_ylim([-SIZE, SIZE])
ax = plt.subplot(2,2,3)
bplt.plot_xy(ax, geom, np.log10(dump['RHO']), dump,
vmin=-4, vmax=0, label='RHO')
ax.set_xlim([-SIZE, SIZE]); ax.set_ylim([-SIZE, SIZE])
ax = plt.subplot(2,2,4)
bplt.plot_xy(ax, geom, np.log10(dump['beta']), dump,
vmin=-2, vmax=2, label='beta', cmap='RdBu_r')
ax.set_xlim([-SIZE, SIZE]); ax.set_ylim([-SIZE, SIZE])
#ax.pcolormesh(dump['X1'][:,:,0], dump['X2'][:,:,0], dump['RHO'][:,:,0])
plt.savefig(imname, bbox_inches='tight', dpi=100)
plt.close(fig)
import multiprocessing
import signal
import psutil
nthreads = psutil.cpu_count(logical=False)
nthreads = 4
print 'Number of CPUs: %i' % psutil.cpu_count(logical=False)
original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
pool = multiprocessing.Pool(nthreads)
signal.signal(signal.SIGINT, original_sigint_handler)
try:
res = pool.map_async(plot, range(len(files)))
res.get(720000)
except KeyboardInterrupt:
print 'Caught interrupt!'
pool.terminate()
else:
pool.close()
pool.join()
|
py | 1a498f80de2ce823902167089768de2ea7ea5289 | from django.shortcuts import render
# pdf
from django.http import FileResponse
import io
from reportlab.pdfgen import canvas
from reportlab.lib.units import inch
from reportlab.lib.pagesizes import letter
import os
from django.conf import settings
# Create your views here.
def get_pdf_name(request):
pdf_dir = os.listdir(os.path.join(settings.MEDIA_ROOT, 'sample_pdf'))
pdf_dir_path = list(map( lambda x : '/{}/{}/{}'.format('media', 'sample_pdf', x), pdf_dir))
res = []
for ind, item in enumerate(pdf_dir):
res.append("<a href='{}' target='_blank' >{}</a>".format(pdf_dir_path[ind], item))
content = {'list' : res}
return render(request, 'index.html', content)
def get_pdf(request):
# ccreaate Bytestream Buffer
buf = io.BytesIO()
# create a canvas
c = canvas.Canvas(buf, pagesize=letter, bottomup=0)
# Create a text Object
textob = c.beginText()
textob.setTextOrigin(inch, inch)
textob.setFont("Helvetica", 14)
# add Some lines of text
lines = [
"This is line one",
"This is line two",
"This is line three",
]
for line in lines:
textob.textLine(line)
    # Finish up
c.drawText(textob)
c.showPage()
c.save()
buf.seek(0)
response = FileResponse(buf, as_attachment=True, filename='file.pdf')
response.headers['Content-Type'] = 'application/pdf'
return response
py | 1a49909733d2db4e90dbd91e40467759f9e0f24e | # pylint: disable=redefined-outer-name,unused-variable,expression-not-assigned
import sys
import pytest
from twerk import utils
from twerk.views import private, public
@pytest.fixture(scope="session")
def browser():
with utils.get_browser("firefox", headless="--pdb" not in sys.argv) as browser:
yield browser
@pytest.fixture
def credentials():
try:
return utils.get_credentials(prompt=False)
except EnvironmentError as e:
pytest.skip(str(e))
def describe_public_views():
def describe_profile():
@pytest.fixture
def profile(browser):
return public.Profile(browser, username="jack")
@pytest.mark.flaky
def it_contains_properties(expect, profile):
expect(profile.tweets) > 0
expect(profile.following) > 0
expect(profile.followers) > 0
expect(profile.likes) > 0
expect(profile.joined) != None
def describe_private_views():
def describe_profile():
@pytest.fixture
def profile(browser, credentials):
return private.Profile(browser, username="jack", credentials=credentials)
@pytest.mark.flaky
def it_contains_properties(expect, profile):
expect(profile.tweets) > 0
expect(profile.following) > 0
expect(profile.followers) > 0
expect(profile.likes) == 0 # not yet supported
expect(profile.joined) != None
def describe_profile_block():
@pytest.fixture
def profile_block(browser, credentials):
return private.ProfileBlock(
browser, username="jack", credentials=credentials
)
@pytest.mark.flaky
def it_can_cancel(expect, profile_block):
view = profile_block.cancel()
expect(view).isinstance(private.Profile)
def describe_profile_report():
@pytest.fixture
def profile_report(browser, credentials):
return private.ProfileReport(
browser, username="jack", credentials=credentials
)
@pytest.mark.flaky
def it_can_cancel(expect, profile_report):
view = profile_report.cancel()
expect(view).isinstance(private.Profile)
|
py | 1a49917ee8f0a9ae3107a6f955f0b2e4466069ac | import unittest
import artifactcli.artifactcli
class TestArtifactCli(unittest.TestCase):
def test_usage(self):
self.assertEqual(artifactcli.artifactcli.main(), 1)
|
py | 1a4991a4198661463b9092260a8ee42bc3a8109f | #!/usr/bin/python
#
# CLI compiler for bcmd's new model description language
#
import sys
import argparse
import bcmd_yacc
import os
import decimal
import string
import pprint
import logger
import ast
import codegen
import info
# default compiler configuration
# (this is effectively a template whose details
# may be adapted by command line args)
CONFIG = {'modelpath': ['.', 'models'],
'outdir': '.',
'outfile': None,
'treefile': None,
'name': None,
'unused': True,
'graph': None,
'graph-exclude-unused': False,
'graph-exclude-init': False,
'graph-exclude-self': True,
'graph-exclude-clusters': False,
'graph-exclude-params': False,
'independent': 't',
'input-makes-intermed': True}
# these are effectively constants
VERSION = 0.6
MODELDEF_EXT = '.modeldef'
CODE_EXT = '.c'
MODEL_EXT = '.model'
TREE_EXT = '.tree'
COMPILE_EXT = '.bcmpl'
GRAPHVIZ_EXT = '.gv'
DUMMY_SOURCE = '##\n'
# parse a chosen model definition file and return the AST
def parse_file(filename):
try:
f = open(filename)
data = f.read()
f.close()
except IOError as e:
logger.error("I/O error({0}): {1}".format(e.errno, e.strerror))
return None
logger.message("Processing file: " + filename)
bcmd_yacc.currentFile = filename
errsBefore = len(bcmd_yacc.compilationInfo['errors'])
lp = bcmd_yacc.get_lexer_parser()
result = lp[1].parse(data, lexer=lp[0])
fileErrs = len(bcmd_yacc.compilationInfo['errors']) - errsBefore
bcmd_yacc.currentFile = None
if fileErrs == 1:
logger.error('Compilation failed with 1 syntax error')
elif fileErrs > 1:
logger.error('Compilation failed with %d syntax errors' % fileErrs)
return fileErrs, result
def print_errors():
logger.error('*** Summary of model compilation errors ***')
errs = bcmd_yacc.compilationInfo
for ii in range(len(errs['errors'])):
logger.error(errs['messages'][ii]
+ ' (' + errs['files'][ii]
+ ', line ' + str(errs['lines'][ii]) + ')')
# find a file on the search path
def search_file(filename, search_path):
for path in search_path:
candidate = os.path.join(path, filename)
if os.path.isfile(candidate):
return os.path.abspath(candidate)
return None
# process arguments
def process_args():
config = CONFIG
ap = argparse.ArgumentParser(
description="Model compiler for the BCMD modelling system.")
ap.add_argument('--version', action='version',
version='bcmd version %.1fa' % VERSION)
ap.add_argument(
'-i', help='append to default model search path', metavar='PATH')
ap.add_argument(
'-I', help='replace default model search path', metavar='PATH')
ap.add_argument(
'-n', '--name', help='specify model name (default: <file1>)', metavar='NAME')
ap.add_argument(
'-o', help='specify output file name (default: <modelname>.model)', metavar='FILE')
ap.add_argument(
'-d', help='specify output directory (default: .)', metavar='DIR')
ap.add_argument(
'-u', '--unused', help='omit apparently unused intermediates', action='store_false')
ap.add_argument(
'-g', '--debug', help='include debug outputs in generated model code', action='store_true')
ap.add_argument('-t', '--tree', help='write parse tree to file (default: <modelname>.tree)',
nargs='?', default=None, const='', metavar='FILE')
ap.add_argument('-p', '--processed', help='write compilation data to file (default: <modelname>.bcmpl)',
nargs='?', default=None, const='', metavar='FILE')
ap.add_argument('-G', '--graph', help='write dependency structure in GraphViz format (default: <modelname>.gv)',
nargs='?', default=None, const='', metavar='FILE')
ap.add_argument('-U', '--graphxunused',
help='exclude apparently unused elements from graph output', action='store_true')
ap.add_argument('-N', '--graphxinit',
help='exclude initialisation dependencies from graph output', action='store_true')
ap.add_argument('-C', '--graphxclust',
help='exclude clustering from graph output', action='store_true')
ap.add_argument('-S', '--graphself',
help='include direct circular dependencies in graph output', action='store_false')
ap.add_argument('-v', '--verbose',
help='set level of detail logged to stderr (0-7, default: 3)', metavar='LEVEL', type=int)
ap.add_argument(
'-Y', '--yacc', help='run a dummy parse to rebuild parse tables', action='store_true')
# ... add further options here as needed ...
ap.add_argument('file', nargs='+',
help='one or more model description files to be compiled')
args = ap.parse_args()
if args.yacc:
lp = bcmd_yacc.get_lexer_parser()
result = lp[1].parse(DUMMY_SOURCE, lexer=lp[0])
return False
if not (args.I is None):
config['modelpath'] = args.I.split(os.pathsep)
elif not (args.i is None):
config['modelpath'] = config['modelpath'] + \
args.i.split(os.pathsep)
if not (args.name is None):
config['name'] = args.name
else:
srcname, srcext = os.path.splitext(args.file[0])
config['name'] = srcname
if not (args.o is None):
config['outfile'] = args.o
else:
config['outfile'] = config['name'] + MODEL_EXT
if args.d is not None:
if not os.path.isdir(args.d):
os.makedirs(args.d)
config['outdir'] = args.d
config['treefile'] = args.tree
config['compfile'] = args.processed
config['sources'] = args.file
config['unused'] = args.unused
config['debug'] = args.debug
config['graph'] = args.graph
config['graph-exclude-unused'] = args.graphxunused
config['graph-exclude-init'] = args.graphxinit
config['graph-exclude-self'] = args.graphself
config['graph-exclude-clusters'] = args.graphxclust
if args.verbose is not None:
logger.verbosity = args.verbose
return config
# load and parse source files named on the command line, plus imports
# note failures and return a structure including those details and
# the resulting merged item list
def load_sources(config):
sources = config['sources']
srcIndex = 0
parsedSources = []
failedSources = []
merged = []
while srcIndex < len(sources):
logger.message("Searching for source file: " + sources[srcIndex])
src = search_file(sources[srcIndex], config['modelpath'])
if (src is None) and (not sources[srcIndex].endswith(MODELDEF_EXT)):
logger.message(
"Not found, trying with added extension: " + sources[srcIndex] + MODELDEF_EXT)
src = search_file(sources[srcIndex] +
MODELDEF_EXT, config['modelpath'])
if src is None:
logger.warn("File not found: " + sources[srcIndex])
failedSources.append(sources[srcIndex])
else:
nErrs, ast = parse_file(src)
if nErrs > 0 or ast is None:
failedSources.append(src)
else:
ast = list(ast)
# add imports that are not already in the source list to it
for imp in list(sum([x[1:] for x in ast if x[0] == 'import'], ())):
if imp not in sources and imp + MODELDEF_EXT not in sources:
sources.append(imp)
logger.detail(ast, prettify=True)
parsedSources.append((sources[srcIndex], src))
merged = merged + ast
srcIndex = srcIndex + 1
logger.message("Total number of attempted source files: %d" % srcIndex)
logger.message("%d parsed, %d failed" %
(len(parsedSources), len(failedSources)))
for failed in failedSources:
logger.message(" -> %s" % failed)
return {'sources': sources, 'parsed': parsedSources, 'failed': failedSources, 'merged': merged}
# write the loaded merged item list to a file, if so specified
def write_tree(config, work):
if not config['treefile'] is None:
if config['treefile'] == '':
config['treefile'] = config['name'] + TREE_EXT
treePath = os.path.join(config['outdir'], config['treefile'])
logger.message("Attempting to write parse tree to " + treePath)
try:
treeStream = open(treePath, 'w')
pprint.pprint(work['merged'], stream=treeStream)
treeStream.close()
except IOError as e:
logger.error("Error writing file ({0}): {1}".format(
e.errno, e.strerror))
# write the processed model structure to a file, if so specified
def write_comp(config, processed):
if not config['compfile'] is None:
if config['compfile'] == '':
config['compfile'] = config['name'] + COMPILE_EXT
compPath = os.path.join(config['outdir'], config['compfile'])
logger.message(
"Attempting to write compilation structure to " + compPath)
try:
compStream = open(compPath, 'w')
pprint.pprint(processed, stream=compStream)
compStream.close()
except IOError as e:
logger.error("Error writing file ({0}): {1}".format(
e.errno, e.strerror))
# write the model dependencies to a graph, if so specified
def write_graph(config, model):
if not config['graph'] is None:
if config['graph'] == '':
config['graph'] = config['name'] + GRAPHVIZ_EXT
graphPath = os.path.join(config['outdir'], config['graph'])
logger.message("Attempting to write dependency graph to " + graphPath)
try:
stream = open(graphPath, 'w')
print(info.generateGraphViz(model, config), file=stream)
stream.close()
except IOError as e:
logger.error("Error writing file ({0}): {1}".format(
e.errno, e.strerror))
# ----------------------------------------------------------------------------
# main entry point of this compiler script
if __name__ == '__main__':
config = process_args()
if not config:
sys.exit(2)
work = load_sources(config)
if len(work['failed']) > 0:
print_errors()
sys.exit(1)
write_tree(config, work)
processed = ast.process(work['merged'], work[
'parsed'], config['independent'])
info.logModelInfo(processed, config)
write_comp(config, processed)
write_graph(config, processed)
source = codegen.generateSource(processed, config)
codepath = os.path.join(config['outdir'], config['name'] + CODE_EXT)
logger.message("Attempting to write C code to " + codepath)
try:
cfile = open(codepath, 'w')
cfile.write(source)
cfile.close()
except IOError as e:
logger.error("Error writing file ({0}): {1}".format(
e.errno, e.strerror))
sys.exit(1)
|
py | 1a4991fbf670deae37651565d0df4f0a57b3df90 | import os
import sys
sys.path.insert(0, ".")
sys.path.insert(1, "..")
from praw import __version__
copyright = "2020, Bryce Boe"
exclude_patterns = ["_build"]
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx"]
html_static_path = ["_static"]
html_theme = "sphinx_rtd_theme"
html_theme_options = {"collapse_navigation": True}
htmlhelp_basename = "PRAW"
intersphinx_mapping = {"python": ("https://docs.python.org/3.8", None)}
master_doc = "index"
nitpicky = True
project = "PRAW"
pygments_style = "sphinx"
release = __version__
source_suffix = ".rst"
suppress_warnings = ["image.nonlocal_uri"]
version = ".".join(__version__.split(".", 2)[:2])
# Use RTD theme locally
if not os.environ.get("READTHEDOCS"):
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
def skip(app, what, name, obj, skip, options):
if name in {
"__call__",
"__contains__",
"__getitem__",
"__init__",
"__iter__",
"__len__",
}:
return False
return skip
def setup(app):
app.connect("autodoc-skip-member", skip)
app.add_stylesheet("theme_override.css")
|
py | 1a4992a27f59bc61cd4022219a03bf23d3eba889 | import filecmp
import logging
import os
import textwrap
import uuid
from pathlib import Path
from unittest import mock
import pytest
from dvc.cli import main
from dvc.dependency.base import DependencyIsStageFileError
from dvc.dvcfile import DVC_FILE_SUFFIX
from dvc.exceptions import (
ArgumentDuplicationError,
CircularDependencyError,
CyclicGraphError,
OutputDuplicationError,
OverlappingOutputPathsError,
StagePathAsOutputError,
)
from dvc.fs import system
from dvc.objects.hash import file_md5
from dvc.output import Output, OutputIsStageFileError
from dvc.repo import Repo as DvcRepo
from dvc.stage import Stage
from dvc.stage.exceptions import (
StageFileAlreadyExistsError,
StageFileBadNameError,
StagePathNotDirectoryError,
StagePathNotFoundError,
StagePathOutsideError,
)
from dvc.utils.serialize import load_yaml
from tests.basic_env import TestDvc, TestDvcGit
class TestRun(TestDvc):
def test(self):
cmd = "python {} {} {}".format(self.CODE, self.FOO, "out")
deps = [self.FOO, self.CODE]
outs = [os.path.join(self.dvc.root_dir, "out")]
outs_no_cache = []
fname = "out.dvc"
self.dvc.add(self.FOO)
stage = self.dvc.run(
cmd=cmd,
deps=deps,
outs=outs,
outs_no_cache=outs_no_cache,
fname=fname,
single_stage=True,
)
self.assertTrue(filecmp.cmp(self.FOO, "out", shallow=False))
self.assertTrue(os.path.isfile(stage.path))
self.assertEqual(stage.cmd, cmd)
self.assertEqual(len(stage.deps), len(deps))
self.assertEqual(len(stage.outs), len(outs + outs_no_cache))
self.assertEqual(stage.outs[0].fspath, outs[0])
self.assertEqual(
stage.outs[0].hash_info.value, file_md5(self.FOO, self.dvc.fs)
)
self.assertTrue(stage.path, fname)
with self.assertRaises(OutputDuplicationError):
self.dvc.run(
cmd=cmd,
deps=deps,
outs=outs,
outs_no_cache=outs_no_cache,
fname="duplicate" + fname,
single_stage=True,
)
class TestRunEmpty(TestDvc):
def test(self):
self.dvc.run(
cmd="echo hello world",
deps=[],
outs=[],
outs_no_cache=[],
fname="empty.dvc",
single_stage=True,
)
class TestRunMissingDep(TestDvc):
def test(self):
from dvc.dependency.base import DependencyDoesNotExistError
with self.assertRaises(DependencyDoesNotExistError):
self.dvc.run(
cmd="command",
deps=["non-existing-dep"],
outs=[],
outs_no_cache=[],
fname="empty.dvc",
single_stage=True,
)
class TestRunNoExec(TestDvcGit):
def test(self):
self.dvc.run(
cmd="python {} {} {}".format(self.CODE, self.FOO, "out"),
deps=[self.CODE, self.FOO],
outs=["out"],
no_exec=True,
single_stage=True,
)
self.assertFalse(os.path.exists("out"))
with open(".gitignore", encoding="utf-8") as fobj:
self.assertEqual(fobj.read(), "/out\n")
class TestRunCircularDependency(TestDvc):
def test(self):
with self.assertRaises(CircularDependencyError):
self.dvc.run(
cmd="command",
deps=[self.FOO],
outs=[self.FOO],
fname="circular-dependency.dvc",
single_stage=True,
)
def test_outs_no_cache(self):
with self.assertRaises(CircularDependencyError):
self.dvc.run(
cmd="command",
deps=[self.FOO],
outs_no_cache=[self.FOO],
fname="circular-dependency.dvc",
single_stage=True,
)
def test_non_normalized_paths(self):
with self.assertRaises(CircularDependencyError):
self.dvc.run(
cmd="command",
deps=["./foo"],
outs=["foo"],
fname="circular-dependency.dvc",
single_stage=True,
)
def test_graph(self):
self.dvc.run(
deps=[self.FOO],
outs=["bar.txt"],
cmd="echo bar > bar.txt",
single_stage=True,
)
self.dvc.run(
deps=["bar.txt"],
outs=["baz.txt"],
cmd="echo baz > baz.txt",
single_stage=True,
)
with self.assertRaises(CyclicGraphError):
self.dvc.run(
deps=["baz.txt"],
outs=[self.FOO],
cmd="echo baz > foo",
single_stage=True,
)
class TestRunDuplicatedArguments(TestDvc):
def test(self):
with self.assertRaises(ArgumentDuplicationError):
self.dvc.run(
cmd="command",
deps=[],
outs=[self.FOO, self.FOO],
fname="circular-dependency.dvc",
single_stage=True,
)
def test_outs_no_cache(self):
with self.assertRaises(ArgumentDuplicationError):
self.dvc.run(
cmd="command",
outs=[self.FOO],
outs_no_cache=[self.FOO],
fname="circular-dependency.dvc",
single_stage=True,
)
def test_non_normalized_paths(self):
with self.assertRaises(ArgumentDuplicationError):
self.dvc.run(
cmd="command",
deps=[],
outs=["foo", "./foo"],
fname="circular-dependency.dvc",
single_stage=True,
)
class TestRunStageInsideOutput(TestDvc):
def test_cwd(self):
self.dvc.run(
cmd=f"mkdir {self.DATA_DIR}",
deps=[],
outs=[self.DATA_DIR],
single_stage=True,
)
with self.assertRaises(StagePathAsOutputError):
self.dvc.run(
cmd="command",
fname=os.path.join(self.DATA_DIR, "inside-cwd.dvc"),
single_stage=True,
)
def test_file_name(self):
self.dvc.run(
cmd=f"mkdir {self.DATA_DIR}",
deps=[],
outs=[self.DATA_DIR],
single_stage=True,
)
with self.assertRaises(StagePathAsOutputError):
self.dvc.run(
cmd="command",
outs=[self.FOO],
fname=os.path.join(self.DATA_DIR, "inside-cwd.dvc"),
single_stage=True,
)
class TestRunBadCwd(TestDvc):
def test(self):
with self.assertRaises(StagePathOutsideError):
self.dvc.run(cmd="command", wdir=self.mkdtemp(), single_stage=True)
def test_same_prefix(self):
with self.assertRaises(StagePathOutsideError):
path = f"{self._root_dir}-{uuid.uuid4()}"
os.mkdir(path)
self.dvc.run(cmd="command", wdir=path, single_stage=True)
class TestRunBadWdir(TestDvc):
def test(self):
with self.assertRaises(StagePathOutsideError):
self.dvc.run(cmd="command", wdir=self.mkdtemp(), single_stage=True)
def test_same_prefix(self):
with self.assertRaises(StagePathOutsideError):
path = f"{self._root_dir}-{uuid.uuid4()}"
os.mkdir(path)
self.dvc.run(cmd="command", wdir=path, single_stage=True)
def test_not_found(self):
with self.assertRaises(StagePathNotFoundError):
path = os.path.join(self._root_dir, str(uuid.uuid4()))
self.dvc.run(cmd="command", wdir=path, single_stage=True)
def test_not_dir(self):
with self.assertRaises(StagePathNotDirectoryError):
path = os.path.join(self._root_dir, str(uuid.uuid4()))
os.mkdir(path)
path = os.path.join(path, str(uuid.uuid4()))
open(path, "a", encoding="utf-8").close()
self.dvc.run(cmd="command", wdir=path, single_stage=True)
class TestRunBadName(TestDvc):
def test(self):
with self.assertRaises(StagePathOutsideError):
self.dvc.run(
cmd="command",
fname=os.path.join(self.mkdtemp(), self.FOO + DVC_FILE_SUFFIX),
single_stage=True,
)
def test_same_prefix(self):
with self.assertRaises(StagePathOutsideError):
path = f"{self._root_dir}-{uuid.uuid4()}"
os.mkdir(path)
self.dvc.run(
cmd="command",
fname=os.path.join(path, self.FOO + DVC_FILE_SUFFIX),
single_stage=True,
)
def test_not_found(self):
with self.assertRaises(StagePathNotFoundError):
path = os.path.join(self._root_dir, str(uuid.uuid4()))
self.dvc.run(
cmd="command",
fname=os.path.join(path, self.FOO + DVC_FILE_SUFFIX),
single_stage=True,
)
class TestRunRemoveOuts(TestDvc):
def test(self):
with open(self.CODE, "w+", encoding="utf-8") as fobj:
fobj.write("import sys\n")
fobj.write("import os\n")
fobj.write("if os.path.exists(sys.argv[1]):\n")
fobj.write(" sys.exit(1)\n")
fobj.write("open(sys.argv[1], 'w+').close()\n")
self.dvc.run(
deps=[self.CODE],
outs=[self.FOO],
cmd=f"python {self.CODE} {self.FOO}",
single_stage=True,
)
class TestRunUnprotectOutsCopy(TestDvc):
def test(self):
with open(self.CODE, "w+", encoding="utf-8") as fobj:
fobj.write("import sys\n")
fobj.write("with open(sys.argv[1], 'a+') as fobj:\n")
fobj.write(" fobj.write('foo')\n")
ret = main(["config", "cache.type", "copy"])
self.assertEqual(ret, 0)
ret = main(
[
"run",
"-d",
self.CODE,
"-o",
self.FOO,
"--single-stage",
"python",
self.CODE,
self.FOO,
]
)
self.assertEqual(ret, 0)
self.assertTrue(os.access(self.FOO, os.W_OK))
with open(self.FOO, encoding="utf-8") as fd:
self.assertEqual(fd.read(), "foo")
ret = main(
[
"run",
"--force",
"--no-run-cache",
"--single-stage",
"-d",
self.CODE,
"-o",
self.FOO,
"python",
self.CODE,
self.FOO,
]
)
self.assertEqual(ret, 0)
self.assertTrue(os.access(self.FOO, os.W_OK))
with open(self.FOO, encoding="utf-8") as fd:
self.assertEqual(fd.read(), "foo")
class TestRunUnprotectOutsSymlink(TestDvc):
def test(self):
with open(self.CODE, "w+", encoding="utf-8") as fobj:
fobj.write("import sys\n")
fobj.write("import os\n")
fobj.write("with open(sys.argv[1], 'a+') as fobj:\n")
fobj.write(" fobj.write('foo')\n")
ret = main(["config", "cache.type", "symlink"])
self.assertEqual(ret, 0)
self.assertEqual(ret, 0)
ret = main(
[
"run",
"-d",
self.CODE,
"-o",
self.FOO,
"--single-stage",
"python",
self.CODE,
self.FOO,
]
)
self.assertEqual(ret, 0)
if os.name == "nt":
# NOTE: Windows symlink perms don't propagate to the target
self.assertTrue(os.access(self.FOO, os.W_OK))
else:
self.assertFalse(os.access(self.FOO, os.W_OK))
self.assertTrue(system.is_symlink(self.FOO))
with open(self.FOO, encoding="utf-8") as fd:
self.assertEqual(fd.read(), "foo")
ret = main(
[
"run",
"--force",
"--no-run-cache",
"--single-stage",
"-d",
self.CODE,
"-o",
self.FOO,
"python",
self.CODE,
self.FOO,
]
)
self.assertEqual(ret, 0)
if os.name == "nt":
# NOTE: Windows symlink perms don't propagate to the target
self.assertTrue(os.access(self.FOO, os.W_OK))
else:
self.assertFalse(os.access(self.FOO, os.W_OK))
self.assertTrue(system.is_symlink(self.FOO))
with open(self.FOO, encoding="utf-8") as fd:
self.assertEqual(fd.read(), "foo")
class TestRunUnprotectOutsHardlink(TestDvc):
def test(self):
with open(self.CODE, "w+", encoding="utf-8") as fobj:
fobj.write("import sys\n")
fobj.write("import os\n")
fobj.write("with open(sys.argv[1], 'a+') as fobj:\n")
fobj.write(" fobj.write('foo')\n")
ret = main(["config", "cache.type", "hardlink"])
self.assertEqual(ret, 0)
self.assertEqual(ret, 0)
ret = main(
[
"run",
"-d",
self.CODE,
"-o",
self.FOO,
"--single-stage",
"python",
self.CODE,
self.FOO,
]
)
self.assertEqual(ret, 0)
self.assertFalse(os.access(self.FOO, os.W_OK))
self.assertTrue(system.is_hardlink(self.FOO))
with open(self.FOO, encoding="utf-8") as fd:
self.assertEqual(fd.read(), "foo")
ret = main(
[
"run",
"--force",
"--no-run-cache",
"--single-stage",
"-d",
self.CODE,
"-o",
self.FOO,
"python",
self.CODE,
self.FOO,
]
)
self.assertEqual(ret, 0)
self.assertFalse(os.access(self.FOO, os.W_OK))
self.assertTrue(system.is_hardlink(self.FOO))
with open(self.FOO, encoding="utf-8") as fd:
self.assertEqual(fd.read(), "foo")
class TestCmdRunOverwrite(TestDvc):
def test(self):
# NOTE: using sleep() is a workaround for filesystems
# with low mtime resolution. We have to use mtime since
# comparing mtime's is the only way to check that the stage
        # file didn't change (size and inode in the first test down
        # below don't change).
import time
ret = main(
[
"run",
"-d",
self.FOO,
"-d",
self.CODE,
"-o",
"out",
"--file",
"out.dvc",
"--single-stage",
"python",
self.CODE,
self.FOO,
"out",
]
)
self.assertEqual(ret, 0)
stage_mtime = os.path.getmtime("out.dvc")
time.sleep(1)
ret = main(
[
"run",
"-d",
self.FOO,
"-d",
self.CODE,
"--force",
"--no-run-cache",
"--single-stage",
"-o",
"out",
"--file",
"out.dvc",
"python",
self.CODE,
self.FOO,
"out",
]
)
self.assertEqual(ret, 0)
# NOTE: check that dvcfile was overwritten
self.assertNotEqual(stage_mtime, os.path.getmtime("out.dvc"))
stage_mtime = os.path.getmtime("out.dvc")
time.sleep(1)
ret = main(
[
"run",
"--force",
"--single-stage",
"--file",
"out.dvc",
"-d",
self.BAR,
f"cat {self.BAR}",
]
)
self.assertEqual(ret, 0)
# NOTE: check that dvcfile was overwritten
self.assertNotEqual(stage_mtime, os.path.getmtime("out.dvc"))
class TestCmdRunCliMetrics(TestDvc):
def test_cached(self):
ret = main(
[
"run",
"-m",
"metrics.txt",
"--single-stage",
"echo test > metrics.txt",
]
)
self.assertEqual(ret, 0)
with open("metrics.txt", encoding="utf-8") as fd:
self.assertEqual(fd.read().rstrip(), "test")
def test_not_cached(self):
ret = main(
[
"run",
"-M",
"metrics.txt",
"--single-stage",
"echo test > metrics.txt",
]
)
self.assertEqual(ret, 0)
with open("metrics.txt", encoding="utf-8") as fd:
self.assertEqual(fd.read().rstrip(), "test")
class TestCmdRunWorkingDirectory(TestDvc):
def test_default_wdir_is_not_written(self):
stage = self.dvc.run(
cmd=f"echo test > {self.FOO}",
outs=[self.FOO],
wdir=".",
single_stage=True,
)
d = load_yaml(stage.relpath)
self.assertNotIn(Stage.PARAM_WDIR, d.keys())
stage = self.dvc.run(
cmd=f"echo test > {self.BAR}", outs=[self.BAR], single_stage=True
)
d = load_yaml(stage.relpath)
self.assertNotIn(Stage.PARAM_WDIR, d.keys())
def test_fname_changes_path_and_wdir(self):
dname = "dir"
os.mkdir(os.path.join(self._root_dir, dname))
foo = os.path.join(dname, self.FOO)
fname = os.path.join(dname, "stage" + DVC_FILE_SUFFIX)
stage = self.dvc.run(
cmd=f"echo test > {foo}",
outs=[foo],
fname=fname,
single_stage=True,
)
self.assertEqual(stage.wdir, os.path.realpath(self._root_dir))
self.assertEqual(
stage.path, os.path.join(os.path.realpath(self._root_dir), fname)
)
# Check that it is dumped properly (relative to fname)
d = load_yaml(stage.relpath)
self.assertEqual(d[Stage.PARAM_WDIR], "..")
def test_rerun_deterministic(tmp_dir, run_copy, mocker):
from dvc.stage.run import subprocess
tmp_dir.gen("foo", "foo content")
spy = mocker.spy(subprocess, "Popen")
run_copy("foo", "out", single_stage=True)
assert spy.called
spy.reset_mock()
run_copy("foo", "out", single_stage=True)
assert not spy.called
def test_rerun_deterministic_ignore_cache(tmp_dir, run_copy, mocker):
from dvc.stage.run import subprocess
tmp_dir.gen("foo", "foo content")
spy = mocker.spy(subprocess, "Popen")
run_copy("foo", "out", single_stage=True)
assert spy.called
spy.reset_mock()
run_copy("foo", "out", run_cache=False, single_stage=True)
assert spy.called
def test_rerun_callback(dvc):
def run_callback(force=False):
return dvc.run(
cmd="echo content > out", force=force, single_stage=True
)
assert run_callback() is not None
with pytest.raises(StageFileAlreadyExistsError):
assert run_callback() is not None
assert run_callback(force=True) is not None
def test_rerun_changed_dep(tmp_dir, run_copy):
tmp_dir.gen("foo", "foo content")
assert run_copy("foo", "out", single_stage=True) is not None
tmp_dir.gen("foo", "changed content")
with pytest.raises(StageFileAlreadyExistsError):
run_copy("foo", "out", force=False, single_stage=True)
assert run_copy("foo", "out", force=True, single_stage=True)
def test_rerun_changed_stage(tmp_dir, run_copy):
tmp_dir.gen("foo", "foo content")
assert run_copy("foo", "out", single_stage=True) is not None
tmp_dir.gen("bar", "bar content")
with pytest.raises(StageFileAlreadyExistsError):
run_copy("bar", "out", force=False, single_stage=True)
def test_rerun_changed_out(tmp_dir, run_copy):
tmp_dir.gen("foo", "foo content")
assert run_copy("foo", "out", single_stage=True) is not None
Path("out").write_text("modification", encoding="utf-8")
with pytest.raises(StageFileAlreadyExistsError):
run_copy("foo", "out", force=False, single_stage=True)
class TestRunCommit(TestDvc):
def test(self):
fname = "test"
ret = main(
[
"run",
"-o",
fname,
"--no-commit",
"--single-stage",
"echo",
"test",
">",
fname,
]
)
self.assertEqual(ret, 0)
self.assertTrue(os.path.isfile(fname))
self.assertFalse(os.path.exists(self.dvc.odb.local.cache_dir))
ret = main(["commit", fname + ".dvc"])
self.assertEqual(ret, 0)
self.assertTrue(os.path.isfile(fname))
self.assertEqual(len(os.listdir(self.dvc.odb.local.cache_dir)), 1)
class TestRunPersist(TestDvc):
@property
def outs_command(self):
raise NotImplementedError
def _test(self):
file = "file.txt"
file_content = "content"
stage_file = file + DVC_FILE_SUFFIX
self.run_command(file, file_content)
self.stage_should_contain_persist_flag(stage_file)
self.should_append_upon_repro(file, stage_file)
self.should_remove_persistent_outs(file, stage_file)
def run_command(self, file, file_content):
ret = main(
[
"run",
"--single-stage",
"--always-changed",
self.outs_command,
file,
f"echo {file_content} >> {file}",
]
)
self.assertEqual(0, ret)
def stage_should_contain_persist_flag(self, stage_file):
stage_file_content = load_yaml(stage_file)
self.assertEqual(
True, stage_file_content["outs"][0][Output.PARAM_PERSIST]
)
def should_append_upon_repro(self, file, stage_file):
ret = main(["repro", stage_file])
self.assertEqual(0, ret)
with open(file, encoding="utf-8") as fobj:
lines = fobj.readlines()
self.assertEqual(2, len(lines))
def should_remove_persistent_outs(self, file, stage_file):
ret = main(["remove", stage_file, "--outs"])
self.assertEqual(0, ret)
self.assertFalse(os.path.exists(file))
class TestRunPersistOuts(TestRunPersist):
@property
def outs_command(self):
return "--outs-persist"
def test(self):
self._test()
class TestRunPersistOutsNoCache(TestRunPersist):
@property
def outs_command(self):
return "--outs-persist-no-cache"
def test(self):
self._test()
class TestShouldRaiseOnOverlappingOutputPaths(TestDvc):
def test(self):
ret = main(["add", self.DATA_DIR])
self.assertEqual(0, ret)
with self.assertRaises(OverlappingOutputPathsError) as err:
self.dvc.run(
outs=[self.DATA],
cmd=f"echo data >> {self.DATA}",
single_stage=True,
)
error_output = str(err.exception)
data_dir_stage = self.DATA_DIR + DVC_FILE_SUFFIX
data_stage = os.path.basename(self.DATA) + DVC_FILE_SUFFIX
self.assertIn("The output paths:\n", error_output)
self.assertIn(
f"\n'{self.DATA_DIR}'('{data_dir_stage}')\n", error_output
)
self.assertIn(f"\n'{self.DATA}'('{data_stage}')\n", error_output)
self.assertIn(
"overlap and are thus in the same tracked directory.\n"
"To keep reproducibility, outputs should be in separate "
"tracked directories or tracked individually.",
error_output,
)
class TestRerunWithSameOutputs(TestDvc):
def _read_content_only(self, path):
with open(path, encoding="utf-8") as fobj:
return [line.rstrip() for line in fobj]
@property
def _outs_command(self):
raise NotImplementedError
def _run_twice_with_same_outputs(self):
ret = main(
[
"run",
"--single-stage",
"--outs",
self.FOO,
f"echo {self.FOO_CONTENTS} > {self.FOO}",
]
)
self.assertEqual(0, ret)
output_file_content = self._read_content_only(self.FOO)
self.assertEqual([self.FOO_CONTENTS], output_file_content)
ret = main(
[
"run",
self._outs_command,
self.FOO,
"--force",
"--single-stage",
f"echo {self.BAR_CONTENTS} >> {self.FOO}",
]
)
self.assertEqual(0, ret)
class TestNewRunShouldRemoveOutsOnNoPersist(TestRerunWithSameOutputs):
def test(self):
self._run_twice_with_same_outputs()
output_file_content = self._read_content_only(self.FOO)
self.assertEqual([self.BAR_CONTENTS], output_file_content)
@property
def _outs_command(self):
return "--outs"
class TestNewRunShouldNotRemoveOutsOnPersist(TestRerunWithSameOutputs):
def test(self):
self._run_twice_with_same_outputs()
output_file_content = self._read_content_only(self.FOO)
self.assertEqual(
[self.FOO_CONTENTS, self.BAR_CONTENTS], output_file_content
)
@property
def _outs_command(self):
return "--outs-persist"
class TestShouldNotCheckoutUponCorruptedLocalHardlinkCache(TestDvc):
def setUp(self):
super().setUp()
ret = main(["config", "cache.type", "hardlink"])
self.assertEqual(ret, 0)
self.dvc.close()
self.dvc = DvcRepo(".")
def test(self):
from tests.utils import clean_staging
cmd = f"python {self.CODE} {self.FOO} {self.BAR}"
stage = self.dvc.run(
deps=[self.FOO], outs=[self.BAR], cmd=cmd, single_stage=True
)
clean_staging()
os.chmod(self.BAR, 0o644)
with open(self.BAR, "w", encoding="utf-8") as fd:
fd.write("corrupting the output cache")
patch_checkout = mock.patch.object(
stage.outs[0], "checkout", wraps=stage.outs[0].checkout
)
from dvc.stage.run import cmd_run
patch_run = mock.patch("dvc.stage.run.cmd_run", wraps=cmd_run)
with self.dvc.lock:
with patch_checkout as mock_checkout:
with patch_run as mock_run:
stage.run()
mock_run.assert_called_once()
mock_checkout.assert_not_called()
def test_bad_stage_fname(tmp_dir, dvc, run_copy):
tmp_dir.dvc_gen("foo", "foo content")
with pytest.raises(StageFileBadNameError):
# fname should end with .dvc
run_copy("foo", "foo_copy", fname="out_stage", single_stage=True)
# Check that command hasn't been run
assert not (tmp_dir / "foo_copy").exists()
def test_should_raise_on_stage_dependency(run_copy):
with pytest.raises(DependencyIsStageFileError):
run_copy("name.dvc", "stage_copy", single_stage=True)
def test_should_raise_on_stage_output(tmp_dir, dvc, run_copy):
tmp_dir.dvc_gen("foo", "foo content")
with pytest.raises(OutputIsStageFileError):
run_copy("foo", "name.dvc", single_stage=True)
@pytest.mark.parametrize("metrics_type", ["metrics", "metrics_no_cache"])
def test_metrics_dir(tmp_dir, dvc, caplog, run_copy_metrics, metrics_type):
copyargs = {metrics_type: ["dir_metric"]}
tmp_dir.gen({"dir": {"file": "content"}})
with caplog.at_level(logging.DEBUG, "dvc"):
run_copy_metrics("dir", "dir_metric", **copyargs)
assert (
"directory 'dir_metric' cannot be used as metrics." in caplog.messages
)
def test_run_force_preserves_comments_and_meta(tmp_dir, dvc, run_copy):
tmp_dir.gen({"foo": "foo", "foo1": "foo1"})
text = textwrap.dedent(
"""\
desc: top desc
cmd: python copy.py foo bar
deps:
- path: copy.py
- path: foo
outs:
# comment preserved
- path: bar
desc: out desc
meta:
name: copy-foo-bar
"""
)
(tmp_dir / "bar.dvc").write_text(text)
dvc.reproduce("bar.dvc")
# CRLF on windows makes the generated file bigger in size
code_size = 143 if os.name == "nt" else 142
assert (tmp_dir / "bar.dvc").read_text() == textwrap.dedent(
f"""\
desc: top desc
cmd: python copy.py foo bar
deps:
- path: copy.py
md5: 90c27dd80b698fe766f0c3ee0b6b9729
size: {code_size}
- path: foo
md5: acbd18db4cc2f85cedef654fccc4a4d8
size: 3
outs:
# comment preserved
- path: bar
desc: out desc
md5: acbd18db4cc2f85cedef654fccc4a4d8
size: 3
meta:
name: copy-foo-bar
md5: be659ce4a33cebb85d4e8e1335d394ad
"""
)
run_copy("foo1", "bar1", single_stage=True, force=True, fname="bar.dvc")
assert (tmp_dir / "bar.dvc").read_text() == textwrap.dedent(
f"""\
desc: top desc
cmd: python copy.py foo1 bar1
deps:
- path: foo1
md5: 299a0be4a5a79e6a59fdd251b19d78bb
size: 4
- path: copy.py
md5: 90c27dd80b698fe766f0c3ee0b6b9729
size: {code_size}
outs:
# comment preserved
- path: bar1
md5: 299a0be4a5a79e6a59fdd251b19d78bb
size: 4
meta:
name: copy-foo-bar
md5: 9e725b11cb393e6a7468369fa50328b7
"""
)
|
py | 1a499308d259bdee01abc6a7bcdf0cf0a7a5ed9e | transactions = []
def get_trans(trans_id):
global transactions
for trans in transactions:
if trans.id == trans_id:
return trans
return False
def test_recoverable(story):
has_written = {}
has_read = {}
has_commited = []
is_recoverable = True
for i in range(len(story)):
act = story[i]
# if it wants to commit
if act.action == "c":
has_commited.append(act.transaction.id)
# check who it has read from
                # check that all have committed
if act.transaction.id in has_read.keys():
for trans in has_read[act.transaction.id]:
if trans not in has_commited:
# print(f"UNRECOVERABLE: action number {i} - {act.transaction.id} tries to read {act.value} from {get_trans(id).id} before it has been commited")
print(f"UNRECOVERABLE: action number {i} - {act.transaction.id} tried to read from {get_trans(trans).id} before it has been commited")
is_recoverable = False
elif act.action == "w":
if act.value not in has_written.keys():
has_written[act.value] = [act.transaction.id]
else:
has_written[act.value].append(act.transaction.id)
elif act.action == "r":
if act.value in has_written.keys():
if act.transaction.id not in has_read.keys():
has_read[act.transaction.id] = []
for trans in has_written[act.value]:
has_read[act.transaction.id].append(trans)
return is_recoverable
def test_aca(story):
has_commited = []
has_written = {}
is_aca = True
for i in range(len(story)):
act = story[i]
if act.action == "r":
if act.value in has_written.keys():
for trans in has_written[act.value]:
if trans not in has_commited:
print(f"NOT ACA - action {i} tried to read from {act.value} before transaction {trans} had commited")
is_aca = False
elif act.action == "w":
if act.value not in has_written.keys():
has_written[act.value] = [act.transaction.id]
else:
has_written[act.value].append(act.transaction.id)
elif act.action == "c":
has_commited.append(act.transaction.id)
return is_aca
def test_strict(story):
has_commited = []
has_written = {}
is_strict = True
for i in range(len(story)):
act = story[i]
if act.action == "r":
if act.value in has_written.keys():
for trans in has_written[act.value]:
if trans not in has_commited:
print(f"NOT STRICT - action {i} tried to read from {act.value} before transaction {trans} had commited")
is_strict = False
elif act.action == "w":
if act.value in has_written.keys():
for trans in has_written[act.value]:
if trans not in has_commited:
print(f"NOT STRICT - action {i} tried to write to {act.value} before transaction {trans} had commited")
is_strict = False
if act.value not in has_written.keys():
has_written[act.value] = [act.transaction.id]
else:
has_written[act.value].append(act.transaction.id)
elif act.action == "c":
has_commited.append(act.transaction.id)
return is_strict
class action:
def __init__(self, code):
code = code.lower()
self.action = code[0]
t = get_trans(code[1])
if t:
self.transaction = t
else:
self.transaction = transaction(code[1])
transactions.append(self.transaction)
self.value = None
if self.action != "c":
self.value = code[3]
class transaction:
def __init__(self, id):
self.id = id
def test(story):
actions = story.split("; ")
story_array = []
for act in actions:
if len(act) > 0:
a = action(act)
story_array.append(a)
#debug
# for ac in story_array:
# print(f"{ac.action}, {ac.transaction.id}, {ac.value}")
if not test_recoverable(story_array):
print("NOT RECOVERABLE")
elif not test_aca(story_array):
print("RECOVERABLE")
elif not test_strict(story_array):
print("ACA")
else:
print("STRICT")
# Example of usage
# test("w1(X); r2(X); w1(Y); w2(Y); c2;")
# print("-----------------------")
# test("w1(X); r2(X); w1(Y); w2(Y); c1; c2;")
# print("-----------------------")
# test("w1(X); r2(Y); w1(Y); c1; r2(X); c2;")
# print("-----------------------")
# test("w1(X); w2(X); w1(Y); w2(Y); c1; C2;")
# print("-----------------------")
|
py | 1a4993ac99c7007b5b7115b06cdea5d46200e547 | from rdkit import Chem
from rdkit.Chem import AllChem, Draw
from rdkit.Chem import rdMolDescriptors
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageFilter
#please check the rdkit manual drawing chemical fragments
#https://www.rdkit.org/docs/GettingStartedInPython.html#drawing-molecules
class FPVisualizer:
"""
Utility class to visualize and process chemical fragments
"""
def __init__(self, dataset, draw_all=False):
"""
Parameters
--------------------
dataset: tuple of (list of string , list of bit)
SMILES_list: list of smiles recorded in the database
fingerprint_list: list of the corresponding FPs recorded in the database
draw_all: bool
if true, draw all fragments in a chemical
"""
self.SMILES_list, self.fingerprint_list = zip(*dataset)
self.draw_all = draw_all
def get_mol_id_with_specific_bit_id(self, bit_ID):
"""
extract chemicals whose fingerprint[bit_ID]==1
Parameters
--------------------
bit_ID: int
            id of fingerprint
Returns
-------------------
hit_ID: int
            ID of chemicals whose fingerprint's bit_ID == 1
"""
hit = [True if fp[bit_ID] == 1 else False for fp in self.fingerprint_list]
temp = list(range(len(hit)))
hit_ID = [i for i, j in zip(temp, hit) if j == 1]
return hit_ID
def auto_draw_fragments(self, ID_list,draw=True):
"""
draw chemical fragments with specific bit_ID
Parameters
---------------
ID_list: list of int
list of bit_ID
Returns
-----------------
self.draw_fragments(tup): image object
chemical structures
smiles_list: list of string
corresponding smiles
"""
tup, smiles_list = self.calc_draw_tuples(ID_list)
#TODO: kekulization errors with some compounds
if draw:
img=self.draw_fragments(tup)
else:
img=None
return img, smiles_list
def calc_draw_tuples(self, ID_list):
"""
internal function of auto_draw_fragments
"""
draw_tuple = []
smiles_list = []
for bit_ID in ID_list:
#get smiles indexes whose bit_ID ==1
hit_ID = self.get_mol_id_with_specific_bit_id(bit_ID)
#create mol object whose molecular weight is smallest
match_SMILES_list = np.array(self.SMILES_list)[hit_ID]
sm = sort_SMILES_list_by_MW(match_SMILES_list)[0]
if sm == -1:
continue
smiles_list.append(sm)
mol = Chem.MolFromSmiles(sm)
bitI_rdkit = {}
fp_rdkit = Chem.RDKFingerprint(mol, bitInfo=bitI_rdkit)
draw_tuple.append((mol, bit_ID, bitI_rdkit))
return draw_tuple, smiles_list
def draw_fragments(self, draw_tuple):
image_list = []
for tup in draw_tuple:
mol, bit_ID, fp = tup
if self.draw_all:
# one molecule can have multiple fragments
for i in range(len(fp[bit_ID])):
img = Draw.DrawRDKitBit(mol, bit_ID, fp, whichExample=i)
image_list.append(img)
else:
img = Draw.DrawRDKitBit(mol, bit_ID, fp, whichExample=0)
image_list.append(img)
imgs = Image.fromarray(np.concatenate(image_list, axis=0))
return imgs
def calc_duplicate_array(self, bit_ID_list, threshold=0.5, plot=True):
"""
this is an original function to drop similar fingerprints
Parameters
-----------------
bit_ID_list: list of int
            list of bit_IDs of fingerprints. If different bit_IDs have similar contributions, they will be merged.
threshold: float
threshold to drop similar bit_IDs
plot: bool
if true, plot similarity heatmap
"""
ID_types = len(bit_ID_list)
subset_array = np.ones((ID_types, ID_types))
# from the database, extract a compound whose bit_ID ==1
for n1, i in enumerate(bit_ID_list):
hit_ids1 = self.get_mol_id_with_specific_bit_id(i)
for n2, j in enumerate(bit_ID_list):
hit_ids2 = self.get_mol_id_with_specific_bit_id(j)
                # calculate the difference of FP_i and FP_j
subset_array[n1][n2] = len(
list((set(hit_ids1)-set(hit_ids2))))/len(hit_ids1)
if plot:
plt.imshow(subset_array, interpolation='nearest', cmap='jet')
# delete similar bit_ids
dup_score = np.mean(subset_array, axis=0)
modif_bit_ID_list = [i for i, j in zip(
bit_ID_list, dup_score) if j > threshold]
return modif_bit_ID_list, subset_array
def calc_MW_from_SMILES_list(SMILES):
mol = Chem.MolFromSmiles(SMILES)
return rdMolDescriptors._CalcMolWt(mol)
def sort_SMILES_list_by_MW(SMILES_list):
"""
sort smiles by molecular weight
"""
if len(SMILES_list) == 0:
return [-1]
mw_list = [calc_MW_from_SMILES_list(i) for i in SMILES_list]
dataset = (list(zip(mw_list, SMILES_list)))
dataset.sort(key=lambda x: x[0])
mw_list, SMILES_list = list(zip(*dataset))
return SMILES_list
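# The sketch below is not part of the original module; it only illustrates how
# FPVisualizer is meant to be used. The SMILES strings and fingerprints are
# made-up examples, not data from any real project.
if __name__ == "__main__":
    example_smiles = ["CCO", "c1ccccc1", "CC(=O)O"]
    example_fps = [list(Chem.RDKFingerprint(Chem.MolFromSmiles(s))) for s in example_smiles]
    visualizer = FPVisualizer(list(zip(example_smiles, example_fps)), draw_all=False)
    # pick an arbitrary bit that is set in the first fingerprint and see which
    # molecules in the "database" share it
    example_bit = next(i for i, bit in enumerate(example_fps[0]) if bit == 1)
    print(visualizer.get_mol_id_with_specific_bit_id(example_bit))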
|
py | 1a4993ff1a78d62cb6a1f5b946907260434dda72 | # MIT License
#
# Copyright (c) 2020 Arkadiusz Netczuk <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import unittest
import datetime
from worklog.gui.dataobject import KernLogParser
from testworklog.data import get_data_path
class KernLogParserTest(unittest.TestCase):
def setUp(self):
## Called before testfunction is executed
pass
def tearDown(self):
## Called after testfunction was executed
pass
def test_parseKernLog_regular(self):
kernlogPath = get_data_path( "kern.log_regular" )
logList = KernLogParser.parseKernLog( kernlogPath )
self.assertEqual( len( logList ), 9 )
item = logList[0]
self.assertEqual( item[0], datetime.datetime( year=2020, month=10, day=26, hour=0, minute=9 ) )
self.assertEqual( item[1], datetime.datetime( year=2020, month=10, day=26, hour=1, minute=22 ) )
def test_parseKernLog_fail(self):
kernlogPath = get_data_path( "kern.log_fail" )
logList = KernLogParser.parseKernLog( kernlogPath )
self.assertEqual( len( logList ), 1 )
item = logList[0]
self.assertEqual( item[0], datetime.datetime( year=2020, month=10, day=26, hour=15, minute=49 ) )
self.assertEqual( item[1], datetime.datetime( year=2020, month=10, day=26, hour=15, minute=49 ) )
def test_parseKernLog_suspend(self):
kernlogPath = get_data_path( "kern.log_suspend" )
logList = KernLogParser.parseKernLog( kernlogPath )
self.assertEqual( len( logList ), 4 )
item = logList[0]
self.assertEqual( item[0], datetime.datetime( year=2020, month=10, day=31, hour=10, minute=46 ) )
self.assertEqual( item[1], datetime.datetime( year=2020, month=10, day=31, hour=10, minute=53 ) )
def test_parseKernLog_newyear(self):
kernlogPath = get_data_path( "kern.log_newyear" )
logList = KernLogParser.parseKernLog( kernlogPath )
self.assertEqual( len( logList ), 2 )
item1 = logList[0]
self.assertEqual( item1[0], datetime.datetime( year=2020, month=12, day=31, hour=18, minute=28 ) )
self.assertEqual( item1[1], datetime.datetime( year=2020, month=12, day=31, hour=18, minute=32 ) )
item2 = logList[1]
self.assertEqual( item2[0], datetime.datetime( year=2021, month=1, day=1, hour=20, minute=31 ) )
self.assertEqual( item2[1], datetime.datetime( year=2021, month=1, day=1, hour=20, minute=32 ) )
def test_parseKernLog_joinline(self):
## sometimes can happen that two lines of log are joined together without newline separator
kernlogPath = get_data_path( "kern.log_joinline" )
logList = KernLogParser.parseKernLog( kernlogPath )
self.assertEqual( len( logList ), 2 )
item1 = logList[0]
self.assertEqual( item1[0], datetime.datetime( year=2021, month=5, day=7, hour=23, minute=24 ) )
self.assertEqual( item1[1], datetime.datetime( year=2021, month=5, day=7, hour=23, minute=24 ) )
item2 = logList[1]
self.assertEqual( item2[0], datetime.datetime( year=2021, month=5, day=8, hour=21, minute=35 ) )
self.assertEqual( item2[1], datetime.datetime( year=2021, month=5, day=8, hour=21, minute=35 ) )
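# Not part of the original file: a conventional entry point so the module can
# also be run directly with `python`, in addition to a test runner.
if __name__ == '__main__':
    unittest.main()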
|
py | 1a49947530a1b2cb5bd9426bc6f2ad0ebd53d2f5 | # Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from oslo_log import log as logging
from neutron._i18n import _, _LE
from neutron.agent.linux import utils
from neutron.common import exceptions as n_exc
LOG = logging.getLogger(__name__)
class IpLinkSupportError(n_exc.NeutronException):
pass
class UnsupportedIpLinkCommand(IpLinkSupportError):
message = _("ip link command is not supported: %(reason)s")
class InvalidIpLinkCapability(IpLinkSupportError):
message = _("ip link capability %(capability)s is not supported")
class IpLinkConstants(object):
IP_LINK_CAPABILITY_STATE = "state"
IP_LINK_CAPABILITY_VLAN = "vlan"
IP_LINK_CAPABILITY_RATE = "rate"
IP_LINK_CAPABILITY_SPOOFCHK = "spoofchk"
IP_LINK_SUB_CAPABILITY_QOS = "qos"
class IpLinkSupport(object):
VF_BLOCK_REGEX = r"\[ vf NUM(?P<vf_block>.*) \] \]"
CAPABILITY_REGEX = r"\[ %s (.*)"
SUB_CAPABILITY_REGEX = r"\[ %(cap)s (.*) \[ %(subcap)s (.*)"
@classmethod
def get_vf_mgmt_section(cls):
"""Parses ip link help output, and gets vf block"""
output = cls._get_ip_link_output()
vf_block_pattern = re.search(cls.VF_BLOCK_REGEX,
output,
re.DOTALL | re.MULTILINE)
if vf_block_pattern:
return vf_block_pattern.group("vf_block")
@classmethod
def vf_mgmt_capability_supported(cls, vf_section, capability,
subcapability=None):
"""Validate vf capability support
        Checks whether the given vf capability (and sub-capability,
        if given) is supported
:param vf_section: vf Num block content
:param capability: for example: vlan, rate, spoofchk, state
:param subcapability: for example: qos
"""
if not vf_section:
return False
if subcapability:
regex = cls.SUB_CAPABILITY_REGEX % {"cap": capability,
"subcap": subcapability}
else:
regex = cls.CAPABILITY_REGEX % capability
pattern_match = re.search(regex, vf_section,
re.DOTALL | re.MULTILINE)
return pattern_match is not None
@classmethod
def _get_ip_link_output(cls):
"""Gets the output of the ip link help command
Runs ip link help command and stores its output
        Note: ip link help returns an error and writes its output to stderr,
            so we get the output from there. However, if this issue is ever
            fixed and the command starts writing to stdout, we will get the
            output from there too.
"""
try:
ip_cmd = ['ip', 'link', 'help']
_stdout, _stderr = utils.execute(
ip_cmd,
check_exit_code=False,
return_stderr=True,
log_fail_as_error=False)
except Exception as e:
LOG.exception(_LE("Failed executing ip command"))
raise UnsupportedIpLinkCommand(reason=e)
return _stdout or _stderr
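# Illustrative sketch, not part of the original module: how the helpers above
# are typically combined to probe for a single capability. It requires the
# `ip` utility on the host; the capability chosen here is just an example.
def _example_spoofchk_supported():
    vf_section = IpLinkSupport.get_vf_mgmt_section()
    return IpLinkSupport.vf_mgmt_capability_supported(
        vf_section, IpLinkConstants.IP_LINK_CAPABILITY_SPOOFCHK)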
|
py | 1a49955d0378e57e2befe31cf55e4b2b788c5021 | import pathlib
import warnings
import functools
from typing import Dict
from contextlib import contextmanager
from urllib.parse import urlparse
from sunpy.util.exceptions import SunpyUserWarning
from sunpy.util.util import hash_file
__all__ = ['DataManager']
class DataManager:
"""
This class provides a remote data manager for managing remote files.
Parameters
----------
cache: `sunpy.data.data_manager.cache.Cache`
Cache object to be used by `~sunpy.data.data_manager.manager.DataManager`.
"""
def __init__(self, cache):
self._cache = cache
self._file_cache = {}
self._skip_hash_check = False
self._skip_file: Dict[str, str] = {}
def require(self, name, urls, sha_hash):
"""
Decorator for informing the data manager about the requirement of
a file by a function.
Parameters
----------
name: `str`
The name to reference the file with.
urls: `list` or `str`
A list of urls to download the file from.
sha_hash: `str`
SHA-1 hash of file.
"""
if isinstance(urls, str):
urls = [urls]
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
replace = self._skip_file.get(name, None)
if replace:
uri_parse = urlparse(replace['uri'])
if uri_parse.scheme in ("", "file"):
                        # If a relative file uri is specified (e.g.
                        # `file://sunpy/test`), this maintains compatibility
                        # with the original behaviour, where it would be
                        # interpreted as `./sunpy/test`. If no scheme is
                        # specified, netloc will be '' by default.
file_path = uri_parse.netloc + uri_parse.path
file_hash = hash_file(file_path)
else:
file_path, file_hash, _ = self._cache._download_and_hash([replace['uri']])
if replace['hash'] and file_hash != replace['hash']:
# if hash provided to replace function doesn't match the hash of the file
# raise error
raise ValueError(
"Hash provided to override_file does not match hash of the file.")
elif self._skip_hash_check:
file_path = self._cache.download(urls, redownload=True)
else:
details = self._cache.get_by_hash(sha_hash)
if not details:
# In case we are matching by hash and file does not exist
# That might mean the wrong hash is supplied to decorator
# We match by urls to make sure that is not the case
if self._cache_has_file(urls):
raise ValueError(" Hash provided does not match the hash in database.")
file_path = self._cache.download(urls)
if hash_file(file_path) != sha_hash:
# the hash of the file downloaded does not match provided hash
# this means the file has changed on the server.
# the function should be updated to use the new
# hash. Raise an error to notify.
raise RuntimeError(
"Remote file on the server has changed. Update hash of the function.")
else:
# This is to handle the case when the local file
# appears to be tampered/corrupted
if hash_file(details['file_path']) != details['file_hash']:
warnings.warn("Hashes do not match, the file will be redownloaded (could be be tampered/corrupted)",
SunpyUserWarning)
file_path = self._cache.download(urls, redownload=True)
# Recheck the hash again, if this fails, we will exit.
if hash_file(file_path) != details['file_hash']:
raise RuntimeError("Redownloaded file also has the incorrect hash."
"The remote file on the server might have changed.")
else:
file_path = details['file_path']
self._file_cache[name] = file_path
return func(*args, **kwargs)
return wrapper
return decorator
@contextmanager
def override_file(self, name, uri, sha_hash=None):
"""
Replaces the file by the name with the file provided by the url/path.
Parameters
----------
name: `str`
Name of the file provided in the `require` decorator.
uri: `str`
URI of the file which replaces original file. Scheme should be one
of ``http``, ``https``, ``ftp`` or ``file``. If no scheme is given
the uri will be interpreted as a local path. i.e.
``file:///tmp/test`` and ``/tmp/test`` are the same.
sha_hash: `str`, optional
SHA256 hash of the file to compared to after downloading.
"""
try:
self._skip_file[name] = {
'uri': uri,
'hash': sha_hash,
}
yield
finally:
_ = self._skip_file.pop(name, None)
@contextmanager
def skip_hash_check(self):
"""
Disables hash checking temporarily
Examples
--------
>>> with remote_data_manager.skip_hash_check(): # doctest: +SKIP
... myfunction() # doctest: +SKIP
"""
try:
self._skip_hash_check = True
yield
finally:
self._skip_hash_check = False
def get(self, name):
"""
Get the file by name.
Parameters
----------
name: `str`
Name of the file given to the data manager, same as the one provided
in `~sunpy.data.data_manager.manager.DataManager.require`.
Returns
-------
`pathlib.Path`
Path of the file.
Raises
------
`KeyError`
If ``name`` is not in the cache.
"""
return pathlib.Path(self._file_cache[name])
def _cache_has_file(self, urls):
for url in urls:
if self._cache._get_by_url(url):
return True
return False
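# Illustrative sketch, not part of the original module: how `require` is meant
# to decorate a function that needs a remote file. The name, URL and hash are
# placeholders, not real project values, and `cache` must be a sunpy Cache.
def _example_usage(cache):
    manager = DataManager(cache)
    @manager.require('example_file',
                     ['http://example.com/example_file'],
                     '0000000000000000000000000000000000000000')
    def example_file_path():
        return manager.get('example_file')
    return example_file_path()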
|
py | 1a4995c325ef4e54e70415cb64f6f5db5b973b07 | """Emoji
Available Commands:
.emoji shrug
.emoji apple
.emoji :/
.emoji -_-"""
from telethon import events
import asyncio
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 3
animation_ttl = range(0, 18)
input_str = event.pattern_match.group(1)
if input_str == "call":
await event.edit(input_str)
animation_chars = [
"`Connecting To Telegram Headquarters...`",
"`Call Connected.`",
"`Telegram: Hello This is Telegram HQ. Who is this?`",
"`Me: Yo this is` @mantiz_rip ,`Please Connect me to my lil bro,Pavel Durov`",
"`User Authorised.`",
"`Calling Pavel Durov` `At +916969696969`",
"`Private Call Connected...`",
"`Me: Hello Sir, Please Ban This Telegram Account.`",
"`Pavel: May I Know Who Is This?`",
"`Me: Yo Brah, I Am` @mantiz_rip ",
"`Pavel: OMG!!! Long time no see, Wassup Brother...\nI'll Make Sure That Guy Account Will Get Blocked Within 24Hrs.`",
"`Me: Thanks, See You Later Brah.`",
"`Pavel: Please Don't Thank Brah, Telegram Is Our's. Just Gimme A Call When You Become Free.`",
"`Me: Is There Any Issue/Emergency???`",
"`Pavel: Yes Sur, There Is A Bug In Telegram v69.6.9.\nI Am Not Able To Fix It. If Possible, Please Help Fix The Bug.`",
"`Me: Send Me The App On My Telegram Account, I Will Fix The Bug & Send You.`",
"`Pavel: Sure Sur \nTC Bye Bye :)`",
"`Private Call Disconnected.`"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 18])
|
py | 1a4996279c3e52b9b9f7445586b25dc6be074570 | import itertools
import logging
from pint import pi_theorem
from pint.testsuite import QuantityTestCase
class TestPiTheorem(QuantityTestCase):
def test_simple(self, caplog):
# simple movement
with caplog.at_level(logging.DEBUG):
assert pi_theorem({"V": "m/s", "T": "s", "L": "m"}) == [
{"V": 1, "T": 1, "L": -1}
]
# pendulum
assert pi_theorem({"T": "s", "M": "grams", "L": "m", "g": "m/s**2"}) == [
{"g": 1, "T": 2, "L": -1}
]
assert len(caplog.records) == 7
def test_inputs(self):
V = "km/hour"
T = "ms"
L = "cm"
f1 = lambda x: x
f2 = lambda x: self.Q_(1, x)
f3 = lambda x: self.Q_(1, x).units
f4 = lambda x: self.Q_(1, x).dimensionality
fs = f1, f2, f3, f4
for fv, ft, fl in itertools.product(fs, fs, fs):
qv = fv(V)
qt = ft(T)
ql = ft(L)
assert self.ureg.pi_theorem({"V": qv, "T": qt, "L": ql}) == [
{"V": 1.0, "T": 1.0, "L": -1.0}
]
|
py | 1a49968f80388a16b1b1e7479dda98001bf597cb | import unittest
import reframe.core.runtime as rt
import unittests.fixtures as fixtures
class TestRuntime(unittest.TestCase):
@rt.switch_runtime(fixtures.TEST_SITE_CONFIG, 'testsys')
def test_hostsystem_api(self):
system = rt.runtime().system
self.assertEqual('testsys', system.name)
self.assertEqual('Fake system for unit tests', system.descr)
self.assertEqual(2, len(system.partitions))
self.assertIsNotNone(system.partition('login'))
self.assertIsNotNone(system.partition('gpu'))
self.assertIsNone(system.partition('foobar'))
# Test delegation to the underlying System
self.assertEqual('.rfm_testing', system.prefix)
self.assertEqual('.rfm_testing/resources', system.resourcesdir)
self.assertEqual('.rfm_testing/perflogs', system.perflogdir)
|
py | 1a499769933c0709a5d8b5a679c2281cfd7206e0 | import sys
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
class NoseTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import nose
errcode = nose.main(self.test_args)
sys.exit(errcode)
setup(name='battleforcastile',
version='0.0.2',
description='Play a fantasy cards game on your terminal',
maintainer='José Vidal',
maintainer_email='[email protected]',
author='José Vidal',
author_email='[email protected]',
url='https://github.com/battleforcastile/battleforcastile',
license='MIT',
long_description=open('README.md').read(),
platforms='any',
keywords=[
'fantasy',
'game',
],
packages=find_packages(),
install_requires=[
'click==7.0'
],
entry_points={
"console_scripts": [
"battleforcastile = battleforcastile.main:cli",
],
},
classifiers=[
'Programming Language :: Python',
'Operating System :: OS Independent'
],
tests_require=['nose'],
cmdclass={'test': NoseTest}
) |
py | 1a4997c9f8dae6cd3f22c04e404ee4fa015845ac | """
ASGI config for taskscraper project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'taskscraper.settings')
application = get_asgi_application()
|
py | 1a499a12cab3fe1f786c56b9f11253d35225d230 | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Exception Class
# this is a auto generated file generated by Cheetah
# Namespace: com.sun.star.ucb
# Libre Office Version: 7.3
from ooo.oenv.env_const import UNO_NONE
import typing
from ..task.classified_interaction_request import ClassifiedInteractionRequest as ClassifiedInteractionRequest_9f72121b
from ..uno.x_interface import XInterface as XInterface_8f010a43
from ..task.interaction_classification import InteractionClassification as InteractionClassification_6c4d10e7
class InteractiveAppException(ClassifiedInteractionRequest_9f72121b):
"""
Exception Class
An application error.
**since**
OOo 1.1.2
See Also:
`API InteractiveAppException <https://api.libreoffice.org/docs/idl/ref/exceptioncom_1_1sun_1_1star_1_1ucb_1_1InteractiveAppException.html>`_
"""
__ooo_ns__: str = 'com.sun.star.ucb'
__ooo_full_ns__: str = 'com.sun.star.ucb.InteractiveAppException'
__ooo_type_name__: str = 'exception'
__pyunointerface__: str = 'com.sun.star.ucb.InteractiveAppException'
__pyunostruct__: str = 'com.sun.star.ucb.InteractiveAppException'
typeName: str = 'com.sun.star.ucb.InteractiveAppException'
"""Literal Constant ``com.sun.star.ucb.InteractiveAppException``"""
def __init__(self, Message: typing.Optional[str] = '', Context: typing.Optional[XInterface_8f010a43] = None, Classification: typing.Optional[InteractionClassification_6c4d10e7] = InteractionClassification_6c4d10e7.ERROR, Code: typing.Optional[int] = 0) -> None:
"""
Constructor
Arguments:
Message (str, optional): Message value.
Context (XInterface, optional): Context value.
Classification (InteractionClassification, optional): Classification value.
Code (int, optional): Code value.
"""
kargs = {
"Message": Message,
"Context": Context,
"Classification": Classification,
"Code": Code,
}
self._init(**kargs)
def _init(self, **kwargs) -> None:
self._code = kwargs["Code"]
inst_keys = ('Code',)
kargs = kwargs.copy()
for key in inst_keys:
del kargs[key]
super()._init(**kargs)
@property
def Code(self) -> int:
"""
The type of application error.
"""
return self._code
@Code.setter
def Code(self, value: int) -> None:
self._code = value
__all__ = ['InteractiveAppException']
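# Illustrative sketch, not part of the Cheetah-generated content above: the
# exception is a struct-like type and can be built with keyword arguments that
# mirror the constructor; the message and code used here are arbitrary.
def _example_build() -> 'InteractiveAppException':
    return InteractiveAppException(Message='application error', Code=1)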
|
py | 1a499b7698d0899868739a9ae16f9f736381c8f6 | #!/usr/bin/env python
#################################################################
##
## Script: pyttt.py
## Author: Premshree Pillai
## Description: Tic-Tac-Toe game in Python
## Web: http://www.qiksearch.com/
## http://premshree.resource-locator.com/
## Created: 19/03/04 (dd/mm/yy)
##
## (C) 2004 Premshree Pillai
##
#################################################################
import cgi
print("Content-type: text/html\n\n")
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
bsize = 3
playerToken = "X"
myToken = "0"
gameOver = 0
winArr = []
rowArr = []
colArr = []
digArr = []
x = 0
while x < bsize * bsize :
rowArr.append(0)
colArr.append(0)
digArr.append(0)
x = x + 1
out1 = """<html>
<head>
<title>Tic Tac Toe in Python</title>
<style type="text/css">
.main{border:#9999CC solid 2px; width:350px}
.btn{font-family:comic sans ms,verdana,arial,helvetica; font-size:20pt; font-weight:bold; background:#9999CC; width:50px; height:50px; border:#666699 solid 1px; cursor:hand; color:#EFEFFF}
.btn_over{font-family:comic sans ms,verdana,arial,helvetica; font-size:20pt; font-weight:bold; background:#EFEFFF; width:50px; height:50px; border:#666699 solid 1px; cursor:hand; color:#9999CC}
.btn_down{font-family:comic sans ms,verdana,arial,helvetica; font-size:20pt; font-weight:bold; background:#666699; width:50px; height:50px; border:#666699 solid 1px; cursor:hand; color:#EFEFFF}
.footer{font-family:verdana,arial,helvetica; font-size:8pt; color:#FFFFFF}
.link{font-family:verdana,arial,helvetica; font-size:8pt; color:#FFFFFF}
.link:hover{font-family:verdana,arial,helvetica; font-size:8pt; color:#EFEFFF}
</style>
<script language="JavaScript">
var doneFlag=false;
function toggleVal(who) {
var check;
eval('check=document.ttt.'+who+'_btn.value;');
if(check==" ") {
if(!doneFlag) {
eval('document.ttt.'+who+'_btn.value="X";');
eval('document.ttt.'+who+'_btn.disabled="true";');
eval('document.ttt.'+who+'.value="X";');
document.ttt.submit();
doneFlag=true;
document.getElementById('process').innerHTML="Processing.........";
}
}
else {
alert('Invalid Move!');
}
}
</script>
</head>
<body>
<table width="100%" height="100%"><tr><td align="center">
<table width="346" align="center" bgcolor="#9999CC" cellspacing="0" cellpadding="0"><tr><td></td></tr></table>
<table width="348" align="center" bgcolor="#9999CC" cellspacing="0" cellpadding="0"><tr><td></td></tr></table>
<table align="center" cellspacing="0" cellpadding="0" class="main"><tr><td align="center">
<table width="100%" bgcolor="#9999CC" cellspacing="0" cellpadding="0"><tr><td align="center"><a href="pyttt.py"><img src="../ttt_py.gif" border="0" alt="Tic Tac Toe (in Python)"></a></td></tr></table>
<table width="100%" bgcolor="#EFEFFF" cellspacing="0" cellpadding="0"><tr><td align="center"><a href="http://www.qiksearch.com"><img src="../qiksearch_ttt_py.gif" border="0" alt="www.qiksearch.com"></a></td></tr></table>"""
print(out1)
def genBox(size):
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
count = 0
retVal = '<form name="ttt" method="post" action="pyttt.py">'
i = 0
while i < size :
j = 0
while j < size :
count = count + 1
retVal = retVal + '<input type="button" name="s' + str(count) + '_btn" value=" " class="btn" onClick="toggleVal(\'s' + str(count) + '\')" onMouseover="this.className=\'btn_over\'" onMouseout="this.className=\'btn\'" onMousedown="this.className=\'btn_down\'"><input type="hidden" name="s' + str(count) + '" value=" ">'
j = j + 1
retVal = retVal + '<br>'
i = i + 1
retVal = retVal + '</form>'
print(retVal)
def genBox2(size,arr):
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
count = 0
retVal = '<form name="ttt" method="post" action="pyttt.py">'
i = 0
while i < size :
j = 0
while j < size :
count = count + 1
retVal = retVal + '<input type="button" name="s' + str(count) + '_btn" value="' + str(arr[count-1]) + '" class="btn" onClick="toggleVal(\'s' + str(count) + '\')" onMouseover="this.className=\'btn_over\'" onMouseout="this.className=\'btn\'" onMousedown="this.className=\'btn_down\'"><input type="hidden" name="s' + str(count) + '" value="' + str(arr[count-1]) + '">'
j = j + 1
retVal = retVal + '<br>'
i = i + 1
retVal = retVal + '</form>'
print(retVal)
def isEmpty(who):
if who == " ":
return 1
else:
return 0;
def move(bsize,arr):
global playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
count = 0
maxCount = 0
pos = 0
retVal = 0
# Build Row Array
i = 0
while i < bsize :
maxCount = 0
fullCounter = 0
j = 0
while j < bsize :
count = count + 1
who = arr[count-1]
if who == playerToken :
maxCount = maxCount + 1
fullCounter = fullCounter + 1
if who == myToken :
fullCounter = fullCounter + 1
j = j + 1
rowArr[i] = maxCount
if fullCounter == bsize :
rowArr[i] = -1
i = i + 1
# Building Column Array
i = 0
while i < bsize :
count = i + 1
maxCount = 0
fullCounter = 0
j = 0
while j < bsize :
who = arr[count-1]
if who == playerToken :
maxCount = maxCount + 1
fullCounter = fullCounter + 1
if who == myToken :
fullCounter = fullCounter + 1
count = count + bsize
j = j + 1
colArr[i] = maxCount
if fullCounter == bsize :
colArr[i] = -1
i = i + 1
# Building Diagonal Array
i = 0
while i < 2 :
if i == 0 :
count = i + 1
else:
count = bsize
maxCount = 0
fullCounter = 0
j = 0
while j < bsize :
who = arr[count-1]
if who == playerToken :
maxCount = maxCount + 1
fullCounter = fullCounter + 1
if who == myToken :
fullCounter = fullCounter + 1
if i == 0 :
count = count + bsize + 1
else:
count = count + bsize - 1
j = j + 1
digArr[i] = maxCount
if fullCounter == bsize :
digArr[i] = -1
i = i + 1
# Finding Max Values
maxRow = myMax(0,bsize,"row",rowArr)
maxCol = myMax(0,bsize,"col",colArr)
maxDig = myMax(0,bsize,"dig",digArr)
maxArrs = []
maxArrs.append(myMax(1,bsize,"row",rowArr))
maxArrs.append(myMax(1,bsize,"col",colArr))
maxArrs.append(myMax(1,bsize,"dig",digArr))
if myMax(0,bsize,"x",maxArrs) == 0 :
pos = bsize * (maxRow + 1) - bsize
if myMax(0,bsize,"x",maxArrs) == 1 :
pos = maxCol
if myMax(0,bsize,"x",maxArrs) == 2 :
if maxDig == 0 :
pos = maxDig
else:
pos = bsize - 1
retFlag = 0
y = 0
while y < bsize :
if not(retFlag):
if arr[pos] == " " :
retVal = pos
retFlag = 1
if myMax(0,bsize,"x",maxArrs) == 0 :
pos = pos + 1
if myMax(0,bsize,"x",maxArrs) == 1 :
pos = pos + bsize
if myMax(0,bsize,"x",maxArrs) == 2 :
if maxDig == 0 :
pos = pos + bsize + 1
else:
pos = pos + bsize - 1
y = y + 1
return retVal
def myMax(what,bsize,type,arr):
global playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
max = -1
maxIndex = -1
if type != "dig" :
i = 0
while i < bsize :
if arr[i] > max :
max = arr[i]
maxIndex = i
i = i + 1
if type == "dig" :
i = 0
while i < 2 :
if arr[i] > max :
max = arr[i]
maxIndex = i
i = i + 1
if what == 0 :
return maxIndex
else:
return max
def playerWin():
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
who = playerToken
if (s1 == who == s2 == s3) or (s4 == who == s5 == s6) or (s7 == who == s8 == s9) or (s1 == who == s4 == s7) or (s2 == who == s5 == s8) or (s3 == who == s6 == s9) or (s1 == who == s5 == s9) or (s3 == who == s5 == s7) :
return 1
else:
return 0
def iWin():
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
who = myToken
if (s1 == who == s2 == s3) or (s4 == who == s5 == s6) or (s7 == who == s8 == s9) or (s1 == who == s4 == s7) or (s2 == who == s5 == s8) or (s3 == who == s6 == s9) or (s1 == who == s5 == s9) or (s3 == who == s5 == s7) :
return 1
else:
return 0
def whereWinComp():
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
who = myToken
if (s1 == who == s2 == s3) :
winArr = ['s1','s2','s3']
if (s4 == who == s5 == s6) :
winArr = ['s4','s5','s6']
if (s7 == who == s8 == s9) :
winArr = ['s7','s8','s9']
if (s1 == who == s4 == s7) :
winArr = ['s1','s4','s7']
if (s2 == who == s5 == s8) :
winArr = ['s2','s5','s8']
if (s3 == who == s6 == s9) :
winArr = ['s3','s6','s9']
if (s1 == who == s5 == s9) :
winArr = ['s1','s5','s9']
if (s3 == who == s5 == s7) :
winArr = ['s3','s5','s7']
def whereWinPlayer():
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
who = playerToken
if (s1 == who == s2 == s3) :
winArr = ['s1','s2','s3']
if (s4 == who == s5 == s6) :
winArr = ['s4','s5','s6']
if (s7 == who == s8 == s9) :
winArr = ['s7','s8','s9']
if (s1 == who == s4 == s7) :
winArr = ['s1','s4','s7']
if (s2 == who == s5 == s8) :
winArr = ['s2','s5','s8']
if (s3 == who == s6 == s9) :
winArr = ['s3','s6','s9']
if (s1 == who == s5 == s9) :
winArr = ['s1','s5','s9']
if (s3 == who == s5 == s7) :
winArr = ['s3','s5','s7']
def draw():
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
drawCounter = 0
dCounter = 0
while dCounter < len(vals) :
if vals[dCounter] != " " :
drawCounter = drawCounter + 1
dCounter = dCounter + 1
if drawCounter == bsize * bsize :
return 1
else:
return 0
form = cgi.FieldStorage()
if form :
s1 = form['s1'].value
s2 = form['s2'].value
s3 = form['s3'].value
s4 = form['s4'].value
s5 = form['s5'].value
s6 = form['s6'].value
s7 = form['s7'].value
s8 = form['s8'].value
s9 = form['s9'].value
vals = [s1,s2,s3,s4,s5,s6,s7,s8,s9]
if draw() or playerWin() :
gameOver = 1
# Computer's Move!
movIndex = move(bsize,vals)
if not(gameOver) :
vals[movIndex] = myToken
# Update S's
if not(gameOver) :
if movIndex == 0 :
s1 = myToken
if movIndex == 1 :
s2 = myToken
if movIndex == 2 :
s3 = myToken
if movIndex == 3 :
s4 = myToken
if movIndex == 4 :
s5 = myToken
if movIndex == 5 :
s6 = myToken
if movIndex == 6 :
s7 = myToken
if movIndex == 7 :
s8 = myToken
if movIndex == 8 :
s9 = myToken
genBox2(bsize,vals)
if playerWin() :
print('<font face="verdana,arial,helvetica" color="#009900" size="4"><b>Wow! You Won!</b></font><br><br>')
print('<input type="button" onClick="location.href=\'pyttt.py\'" value="Play Again!" style="background:#CCCCCC; font-weight:bold; cursor:hand"><br><br>')
whereWinPlayer()
print('<script language="JavaScript">')
winCount = 0
while winCount < len(winArr) :
print('document.ttt.' + winArr[winCount] + '_btn.style.color=\'#009900\';')
winCount = winCount + 1
w = 0
while w < (bsize * bsize) :
if vals[w] == " " :
print('document.ttt.s' + str(w + 1) + '_btn.disabled=true;')
w = w + 1
print('</script>')
gameOver = 1
if iWin() and not(gameOver) :
print('<font face="verdana,arial,helvetica" color="#FF0000" size="4"><b>Oops! You Lost!</b></font><br><br>')
print('<input type="button" onClick="location.href=\'pyttt.py\'" value="Play Again!" style="background:#CCCCCC; font-weight:bold; cursor:hand"><br><br>')
whereWinComp()
print('<script language="JavaScript">')
winCount = 0
while winCount < len(winArr) :
print('document.ttt.' + winArr[winCount] + '_btn.style.color=\'#FF0000\';');
winCount = winCount + 1
w = 0
while w < bsize * bsize :
if vals[w] == " " :
print('document.ttt.s' + str(w + 1) + '_btn.disabled=true;')
w = w + 1
print('</script>')
gameOver = 1
if draw() and not(playerWin()) and not(iWin()) :
print('<font face="verdana,arial,helvetica" color="#000000" size="4"><b>It\'s a Draw!</b></font><br><br>')
print('<input type="button" onClick="location.href=\'pyttt.py\'" value="Play Again!" style="background:#CCCCCC; font-weight:bold; cursor:hand"><br><br>')
print('<script language="JavaScript">')
w = 0
while w < bsize * bsize :
if vals[w] == " " :
print('document.ttt.s' + str(w + 1) + '_btn.disabled=true;')
w = w + 1
print('</script>')
else:
genBox(bsize)
out2 = """<div style="font-family:verdana,arial,helvetica; font-weight:bold; font-size:10pt; color:#CC0000; background:#EFEFFF; width:100%; padding:3px" id="process"></div>
<table width="100%" bgcolor="#9999CC"><tr><td><span class="footer">© 2004 <a href="http://www.qiksearch.com" class="link">Premshree Pillai</a> | <a href="http://www.guestbookdepot.com/cgi-bin/guestbook.cgi?book_id=374186" class="link">Sign my Guestbook</a>.</span></td></tr></table>
</td></tr></table>
<table width="348" align="center" bgcolor="#9999CC" cellspacing="0" cellpadding="0"><tr><td></td></tr></table>
<table width="346" align="center" bgcolor="#9999CC" cellspacing="0" cellpadding="0"><tr><td></td></tr></table>
</td></tr></table>
</body>
</html>"""
print(out2)
|
py | 1a499c072bf95a8ead167e7f0e8a54fb6cd8c4ab | import os
import time
import string
import argparse
import re
import torch
import torch.backends.cudnn as cudnn
import torch.utils.data
import torch.nn.functional as F
import numpy as np
from nltk.metrics.distance import edit_distance
from utils import CTCLabelConverter, AttnLabelConverter, Averager
from dataset import hierarchical_dataset, AlignCollate
from model import Model
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def benchmark_all_eval(model, criterion, converter, opt, calculate_infer_time=False):
""" evaluation with 10 benchmark evaluation datasets """
# The evaluation datasets, dataset order is same with Table 1 in our paper.
eval_data_list = ['IIIT5k_3000', 'SVT', 'IC03_860', 'IC03_867', 'IC13_857',
'IC13_1015', 'IC15_1811', 'SVTP', 'CUTE80', 'IC15_2077']
if calculate_infer_time:
evaluation_batch_size = 1 # batch_size should be 1 to calculate the GPU inference time per image.
else:
evaluation_batch_size = opt.batch_size
list_accuracy = []
total_forward_time = 0
total_evaluation_data_number = 0
total_correct_number = 0
log = open(f'./result/{opt.exp_name}/log_all_evaluation.txt', 'a')
dashed_line = '-' * 80
print(dashed_line)
log.write(dashed_line + '\n')
for eval_data in eval_data_list:
eval_data_path = os.path.join(opt.eval_data, eval_data)
AlignCollate_evaluation = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
eval_data, eval_data_log = hierarchical_dataset(root=eval_data_path, opt=opt)
evaluation_loader = torch.utils.data.DataLoader(
eval_data, batch_size=evaluation_batch_size,
shuffle=False,
num_workers=int(opt.workers),
collate_fn=AlignCollate_evaluation, pin_memory=True)
_, accuracy_by_best_model, norm_ED_by_best_model, _, _, _, infer_time, length_of_data = validation(
model, criterion, evaluation_loader, converter, opt)
list_accuracy.append(f'{accuracy_by_best_model:0.3f}')
total_forward_time += infer_time
total_evaluation_data_number += len(eval_data)
total_correct_number += accuracy_by_best_model * length_of_data
log.write(eval_data_log)
print(f'Acc {accuracy_by_best_model:0.3f}\t normalized_ED {norm_ED_by_best_model:0.3f}')
log.write(f'Acc {accuracy_by_best_model:0.3f}\t normalized_ED {norm_ED_by_best_model:0.3f}\n')
print(dashed_line)
log.write(dashed_line + '\n')
averaged_forward_time = total_forward_time / total_evaluation_data_number * 1000
total_accuracy = total_correct_number / total_evaluation_data_number
params_num = sum([np.prod(p.size()) for p in model.parameters()])
evaluation_log = 'accuracy: '
for name, accuracy in zip(eval_data_list, list_accuracy):
evaluation_log += f'{name}: {accuracy}\t'
evaluation_log += f'total_accuracy: {total_accuracy:0.3f}\t'
evaluation_log += f'averaged_infer_time: {averaged_forward_time:0.3f}\t# parameters: {params_num/1e6:0.3f}'
print(evaluation_log)
log.write(evaluation_log + '\n')
log.close()
return None
def validation(model, criterion, evaluation_loader, converter, opt):
""" validation or evaluation """
n_correct = 0
norm_ED = 0
length_of_data = 0
infer_time = 0
valid_loss_avg = Averager()
for i, (image_tensors, labels) in enumerate(evaluation_loader):
batch_size = image_tensors.size(0)
length_of_data = length_of_data + batch_size
image = image_tensors.to(device)
# For max length prediction
length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
text_for_loss, length_for_loss = converter.encode(labels, batch_max_length=opt.batch_max_length)
start_time = time.time()
preds, _= model(image, text_for_pred, is_train=False)
preds = preds[-1]
forward_time = time.time() - start_time
preds = preds[:, :text_for_loss.shape[1] - 1, :]
target = text_for_loss[:, 1:] # without [GO] Symbol
cost = criterion(preds.contiguous().view(-1, preds.shape[-1]), target.contiguous().view(-1))
# select max probability (greedy decoding) then decode index to character
_, preds_index = preds.max(2)
preds_str = converter.decode(preds_index, length_for_pred)
labels = converter.decode(text_for_loss[:, 1:], length_for_loss)
infer_time += forward_time
valid_loss_avg.add(cost)
# calculate accuracy & confidence score
preds_prob = F.softmax(preds, dim=2)
preds_max_prob, _ = preds_prob.max(dim=2)
confidence_score_list = []
for gt, pred, pred_max_prob in zip(labels, preds_str, preds_max_prob):
gt = gt[:gt.find('[s]')]
pred_EOS = pred.find('[s]')
pred = pred[:pred_EOS] # prune after "end of sentence" token ([s])
pred_max_prob = pred_max_prob[:pred_EOS]
# To evaluate 'case sensitive model' with alphanumeric and case insensitive setting.
if opt.sensitive and opt.data_filtering_off:
pred = pred.lower()
gt = gt.lower()
alphanumeric_case_insensitve = '0123456789abcdefghijklmnopqrstuvwxyz'
out_of_alphanumeric_case_insensitve = f'[^{alphanumeric_case_insensitve}]'
pred = re.sub(out_of_alphanumeric_case_insensitve, '', pred)
gt = re.sub(out_of_alphanumeric_case_insensitve, '', gt)
if pred == gt:
n_correct += 1
'''
(old version) ICDAR2017 DOST Normalized Edit Distance https://rrc.cvc.uab.es/?ch=7&com=tasks
"For each word we calculate the normalized edit distance to the length of the ground truth transcription."
if len(gt) == 0:
norm_ED += 1
else:
norm_ED += edit_distance(pred, gt) / len(gt)
'''
# ICDAR2019 Normalized Edit Distance
if len(gt) == 0 or len(pred) == 0:
norm_ED += 0
elif len(gt) > len(pred):
norm_ED += 1 - edit_distance(pred, gt) / len(gt)
else:
norm_ED += 1 - edit_distance(pred, gt) / len(pred)
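            # Worked example of the ICDAR2019 score: for pred = "he1lo" and
            # gt = "hello", edit_distance is 1 and both strings have length 5,
            # so this sample adds 1 - 1/5 = 0.8 to norm_ED before the final
            # division by length_of_data below.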
# calculate confidence score (= multiply of pred_max_prob)
try:
confidence_score = pred_max_prob.cumprod(dim=0)[-1]
            except Exception:
confidence_score = 0 # for empty pred case, when prune after "end of sentence" token ([s])
confidence_score_list.append(confidence_score)
# print(pred, gt, pred==gt, confidence_score)
accuracy = n_correct / float(length_of_data) * 100
norm_ED = norm_ED / float(length_of_data) # ICDAR2019 Normalized Edit Distance
return valid_loss_avg.val(), accuracy, norm_ED, preds_str, confidence_score_list, labels, infer_time, length_of_data
def test(opt):
""" model configuration """
converter = AttnLabelConverter(opt.character)
opt.num_class = len(converter.character)
if opt.rgb:
opt.input_channel = 3
model = Model(opt)
print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,
opt.hidden_size, opt.num_class, opt.batch_max_length)
model = torch.nn.DataParallel(model).to(device)
# load model
print('loading pretrained model from %s' % opt.saved_model)
model.load_state_dict(torch.load(opt.saved_model, map_location=device))
opt.exp_name = '_'.join(opt.saved_model.split('/')[1:])
# print(model)
""" keep evaluation model and result logs """
os.makedirs(f'./result/{opt.exp_name}', exist_ok=True)
os.system(f'cp {opt.saved_model} ./result/{opt.exp_name}/')
""" setup loss """
criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device) # ignore [GO] token = ignore index 0
""" evaluation """
model.eval()
with torch.no_grad():
if opt.benchmark_all_eval: # evaluation with 10 benchmark evaluation datasets
benchmark_all_eval(model, criterion, converter, opt)
else:
log = open(f'./result/{opt.exp_name}/log_evaluation.txt', 'a')
AlignCollate_evaluation = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
eval_data, eval_data_log = hierarchical_dataset(root=opt.eval_data, opt=opt)
evaluation_loader = torch.utils.data.DataLoader(
eval_data, batch_size=opt.batch_size,
shuffle=False,
num_workers=int(opt.workers),
collate_fn=AlignCollate_evaluation, pin_memory=True)
_, accuracy_by_best_model, _, _, _, _, _, _ = validation(
model, criterion, evaluation_loader, converter, opt)
log.write(eval_data_log)
print(f'{accuracy_by_best_model:0.3f}')
log.write(f'{accuracy_by_best_model:0.3f}\n')
log.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--eval_data', default='../data_lmdb_release/evaluation/', help='path to evaluation dataset')
parser.add_argument('--benchmark_all_eval', default=True, action='store_true', help='evaluate 10 benchmark evaluation datasets')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batch_size', type=int, default=192, help='input batch size')
parser.add_argument('--saved_model', required=True, help="path to saved_model to evaluation")
""" Data processing """
parser.add_argument('--batch_max_length', type=int, default=35, help='maximum-label-length')
parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')
parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')
parser.add_argument('--rgb', action='store_true', help='use rgb input')
parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')
parser.add_argument('--sensitive', action='store_true', default=True, help='for sensitive character mode')
parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')
parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')
""" Model Architecture """
parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
parser.add_argument('--input_channel', type=int, default=1, help='the number of input channel of Feature extractor')
parser.add_argument('--output_channel', type=int, default=512,
help='the number of output channel of Feature extractor')
parser.add_argument('--hidden_size', type=int, default=512, help='the size of the LSTM hidden state')
opt = parser.parse_args()
""" vocab / character number configuration """
if opt.sensitive:
opt.character = string.printable[:-6] # same with ASTER setting (use 94 char).
cudnn.benchmark = True
cudnn.deterministic = True
opt.num_gpu = torch.cuda.device_count()
test(opt)
|
py | 1a499c367d5a65ec9132b51a5e7bd1df345b750e | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import pickle
import time
import warnings
import mmcv
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import init_random_seed, set_random_seed, train_detector
from mmdet.apis.active_learning import (
custom_logic_pretraining,
custom_logic_posttraining,
active_learning_inference,
MAX_IMAGE_HEIGHT,
MAX_IMAGE_WIDTH,
)
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger
def parse_args():
parser = argparse.ArgumentParser(
description='Train a detector for active learning and use it for '
'active learning inference, with the default dataset '
'being `DatumaroV1Dataset`'
)
parser.add_argument(
'train_dataset_dir',
help="""Dataset directory for training. It should have the following
structure
train_dataset_dir/
├── annotations
│ ├── train.json
│ └── val.json
└── images
└── default
├── xxx.jpg
├── ...
└── yyy.jpg
where `train.json` and `val.json` should have already been
processed with
`mmdetection/tools/dataset_converters/datumaro_to_coco.py`.
""",
)
parser.add_argument(
'inference_dataset_dir',
help="Dataset directory for AL inference. To be used with "
"`inference_patterns`",
)
parser.add_argument('work_dir', help='the dir to save logs and models')
parser.add_argument(
'--config',
help='train config file path',
default='configs/_active_learning_/faster_rcnn_r50_fpn_1x_datumaro.py',
)
parser.add_argument(
'--inference_patterns',
type=str,
nargs="+",
default=["*.jpg", "*.png", "*.jpeg"],
help="Search patterns for data. For example, in a image-based task, "
"one should specify ['*.jpg', '*.png', '*.jpeg']",
)
parser.add_argument(
'--max-image-width',
help='Maximum image width',
default=MAX_IMAGE_WIDTH,
)
parser.add_argument(
'--max-image-height',
help='Maximum image height',
default=MAX_IMAGE_HEIGHT,
)
parser.add_argument(
'--no-autoscale-lr',
action="store_true",
help='Whether NOT to auto-scale the learning rate based on the batch '
'size and number of GPUs. By default lr autoscaling is enabled.',
)
parser.add_argument(
'--backbone-path',
        help='Optional path to a pretrained backbone file to initialize the '
             'detector backbone from, if provided.',
required=False,
type=str,
)
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
"""
======================================================================
Basic settings. Largely remain the same as the original `train.py` script.
======================================================================
"""
args = parse_args()
cfg = Config.fromfile(args.config)
orig_batch_size = cfg.data.samples_per_gpu
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
cfg.work_dir = args.work_dir
cfg.auto_resume = False
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
if len(cfg.gpu_ids) > 1:
warnings.warn(
f'We treat {cfg.gpu_ids} as gpu-ids, and reset to '
f'{cfg.gpu_ids[0:1]} as gpu-ids to avoid potential error in '
'non-distribute training time.')
cfg.gpu_ids = cfg.gpu_ids[0:1]
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# re-set gpu_ids with distributed training mode
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
cfg.distributed = distributed
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
meta['config'] = cfg.pretty_text
# set random seeds
seed = init_random_seed(args.seed)
logger.info(f'Set random seed to {seed}, '
f'deterministic: False')
set_random_seed(seed, deterministic=False)
cfg.seed = seed
meta['seed'] = seed
meta['exp_name'] = osp.basename(args.config)
"""
======================================================================
Custom pre-training logic.
======================================================================
"""
# Set custom attributes
custom_logic_pretraining(cfg, args, logger, orig_batch_size)
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
"""
======================================================================
Define model, datasets, etc. then start training.
======================================================================
"""
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
model.init_weights()
datasets = [build_dataset(cfg.data.train)]
logger.info(f"Number of training samples: {len(datasets[0])}")
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=__version__ + get_git_hash()[:7],
CLASSES=datasets[0].CLASSES)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
runner = train_detector(
model,
datasets,
cfg,
distributed=distributed,
validate=True,
timestamp=timestamp,
meta=meta)
"""
======================================================================
Custom post-training logic.
======================================================================
"""
custom_logic_posttraining(runner, cfg, logger)
"""
======================================================================
Active learning inference.
======================================================================
"""
results = active_learning_inference(
cfg=cfg,
model=model,
data_dir=args.inference_dataset_dir,
patterns=args.inference_patterns,
logger=logger,
)
# Consolidate results
mAPs = runner.meta["all_metrics"]["bbox_mAP"]
best_performance = max(mAPs) if len(mAPs) > 0 else -1
cat2label = datasets[0].cat2label
# Sanity checks to make sure that original classes are still preserved
assert all(k == v for k, v in cat2label.items()) # sanity check
assert len(cat2label) == len(datasets[0].CLASSES)
categories = [
{"id": i, "name": cat} for i, cat in enumerate(datasets[0].CLASSES)]
results = {
"images": results,
"model_performance": best_performance,
"classes": categories,
}
# Save
with open(osp.join(cfg.work_dir, "al_inference.pkl"), "wb") as fout:
pickle.dump(results, fout)
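    # Hedged usage sketch: a downstream consumer can reload the dump written
    # above and read the consolidated fields.
    #
    #   with open(osp.join(cfg.work_dir, "al_inference.pkl"), "rb") as fin:
    #       al_results = pickle.load(fin)
    #   al_results["model_performance"]  # best bbox_mAP seen during training (or -1)
    #   al_results["classes"]            # [{"id": ..., "name": ...}, ...]
    #   al_results["images"]             # raw output of active_learning_inference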
if __name__ == '__main__':
main()
|
py | 1a499cd974accc91757fd2e825e6d7fd47c3af13 | """WebSocket-specific events."""
import mitmproxy.http
import mitmproxy.websocket
class Events:
# WebSocket lifecycle
def websocket_handshake(self, flow: mitmproxy.http.HTTPFlow):
"""
Called when a client wants to establish a WebSocket connection. The
WebSocket-specific headers can be manipulated to alter the
handshake. The flow object is guaranteed to have a non-None request
attribute.
"""
def websocket_start(self, flow: mitmproxy.websocket.WebSocketFlow):
"""
A WebSocket connection has commenced.
"""
def websocket_message(self, flow: mitmproxy.websocket.WebSocketFlow):
"""
Called when a WebSocket message is received from the client or
server. The most recent message will be flow.messages[-1]. The
message is user-modifiable. Currently there are two types of
messages, corresponding to the BINARY and TEXT frame types.
"""
def websocket_error(self, flow: mitmproxy.websocket.WebSocketFlow):
"""
A WebSocket connection has had an error.
"""
def websocket_end(self, flow: mitmproxy.websocket.WebSocketFlow):
"""
A WebSocket connection has ended.
"""
|
py | 1a499cf3722069c386d25894d8f6575c3e5fa12b | from invmonInfra.enum import InventoryLastStatusEnum
from invmonService import FirefoxDriverService, HtmlParser, BasicLoggerService
from invmonInfra.base import JobsInventoryBase
from invmonInfra.domain import JobsInventoryInterface, DriverInterface, LoggerInterface
from invmonInfra.models import InventorySqlModel
class JobShopDisneyService(JobsInventoryBase, JobsInventoryInterface):
_logger: LoggerInterface
_driver: DriverInterface
_parser: HtmlParser
_urlPattern: str = '%shopdisney.com%'
    def __init__(self, logger: LoggerInterface, driver: DriverInterface) -> None:
self._logger = logger
self._driver = driver
self._parser = HtmlParser()
self._parser.setLogger(self._logger)
def __checkInventoryStatus__(self, item: InventorySqlModel) -> None:
# Set the URI
self._logger.info(f"Checking '{item.url}'")
self.setUri(item.url)
self._driver.driverGoTo(self.getUri())
self.parser = HtmlParser(sourceCode=self._driver.driverGetContent())
        # validate that the HTML format didn't change
        # Check if 'out of stock' is present
outOfStock = self.__checkIfOutOfStock__(tag='div', key='class', value='product-oos-info-title')
#inStock = self.checkIfInStock()
        if outOfStock:
            # if the lastStatus didn't change, move on
            self._logger.debug("Item is out of stock")
            if item.lastStatus == InventoryLastStatusEnum.OUTOFSTOCK.value:
                self._logger.debug("Inventory Status didn't change, checking the next item.")
                return None
            item.lastStatus = InventoryLastStatusEnum.OUTOFSTOCK.value
        if not outOfStock:
            # if the lastStatus didn't change, move on
            self._logger.debug("Item is in stock!")
            if item.lastStatus == InventoryLastStatusEnum.INSTOCK.value:
                self._logger.debug("Inventory Status didn't change, checking the next item.")
                return None
            item.lastStatus = InventoryLastStatusEnum.INSTOCK.value
self.__updateInventoryRecord__(item)
self.__addAlerts__(item)
    def checkIfInStock(self) -> bool:
        # Hedged sketch: treat a non-empty "add to cart" block as meaning the
        # item is in stock.
        invStatus: str = self.parser.findSingle(
            name='div', attrKey='class', attrValue='col-12 prices-add-to-cart-actions')
        return bool(invStatus)
|
py | 1a499d17b52d92d55567c9ce0541dcc48674fde7 | def funcao():
return 1
variavel = funcao() + 2
print(variavel)
variavel = 1 + 2
print(variavel)
def f(var):
print(var)
def funcao():
return f
variavel = funcao()
variavel('OI')
def f(var):
print(var)
variavel = f
variavel('OI')
def f(var):
print(var)
variavel = f('OI')
#https://pt.stackoverflow.com/q/497714/101
|
py | 1a499d6428e3f5aaf1abf7a4ecf914e23b20b48d | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 4
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_8_0_1.models.reports_scans import ReportsScans # noqa: F401,E501
from isi_sdk_8_0_1.models.reports_scans_report import ReportsScansReport # noqa: F401,E501
class ReportsScansExtended(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'reports': 'list[ReportsScansReport]',
'resume': 'str',
'total': 'int'
}
attribute_map = {
'reports': 'reports',
'resume': 'resume',
'total': 'total'
}
def __init__(self, reports=None, resume=None, total=None): # noqa: E501
"""ReportsScansExtended - a model defined in Swagger""" # noqa: E501
self._reports = None
self._resume = None
self._total = None
self.discriminator = None
if reports is not None:
self.reports = reports
if resume is not None:
self.resume = resume
if total is not None:
self.total = total
@property
def reports(self):
"""Gets the reports of this ReportsScansExtended. # noqa: E501
:return: The reports of this ReportsScansExtended. # noqa: E501
:rtype: list[ReportsScansReport]
"""
return self._reports
@reports.setter
def reports(self, reports):
"""Sets the reports of this ReportsScansExtended.
:param reports: The reports of this ReportsScansExtended. # noqa: E501
:type: list[ReportsScansReport]
"""
self._reports = reports
@property
def resume(self):
"""Gets the resume of this ReportsScansExtended. # noqa: E501
Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options). # noqa: E501
:return: The resume of this ReportsScansExtended. # noqa: E501
:rtype: str
"""
return self._resume
@resume.setter
def resume(self, resume):
"""Sets the resume of this ReportsScansExtended.
Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options). # noqa: E501
:param resume: The resume of this ReportsScansExtended. # noqa: E501
:type: str
"""
self._resume = resume
@property
def total(self):
"""Gets the total of this ReportsScansExtended. # noqa: E501
Total number of items available. # noqa: E501
:return: The total of this ReportsScansExtended. # noqa: E501
:rtype: int
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this ReportsScansExtended.
Total number of items available. # noqa: E501
:param total: The total of this ReportsScansExtended. # noqa: E501
:type: int
"""
self._total = total
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ReportsScansExtended):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a499df6bf851bf9050bcf478028913d4b52ef92 | #!/usr/bin/python3
# all arguments to this script are considered as json files
# and attempted to be formatted alphabetically
import json
import os
from sys import argv
files = argv[1:]
for file in files[:]:
if os.path.isdir(file):
files.remove(file)
for f in os.listdir(file):
files.append(os.path.join(file, f))
for file in files:
if not file.endswith('.json'):
continue
print("formatting file {}".format(file))
with open(file) as f:
j = json.load(f)
if isinstance(j, list):
for item in j:
item["Exposes"] = sorted(item["Exposes"], key=lambda k: k["Type"])
else:
j["Exposes"] = sorted(j["Exposes"], key=lambda k: k["Type"])
with open(file, 'w') as f:
f.write(json.dumps(j, indent=4, sort_keys=True, separators=(',', ': ')))
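# Example of the effect on one file (entry names here are made up): an input
# {"Exposes": [{"Type": "TempSensor"}, {"Type": "ADC"}]} is rewritten with the
# "Exposes" list ordered by "Type" (ADC before TempSensor) and, because of
# sort_keys=True, with all object keys emitted in alphabetical order.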
|
py | 1a499e75a9bf6a0e79bff0331c2d984cb9bc3915 | from aces import Aces
class sub(Aces):
def submit(self):
opt=dict(
units="metal",
species="graphene_knot",
method="nvt",
nodes=1,
procs=4,
queue="q1.4",
            runTime=500000,
            runner="strain",
        )
for T in range(100,300,20):
app=dict(vStrain=True,reverseStrain=True,equTime=200000,T=T,strainStep=1000,minStrain=-0.15,maxStrain=0.05,timestep=.3e-3,latx=70,laty=2)
self.commit(opt,app);
if __name__=='__main__':
sub().run()
|
py | 1a499e8e868d93fca161c0ef4cdacb72cad199fe | #!/usr/bin/python
# Copyright (c) 2014 Wladmir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
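# Rough examples of the conversion above (illustrative only):
#   name_to_ipv6('1.2.3.4')    -> pchIPv4 prefix + bytes [1, 2, 3, 4]
#   name_to_ipv6('0x04030201') -> the same value, since the little-endian hex
#                                 form is reversed back into 1.2.3.4
#   name_to_ipv6('::1')        -> fifteen zero bytes followed by 0x01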
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef H_CHAINPARAMSSEEDS\n')
g.write('#define H_CHAINPARAMSSEEDS\n')
g.write('// List of fixed seed nodes for the bitcoin network\n')
g.write('// AUTOGENERATED by contrib/devtools/generate-seeds.py\n\n')
g.write('// Each line contains a 16-byte IPv6 address and a port.\n')
g.write('// IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 8273)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 8272)
g.write('#endif\n')
if __name__ == '__main__':
main()
|
py | 1a499fd292b0c3b7b2623cc9021e76df0914204c | # Standard library imports
import logging
# Third party imports
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.contrib.admin.utils import NestedObjects
from django.urls import reverse
from django.db import DEFAULT_DB_ALIAS
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from github import Github
# Local application/library imports
from dojo.forms import GITHUBForm, DeleteGITHUBConfForm
from dojo.models import GITHUB_Conf
from dojo.utils import add_breadcrumb
logger = logging.getLogger(__name__)
@csrf_exempt
def webhook(request):
return HttpResponse('')
@user_passes_test(lambda u: u.is_superuser)
def new_github(request):
if request.method == 'POST':
gform = GITHUBForm(request.POST, instance=GITHUB_Conf())
if gform.is_valid():
try:
api_key = gform.cleaned_data.get('api_key')
g = Github(api_key)
user = g.get_user()
logger.debug('Using user ' + user.login)
new_j = gform.save(commit=False)
new_j.api_key = api_key
new_j.save()
messages.add_message(request,
messages.SUCCESS,
'Github Configuration Successfully Created.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('github', ))
except Exception as info:
logger.error(info)
messages.add_message(request,
messages.ERROR,
'Unable to authenticate on github.',
extra_tags='alert-danger')
return HttpResponseRedirect(reverse('github', ))
else:
gform = GITHUBForm()
add_breadcrumb(title="New Github Configuration", top_level=False, request=request)
return render(request, 'dojo/new_github.html',
{'gform': gform})
@user_passes_test(lambda u: u.is_superuser)
def github(request):
confs = GITHUB_Conf.objects.all()
add_breadcrumb(title="Github List", top_level=not len(request.GET), request=request)
return render(request,
'dojo/github.html',
{'confs': confs,
})
@user_passes_test(lambda u: u.is_superuser)
def delete_github(request, tid):
github_instance = get_object_or_404(GITHUB_Conf, pk=tid)
# eng = test.engagement
# TODO Make Form
form = DeleteGITHUBConfForm(instance=github_instance)
if request.method == 'POST':
if 'id' in request.POST and str(github_instance.id) == request.POST['id']:
form = DeleteGITHUBConfForm(request.POST, instance=github_instance)
if form.is_valid():
github_instance.delete()
messages.add_message(request,
messages.SUCCESS,
'Github Conf and relationships removed.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('github'))
collector = NestedObjects(using=DEFAULT_DB_ALIAS)
collector.collect([github_instance])
rels = collector.nested()
add_breadcrumb(title="Delete", top_level=False, request=request)
return render(request, 'dojo/delete_github.html',
{'inst': github_instance,
'form': form,
'rels': rels,
'deletable_objects': rels,
})
|
py | 1a49a0e46cf8c3dd5eb7985eb05adf32f838dfae | from hallo.function_dispatcher import FunctionDispatcher
from hallo.hallo import Hallo
def test_fd_load_order(hallo_getter):
test_hallo = hallo_getter({})
# Create a blank function dispatcher
fd = FunctionDispatcher(set(), test_hallo)
try:
# Add modules to allowed list
fd.module_list = {"euler", "math"}
# Load up Euler module, ensure no other modules load.
assert fd.reload_module("euler")
assert len(fd.function_dict) == 1
# Load second module, ensure all methods are there.
assert fd.reload_module("math")
assert len(fd.function_dict) == 2
finally:
fd.close()
def test_fd_disallowed_module(hallo_getter):
test_hallo = hallo_getter({})
# Create a blank function dispatcher
fd = FunctionDispatcher(set(), test_hallo)
try:
# Try and load a module
assert not fd.reload_module("euler")
finally:
fd.close()
def test_init():
# Create some basic stuff
test_modules = {"euler"}
test_hallo = Hallo()
# Create function dispatcher
fd = FunctionDispatcher(test_modules, test_hallo)
test_hallo.function_dispatcher = fd
try:
# Check basic class variable setting
assert (
fd.hallo == test_hallo
), "Hallo object was not set correctly in FunctionDispatcher."
assert (
fd.module_list == test_modules
), "Module list was not imported correctly by FunctionDispatcher."
# Check that module reloading has done things
assert len(fd.function_dict) == len(
test_modules
), "Modules were not loaded to function dictionary."
assert len(fd.function_names) != 0, "Functions were not added to function_names"
finally:
fd.close()
test_hallo.close()
def test_open_close(hallo_getter):
test_hallo = hallo_getter({})
# Set up
test_module = "euler"
test_modules = {test_module}
# Create function dispatcher
fd = FunctionDispatcher(test_modules, test_hallo)
try:
# Check test module is loaded
assert len(fd.function_dict) == len(test_modules)
assert len(fd.function_names) > 0
finally:
# Close function dispatcher
fd.close()
# Check test module unloaded
assert len(fd.function_dict) == 0
assert len(fd.function_names) == 0
# TODO: write tests for each method:
# Test init loads function from xml, test close saves it
# dispatch
# dispatch_passive
# get_function_by_name
# get_function_class_list
# get_function_object
# check_function_permissions
# reload_module
# _reload
# unload_module_functions
# check_function_class
# load_function
# unload_function
# close
|
py | 1a49a15f5fc2d3b36830f7702741e40dd0a72eff | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""Operations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2019_05_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs
) -> AsyncIterable["models.OperationListResult"]:
"""Lists all of the available Azure Container Registry REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2019_05_01_preview.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('OperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.ContainerRegistry/operations'} # type: ignore
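# Hedged usage sketch (client construction is assumed and not shown here): the
# pager returned by Operations.list supports asynchronous iteration.
#
#   async def print_operations(client):
#       async for op in client.operations.list():
#           print(op.name)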
|
py | 1a49a192f99f4bdb64321a6906c7332cdb03c93c | # -*- coding: utf-8 -*-
"""
This module contained backports to support older Python versions.
Their usage is deprecated and this module could be dropped soon.
"""
#
# (C) Pywikibot team, 2014-2018
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
from difflib import _format_range_unified
import logging
from pywikibot.tools import deprecated
@deprecated('difflib._format_range_unified', since='20160111')
def format_range_unified(start, stop):
"""
Convert range to the "ed" format.
DEPRECATED (Python 2.6 backport).
Use difflib._format_range_unified instead.
"""
return _format_range_unified(start, stop)
@deprecated('logging.NullHandler', since='20160111')
class NullHandler(logging.NullHandler):
"""This handler does nothing."""
pass
@deprecated('logging.captureWarnings', since='20160111')
def captureWarnings(capture):
"""
Capture warnings into logging.
DEPRECATED (Python 2.6 backport).
Use logging.captureWarnings instead.
"""
logging.captureWarnings(capture)
|
py | 1a49a1b2530a3095fba94e89274a1a4827dbe492 | import cv2
import numpy as np
from .utils import load_json, load_value_file
def get_video_names_and_annotations(data, subset):
"""Selects clips of a given subset from the parsed json annotation"""
video_names = []
annotations = []
for key, value in data['database'].items():
this_subset = value['subset']
if this_subset == subset:
video_name = key
label = value['annotations'].get('label', '')
if label:
video_name = label + '/' + video_name
video_names.append(video_name)
annotations.append(value)
return video_names, annotations
def get_video_props(video_path, video_format, annotation):
"""Tries to read video properties (total number of frames and FPS) from annotation
file or read it from file otherwise"""
n_frames = annotation.get('n_frames')
fps = annotation.get('fps')
if n_frames and fps:
return n_frames, fps
if video_format == 'frames':
if not video_path.exists():
return 0, 0
n_frames = int(load_value_file(video_path / 'n_frames'))
fps = 30
else:
cap = cv2.VideoCapture(video_path.as_posix())
n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = cap.get(cv2.CAP_PROP_FPS)
return n_frames, fps
def load_json_annotation(root_path, annotation_path, subset, flow_path=None, video_format='frames'):
"""Load annotation in ActivityNet-like format"""
data = load_json(annotation_path)
video_names, annotations = get_video_names_and_annotations(data, subset)
idx_to_class = dict(enumerate(data['labels']))
class_to_idx = {v: k for k, v in idx_to_class.items()}
videos = []
for i, (video_name, annotation) in enumerate(zip(video_names, annotations)):
if i % 1000 == 0:
print('dataset loading [{}/{}]'.format(i, len(video_names)))
if video_format == 'video' and not video_name.lower().endswith('.mp4'):
video_name += '.mp4'
video_path = root_path / video_name
n_frames, fps = get_video_props(video_path, video_format, annotation)
if n_frames == 0:
continue
flow_full_path = flow_path
if flow_path is not None:
flow_full_path = (flow_path / video_name).as_posix()
try:
video_id = video_name.split('/')[1]
except IndexError:
video_id = video_name
def add_sample(begin_frame, end_frame, label):
sample = {
'video': video_path.as_posix(),
'flow': flow_full_path,
'segment': [begin_frame, end_frame],
'n_frames': n_frames,
'fps': fps,
'video_id': video_id,
'label': class_to_idx[label]
}
videos.append(sample)
video_annotation = annotation['annotations']
events_annotation = video_annotation.get('events', None)
if events_annotation is not None:
for event in events_annotation:
begin_time = float(event['start'])
end_time = float(event['stop'])
label = event['event']
assert label in class_to_idx
# From time to frame number.
timestamps = video_annotation['timestamps']
begin_frame, end_frame = np.searchsorted(timestamps, [begin_time, end_time])
# Frame indices are one-based.
begin_frame += 1
if begin_frame < end_frame:
add_sample(begin_frame, end_frame, label)
else:
begin_frame = 1
end_frame = n_frames
add_sample(begin_frame, end_frame, annotation['annotations']['label'])
return videos, idx_to_class
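# Hedged usage sketch; the paths below are placeholders and must be
# pathlib.Path objects because they are joined with the "/" operator above.
#
#   from pathlib import Path
#   videos, idx_to_class = load_json_annotation(
#       Path('/data/videos'), Path('/data/annotation.json'),
#       subset='training', video_format='video')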
|
py | 1a49a1e186f5552a9ffbcf3c421d55e947acae45 |
import smtplib
import typing
import flask
import flask_mail
from ... import mail
from . import core
from ...models import BackgroundTask, BackgroundTaskStatus
def post_send_mail_task(
subject: str,
recipients: typing.List[str],
text: str,
html: str,
auto_delete: bool = True
) -> typing.Tuple[BackgroundTaskStatus, typing.Optional[BackgroundTask]]:
return core.post_background_task(
type='send_mail',
data={
'subject': subject,
'recipients': recipients,
'text': text,
'html': html
},
auto_delete=auto_delete
)
def handle_send_mail_task(
data: typing.Dict[str, typing.Any]
) -> bool:
try:
mail.send(flask_mail.Message(
subject=data['subject'],
sender=flask.current_app.config['MAIL_SENDER'],
recipients=data['recipients'],
body=data['text'],
html=data['html']
))
return True
except smtplib.SMTPRecipientsRefused:
return False
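# Hedged usage sketch; the recipient and message bodies are placeholders.
#
#   status, task = post_send_mail_task(
#       subject='Export finished',
#       recipients=['[email protected]'],
#       text='Your export is ready.',
#       html='<p>Your export is ready.</p>')
#   # status is a BackgroundTaskStatus; task is None when no persistent
#   # background task object was created.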
|