ext (string, 9 classes) | sha (string, length 40) | content (string, 3–1.04M chars)
---|---|---|
py | b40079c7a889306b91366089e05681b394bfe348 | # Classify half-moon data with a single-layer perceptron
import numpy as np
import matplotlib.pyplot as plt
def sgn(y):
y[y > 0] = 1
y[y < 0] = -1
return y
class Perceptron(object):
'''Single-layer perceptron
'''
def __init__(self, shape):
super(Perceptron, self).__init__()
self.w = np.ones(shape) # weight
self.b = 1.5 # the bias
self.activate_func = sgn
def update(self,x,y,out,learning_rate):
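# Perceptron delta rule: nudge the weights along the input direction by the prediction error (y - out), scaled by the learning rate; the bias is kept fixed here.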
self.w += learning_rate * x.T * (y - out)
def calculate(self, x):
return self.activate_func(np.dot(self.w, x.T) + self.b)
def loss_func(self, pre_y, gt_y):
return (pre_y - gt_y) ** 2
def train(self, x, y, epochs, learning_rate):
losses = []
for epoch in range(epochs):
loss_tmp = []
for i in range(x.shape[0]):
out = self.calculate(x[i])
loss_tmp.append(self.loss_func(out, y[i]))
self.update(x[i], y[i], out, learning_rate)
losses.append(sum(loss_tmp)/len(loss_tmp))
return losses
def predict(self, x):
out = self.calculate(x)
return out
def test(self, x,y):
label = self.predict(x)
gt_count = np.sum(label==y)
wrong_count = np.sum(label!=y)
return wrong_count/(wrong_count+gt_count),gt_count/(wrong_count+gt_count)
def get_params(self):
return {'weight':self.w, 'bias':self.b}
def draw(self):
axis = [i for i in range(1000)]
out = [self.w * i + self.b for i in axis]
plt.plot(axis, out)
plt.show()
def load_data(file):
x = []
y = []
with open(file, 'r') as f:
lines = f.readlines()
for line in lines:
line = line.strip().split(',')
x_item = [float(line[0]), float(line[1])]
y_item = float(line[2])
x.append(x_item)
y.append(y_item)
return np.array(x), np.array(y)
def split_data(x, y):
train_x, test_x = x[:int(x.shape[0]*0.7)], x[int(x.shape[0]*0.7):]
train_y, test_y = y[:int(y.shape[0]*0.7)], y[int(y.shape[0]*0.7):]
return train_x, train_y, test_x, test_y
if __name__ == '__main__':
# For the nonlinear-data classification experiment, just shrink the gap between the two half-moons so that they overlap
desc = 'nonlinear'
file = './halfmoon.txt'
x,y = load_data(file)
train_x, train_y, test_x, test_y = split_data(x, y)
neur = Perceptron((1,2))
losses = neur.train(train_x,train_y,100, 0.0001)
err,acc = neur.test(test_x, test_y)
print('rate of error:', err)
print('rate of accuracy:', acc)
# Plot the loss curve
axis = [i for i in range(len(losses))]
plt.figure()
plt.plot(axis, losses)
plt.savefig('../imgs/%s_mse_loss.png' % desc)
#plt.show()
# Plot the decision boundary
x_axis = x[:,0]
y_axis = x[:,1]
neg_x_axis = x_axis[y==-1]
neg_y_axis = y_axis[y==-1]
pos_x_axis = x_axis[y==1]
pos_y_axis = y_axis[y==1]
# Perceptron parameters
params = neur.get_params()
w = params['weight']
b = params['bias']
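# Decision boundary w0*x + w1*y + b = 0, rewritten as y = -(w0/w1)*x - b/w1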
k = -1 * w[0][0] / w[0][1]
b = -1 * b / w[0][1]
divid_x = [i for i in range(-15,25)]
divid_y = [k * i + b for i in divid_x]
plt.figure()
plt.plot(divid_x, divid_y, c='r')
plt.scatter(neg_x_axis,neg_y_axis,c="b",s=10)
plt.scatter(pos_x_axis,pos_y_axis,c="g",s=10)
plt.savefig('../imgs/%s_divide.png' % desc) # Save the decision boundary figure |
py | b40079f992333798fb45a11005322ba5427c633e | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Implementation of Blobstore stub storage based on a dict.
Contains implementation of blobstore_stub.BlobStorage that writes
blobs directly to a directory stored in memory.
"""
import StringIO
from google.appengine.api import blobstore
from google.appengine.api.blobstore import blobstore_stub
class DictBlobStorage(blobstore_stub.BlobStorage):
"""Simply stores blobs in a dict."""
def __init__(self):
"""Constructor."""
self._blobs = {}
def StoreBlob(self, blob_key, blob_stream):
"""Store blob stream."""
content = StringIO.StringIO()
try:
while True:
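# Copy the stream in 1 MiB (1 << 20 byte) chunks until it is exhausted.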
block = blob_stream.read(1 << 20)
if not block:
break
content.write(block)
self.CreateBlob(blob_key, content.getvalue())
finally:
content.close()
def CreateBlob(self, blob_key, blob):
"""Store blob in map."""
self._blobs[blobstore.BlobKey(unicode(blob_key))] = blob
def OpenBlob(self, blob_key):
"""Get blob contents as stream."""
return StringIO.StringIO(
self._blobs[blobstore.BlobKey(unicode(blob_key))])
def DeleteBlob(self, blob_key):
"""Delete blob content."""
try:
del self._blobs[blobstore.BlobKey(unicode(blob_key))]
except KeyError:
pass
|
py | b4007b3a6f74587dbffb3275a32ae40469fa5c70 | # pylint: disable=C0302,bare-except,print-statement
"""pylint option block-disable"""
__revision__ = None
class Foo(object):
"""block-disable test"""
def __init__(self):
pass
def meth1(self, arg):
"""this issues a message"""
print(self)
def meth2(self, arg):
"""and this one not"""
# pylint: disable=W0613
print(self\
+ "foo")
def meth3(self):
"""test one line disabling"""
# no error
print(self.bla) # pylint: disable=E1101
# error
print(self.blop)
def meth4(self):
"""test re-enabling"""
# pylint: disable=E1101
# no error
print(self.bla)
print(self.blop)
# pylint: enable=E1101
# error
print(self.blip)
def meth5(self):
"""test IF sub-block re-enabling"""
# pylint: disable=E1101
# no error
print(self.bla)
if self.blop:
# pylint: enable=E1101
# error
print(self.blip)
else:
# no error
print(self.blip)
# no error
print(self.blip)
def meth6(self):
"""test TRY/EXCEPT sub-block re-enabling"""
# pylint: disable=E1101
# no error
print(self.bla)
try:
# pylint: enable=E1101
# error
print(self.blip)
except UndefinedName: # pylint: disable=E0602
# no error
print(self.blip)
# no error
print(self.blip)
def meth7(self):
"""test one line block opening disabling"""
if self.blop: # pylint: disable=E1101
# error
print(self.blip)
else:
# error
print(self.blip)
# error
print(self.blip)
def meth8(self):
"""test late disabling"""
# error
print(self.blip)
# pylint: disable=E1101
# no error
print(self.bla)
print(self.blop)
def meth9(self):
"""test re-enabling right after a block with whitespace"""
eris = 5
if eris:
print("In block")
# pylint: disable=E1101
# no error
print(self.bla)
print(self.blu)
# pylint: enable=E1101
# error
print(self.blip)
def meth10(self):
"""Test double disable"""
# pylint: disable=E1101
# no error
print(self.bla)
# pylint: disable=E1101
print(self.blu)
class ClassLevelMessage(object):
"""shouldn't display to much attributes/not enough methods messages
"""
# pylint: disable=R0902,R0903
def __init__(self):
self.attr1 = 1
self.attr2 = 1
self.attr3 = 1
self.attr4 = 1
self.attr5 = 1
self.attr6 = 1
self.attr7 = 1
self.attr8 = 1
self.attr9 = 1
self.attr0 = 1
def too_complex_but_thats_ok(self, attr1, attr2):
"""THIS Method has too much branches and returns but i don't care
"""
# pylint: disable=R0912,R0911
try:
attr3 = attr1+attr2
except ValueError:
attr3 = None
except:
return 'duh', self
if attr1:
for i in attr1:
if attr2:
return i
else:
return 'duh'
elif attr2:
for i in attr2:
if attr2:
return i
else:
return 'duh'
else:
for i in range(15):
if attr3:
return i
else:
return 'doh'
return None
print('hop, too many lines but i don\'t care')
|
py | b4007b535b76f81553b0959d35bffe15c08d3246 | import requests, re, json
class facebookSearchTool:
def searchFacebook(self, nom):
url = "https://www.facebook.com/public/%s"
name = nom.replace(" ","%20")
try:
page = requests.get(url % (name)).content.decode('utf-8')
except:
print(warning+" Aucun résultat.")
data = page
urlsAccount = re.findall('http[s]?://www.facebook.com/(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', data)
# nameAccount = re.findall("width=\"100\" height=\"100\" alt=\"([a-zA-Z0-9_ é , ]+)", data)
nameAccount = re.findall("width=\"72\" height=\"72\" alt=\"([a-zA-Z0-9_ é , ]+)\" />", data)
# print(nameAccount)
urlList = []
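# Drop duplicate profile URLs (note: list.remove inside the loop only strips one duplicate per pass).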
for nbr in urlsAccount:
c = urlsAccount.count(nbr)
if c > 1:
urlsAccount.remove(nbr)
for x in urlsAccount:
if x.endswith("s"):
urlsAccount.remove(x)
for u in urlsAccount:
if "/public/" in u or "/login.php" in u or "/recover" in u or "/help/" in u:
pass
elif "directory/pages_variations/" in u:
pass
elif "login/" in u:
pass
elif "&" in u:
pass
elif "/pages/" in u:
pass
else:
urlList.append(u)
usersAccount = []
accountsFound = []
for url in urlList:
try:
url = url.replace("https://www.facebook.com/", '')
c = url.count("/")
if c == 1:
pass # a URL with "/" twice means it is a page, not a user profile.
else:
usersAccount.append(url)
except:
pass
regroup = zip(usersAccount, nameAccount)
return(regroup)
def getInfoProfile(self, profile):
if not "http" in profile:
url = "https://www.facebook.com/"+profile
username = profile
else:
url = profile
username = profile.split("/")
username = [i for i in username if i != ''][-1]
try:
page = requests.get(url).content.decode('utf-8')
findId = re.search(r"entity_id=([0-9]+)", page).group(0)
if findId:
facebookID = findId.replace("entity_id=", '')
else:
facebookID = None
except:
facebookID = None
try:
jsonData = re.findall(r"type=\"application/ld\+json\">(.*?)</script>", page)[0]
values = json.loads(jsonData)
list_affiliation = []
name = values['name']
if "jobTitle" in values.keys():
job = values['jobTitle']
else:
job = None
if "address" in values.keys():
address = values['address']['addressLocality']
else:
address = None
affiliationsName = values['affiliation']
count_affiliation = len(affiliationsName)
x = 0
while x < int(count_affiliation):
nameAffiliation = affiliationsName[x]['name']
list_affiliation.append(nameAffiliation)
x +=1
self.facebookId = facebookID
self.name = name
self.profile = url
self.username = username
self.job = job
self.address = address
self.affiliations = list_affiliation
except:
self.facebookId = None
self.name = None
self.profile = None
self.username = None
self.job = None
self.address = None
self.affiliations = None
# name = re.search(r'pageTitle\">(.*)</title>', page).group(0)
# if name:
# name = name.replace("pageTitle\">", '').replace("| Facebook</title>", '')
# self.name = name
# else:
# self.name = "None"
# works = re.findall(r"<div class=\"_2lzr _50f5 _50f7\"><a href=\"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+\">([a-zA-Z0-9_ - à é è ê ù ç ô ò û]+)", page)
# if works:
# self.work = works
# else:
# self.work = "None"
# locations = re.findall(u"<span class=\"_2iel _50f7\"><a href=\"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+\">([a-zA-Z0-9_ - à é è ê ù ç ô ò û]+)", page)
# if locations:
# self.location = locations
# else:
# self.location = "None"
# img = re.findall(r"<img class=\"_11kf img\" alt=\".*\" src=\"(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)\"", page)
# if img:
# img = img[0].replace("amp;", "")
# self.img = img
# else:
# self.img = None
def searchPageLiked(self, profile):
if not "http" in profile:
profile = "https://www.facebook.com/"+profile
nom = profile.replace("https://www.facebook.com/", '')
page = requests.get(profile).content.decode('utf-8')
urlsPages = re.findall('http[s]?://www.facebook.com/(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', page)
for nbr in urlsPages:
c = urlsPages.count(nbr)
if c > 1:
urlsPages.remove(nbr)
pagesLiked = []
for url in urlsPages:
if "/public/" in url or "/login.php" in url or "/recover" in url or "/help/" in url:
pass
else:
if nom in url:
pass
else:
pagesLiked.append(url)
return(pagesLiked) |
py | b4007c8d7440fb31d31d8a7cb14968f096712565 | """
Support for AirVisual air quality sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.airvisual/
"""
from logging import getLogger
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION, ATTR_LATITUDE, ATTR_LONGITUDE, CONF_API_KEY,
CONF_LATITUDE, CONF_LONGITUDE, CONF_MONITORED_CONDITIONS, CONF_STATE,
CONF_SHOW_ON_MAP, CONF_RADIUS)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
REQUIREMENTS = ['pyairvisual==1.0.0']
_LOGGER = getLogger(__name__)
ATTR_CITY = 'city'
ATTR_COUNTRY = 'country'
ATTR_POLLUTANT_SYMBOL = 'pollutant_symbol'
ATTR_POLLUTANT_UNIT = 'pollutant_unit'
ATTR_REGION = 'region'
CONF_CITY = 'city'
CONF_COUNTRY = 'country'
CONF_ATTRIBUTION = "Data provided by AirVisual"
MASS_PARTS_PER_MILLION = 'ppm'
MASS_PARTS_PER_BILLION = 'ppb'
VOLUME_MICROGRAMS_PER_CUBIC_METER = 'µg/m3'
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=10)
SENSOR_TYPES = [
('AirPollutionLevelSensor', 'Air Pollution Level', 'mdi:scale'),
('AirQualityIndexSensor', 'Air Quality Index', 'mdi:format-list-numbers'),
('MainPollutantSensor', 'Main Pollutant', 'mdi:chemical-weapon'),
]
POLLUTANT_LEVEL_MAPPING = [
{'label': 'Good', 'minimum': 0, 'maximum': 50},
{'label': 'Moderate', 'minimum': 51, 'maximum': 100},
{'label': 'Unhealthy for sensitive group', 'minimum': 101, 'maximum': 150},
{'label': 'Unhealthy', 'minimum': 151, 'maximum': 200},
{'label': 'Very Unhealthy', 'minimum': 201, 'maximum': 300},
{'label': 'Hazardous', 'minimum': 301, 'maximum': 10000}
]
POLLUTANT_MAPPING = {
'co': {'label': 'Carbon Monoxide', 'unit': MASS_PARTS_PER_MILLION},
'n2': {'label': 'Nitrogen Dioxide', 'unit': MASS_PARTS_PER_BILLION},
'o3': {'label': 'Ozone', 'unit': MASS_PARTS_PER_BILLION},
'p1': {'label': 'PM10', 'unit': VOLUME_MICROGRAMS_PER_CUBIC_METER},
'p2': {'label': 'PM2.5', 'unit': VOLUME_MICROGRAMS_PER_CUBIC_METER},
's2': {'label': 'Sulfur Dioxide', 'unit': MASS_PARTS_PER_BILLION},
}
SENSOR_LOCALES = {'cn': 'Chinese', 'us': 'U.S.'}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_MONITORED_CONDITIONS):
vol.All(cv.ensure_list, [vol.In(SENSOR_LOCALES)]),
vol.Optional(CONF_CITY): cv.string,
vol.Optional(CONF_COUNTRY): cv.string,
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_RADIUS, default=1000): cv.positive_int,
vol.Optional(CONF_SHOW_ON_MAP, default=True): cv.boolean,
vol.Optional(CONF_STATE): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Configure the platform and add the sensors."""
from pyairvisual import Client
classes = {
'AirPollutionLevelSensor': AirPollutionLevelSensor,
'AirQualityIndexSensor': AirQualityIndexSensor,
'MainPollutantSensor': MainPollutantSensor
}
api_key = config.get(CONF_API_KEY)
monitored_locales = config.get(CONF_MONITORED_CONDITIONS)
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
radius = config.get(CONF_RADIUS)
city = config.get(CONF_CITY)
state = config.get(CONF_STATE)
country = config.get(CONF_COUNTRY)
show_on_map = config.get(CONF_SHOW_ON_MAP)
if city and state and country:
_LOGGER.debug(
"Using city, state, and country: %s, %s, %s", city, state, country)
location_id = ','.join((city, state, country))
data = AirVisualData(
Client(api_key), city=city, state=state, country=country,
show_on_map=show_on_map)
else:
_LOGGER.debug(
"Using latitude and longitude: %s, %s", latitude, longitude)
location_id = ','.join((str(latitude), str(longitude)))
data = AirVisualData(
Client(api_key), latitude=latitude, longitude=longitude,
radius=radius, show_on_map=show_on_map)
data.update()
sensors = []
for locale in monitored_locales:
for sensor_class, name, icon in SENSOR_TYPES:
sensors.append(classes[sensor_class](
data,
name,
icon,
locale,
location_id
))
add_devices(sensors, True)
class AirVisualBaseSensor(Entity):
"""Define a base class for all of our sensors."""
def __init__(self, data, name, icon, locale, entity_id):
"""Initialize the sensor."""
self.data = data
self._attrs = {}
self._icon = icon
self._locale = locale
self._name = name
self._state = None
self._entity_id = entity_id
self._unit = None
@property
def device_state_attributes(self):
"""Return the device state attributes."""
self._attrs.update({
ATTR_ATTRIBUTION: CONF_ATTRIBUTION,
})
if self.data.show_on_map:
self._attrs[ATTR_LATITUDE] = self.data.latitude
self._attrs[ATTR_LONGITUDE] = self.data.longitude
else:
self._attrs['lati'] = self.data.latitude
self._attrs['long'] = self.data.longitude
return self._attrs
@property
def icon(self):
"""Return the icon."""
return self._icon
@property
def name(self):
"""Return the name."""
return '{0} {1}'.format(SENSOR_LOCALES[self._locale], self._name)
@property
def state(self):
"""Return the state."""
return self._state
class AirPollutionLevelSensor(AirVisualBaseSensor):
"""Define a sensor to measure air pollution level."""
@property
def unique_id(self):
"""Return a unique, HASS-friendly identifier for this entity."""
return '{0}_pollution_level'.format(self._entity_id)
def update(self):
"""Update the status of the sensor."""
self.data.update()
aqi = self.data.pollution_info.get('aqi{0}'.format(self._locale))
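# Find the pollution bracket whose [minimum, maximum] range contains the AQI; a missing AQI or no match falls through to None.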
try:
[level] = [
i for i in POLLUTANT_LEVEL_MAPPING
if i['minimum'] <= aqi <= i['maximum']
]
self._state = level.get('label')
except TypeError:
self._state = None
except ValueError:
self._state = None
class AirQualityIndexSensor(AirVisualBaseSensor):
"""Define a sensor to measure AQI."""
@property
def unique_id(self):
"""Return a unique, HASS-friendly identifier for this entity."""
return '{0}_aqi'.format(self._entity_id)
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return 'AQI'
def update(self):
"""Update the status of the sensor."""
self.data.update()
self._state = self.data.pollution_info.get(
'aqi{0}'.format(self._locale))
class MainPollutantSensor(AirVisualBaseSensor):
"""Define a sensor to the main pollutant of an area."""
def __init__(self, data, name, icon, locale, entity_id):
"""Initialize the sensor."""
super().__init__(data, name, icon, locale, entity_id)
self._symbol = None
self._unit = None
@property
def unique_id(self):
"""Return a unique, HASS-friendly identifier for this entity."""
return '{0}_main_pollutant'.format(self._entity_id)
def update(self):
"""Update the status of the sensor."""
self.data.update()
symbol = self.data.pollution_info.get('main{0}'.format(self._locale))
pollution_info = POLLUTANT_MAPPING.get(symbol, {})
self._state = pollution_info.get('label')
self._unit = pollution_info.get('unit')
self._symbol = symbol
self._attrs.update({
ATTR_POLLUTANT_SYMBOL: self._symbol,
ATTR_POLLUTANT_UNIT: self._unit
})
class AirVisualData(object):
"""Define an object to hold sensor data."""
def __init__(self, client, **kwargs):
"""Initialize the AirVisual data element."""
self._client = client
self.attrs = {}
self.pollution_info = None
self.city = kwargs.get(CONF_CITY)
self.state = kwargs.get(CONF_STATE)
self.country = kwargs.get(CONF_COUNTRY)
self.latitude = kwargs.get(CONF_LATITUDE)
self.longitude = kwargs.get(CONF_LONGITUDE)
self._radius = kwargs.get(CONF_RADIUS)
self.show_on_map = kwargs.get(CONF_SHOW_ON_MAP)
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update with new AirVisual data."""
from pyairvisual.exceptions import HTTPError
try:
if self.city and self.state and self.country:
resp = self._client.city(
self.city, self.state, self.country).get('data')
self.longitude, self.latitude = resp.get('location').get(
'coordinates')
else:
resp = self._client.nearest_city(
self.latitude, self.longitude, self._radius).get('data')
_LOGGER.debug("New data retrieved: %s", resp)
self.pollution_info = resp.get('current', {}).get('pollution', {})
self.attrs = {
ATTR_CITY: resp.get('city'),
ATTR_REGION: resp.get('state'),
ATTR_COUNTRY: resp.get('country')
}
except HTTPError as exc_info:
_LOGGER.error("Unable to retrieve data on this location: %s",
self.__dict__)
_LOGGER.debug(exc_info)
self.pollution_info = {}
|
py | b4007e3d50401cfb71022ad7d700ecfe36c2a076 | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 7, transform = "Anscombe", sigma = 0.0, exog_count = 20, ar_order = 0); |
py | b4007e709857a4d0b5b89584b76b95eb3391afec | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from enum import Enum
class WorkOrderStatus(Enum):
PLANNED = "PLANNED"
IN_PROGRESS = "IN_PROGRESS"
PENDING = "PENDING"
SUBMITTED = "SUBMITTED"
CLOSED = "CLOSED"
DONE = "DONE"
BLOCKED = "BLOCKED"
CANCELED = "CANCELED"
SUSPENDED = "SUSPENDED"
MISSING_ENUM = ""
@classmethod
def _missing_(cls, value: object) -> "WorkOrderStatus":
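# Unknown status strings fall back to MISSING_ENUM instead of raising ValueError.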
return cls.MISSING_ENUM
|
py | b4007ef7c9f9c63e369c6141e7578855688a9f61 | import random
class Voter:
def __init__(self, opinion):
self.__opinion = opinion
self.__friends = []
def get_friends(self):
return self.__friends
def get_opinion(self):
return self.__opinion
def push_friend(self, friend):
self.__friends.append(friend)
class UserDict:
def __init__(self):
self.dict = {}
def push_usr(self, uid):
if uid in self.dict:
return self.dict[uid]
else:
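# New voter gets a random opinion drawn uniformly from {-1, 0, 1}.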
self.dict[uid] = Voter(int((random.random() * 3 + 1) / 1) - 2)
return self.dict[uid]
def parser_main(fname = './facebook/0.edges', directed = False):
udict = UserDict()
func = lambda x, y: x.push_friend(y)
print('Loading ' + fname + ' file...')
with open(fname, 'r') as fin:
for line in fin:
nod_from, nod_to = map(udict.push_usr, map(int,line.strip().split(' ')))
func(nod_from, nod_to)
if not directed: func(nod_to, nod_from)
print('Edges are successfully loaded!')
return udict |
py | b4007f97f623e498bbbc9a886559bb0fc6929c4f | from __future__ import absolute_import
import sys
import unittest
from testutils import ADMIN_CLIENT, CHART_API_CLIENT, TEARDOWN, harbor_server, harbor_url, suppress_urllib3_warning
from testutils import created_user, created_project
from library.user import User
from library.project import Project
from library.robot import Robot
from library.repository import Repository
from library.artifact import Artifact
from library.repository import pull_harbor_image
from library.repository import push_self_build_image_to_project
from library.base import _assert_status_code
from library.scan import Scan
from library.label import Label
from library.chart import Chart
import library.helm
import base
import v2_swagger_client
class TestRobotAccount(unittest.TestCase):
@suppress_urllib3_warning
def setUp(self):
self.project = Project()
self.user = User()
self.repo = Repository()
self.artifact = Artifact()
self.robot = Robot()
self.scan = Scan()
self.label = Label()
self.chart= Chart()
TestRobotAccount.url = ADMIN_CLIENT["endpoint"]
TestRobotAccount.user_ra_password = "Aa123456"
print("setup")
@unittest.skipIf(TEARDOWN == True, "Test data won't be erased.")
def do_01_tearDown(self):
#1. Delete repository(RA) by user(UA);
self.repo.delete_repository(self.project_ra_name_a, self.repo_name_in_project_a.split('/')[1], **self.USER_RA_CLIENT)
self.repo.delete_repository(self.project_ra_name_b, self.repo_name_in_project_b.split('/')[1], **self.USER_RA_CLIENT)
self.repo.delete_repository(self.project_ra_name_c, self.repo_name_in_project_c.split('/')[1], **self.USER_RA_CLIENT)
self.repo.delete_repository(self.project_ra_name_a, self.repo_name_pa.split('/')[1], **self.USER_RA_CLIENT)
#2. Delete project(PA);
self.project.delete_project(self.project_ra_id_a, **self.USER_RA_CLIENT)
self.project.delete_project(self.project_ra_id_b, **self.USER_RA_CLIENT)
self.project.delete_project(self.project_ra_id_c, **self.USER_RA_CLIENT)
#3. Delete user(UA).
self.user.delete_user(self.user_ra_id, **ADMIN_CLIENT)
def test_01_ProjectlevelRobotAccount(self):
"""
Test case:
Robot Account
Test step and expected result:
1. Create user(UA);
2. Create private project(PA), private project(PB) and public project(PC) by user(UA);
3. Push image(ImagePA) to project(PA), image(ImagePB) to project(PB) and image(ImagePC) to project(PC) by user(UA);
4. Create a new robot account(RA) with pull and push privileges in project(PA) by user(UA);
5. Check robot account info, it should have both pull and push privileges;
6. Pull image(ImagePA) from project(PA) by robot account(RA), it must be successful;
7. Push image(ImageRA) to project(PA) by robot account(RA), it must be successful;
8. Push image(ImageRA) to project(PB) by robot account(RA), it must not be successful;
9. Pull image(ImagePB) from project(PB) by robot account(RA), it must not be successful;
10. Pull image from project(PC), it must be successful;
11. Push image(ImageRA) to project(PC) by robot account(RA), it must not be successful;
12. Update action property of robot account(RA);
13. Pull image(ImagePA) from project(PA) by robot account(RA), it must not be successful;
14. Push image(ImageRA) to project(PA) by robot account(RA), it must not be successful;
15. Delete robot account(RA), it must not be successful.
Tear down:
1. Delete repository(RA) by user(UA);
2. Delete project(PA);
3. Delete user(UA).
"""
image_project_a = "haproxy"
image_project_b = "hello-world"
image_project_c = "httpd"
image_robot_account = "alpine"
tag = "latest"
#1. Create user(UA);"
self.user_ra_id, user_ra_name = self.user.create_user(user_password = TestRobotAccount.user_ra_password, **ADMIN_CLIENT)
self.USER_RA_CLIENT=dict(endpoint = TestRobotAccount.url, username = user_ra_name, password = TestRobotAccount.user_ra_password)
#2. Create private project(PA), private project(PB) and public project(PC) by user(UA);
self.project_ra_id_a, self.project_ra_name_a = self.project.create_project(metadata = {"public": "false"}, **self.USER_RA_CLIENT)
self.project_ra_id_b, self.project_ra_name_b = self.project.create_project(metadata = {"public": "false"}, **self.USER_RA_CLIENT)
self.project_ra_id_c, self.project_ra_name_c = self.project.create_project(metadata = {"public": "true"}, **self.USER_RA_CLIENT)
#3. Push image(ImagePA) to project(PA), image(ImagePB) to project(PB) and image(ImagePC) to project(PC) by user(UA);
self.repo_name_in_project_a, tag_a = push_self_build_image_to_project(self.project_ra_name_a, harbor_server, user_ra_name, TestRobotAccount.user_ra_password, image_project_a, tag)
self.repo_name_in_project_b, tag_b = push_self_build_image_to_project(self.project_ra_name_b, harbor_server, user_ra_name, TestRobotAccount.user_ra_password, image_project_b, tag)
self.repo_name_in_project_c, tag_c = push_self_build_image_to_project(self.project_ra_name_c, harbor_server, user_ra_name, TestRobotAccount.user_ra_password, image_project_c, tag)
#4. Create a new robot account(RA) with pull and push privileges in project(PA) by user(UA);
robot_id, robot_account = self.robot.create_project_robot(self.project_ra_name_a,
30 ,**self.USER_RA_CLIENT)
#5. Check robot account info, it should have both pull and push privileges;
data = self.robot.get_robot_account_by_id(robot_id, **self.USER_RA_CLIENT)
_assert_status_code(robot_account.name, data.name)
#6. Pull image(ImagePA) from project(PA) by robot account(RA), it must be successful;
pull_harbor_image(harbor_server, robot_account.name, robot_account.secret, self.repo_name_in_project_a, tag_a)
#7. Push image(ImageRA) to project(PA) by robot account(RA), it must be successful;
self.repo_name_pa, _ = push_self_build_image_to_project(self.project_ra_name_a, harbor_server, robot_account.name, robot_account.secret, image_robot_account, tag)
#8. Push image(ImageRA) to project(PB) by robot account(RA), it must not be successful;
push_self_build_image_to_project(self.project_ra_name_b, harbor_server, robot_account.name, robot_account.secret, image_robot_account, tag, expected_error_message = "unauthorized to access repository")
#9. Pull image(ImagePB) from project(PB) by robot account(RA), it must not be successful;
pull_harbor_image(harbor_server, robot_account.name, robot_account.secret, self.repo_name_in_project_b, tag_b, expected_error_message = "unauthorized to access repository")
#10. Pull image from project(PC), it must be successful;
pull_harbor_image(harbor_server, robot_account.name, robot_account.secret, self.repo_name_in_project_c, tag_c)
#11. Push image(ImageRA) to project(PC) by robot account(RA), it must not be successful;
push_self_build_image_to_project(self.project_ra_name_c, harbor_server, robot_account.name, robot_account.secret, image_robot_account, tag, expected_error_message = "unauthorized to access repository")
#12. Update action property of robot account(RA);"
self.robot.disable_robot_account(robot_id, True, **self.USER_RA_CLIENT)
#13. Pull image(ImagePA) from project(PA) by robot account(RA), it must not be successful;
pull_harbor_image(harbor_server, robot_account.name, robot_account.secret, self.repo_name_in_project_a, tag_a, expected_login_error_message = "unauthorized: authentication required")
#14. Push image(ImageRA) to project(PA) by robot account(RA), it must not be successful;
push_self_build_image_to_project(self.project_ra_name_a, harbor_server, robot_account.name, robot_account.secret, image_robot_account, tag, expected_login_error_message = "unauthorized: authentication required")
#15. Delete robot account(RA), it must not be successful.
self.robot.delete_robot_account(robot_id, **self.USER_RA_CLIENT)
self.do_01_tearDown()
def verify_repository_pushable(self, project_access_list, system_ra_client):
for project_access in project_access_list:
print(r"project_access:", project_access)
if project_access["check_list"][1]: #---repository:push---
repo = push_self_build_image_to_project(project_access["project_name"], harbor_server, system_ra_client["username"], system_ra_client["password"], "test_pushable"+base._random_name("repo"), "v6.8.1"+base._random_name("tag"))
else:
push_self_build_image_to_project(project_access["project_name"], harbor_server, system_ra_client["username"], system_ra_client["password"], "test_unpushable"+base._random_name("repo"), "v6.8.1"+base._random_name("tag"), expected_error_message = "unauthorized to access repository")
def verify_repository_unpushable(self, project_access_list, system_ra_client, expected_login_error_message = "unauthorized: authentication required", expected_error_message = ""):
for project_access in project_access_list: #---repository:push---
push_self_build_image_to_project(
project_access["project_name"],
harbor_server, system_ra_client["username"], system_ra_client["password"],
"test_unpushable"+base._random_name("repo"), "v6.8.1"+base._random_name("tag"),
expected_login_error_message = expected_login_error_message,
expected_error_message = expected_error_message
)
def test_02_SystemlevelRobotAccount(self):
"""
Test case:
Robot Account
Test step and expected result:
1. Define a number of access lists;
2. Create the same number of private projects;
3. Create a system robot account that has permission for those projects;
4. Verify the system robot account has all the corresponding rights;
5. Disable the system robot account;
6. Verify the system robot account does not have the corresponding rights;
7. Enable the system robot account;
8. Verify the system robot account has the corresponding rights;
9. Refresh the secret for the system robot account;
10. Verify the system robot account no longer has the corresponding rights with the old secret;
11. Verify the system robot account still has the corresponding rights with the new secret;
12. List system robot accounts, then add a new project to the system robot account's project permission list;
13. Verify the system robot account has the corresponding rights for this new project;
14. Edit the system robot account to remove this new project from it;
15. Verify the system robot account does not have the corresponding rights for this new project;
16. Delete this project;
17. List system robot accounts successfully;
18. Delete the system robot account;
19. Verify the system robot account no longer has the corresponding rights;
20. Add a system robot account covering all projects;
21. Verify the system robot account has the corresponding rights for all projects;
"""
#1. Define a number of access lists;
CHART_FILE_LIST = [dict(name = 'prometheus', version='7.0.2'), dict(name = 'harbor', version='0.2.0')]
for i in range(2):
base.run_command( ["curl", r"-o", "./tests/apitests/python/{}-{}.tgz".format(CHART_FILE_LIST[i]["name"], CHART_FILE_LIST[i]["version"]), "https://storage.googleapis.com/harbor-builds/helm-chart-test-files/{}-{}.tgz".format(CHART_FILE_LIST[i]["name"], CHART_FILE_LIST[i]["version"])])
#Make sure that every row of the check list contains both 'True' and 'False'.
check_list = [
[True, True, True, True, True, True, False, True, False, True],
[False, False, False, False, True, True, False, True, True, False],
[True, False, True, False, True, False, True, False, True, True],
[False, False, False, True, False, True, False, True, True, False]
]
access_list_list = []
for i in range(len(check_list)):
access_list_list.append(self.robot.create_access_list(check_list[i]))
#2. Create the same number of private projects;
robot_account_Permissions_list = []
project_access_list = []
for i in range(len(check_list)):
with created_user(TestRobotAccount.user_ra_password, _teardown = False) as (user_id, username):
with created_project(metadata={"public": "false"}, user_id=user_id, _teardown = False) as (project_id, project_name):
project_access_list.append(dict(project_name = project_name, project_id = project_id, check_list = check_list[i]))
robot_account_Permissions = v2_swagger_client.RobotPermission(kind = "project", namespace = project_name, access = access_list_list[i])
robot_account_Permissions_list.append(robot_account_Permissions)
#3. Create a system robot account that has permission for those projects;
system_robot_account_id, system_robot_account = self.robot.create_system_robot(robot_account_Permissions_list, 300)
print("system_robot_account:", system_robot_account)
SYSTEM_RA_CLIENT = dict(endpoint = TestRobotAccount.url, username = system_robot_account.name, password = system_robot_account.secret)
SYSTEM_RA_CHART_CLIENT = dict(endpoint = CHART_API_CLIENT["endpoint"], username = SYSTEM_RA_CLIENT["username"], password = SYSTEM_RA_CLIENT["password"])
#4. Verify the system robot account has all the corresponding rights;
for project_access in project_access_list:
print(r"project_access:", project_access)
if project_access["check_list"][1]: #---repository:push---
repo = push_self_build_image_to_project(project_access["project_name"], harbor_server, SYSTEM_RA_CLIENT["username"], SYSTEM_RA_CLIENT["password"], "test_pushable", "v6.8.1")
else:
push_self_build_image_to_project(project_access["project_name"], harbor_server, SYSTEM_RA_CLIENT["username"], SYSTEM_RA_CLIENT["password"], "test_unpushable", "v6.8.1", expected_error_message = "unauthorized to access repository")
tag_for_del = "v1.0.0"
repo_name, tag = push_self_build_image_to_project(project_access["project_name"], harbor_server, ADMIN_CLIENT["username"], ADMIN_CLIENT["password"], "test_del_artifact", tag_for_del)
if project_access["check_list"][0]: #---repository:pull---
pull_harbor_image(harbor_server, SYSTEM_RA_CLIENT["username"], SYSTEM_RA_CLIENT["password"], repo_name, tag_for_del)
else:
pull_harbor_image(harbor_server, SYSTEM_RA_CLIENT["username"], SYSTEM_RA_CLIENT["password"], repo_name, tag_for_del, expected_error_message = "action: pull: unauthorized to access repository")
if project_access["check_list"][2]: #---artifact:delete---
self.artifact.delete_artifact(project_access["project_name"], repo_name.split('/')[1], tag_for_del, **SYSTEM_RA_CLIENT)
else:
self.artifact.delete_artifact(project_access["project_name"], repo_name.split('/')[1], tag_for_del, expect_status_code = 403, **SYSTEM_RA_CLIENT)
#Prepare for chart read and delete
self.chart.upload_chart(project_access["project_name"], r'./tests/apitests/python/{}-{}.tgz'.format(CHART_FILE_LIST[1]["name"], CHART_FILE_LIST[1]["version"]), **CHART_API_CLIENT)
if project_access["check_list"][3]: #---helm-chart:read---
library.helm.helm2_fetch_chart_file("chart_repo_" + base._random_name("repo"), harbor_url, project_access["project_name"], SYSTEM_RA_CLIENT["username"], SYSTEM_RA_CLIENT["password"], CHART_FILE_LIST[1]["name"])
else:
library.helm.helm2_fetch_chart_file("chart_repo_" + base._random_name("repo"), harbor_url, project_access["project_name"], SYSTEM_RA_CLIENT["username"], SYSTEM_RA_CLIENT["password"], CHART_FILE_LIST[1]["name"], expected_add_repo_error_message = "403 Forbidden")
if project_access["check_list"][4]: #---helm-chart-version:create---
self.chart.upload_chart(project_access["project_name"], r'./tests/apitests/python/{}-{}.tgz'.format(CHART_FILE_LIST[0]["name"], CHART_FILE_LIST[0]["version"]), **SYSTEM_RA_CHART_CLIENT)
else:
self.chart.upload_chart(project_access["project_name"], r'./tests/apitests/python/{}-{}.tgz'.format(CHART_FILE_LIST[0]["name"], CHART_FILE_LIST[0]["version"]), expect_status_code = 403, **SYSTEM_RA_CHART_CLIENT)
if project_access["check_list"][5]: #---helm-chart-version:delete---
self.chart.delete_chart_with_version(project_access["project_name"], CHART_FILE_LIST[1]["name"], CHART_FILE_LIST[1]["version"], **SYSTEM_RA_CHART_CLIENT)
else:
self.chart.delete_chart_with_version(project_access["project_name"], CHART_FILE_LIST[1]["name"], CHART_FILE_LIST[1]["version"], expect_status_code = 403, **SYSTEM_RA_CHART_CLIENT)
repo_name, tag = push_self_build_image_to_project(project_access["project_name"], harbor_server, ADMIN_CLIENT["username"], ADMIN_CLIENT["password"], "test_create_tag", "latest_1")
self.artifact.create_tag(project_access["project_name"], repo_name.split('/')[1], tag, "for_delete", **ADMIN_CLIENT)
if project_access["check_list"][6]: #---tag:create---
self.artifact.create_tag(project_access["project_name"], repo_name.split('/')[1], tag, "1.0", **SYSTEM_RA_CLIENT)
else:
self.artifact.create_tag(project_access["project_name"], repo_name.split('/')[1], tag, "1.0", expect_status_code = 403, **SYSTEM_RA_CLIENT)
if project_access["check_list"][7]: #---tag:delete---
self.artifact.delete_tag(project_access["project_name"], repo_name.split('/')[1], tag, "for_delete", **SYSTEM_RA_CLIENT)
else:
self.artifact.delete_tag(project_access["project_name"], repo_name.split('/')[1], tag, "for_delete", expect_status_code = 403, **SYSTEM_RA_CLIENT)
repo_name, tag = push_self_build_image_to_project(project_access["project_name"], harbor_server, ADMIN_CLIENT["username"], ADMIN_CLIENT["password"], "test_create_artifact_label", "latest_1")
#Add project level label to artifact
label_id, _ = self.label.create_label(project_id = project_access["project_id"], scope = "p", **ADMIN_CLIENT)
if project_access["check_list"][8]: #---artifact-label:create---
self.artifact.add_label_to_reference(project_access["project_name"], repo_name.split('/')[1], tag, int(label_id), **SYSTEM_RA_CLIENT)
else:
self.artifact.add_label_to_reference(project_access["project_name"], repo_name.split('/')[1], tag, int(label_id), expect_status_code = 403, **SYSTEM_RA_CLIENT)
if project_access["check_list"][9]: #---scan:create---
self.scan.scan_artifact(project_access["project_name"], repo_name.split('/')[1], tag, **SYSTEM_RA_CLIENT)
else:
self.scan.scan_artifact(project_access["project_name"], repo_name.split('/')[1], tag, expect_status_code = 403, **SYSTEM_RA_CLIENT)
#5. Disable the system robot account;
self.robot.update_system_robot_account(system_robot_account_id, system_robot_account.name, robot_account_Permissions_list, disable = True, **ADMIN_CLIENT)
#6. Verify the system robot account does not have the corresponding rights;
self.verify_repository_unpushable(project_access_list, SYSTEM_RA_CLIENT)
#7. Enable the system robot account;
self.robot.update_system_robot_account(system_robot_account_id, system_robot_account.name, robot_account_Permissions_list, disable = False, **ADMIN_CLIENT)
#8. Verify the system robot account has the corresponding rights;
self.verify_repository_pushable(project_access_list, SYSTEM_RA_CLIENT)
#9. Refresh secret for the system robot account;
new_secret = "new_secret_At_321"
self.robot.refresh_robot_account_secret(system_robot_account_id, new_secret, **ADMIN_CLIENT)
#10. Verify the system robot account no longer has the corresponding rights with the old secret;
self.verify_repository_unpushable(project_access_list, SYSTEM_RA_CLIENT)
#11. Verify the system robot account still has the corresponding right with the new secret;
SYSTEM_RA_CLIENT["password"] = new_secret
self.verify_repository_pushable(project_access_list, SYSTEM_RA_CLIENT)
#12. List system robot account, then add a new project to the system robot account project permission list;
self.robot.list_robot(**ADMIN_CLIENT)
project_for_del_id, project_for_del_name = self.project.create_project(metadata = {"public": "true"}, **ADMIN_CLIENT)
robot_account_Permissions = v2_swagger_client.RobotPermission(kind = "project", namespace = project_for_del_name, access = access_list_list[0])
robot_account_Permissions_list.append(robot_account_Permissions)
self.robot.update_system_robot_account(system_robot_account_id, system_robot_account.name, robot_account_Permissions_list, **ADMIN_CLIENT)
self.robot.list_robot(**ADMIN_CLIENT)
#13. Verify the system robot account has the corresponding right for this new project;
project_access_list.append(dict(project_name = project_for_del_name, project_id = project_for_del_id, check_list = [True] * 10))
self.verify_repository_pushable(project_access_list, SYSTEM_RA_CLIENT)
#14. Edit the system robot account to remove this new project from it;
robot_account_Permissions_list.remove(robot_account_Permissions)
self.robot.update_system_robot_account(system_robot_account_id, system_robot_account.name, robot_account_Permissions_list, **ADMIN_CLIENT)
self.robot.list_robot(**ADMIN_CLIENT)
#15. Verify the system robot account does not have the corresponding rights for this new project;
project_access_list_for_del = [dict(project_name = project_for_del_name, project_id = project_for_del_id, check_list = [True] * 10)]
self.verify_repository_unpushable(
project_access_list_for_del, SYSTEM_RA_CLIENT,
expected_login_error_message = "",
expected_error_message = "action: push: unauthorized to access repository"
)
#16. Delete this project;
self.repo.clear_repositories(project_for_del_name, **ADMIN_CLIENT)
self.project.delete_project(project_for_del_id, **ADMIN_CLIENT)
#17. List system robot account successfully;
self.robot.list_robot(**ADMIN_CLIENT)
#18. Delete the system robot account;
self.robot.delete_robot_account(system_robot_account_id, **ADMIN_CLIENT)
#19. Verify the system robot account no longer has the corresponding rights;
self.verify_repository_unpushable(project_access_list, SYSTEM_RA_CLIENT)
#20. Add a system robot account covering all projects;
all_true_access_list= self.robot.create_access_list( [True] * 10 )
robot_account_Permissions_list = []
robot_account_Permissions = v2_swagger_client.RobotPermission(kind = "project", namespace = "*", access = all_true_access_list)
robot_account_Permissions_list.append(robot_account_Permissions)
_, system_robot_account_cover_all = self.robot.create_system_robot(robot_account_Permissions_list, 300)
#21. Verify the system robot account has the corresponding rights for all projects;
print("system_robot_account_cover_all:", system_robot_account_cover_all)
SYSTEM_RA_CLIENT_COVER_ALL = dict(endpoint = TestRobotAccount.url, username = system_robot_account_cover_all.name, password = system_robot_account_cover_all.secret)
projects = self.project.get_projects(dict(), **ADMIN_CLIENT)
print("All projects:", projects)
project_access_list = []
for i in range(len(projects)):
project_access_list.append(dict(project_name = projects[i].name, project_id = projects[i].project_id, check_list = all_true_access_list))
self.verify_repository_pushable(project_access_list, SYSTEM_RA_CLIENT_COVER_ALL)
if __name__ == '__main__':
suite = unittest.TestSuite(unittest.makeSuite(TestRobotAccount))
result = unittest.TextTestRunner(sys.stdout, verbosity=2, failfast=True).run(suite)
if not result.wasSuccessful():
raise Exception(r"Robot account test failed: {}".format(result))
|
py | b4007fbfcbec928abc9cd0a3571c4ed5d6d7d395 | x=[[0]*10 for _ in range(2)]
for i in range(2):
for j in range(10): x[i][j]=int(input())
x[i].sort()
print(x[0][-1]+x[0][-2]+x[0][-3],x[1][-1]+x[1][-2]+x[1][-3]) |
py | b4008090d871e11f3795fd95f034dafb14dbb538 | import astropy.units as u
import astropy.constants as c
import matplotlib.pyplot as plt
import numpy as np
from astropy.cosmology import WMAP9 as cosmo
import general
class Source():
"""
The base class for a gravitational wave source.
"""
name = "Generic Source"
frequencies = np.logspace(-5, 5, 1000) * u.hertz
M = 30 * u.solMass
r = 300 * u.parsec
def __init__(self, frequencies=None, M=None, r=None):
if frequencies: self.frequencies = frequencies
if r: self.r = r
if M: self.M = M
def raw_strain(self, frequencies=None):
if not frequencies: frequencies = self.frequencies
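# Stationary-phase inspiral amplitude: |h(f)| proportional to (G*M_chirp)^(5/6) * (pi*f)^(-7/6) / r.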
return ((1./self.r) * ((5*np.pi)/(24*c.c**3))**(0.5) * (c.G * self.chirp_mass())**(5./6) * (np.pi*frequencies)**(-7./6)).to(1/u.hertz)
def psd(self, frequencies=None):
"""
The one-sided power spectral density
Parameters
----------
frequencies : ndarray
An array of frequencies where the PSD should be calculated.
Returns : ndarray
An array of the PSDs at the given frequencies for this source.
"""
if not frequencies: frequencies = self.frequencies
return 2 * (frequencies**0.5) * np.abs(self.raw_strain(frequencies))
def srpsd(self, frequencies=None):
if not frequencies: frequencies = self.frequencies
return np.sqrt(self.psd(frequencies))
def characteristic_strain(self, frequencies=None):
if not frequencies: frequencies = self.frequencies
return np.sqrt(4 * frequencies**2 * np.abs(self.raw_strain(frequencies))**2)
def energy_density(self, frequencies=None):
if not frequencies: frequencies = self.frequencies
return (2*np.pi**2)/3 * frequencies**3 * self.psd(frequencies)
def plot(self, axis):
if axis:
axis.loglog(self.frequencies, self.characteristic_strain(self.frequencies), label=self.name, lw=2)
axis.set_xlabel('Frequency [Hz]')
#axis.set_ylabel('Root Noise Power spectral density')
axis.legend()
def snr(self, detector):
return general.snr(self, detector)
class CBC(Source):
"""
A compact binary coalescence source
"""
name = "CBC"
M = 30 * u.solMass
r = 300 * u.parsec
def __init__(self, frequencies=None, m1=None, m2=None, r=None):
if frequencies: self.frequencies = frequencies
if r: self.r = r
if m1: self.m1 = m1
if m2: self.m2 = m2
self.M = self.chirp_mass()
def fdot(self, frequencies=None, M=None):
"""
Calculate the first time derivative of the CBC's frequency.
Parameters
---------
frequencies : ndarray
The frequencies at which the number of cycles need to be found.
M : float
The chirp mass of the CBC.
Returns
-------
fdot : ndarray
The df/dt of each frequency.
"""
if not frequencies: frequencies = 0.5*self.frequencies
if not M: M = self.chirp_mass()
return (((96*np.pi**(8./3)) / (5 * c.c**5)) * (c.G*M)**(5./3) * frequencies**(11./3))#.to(u.hertz**2)
def ncycles(self, frequencies=None, M=None):
"""
Calculate the number of cycles that the CBC spends in each frequency bin.
Parameters
---------
frequencies : ndarray
The frequencies at which the number of cycles need to be found.
M : float
The chirp mass of the CBC.
Returns
-------
ncycles : ndarray
The number of cycles in each frequency bin.
"""
if not frequencies: frequencies = 0.5*self.frequencies
if not M: M = self.chirp_mass()
return np.sqrt(frequencies**2/ self.fdot(frequencies, M))#.to(1)
def characteristic_strain(self, frequencies=None):
if not frequencies: frequencies = self.frequencies
return np.sqrt(2*self.ncycles())*np.sqrt(4 * frequencies**2 * np.abs(self.raw_strain())**2)
def chirp_mass(self):
return ((self.m1*self.m2)**(3./5) / (self.m1 + self.m2)**(1./5)).to(u.kilogram)
def fisco(self):
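# Gravitational-wave frequency at the innermost stable circular orbit: f_isco = c^3 / (6*sqrt(6) * pi * G * (m1 + m2)).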
return ((c.c**3) / (np.pi*c.G*(self.m1+self.m2)*6*6**0.5 )).to(u.hertz)
def raw_strain(self, frequencies=None):
if not frequencies: frequencies = self.frequencies
h = ((1./self.r) * ((5*np.pi)/(24*c.c**3))**(0.5) * (c.G * self.M)**(5./6) * (np.pi*frequencies)**(-7./6)).to(1/u.hertz)
h[frequencies>2*self.fisco()] = np.nan
return h
class LALException(Exception):
pass
class BHBH(CBC):
@u.quantity_input(frequencies=u.hertz, m1=u.kilogram, m2=u.kilogram, R=u.meter)
def __init__(self, frequencies, m1, m2, S1, S2, R, inclination=0):
self.frequencies = frequencies
self.m1, self.m2 = m1, m2
self.S1, self.S2 = S1, S2
self.R = R
self.inclination = inclination
def IMRPhenom(self):
try:
from lalsimulation import SimInspiralChooseFDWaveform as InFD
from lalsimulation import IMRPhenomP
except ImportError:
LALException("You need to install LALSimulation to use this waveform.")
return -1
fmin, fmax = self.frequencies.min(), self.frequencies.max()
m1, m2 = self.m1, self.m2
S1, S2= self.S1, self.S2
R = self.R
fref = 0 # reference frequency of waveform (Hz)
fdiff = 10*u.hertz #self.frequencies.diff()[0]#.value #(frequencies.max() - frequencies.min())/(len(frequencies))
approx = IMRPhenomP # the waveform approximant
ampO = 0 # pN amplitude order
phaseO = 7
phiref = 0
hp, hc = InFD(phiref, fdiff.value, #frequencies.diff()[0].value,
m1.to(u.kilogram).value, m2.to(u.kilogram).value,
S1[0], S1[1], S1[2], S2[0], S2[1], S2[2],
fmin.value, fmax.value, fref,
R.to(u.meter).value, self.inclination, 0., 0.,
None, None,
ampO, phaseO, approx)
hrss = np.sqrt(hp.data.data.real**2 + hc.data.data.real**2)
return hrss
def raw_strain(self, frequencies=None):
if not frequencies: frequencies = self.frequencies
return self.IMRPhenom() |
py | b40080aa1df40be003d06b038a8a971007ab9983 | """From OpenAI baselines:
https://github.com/openai/baselines/blob/master/baselines/common/running_mean_std.py
"""
from typing import Sequence, Tuple
import numpy as np
import torch
from ..lib import mpi
from ..prelude import Array, Self
from ..utils import Device
from ..utils.state_dict import TensorStateDict
class RunningMeanStd:
"""Calcurate running mean and variance"""
def __init__(self, shape: Sequence[int], epsilon: float = 1.0e-4) -> None:
self.mean = np.zeros(shape, dtype=np.float64)
self.var = np.ones(shape, dtype=np.float64)
self.count = epsilon
def update(self, x: Array[float]) -> None:
x_mean, x_var = mpi.array_mean_and_var(x)
self.mean, self.var, self.count = _update_rms(
self.mean,
self.var,
self.count,
x_mean,
x_var,
x.shape[0] * mpi.global_size(),
)
def std(self, eps: float = 1.0e-8) -> Array[float]:
return np.sqrt(self.var + eps)
def copyto(self, other: Self) -> None:
np.copyto(other.mean, self.mean)
np.copyto(other.var, self.var)
def __repr__(self) -> str:
return f"RMS(mean: {self.mean}, var: {self.var})"
def _update_rms(
mean: Array[float],
var: Array[float],
count: float,
batch_mean: float,
batch_var: float,
batch_count: int,
) -> Tuple[Array[float], Array[float], float]:
"""https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm"""
delta = batch_mean - mean
tot_count = count + batch_count
new_mean = np.add(mean, delta * batch_count / tot_count)
m_a = var * count
m_b = batch_var * batch_count
M2 = m_a + m_b + np.square(delta) * count * batch_count / tot_count
new_var = M2 / tot_count
new_count = tot_count
return np.asarray(new_mean), np.asarray(new_var), new_count
class RunningMeanStdTorch(TensorStateDict):
"""Same as RunningMeanStd, but uses PyTorch Tensor"""
def __init__(
self, shape: torch.Size, device: Device, epsilon: float = 1.0e-4
) -> None:
self.mean = device.zeros(shape, dtype=torch.float64)
self.var = device.ones(shape, dtype=torch.float64)
self.count = torch.tensor(epsilon, dtype=torch.float64, device=device.unwrapped)
self.device = device
@torch.no_grad()
def update(self, x: torch.Tensor) -> None:
x_mean, x_var = mpi.tensor_mean_and_var(x)
_update_rms_torch(
self.mean,
self.var,
self.count,
x_mean,
x_var,
torch.tensor(x.size(0) * mpi.global_size(), device=self.device.unwrapped),
)
def std(self, eps: float = 1.0e-8) -> torch.Tensor:
return torch.sqrt(self.var + eps)
def _update_rms_torch(mean, var, count, batch_mean, batch_var, batch_count):
"""Inplace, PyTorch implementation of _update_rms"""
delta = batch_mean - mean
tot_count = count + batch_count
mean.add_(delta * batch_count / tot_count)
m_b = batch_var * batch_count
delta.pow_(2).mul_(count).mul_(batch_count).div_(tot_count)
var.mul_(count).add_(m_b).add_(delta).div_(tot_count)
count.add_(batch_count)
|
py | b400812100b68bf8e82cb749e687090bb1b0a43f | API_TOKEN = ''
OMDB_TOKEN = ''
import discord
import text2emotion as te
import random
import requests
# Movie title arrays
surprise = ["Alien", "Midsommar", "Shutter Island", "Gone Girl", "Get Out", "A Queit Place", "North by Northwest", "The Prestige", "28 Days Later", "Oldboy", "Prisoners", "Donnie Darko", "Sicario", "The Usual Suspects", "Tenet", "Rosemary's Baby", "Inception", "Interstellar", "Seven Samurai", "The Shining"]
fear =["Ocean's Eleven", "Good Will Hunting", "Almost Famous", "Ferris Bueller’s Day Off", "Billy Madison", "The Italian Job", "This is Spinal Tap", "The Secret Life of Walter Mitty", "Groundhog Day", "Jurassic Park", "Die Hard", "The Fifth Element", "Trading Places", "Starship Troopers", "Raiders of the Lost Ark", "Face/Off", "The Rock", "Office Space", "Creed", "Jack Reacher"]
angry = ["The Matrix", "Apocalypse Now", "Gladiator", "Scarface", "Judas and the Black Messiah", "Fight Club", "Warrior", "Reservoir Dogs", "V for Vendetta", "Heat", "Snowpiercer", "Joker", "Pulp Fiction", "The Dark Knight", "A Clockwork Orange", "American Psycho", "Sin City", "Taxi Driver", "The Equalizer", "Unforgiven"]
sad = ["The Shawshank Redemption", "The Godfather", "Eternal Sunshine of the Spotless Mind", "Schindler's List", "One Flew Over the Cuckoo's Nest", "Requiem for a Dream", "American Beauty", "A Beautiful Mind", "Spotlight", "Hotel Rwanda", "Her", "12 Angry Men", "Forrest Gump", "City of God", "The Green Mile", "Whiplash", "American History X", "Braveheart", "Lawrence of Arabia", "12 Years a Slave"]
happy = ["Coming to America", "Parasite", "Once Upon a Time... In Hollywood", "Thor: Ragnarok", "The Gentlemen", "Deadpool", "Back to the Future", "Snatch", "The Sting", "Lock, Stock and Two Smoking Barrels", "The Grand Budapest Hotel", "The Truman Show", "Aladdin", "The Big Lebowski", "The Blues Brothers", "The Goonies", "The Princess Bride", "Wall-E", "Toy Story", "Friday"]
client = discord.Client()
@client.event
async def on_ready():
print('We have logged in as {0.user}'.format(client))
# Bot gets a message from channel
@client.event
async def on_message(message):
user_id = message.author.id
messages = ""
if message.content.startswith('!moviemotion'):
# Get user messages in the guild their in
for chan in message.guild.text_channels:
async for msg in chan.history(limit=5): # Last 5 messages in each channel
if msg.author.id == user_id:
# Save the messages to an array
messages = messages + " " + msg.content
# Format messages
messages = messages.replace('\n', '')
messages = messages.replace('\t', '')
messages = messages.strip('\n')
messages = messages.strip('\t')
# Run text2emotion on messages
emotions = te.get_emotion(messages)
# Pick the dominant emotion from the text2emotion result
Keymax = max(emotions, key=emotions.get)
# Pick three distinct random indices into the 20-title lists
RandomListOfIntegers = random.sample(range(20), 3)
# Choose correct movie title array based on Keymax
if Keymax == 'Happy':
movie_array = happy
if Keymax == 'Angry':
movie_array = angry
if Keymax == 'Surprise':
movie_array = surprise
if Keymax == 'Sad':
movie_array = sad
if Keymax == 'Fear':
movie_array = fear
# Assign 3 random movie titles
movie1 = movie_array[RandomListOfIntegers[0]]
movie2 = movie_array[RandomListOfIntegers[1]]
movie3 = movie_array[RandomListOfIntegers[2]]
# OMBD api call with selected movies
movie1_url = 'https://www.omdbapi.com/?apikey=' + OMDB_TOKEN + '&t=' + movie1
movie1_response = requests.get(movie1_url)
movie1_json = movie1_response.json()
movie2_url = 'https://www.omdbapi.com/?apikey=' + OMDB_TOKEN + '&t=' + movie2
movie2_response = requests.get(movie2_url)
movie2_json = movie2_response.json()
movie3_url = 'https://www.omdbapi.com/?apikey=' + OMDB_TOKEN + '&t=' + movie3
movie3_response = requests.get(movie3_url)
movie3_json = movie3_response.json()
# Process and format result from OMBD api
newline = '\n'
movie1_poster = movie1_json['Poster']
movie2_poster = movie2_json['Poster']
movie3_poster = movie3_json['Poster']
movie1_year = movie1_json['Year']
movie2_year = movie2_json['Year']
movie3_year = movie3_json['Year']
movie1_plot = movie1_json['Plot']
movie2_plot = movie2_json['Plot']
movie3_plot = movie3_json['Plot']
movie1_id = movie1_json['imdbID']
movie1_link = "https://www.imdb.com/title/" + movie1_id
movie2_id = movie2_json['imdbID']
movie2_link = "https://www.imdb.com/title/" + movie2_id
movie3_id = movie3_json['imdbID']
movie3_link = "https://www.imdb.com/title/" + movie3_id
# Format embeds and DM the user
embed = discord.Embed(title="Hi there!", description="These are your top 3 movie recommendations based on your recent Discord messages. Enjoy!", color=discord.Color.blue())
embed.set_author(name="moviemotion", icon_url="https://i.ibb.co/9yF0trY/logo.png")
embed.add_field(name=f"1. {movie1}", value=f"({movie1_year}) {newline} {movie1_link}", inline=False)
embed.set_image(url=f'{movie1_poster}')
embed.set_footer(text=f"{movie1_plot}")
await message.author.send(embed=embed)
embed2 = discord.Embed(color=discord.Color.blue())
embed2.add_field(name=f"2. {movie2}", value=f"({movie2_year}) {newline} {movie2_link}", inline=False)
embed2.set_image(url=f'{movie2_poster}')
embed2.set_footer(text=f"{movie2_plot}")
await message.author.send(embed=embed2)
embed3 = discord.Embed(color=discord.Color.blue())
embed3.add_field(name=f"3. {movie3}", value=f"({movie3_year}) {newline} {movie3_link}", inline=False)
embed3.set_image(url=f'{movie3_poster}')
embed3.set_footer(text=f"{movie3_plot}")
await message.author.send(embed=embed3)
client.run(API_TOKEN)
|
py | b400827de0f386b5f0570a677b91b2def219bd84 | from typing import Any, Dict
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.decorator import webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_error, json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
@webhook_view('Zapier', notify_bot_owner_on_invalid_json=False)
@has_request_variables
def api_zapier_webhook(request: HttpRequest, user_profile: UserProfile,
payload: Dict[str, Any]=REQ(argument_type='body')) -> HttpResponse:
if payload.get('type') == 'auth':
# The bot's details are used by our Zapier app to format a connection
# label for users to be able to distinguish between different Zulip
# bots and API keys in their UI
return json_success({
'full_name': user_profile.full_name,
'email': user_profile.email,
'id': user_profile.id,
})
topic = payload.get('topic')
content = payload.get('content')
if topic is None:
topic = payload.get('subject') # Backwards-compatibility
if topic is None:
return json_error(_("Topic can't be empty"))
if content is None:
return json_error(_("Content can't be empty"))
check_send_webhook_message(request, user_profile, topic, content)
return json_success()
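# Example (hypothetical) payload accepted by this endpoint:
#   {"topic": "greetings", "content": "hello from Zapier"}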
|
py | b40082b96edd6a908f060e8f819c4736634e4ce4 | from django.conf import settings
from drf_yasg.utils import swagger_auto_schema
from django.shortcuts import get_object_or_404, redirect
from rest_framework import status
from accounts.serializer import AccountSerializer
from contacts.serializer import ContactSerializer
from opportunity.serializer import OpportunitySerializer
from leads.serializer import LeadSerializer
from teams.serializer import TeamsSerializer
from common.serializer import *
from cases.serializer import CaseSerializer
from accounts.models import Account, Contact
from opportunity.models import Opportunity
from accounts.models import Tags
from cases.models import Case
from leads.models import Lead
from teams.models import Teams
from common.utils import ROLES
from common.serializer import (
RegisterUserSerializer,
CreateUserSerializer,
)
from common.models import User, Company, Document, APISettings
from common.tasks import (
resend_activation_link_to_user,
send_email_to_new_user,
send_email_user_delete,
send_email_user_status,
send_email_to_reset_password,
)
from django.utils.translation import gettext as _
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework_jwt.serializers import jwt_encode_handler
from common.utils import jwt_payload_handler
from rest_framework.exceptions import APIException
from rest_framework.permissions import IsAuthenticated
from rest_framework.pagination import LimitOffsetPagination
from common.custom_auth import JSONWebTokenAuthentication
from common import swagger_params
from django.db.models import Q
from rest_framework.decorators import api_view
import json
from django.contrib.auth.tokens import default_token_generator
from django.utils.encoding import force_text
from django.utils.http import urlsafe_base64_decode
from common.token_generator import account_activation_token
from common.models import Profile
from django.utils import timezone
from django.conf import settings
def index(request):
return redirect("/app/")
class GetTeamsAndUsersView(APIView):
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
@swagger_auto_schema(
tags=["Users"],
)
def get(self, request, *args, **kwargs):
if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
return Response(
{"error": True, "errors": "Permission Denied"},
status=status.HTTP_403_FORBIDDEN,
)
data = {}
        teams = Teams.objects.all()
teams_data = TeamsSerializer(teams, many=True).data
users = User.objects.filter(is_active=True)
users_data = UserSerializer(users, many=True).data
data["teams"] = teams_data
data["users_data"] = users_data
return Response(data)
class UserDetailView(APIView):
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_object(self, pk):
user = get_object_or_404(User, pk=pk)
return user
@swagger_auto_schema(
tags=["Users"],
)
def get(self, request, pk, format=None):
user_obj = self.get_object(pk)
if (
self.request.user.role != "ADMIN"
and not self.request.user.is_superuser
and self.request.user.id != user_obj.id
):
return Response(
{"error": True, "errors": "Permission Denied"},
status=status.HTTP_403_FORBIDDEN,
)
users_data = []
for each in User.objects.all():
assigned_dict = {}
assigned_dict["id"] = each.id
assigned_dict["name"] = each.username
users_data.append(assigned_dict)
context = {}
context["user_obj"] = UserSerializer(user_obj).data
opportunity_list = Opportunity.objects.filter(assigned_to=user_obj)
context["opportunity_list"] = OpportunitySerializer(
opportunity_list, many=True
).data
contacts = Contact.objects.filter(assigned_to=user_obj)
context["contacts"] = ContactSerializer(contacts, many=True).data
cases = Case.objects.filter(assigned_to=user_obj)
context["cases"] = CaseSerializer(cases, many=True).data
context["assigned_data"] = users_data
comments = user_obj.user_comments.all()
context["comments"] = CommentSerializer(comments, many=True).data
return Response(
{"error": False, "data": context},
status=status.HTTP_200_OK,
)
@swagger_auto_schema(
tags=["Users"], manual_parameters=swagger_params.user_update_params
)
def put(self, request, pk, format=None):
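        # Use the request body when present, otherwise fall back to query params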
params = request.query_params if len(request.data) == 0 else request.data
user = self.get_object(pk)
if (
self.request.user.role != "ADMIN"
and not self.request.user.is_superuser
and self.request.user.id != user.id
):
return Response(
{"error": True, "errors": "Permission Denied"},
status=status.HTTP_403_FORBIDDEN,
)
serializer = CreateUserSerializer(user, data=params, request_user=request.user)
if serializer.is_valid():
user = serializer.save()
if self.request.user.role == "ADMIN":
if params.getlist("teams"):
user_teams = user.user_teams.all()
team_obj = Teams.objects.all()
for team in params.getlist("teams"):
try:
team_obj = team_obj.filter(id=team).first()
if team_obj != user_teams:
team_obj.users.add(user)
except:
return Response(
{"detail": "No such Team Available"},
status=status.HTTP_404_NOT_FOUND,
)
return Response(
{"error": False, "message": "User Updated Successfully"},
status=status.HTTP_200_OK,
)
return Response(
{"error": True, "errors": serializer.errors},
status=status.HTTP_400_BAD_REQUEST,
)
@swagger_auto_schema(
tags=["Users"],
)
def delete(self, request, pk, format=None):
if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
return Response(
{"error": True, "errors": "Permission Denied"},
status=status.HTTP_403_FORBIDDEN,
)
self.object = self.get_object(pk)
if self.object.id == request.user.id:
return Response(
{"error": True, "errors": "Permission Denied"},
status=status.HTTP_403_FORBIDDEN,
)
deleted_by = self.request.user.email
send_email_user_delete.delay(
self.object.email,
deleted_by=deleted_by,
domain=settings.Domain,
protocol=request.scheme,
)
self.object.delete()
return Response({"status": "success"}, status=status.HTTP_200_OK)
class ChangePasswordView(APIView):
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
@swagger_auto_schema(
tags=["Profile"],
operation_description="This is change password api",
manual_parameters=swagger_params.change_password_params,
)
def post(self, request, format=None):
params = request.query_params if len(request.data) == 0 else request.data
old_password = params.get("old_password", None)
new_password = params.get("new_password", None)
retype_password = params.get("retype_password", None)
errors = {}
if old_password:
if not request.user.check_password(old_password):
errors["old_password"] = "old password entered is incorrect."
if new_password:
if len(new_password) < 8:
errors["new_password"] = "Password must be at least 8 characters long!"
if new_password == old_password:
errors[
"new_password"
] = "New password and old password should not be same"
if retype_password:
if new_password != retype_password:
errors[
"retype_password"
] = "New_password and Retype_password did not match."
if errors:
return Response(
{"error": True, "errors": errors},
status=status.HTTP_400_BAD_REQUEST,
)
user = request.user
user.set_password(new_password)
user.save()
return Response(
{"error": False, "message": "Password Changed Successfully"},
status=status.HTTP_200_OK,
)
# check_header not working
class ApiHomeView(APIView):
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
@swagger_auto_schema(
tags=["dashboard"],
)
def get(self, request, format=None):
accounts = Account.objects.filter(status="open")
contacts = Contact.objects.all()
leads = Lead.objects.all().exclude(Q(status="converted") | Q(status="closed"))
opportunities = Opportunity.objects.all()
if self.request.user.role == "ADMIN" or self.request.user.is_superuser:
pass
else:
accounts = accounts.filter(
Q(assigned_to__id__in=[self.request.user.id])
| Q(created_by=self.request.user.id)
)
contacts = contacts.filter(
Q(assigned_to__id__in=[self.request.user.id])
| Q(created_by=self.request.user.id)
)
leads = leads.filter(
Q(assigned_to__id__in=[self.request.user.id])
| Q(created_by=self.request.user.id)
).exclude(status="closed")
opportunities = opportunities.filter(
Q(assigned_to__id__in=[self.request.user.id])
| Q(created_by=self.request.user.id)
)
context = {}
context["accounts_count"] = accounts.count()
context["contacts_count"] = contacts.count()
context["leads_count"] = leads.count()
context["opportunities_count"] = opportunities.count()
context["accounts"] = AccountSerializer(accounts, many=True).data
context["contacts"] = ContactSerializer(contacts, many=True).data
context["leads"] = LeadSerializer(leads, many=True).data
context["opportunities"] = OpportunitySerializer(opportunities, many=True).data
return Response(context, status=status.HTTP_200_OK)
class LoginView(APIView):
@swagger_auto_schema(
tags=["Auth"],
operation_description="This is login api",
manual_parameters=swagger_params.login_page_params,
)
def post(self, request, format=None):
params = request.query_params if len(request.data) == 0 else request.data
username = params.get("email", None)
password = params.get("password", None)
if not username:
username_field = "User Name/Email"
msg = _('Must include "{username_field}"')
msg = msg.format(username_field=username_field)
return Response(
{"error": True, "errors": msg},
status=status.HTTP_400_BAD_REQUEST,
)
user = User.objects.filter(email=username).first()
if not user:
return Response(
{"error": True, "errors": "user not avaliable in our records"},
status=status.HTTP_400_BAD_REQUEST,
)
if not user.is_active:
return Response(
{"error": True, "errors": "Please activate account to proceed."},
status=status.HTTP_400_BAD_REQUEST,
)
if user.check_password(password):
payload = jwt_payload_handler(user)
response_data = {
"token": jwt_encode_handler(payload),
"error": False,
}
return Response(response_data, status=status.HTTP_200_OK)
else:
password_field = "doesnot match"
msg = _("Email and password {password_field}")
msg = msg.format(password_field=password_field)
return Response(
{"error": True, "errors": msg},
status=status.HTTP_400_BAD_REQUEST,
)
class RegistrationView(APIView):
@swagger_auto_schema(
tags=["Auth"],
operation_description="This is registration api",
manual_parameters=swagger_params.registration_page_params,
)
def post(self, request, format=None):
params = request.query_params if len(request.data) == 0 else request.data
user_serializer = RegisterUserSerializer(
data=params,
request_user=request.user,
)
errors = {}
if not user_serializer.is_valid():
errors.update(user_serializer.errors)
if errors:
return Response(
{"error": True, "errors": errors},
status=status.HTTP_400_BAD_REQUEST,
)
if user_serializer.is_valid():
user = user_serializer.save(
role="ADMIN",
is_superuser=False,
has_marketing_access=True,
has_sales_access=True,
is_admin=True,
)
if params.get("password"):
user.set_password(params.get("password"))
user.save()
protocol = request.scheme
send_email_to_new_user.delay(
user.email,
user.email,
domain=settings.Domain,
protocol=protocol,
)
return Response(
{"error": False, "message": "User created Successfully."},
status=status.HTTP_200_OK,
)
class ProfileView(APIView):
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
@swagger_auto_schema(
tags=["Profile"],
)
def get(self, request, format=None):
context = {}
context["user_obj"] = UserSerializer(request.user).data
return Response(context, status=status.HTTP_200_OK)
class UsersListView(APIView, LimitOffsetPagination):
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
@swagger_auto_schema(
tags=["Users"], manual_parameters=swagger_params.user_create_params
)
def post(self, request, format=None):
if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
return Response(
{"error": True, "errors": "Permission Denied"},
status=status.HTTP_403_FORBIDDEN,
)
else:
params = request.query_params if len(request.data) == 0 else request.data
if params:
user_serializer = CreateUserSerializer(
data=params, request_user=request.user
)
if user_serializer.is_valid():
user = user_serializer.save()
if params.get("password"):
                        user.set_password(params.get("password"))
                        user.is_active = False
                        user.save()
protocol = request.scheme
send_email_to_new_user.delay(
user.email,
self.request.user.email,
domain=settings.Domain,
protocol=protocol,
)
return Response(
{"error": False, "message": "User Created Successfully"},
status=status.HTTP_201_CREATED,
)
return Response(
{"error": True, "errors": user_serializer.errors},
status=status.HTTP_400_BAD_REQUEST,
)
@swagger_auto_schema(
tags=["Users"], manual_parameters=swagger_params.user_list_params
)
def get(self, request, format=None):
if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
return Response(
{"error": True, "errors": "Permission Denied"},
status=status.HTTP_403_FORBIDDEN,
)
else:
queryset = User.objects.all()
params = (
self.request.query_params
if len(self.request.data) == 0
else self.request.data
)
if params:
if params.get("username"):
queryset = queryset.filter(
username__icontains=params.get("username")
)
if params.get("email"):
queryset = queryset.filter(email__icontains=params.get("email"))
if params.get("role"):
queryset = queryset.filter(role=params.get("role"))
if params.get("status"):
queryset = queryset.filter(is_active=params.get("status"))
context = {}
queryset_active_users = queryset.filter(is_active=True)
results_active_users = self.paginate_queryset(
queryset_active_users.distinct(), self.request, view=self
)
active_users = UserSerializer(results_active_users, many=True).data
context["per_page"] = 10
context["active_users"] = {
"active_users_count": self.count,
"next": self.get_next_link(),
"previous": self.get_previous_link(),
"page_number": int(self.offset / 10) + 1,
"active_users": active_users,
}
queryset_inactive_users = queryset.filter(is_active=False)
results_inactive_users = self.paginate_queryset(
queryset_inactive_users.distinct(), self.request, view=self
)
inactive_users = UserSerializer(results_inactive_users, many=True).data
context["inactive_users"] = {
"inactive_users_count": self.count,
"next": self.get_next_link(),
"previous": self.get_previous_link(),
"page_number": int(self.offset / 10) + 1,
"inactive_users": inactive_users,
}
context["admin_email"] = settings.ADMIN_EMAIL
context["roles"] = ROLES
context["status"] = [("True", "Active"), ("False", "In Active")]
return Response(context)
class DocumentListView(APIView, LimitOffsetPagination):
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
model = Document
def get_context_data(self, **kwargs):
params = (
self.request.query_params
if len(self.request.data) == 0
else self.request.data
)
queryset = self.model.objects.all()
if self.request.user.is_superuser or self.request.user.role == "ADMIN":
queryset = queryset
else:
if self.request.user.documents():
doc_ids = self.request.user.documents().values_list("id", flat=True)
shared_ids = queryset.filter(
Q(status="active") & Q(shared_to__id__in=[self.request.user.id])
).values_list("id", flat=True)
queryset = queryset.filter(Q(id__in=doc_ids) | Q(id__in=shared_ids))
else:
queryset = queryset.filter(
Q(status="active") & Q(shared_to__id__in=[self.request.user.id])
)
request_post = params
if request_post:
if request_post.get("title"):
queryset = queryset.filter(title__icontains=request_post.get("title"))
if request_post.get("status"):
queryset = queryset.filter(status=request_post.get("status"))
if request_post.get("shared_to"):
queryset = queryset.filter(
shared_to__id__in=json.loads(request_post.get("shared_to"))
)
context = {}
if self.request.user.role == "ADMIN" or self.request.user.is_superuser:
users = User.objects.filter(is_active=True).order_by("email")
else:
users = User.objects.filter(role="ADMIN").order_by("email")
search = False
if (
params.get("document_file")
or params.get("status")
or params.get("shared_to")
):
search = True
context["search"] = search
queryset_documents_active = queryset.filter(status="active")
results_documents_active = self.paginate_queryset(
queryset_documents_active.distinct(), self.request, view=self
)
documents_active = DocumentSerializer(results_documents_active, many=True).data
context["per_page"] = 10
context["documents_active"] = {
"documents_active_count": self.count,
"next": self.get_next_link(),
"previous": self.get_previous_link(),
"page_number": int(self.offset / 10) + 1,
"documents_active": documents_active,
}
queryset_documents_inactive = queryset.filter(status="inactive")
results_documents_inactive = self.paginate_queryset(
queryset_documents_inactive.distinct(), self.request, view=self
)
documents_inactive = DocumentSerializer(
results_documents_inactive, many=True
).data
context["documents_inactive"] = {
"documents_inactive_count": self.count,
"next": self.get_next_link(),
"previous": self.get_previous_link(),
"page_number": int(self.offset / 10) + 1,
"documents_inactive": documents_inactive,
}
context["users"] = UserSerializer(users, many=True).data
context["status_choices"] = Document.DOCUMENT_STATUS_CHOICE
return context
@swagger_auto_schema(
tags=["documents"], manual_parameters=swagger_params.document_get_params
)
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return Response(context)
@swagger_auto_schema(
tags=["documents"], manual_parameters=swagger_params.document_create_params
)
def post(self, request, *args, **kwargs):
params = request.query_params if len(request.data) == 0 else request.data
serializer = DocumentCreateSerializer(data=params, request_obj=request)
if serializer.is_valid():
doc = serializer.save(
created_by=request.user,
document_file=request.FILES.get("document_file"),
)
if params.get("shared_to"):
                assigned_to_users_ids = json.loads(params.get("shared_to"))
                for user_id in assigned_to_users_ids:
user = User.objects.filter(id=user_id)
if user.exists():
doc.shared_to.add(user_id)
else:
doc.delete()
return Response(
{"error": True, "errors": "Enter Valid User"},
status=status.HTTP_400_BAD_REQUEST,
)
if self.request.user.role == "ADMIN":
if params.get("teams"):
teams = json.loads(params.get("teams"))
for team in teams:
teams_ids = Teams.objects.filter(id=team)
if teams_ids.exists():
doc.teams.add(team)
else:
doc.delete()
return Response(
{"error": True, "errors": "Enter Valid Team"},
status=status.HTTP_400_BAD_REQUEST,
)
return Response(
{"error": False, "message": "Document Created Successfully"},
status=status.HTTP_201_CREATED,
)
return Response(
{"error": True, "errors": serializer.errors},
status=status.HTTP_400_BAD_REQUEST,
)
class DocumentDetailView(APIView):
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_object(self, pk):
return Document.objects.filter(id=pk).first()
@swagger_auto_schema(
tags=["documents"],
)
def get(self, request, pk, format=None):
self.object = self.get_object(pk)
if not self.object:
return Response(
{"error": True, "errors": "Document does not exist"},
status=status.HTTP_403_FORBIDDEN,
)
if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
if not (
(self.request.user == self.object.created_by)
or (self.request.user in self.object.shared_to.all())
):
return Response(
{
"error": True,
"errors": "You do not have Permission to perform this action",
},
status=status.HTTP_403_FORBIDDEN,
)
if request.user.role == "ADMIN" or request.user.is_superuser:
users = User.objects.filter(is_active=True).order_by("email")
else:
users = User.objects.filter(role="ADMIN").order_by("email")
context = {}
context.update(
{
"doc_obj": DocumentSerializer(self.object).data,
"file_type_code": self.object.file_type()[1],
"users": UserSerializer(users, many=True).data,
}
)
return Response(context, status=status.HTTP_200_OK)
@swagger_auto_schema(
tags=["documents"],
)
def delete(self, request, pk, format=None):
document = self.get_object(pk)
if not document:
return Response(
{"error": True, "errors": "Documdnt does not exist"},
status=status.HTTP_403_FORBIDDEN,
)
if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
if (
self.request.user != document.created_by
): # or (self.request.user not in document.shared_to.all()):
return Response(
{
"error": True,
"errors": "You do not have Permission to perform this action",
},
status=status.HTTP_403_FORBIDDEN,
)
document.delete()
return Response(
{"error": False, "message": "Document deleted Successfully"},
status=status.HTTP_200_OK,
)
@swagger_auto_schema(
tags=["documents"], manual_parameters=swagger_params.document_update_params
)
def put(self, request, pk, format=None):
self.object = self.get_object(pk)
params = request.query_params if len(request.data) == 0 else request.data
if not self.object:
return Response(
{"error": True, "errors": "Document does not exist"},
status=status.HTTP_403_FORBIDDEN,
)
if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
if not (
(self.request.user == self.object.created_by)
or (self.request.user in self.object.shared_to.all())
):
return Response(
{
"error": True,
"errors": "You do not have Permission to perform this action",
},
status=status.HTTP_403_FORBIDDEN,
)
serializer = DocumentCreateSerializer(
data=params, instance=self.object, request_obj=request
)
if serializer.is_valid():
doc = serializer.save(
document_file=request.FILES.get("document_file"),
status=params.get("status"),
)
doc.shared_to.clear()
if params.get("shared_to"):
                assigned_to_users_ids = json.loads(params.get("shared_to"))
                for user_id in assigned_to_users_ids:
user = User.objects.filter(id=user_id)
if user.exists():
doc.shared_to.add(user_id)
else:
return Response(
{"error": True, "errors": "Enter Valid User"},
status=status.HTTP_400_BAD_REQUEST,
)
if self.request.user.role == "ADMIN":
doc.teams.clear()
if params.get("teams"):
teams = json.loads(params.get("teams"))
for team in teams:
teams_ids = Teams.objects.filter(id=team)
if teams_ids.exists():
doc.teams.add(team)
else:
return Response(
{"error": True, "errors": "Enter Valid Team"},
status=status.HTTP_400_BAD_REQUEST,
)
return Response(
{"error": False, "message": "Document Updated Successfully"},
status=status.HTTP_200_OK,
)
return Response(
{"error": True, "errors": serializer.errors},
status=status.HTTP_400_BAD_REQUEST,
)
class ForgotPasswordView(APIView):
@swagger_auto_schema(
tags=["Auth"], manual_parameters=swagger_params.forgot_password_params
)
def post(self, request, format=None):
params = request.query_params if len(request.data) == 0 else request.data
serializer = ForgotPasswordSerializer(data=params)
if serializer.is_valid():
user = get_object_or_404(User, email=params.get("email"))
if not user.is_active:
return Response(
{"error": True, "errors": "Please activate account to proceed."},
status=status.HTTP_406_NOT_ACCEPTABLE,
)
protocol = self.request.scheme
send_email_to_reset_password.delay(
user.email, protocol=protocol, domain=settings.Domain
)
data = {
"error": False,
"message": "We have sent you an email. please reset password",
}
return Response(data, status=status.HTTP_200_OK)
else:
data = {"error": True, "errors": serializer.errors}
response_status = status.HTTP_400_BAD_REQUEST
return Response(data, status=response_status)
class ResetPasswordView(APIView):
@swagger_auto_schema(
tags=["Auth"], manual_parameters=swagger_params.reset_password_params
)
def post(self, request, uid, token, format=None):
params = request.query_params if len(request.data) == 0 else request.data
serializer = ResetPasswordSerailizer(data=params)
if serializer.is_valid():
password = params.get("new_password1")
user = serializer.user
user.set_password(password)
user.save()
            data = {
                "error": False,
                "message": "Password Updated Successfully. Please login",
            }
            return Response(data, status=status.HTTP_200_OK)
        else:
            data = {"error": True, "errors": serializer.errors}
            return Response(data, status=status.HTTP_400_BAD_REQUEST)
class UserStatusView(APIView):
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
@swagger_auto_schema(
tags=["Users"], manual_parameters=swagger_params.users_status_params
)
def post(self, request, pk, format=None):
if self.request.user.role != "ADMIN" and not self.request.user.is_superuser:
return Response(
{
"error": True,
"errors": "You do not have permission to perform this action",
},
status=status.HTTP_403_FORBIDDEN,
)
params = request.query_params if len(request.data) == 0 else request.data
user = User.objects.get(id=pk)
if params.get("status"):
            user_status = params.get("status")
            if user_status == "Active":
                user.is_active = True
            elif user_status == "Inactive":
                user.is_active = False
else:
return Response(
{"error": True, "errors": "Please enter Valid Status for user"},
status=status.HTTP_400_BAD_REQUEST,
)
user.save()
context = {}
users_Active = User.objects.filter(is_active=True)
users_Inactive = User.objects.filter(is_active=False)
context["Users_Active"] = UserSerializer(users_Active, many=True).data
context["Users_Inactive"] = UserSerializer(users_Inactive, many=True).data
return Response(context)
class DomainList(APIView):
model = APISettings
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
@swagger_auto_schema(
tags=["Settings"],
)
def get(self, request, *args, **kwargs):
api_settings = APISettings.objects.all()
users = User.objects.filter(is_active=True).order_by("email")
return Response(
{
"error": False,
"api_settings": APISettingsListSerializer(api_settings, many=True).data,
"users": UserSerializer(users, many=True).data,
},
status=status.HTTP_200_OK,
)
@swagger_auto_schema(
tags=["Settings"], manual_parameters=swagger_params.api_setting_create_params
)
def post(self, request, *args, **kwargs):
params = (
self.request.query_params
if len(self.request.data) == 0
else self.request.data
)
assign_to_list = []
if params.get("lead_assigned_to"):
assign_to_list = json.loads(params.get("lead_assigned_to"))
serializer = APISettingsSerializer(data=params)
if serializer.is_valid():
settings_obj = serializer.save(created_by=request.user)
if params.get("tags"):
tags = json.loads(params.get("tags"))
for tag in tags:
tag_obj = Tags.objects.filter(name=tag).first()
if not tag_obj:
tag_obj = Tags.objects.create(name=tag)
settings_obj.tags.add(tag_obj)
if assign_to_list:
settings_obj.lead_assigned_to.add(*assign_to_list)
return Response(
{"error": False, "message": "API key added sucessfully"},
status=status.HTTP_201_CREATED,
)
return Response(
{"error": True, "errors": serializer.errors},
status=status.HTTP_400_BAD_REQUEST,
)
class DomainDetailView(APIView):
model = APISettings
authentication_classes = (JSONWebTokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_object(self, pk):
return self.model.objects.get(pk=pk)
@swagger_auto_schema(
tags=["Settings"],
)
def get(self, request, pk, format=None):
api_setting = self.get_object(pk)
return Response(
{"error": False, "domain": APISettingsListSerializer(api_setting).data},
status=status.HTTP_200_OK,
)
@swagger_auto_schema(
tags=["Settings"], manual_parameters=swagger_params.api_setting_create_params
)
def put(self, request, pk, **kwargs):
api_setting = self.get_object(pk)
params = (
self.request.query_params
if len(self.request.data) == 0
else self.request.data
)
assign_to_list = []
if params.get("lead_assigned_to"):
assign_to_list = json.loads(params.get("lead_assigned_to"))
serializer = APISettingsSerializer(data=params, instance=api_setting)
if serializer.is_valid():
api_setting = serializer.save()
api_setting.tags.clear()
api_setting.lead_assigned_to.clear()
if params.get("tags"):
tags = json.loads(params.get("tags"))
for tag in tags:
tag_obj = Tags.objects.filter(name=tag).first()
if not tag_obj:
tag_obj = Tags.objects.create(name=tag)
api_setting.tags.add(tag_obj)
if assign_to_list:
api_setting.lead_assigned_to.add(*assign_to_list)
return Response(
{"error": False, "message": "API setting Updated sucessfully"},
status=status.HTTP_200_OK,
)
return Response(
{"error": True, "errors": serializer.errors},
status=status.HTTP_400_BAD_REQUEST,
)
@swagger_auto_schema(
tags=["Settings"],
)
def delete(self, request, pk, **kwargs):
api_setting = self.get_object(pk)
if api_setting:
api_setting.delete()
return Response(
{"error": False, "message": "API setting deleted sucessfully"},
status=status.HTTP_200_OK,
)
class ActivateUserView(APIView):
@swagger_auto_schema(
tags=["Auth"],
)
def post(self, request, uid, token, activation_key, format=None):
profile = get_object_or_404(Profile, activation_key=activation_key)
if profile.user:
if timezone.now() > profile.key_expires:
protocol = request.scheme
resend_activation_link_to_user.delay(
profile.user.email,
domain=settings.Domain,
protocol=protocol,
)
return Response(
{
"error": False,
"message": "Link expired. Please use the Activation link sent now to your mail.",
},
status=status.HTTP_406_NOT_ACCEPTABLE,
)
else:
try:
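                    # uid is the urlsafe base64-encoded user primary key from the activation link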
uid = force_text(urlsafe_base64_decode(uid))
user = User.objects.get(pk=uid)
except (TypeError, ValueError, OverflowError, User.DoesNotExist):
user = None
if user is not None and account_activation_token.check_token(
user, token
):
user.is_active = True
user.save()
return Response(
{
"error": False,
"message": "Thank you for your email confirmation. Now you can login to your account.",
},
status=status.HTTP_200_OK,
)
else:
return Response(
{"error": True, "errors": "Activation link is invalid!"},
status=status.HTTP_400_BAD_REQUEST,
)
class ResendActivationLinkView(APIView):
@swagger_auto_schema(
tags=["Auth"], manual_parameters=swagger_params.forgot_password_params
)
def post(self, request, format=None):
params = request.query_params if len(request.data) == 0 else request.data
user = get_object_or_404(User, email=params.get("email"))
if user.is_active:
return Response(
{"error": False, "message": "Account is active. Please login"},
status=status.HTTP_200_OK,
)
protocol = request.scheme
resend_activation_link_to_user.delay(
user.email,
domain=settings.Domain,
protocol=protocol,
)
data = {
"error": False,
"message": "Please use the Activation link sent to your mail to activate account.",
}
return Response(data, status=status.HTTP_200_OK)
|
py | b4008321f5ae104c1aa54217619e95ad8fc8e3e3 | import datetime
import numpy as np
import matplotlib.pyplot as plt
import pandas as pds
import OMMBV
import pysat
from OMMBV.tests.test_core import gen_data_fixed_alt, gen_trace_data_fixed_alt
from OMMBV.tests.test_core import gen_plot_grid_fixed_alt
from OMMBV.tests.test_core import dview, dc
class TestMaxApexHeight():
def test_plot_apex_heights(self):
"""Check meridional vector along max in apex height gradient"""
date = pysat.datetime(2010, 1, 1)
delta = 1.
ecef_x, ecef_y, ecef_z = OMMBV.geodetic_to_ecef([0.], [320.], [550.])
# get basis vectors
zx, zy, zz, _, _, _, mx, my, mz = OMMBV.calculate_mag_drift_unit_vectors_ecef(ecef_x, ecef_y, ecef_z,
[date], ecef_input=True)
# get apex height for step along meridional directions, then around that direction
_, _, _, _, _, nominal_max = OMMBV.apex_location_info(ecef_x + delta * mx,
ecef_y + delta * my,
ecef_z + delta * mz,
[date],
ecef_input=True,
return_geodetic=True)
steps = (np.arange(101) - 50.) * delta / 10000.
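        # 101 small offsets, roughly +/- 0.005*delta km, applied along the zonal direction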
output_max = []
for step in steps:
del_x = delta * mx + step * zx
del_y = delta * my + step * zy
del_z = delta * mz + step * zz
norm = np.sqrt(del_x ** 2 + del_y ** 2 + del_z ** 2)
del_x /= norm
del_y /= norm
del_z /= norm
_, _, _, _, _, loop_h = OMMBV.apex_location_info(ecef_x + del_x,
ecef_y + del_y,
ecef_z + del_z,
[date],
ecef_input=True,
return_geodetic=True)
output_max.append(loop_h)
try:
plt.figure()
plt.plot(steps, output_max)
plt.plot([0], nominal_max, color='r', marker='o', markersize=12)
plt.ylabel('Apex Height (km)')
plt.xlabel('Distance along Zonal Direction (km)')
plt.savefig('comparison_apex_heights_and_meridional.pdf')
plt.close()
except:
pass
# make sure meridional direction is correct
assert np.all(np.max(output_max) == nominal_max)
class TestApex():
def __init__(self):
# placeholder for data management features
self.inst = pysat.Instrument('pysat', 'testing')
self.inst.yr = 2010.
self.inst.doy = 1.
self.dview = dview
self.dc = dc
return
def test_apex_info_accuracy(self):
"""Characterize performance of apex_location_info as fine_step_size varied"""
lats, longs, alts = gen_trace_data_fixed_alt(550.)
ecf_x, ecf_y, ecf_z = OMMBV.geodetic_to_ecef(lats,
longs,
alts)
# step size to be tried
fine_steps_goal = np.array([25.6, 12.8, 6.4, 3.2, 1.6, 0.8, 0.4, 0.2,
0.1, 0.05, .025, .0125, .00625, .003125,
.0015625, .00078125, .000390625, .0001953125,
.0001953125 / 2., .0001953125 / 4., .0001953125 / 8.,
.0001953125 / 16., .0001953125 / 32., .0001953125 / 64.,
.0001953125 / 128., .0001953125 / 256., .0001953125 / 512.,
.0001953125 / 1024., .0001953125 / 2048., .0001953125 / 4096.])
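        # successive step sizes halve from 25.6 km down to roughly 4.8e-8 km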
date = datetime.datetime(2000, 1, 1)
dx = []
dy = []
dz = []
dh = []
# set up multi
if self.dc is not None:
import itertools
targets = itertools.cycle(dc.ids)
pending = []
for lat, lon, alt in zip(lats, longs, alts):
for steps in fine_steps_goal:
# iterate through target cyclicly and run commands
dview.targets = next(targets)
pending.append(dview.apply_async(OMMBV.apex_location_info, [lat],
[lon], [alt], [date], fine_step_size=steps,
return_geodetic=True))
out = []
for steps in fine_steps_goal:
# collect output
x, y, z, _, _, apex_height = pending.pop(0).get()
pt = [x[0], y[0], z[0], apex_height[0]]
out.append(pt)
final_pt = pds.DataFrame(out, columns=['x', 'y', 'z', 'h'])
dx.append(np.abs(final_pt.loc[1:, 'x'].values - final_pt.loc[:, 'x'].values[:-1]))
dy.append(np.abs(final_pt.loc[1:, 'y'].values - final_pt.loc[:, 'y'].values[:-1]))
dz.append(np.abs(final_pt.loc[1:, 'z'].values - final_pt.loc[:, 'z'].values[:-1]))
dh.append(np.abs(final_pt.loc[1:, 'h'].values - final_pt.loc[:, 'h'].values[:-1]))
else:
for lat, lon, alt in zip(lats, longs, alts):
out = []
for steps in fine_steps_goal:
x, y, z, _, _, apex_height = OMMBV.apex_location_info([lat], [lon], [alt], [date],
fine_step_size=steps,
return_geodetic=True)
pt = [x[0], y[0], z[0], apex_height[0]]
out.append(pt)
final_pt = pds.DataFrame(out, columns=['x', 'y', 'z', 'h'])
dx.append(np.abs(final_pt.loc[1:, 'x'].values - final_pt.loc[:, 'x'].values[:-1]))
dy.append(np.abs(final_pt.loc[1:, 'y'].values - final_pt.loc[:, 'y'].values[:-1]))
dz.append(np.abs(final_pt.loc[1:, 'z'].values - final_pt.loc[:, 'z'].values[:-1]))
dh.append(np.abs(final_pt.loc[1:, 'h'].values - final_pt.loc[:, 'h'].values[:-1]))
dx = pds.DataFrame(dx)
dy = pds.DataFrame(dy)
dz = pds.DataFrame(dz)
dh = pds.DataFrame(dh)
try:
plt.figure()
yerrx = np.nanstd(np.log10(dx), axis=0)
yerry = np.nanstd(np.log10(dy), axis=0)
yerrz = np.nanstd(np.log10(dz), axis=0)
yerrh = np.nanstd(np.log10(dh), axis=0)
plt.errorbar(np.log10(fine_steps_goal[1:]), np.log10(dx.mean(axis=0)),
yerr=yerrx,
label='x')
plt.errorbar(np.log10(fine_steps_goal[1:]), np.log10(dy.mean(axis=0)),
yerr=yerry,
label='y')
plt.errorbar(np.log10(fine_steps_goal[1:]), np.log10(dz.mean(axis=0)),
yerr=yerrz,
label='z')
plt.errorbar(np.log10(fine_steps_goal[1:]), np.log10(dh.mean(axis=0)),
yerr=yerrh,
label='h')
plt.xlabel('Log Step Size (km)')
plt.ylabel('Change in Apex Position (km)')
plt.title("Change in Field Apex Position vs Fine Step Size")
plt.legend()
plt.tight_layout()
plt.savefig('apex_location_vs_step_size.pdf')
plt.close()
except:
pass
def test_apex_plots(self):
"""Plot basic apex parameters"""
import matplotlib.pyplot as plt
p_lats, p_longs, p_alts = gen_plot_grid_fixed_alt(120.)
# data returned are the locations along each direction
# the full range of points obtained by iterating over all
# recasting alts into a more convenient form for later calculation
p_alts = [p_alts[0]] * len(p_longs)
# set the date
date = datetime.datetime(2000, 1, 1)
# memory for results
apex_lat = np.zeros((len(p_lats), len(p_longs) + 1))
apex_lon = np.zeros((len(p_lats), len(p_longs) + 1))
apex_alt = np.zeros((len(p_lats), len(p_longs) + 1))
# set up multi
if self.dc is not None:
import itertools
targets = itertools.cycle(dc.ids)
pending = []
for i, p_lat in enumerate(p_lats):
print (i, p_lat)
# iterate through target cyclicly and run commands
dview.targets = next(targets)
pending.append(dview.apply_async(OMMBV.apex_location_info, [p_lat] * len(p_longs), p_longs,
p_alts, [date] * len(p_longs),
return_geodetic=True))
for i, p_lat in enumerate(p_lats):
print ('collecting ', i, p_lat)
# collect output
x, y, z, olat, olon, oalt = pending.pop(0).get()
apex_lat[i, :-1] = olat
apex_lon[i, :-1] = olon
apex_alt[i, :-1] = oalt
else:
# single processor case
for i, p_lat in enumerate(p_lats):
print (i, p_lat)
x, y, z, olat, olon, oalt = OMMBV.apex_location_info([p_lat] * len(p_longs), p_longs,
p_alts, [date] * len(p_longs),
return_geodetic=True)
apex_lat[i, :-1] = olat
apex_lon[i, :-1] = olon
apex_alt[i, :-1] = oalt
# calculate difference between apex longitude and original longitude
# values for apex long are -180 to 180, shift to 0 to 360
# process degrees a bit to make the degree difference the most meaningful (close to 0)
idx, idy, = np.where(apex_lon < 0.)
apex_lon[idx, idy] += 360.
idx, idy, = np.where(apex_lon >= 360.)
apex_lon[idx, idy] -= 360.
apex_lon[:, :-1] -= p_longs
idx, idy, = np.where(apex_lon > 180.)
apex_lon[idx, idy] -= 360.
idx, idy, = np.where(apex_lon <= -180.)
apex_lon[idx, idy] += 360.
# account for periodicity
apex_lat[:, -1] = apex_lat[:, 0]
apex_lon[:, -1] = apex_lon[:, 0]
apex_alt[:, -1] = apex_alt[:, 0]
ytickarr = np.array([0, 0.25, 0.5, 0.75, 1]) * (len(p_lats) - 1)
xtickarr = np.array([0, 0.2, 0.4, 0.6, 0.8, 1]) * len(p_longs)
ytickvals = ['-25', '-12.5', '0', '12.5', '25']
try:
fig = plt.figure()
plt.imshow(apex_lat, origin='lower')
plt.colorbar()
plt.yticks(ytickarr, ytickvals)
plt.xticks(xtickarr, ['0', '72', '144', '216', '288', '360'])
plt.title('Apex Latitude (Degrees) at 120 km')
plt.xlabel('Geodetic Longitude (Degrees)')
plt.ylabel('Geodetic Latitude (Degrees)')
plt.savefig('apex_lat.pdf')
plt.close()
fig = plt.figure()
plt.imshow(apex_lon, origin='lower')
plt.colorbar()
plt.yticks(ytickarr, ytickvals)
plt.xticks(xtickarr, ['0', '72', '144', '216', '288', '360'])
plt.title('Apex Longitude Difference (Degrees) at 120 km')
plt.xlabel('Geodetic Longitude (Degrees)')
plt.ylabel('Geodetic Latitude (Degrees)')
plt.savefig('apex_lon.pdf')
plt.close()
fig = plt.figure()
plt.imshow(np.log10(apex_alt), origin='lower')
plt.colorbar()
plt.yticks(ytickarr, ytickvals)
plt.xticks(xtickarr, ['0', '72', '144', '216', '288', '360'])
plt.title('Log Apex Altitude (km) at 120 km')
plt.xlabel('Geodetic Longitude (Degrees)')
plt.ylabel('Geodetic Latitude (Degrees)')
plt.savefig('apex_alt.pdf')
plt.close()
except:
pass
def test_apex_diff_plots(self):
"""Uncertainty of apex location determination at default fine_step_size"""
import matplotlib.pyplot as plt
# on_travis = os.environ.get('ONTRAVIS') == 'True'
p_lats, p_longs, p_alts = gen_plot_grid_fixed_alt(550.)
# data returned are the locations along each direction
# the full range of points obtained by iterating over all
# recasting alts into a more convenient form for later calculation
p_alts = [p_alts[0]] * len(p_longs)
# set the date
date = datetime.datetime(2000, 1, 1)
# memory for results
apex_lat = np.zeros((len(p_lats), len(p_longs) + 1))
apex_lon = np.zeros((len(p_lats), len(p_longs) + 1))
apex_alt = np.zeros((len(p_lats), len(p_longs) + 1))
apex_z = np.zeros((len(p_lats), len(p_longs) + 1))
norm_alt = np.zeros((len(p_lats), len(p_longs) + 1))
# set up multi
if self.dc is not None:
import itertools
targets = itertools.cycle(dc.ids)
pending = []
for i, p_lat in enumerate(p_lats):
print (i, p_lat)
# iterate through target cyclicly and run commands
dview.targets = next(targets)
pending.append(dview.apply_async(OMMBV.apex_location_info, [p_lat] * len(p_longs), p_longs,
p_alts, [date] * len(p_longs),
fine_step_size=1.E-5,
return_geodetic=True))
pending.append(dview.apply_async(OMMBV.apex_location_info, [p_lat] * len(p_longs), p_longs,
p_alts, [date] * len(p_longs),
fine_step_size=5.E-6,
return_geodetic=True))
for i, p_lat in enumerate(p_lats):
print ('collecting ', i, p_lat)
# collect output
x, y, z, _, _, h = pending.pop(0).get()
x2, y2, z2, _, _, h2 = pending.pop(0).get()
apex_lat[i, :-1] = np.abs(x2 - x)
apex_lon[i, :-1] = np.abs(y2 - y)
apex_z[i, :-1] = np.abs(z2 - z)
apex_alt[i, :-1] = np.abs(h2 - h)
else:
# single processor case
for i, p_lat in enumerate(p_lats):
print (i, p_lat)
x, y, z, _, _, h = OMMBV.apex_location_info([p_lat] * len(p_longs), p_longs,
p_alts, [date] * len(p_longs),
fine_step_size=1.E-5, return_geodetic=True)
x2, y2, z2, _, _, h2 = OMMBV.apex_location_info([p_lat] * len(p_longs), p_longs,
p_alts, [date] * len(p_longs),
fine_step_size=5.E-6, return_geodetic=True)
norm_alt[i, :-1] = h
apex_lat[i, :-1] = np.abs(x2 - x)
apex_lon[i, :-1] = np.abs(y2 - y)
apex_z[i, :-1] = np.abs(z2 - z)
apex_alt[i, :-1] = np.abs(h2 - h)
# account for periodicity
apex_lat[:, -1] = apex_lat[:, 0]
apex_lon[:, -1] = apex_lon[:, 0]
apex_z[:, -1] = apex_z[:, 0]
apex_alt[:, -1] = apex_alt[:, 0]
norm_alt[:, -1] = norm_alt[:, 0]
idx, idy, = np.where(apex_lat > 10.)
print('Locations with large apex x (ECEF) location differences.', p_lats[idx], p_longs[idx])
ytickarr = np.array([0, 0.25, 0.5, 0.75, 1]) * (len(p_lats) - 1)
xtickarr = np.array([0, 0.2, 0.4, 0.6, 0.8, 1]) * len(p_longs)
ytickvals = ['-50', '-25', '0', '25', '50']
try:
fig = plt.figure()
plt.imshow(np.log10(apex_lat), origin='lower')
plt.colorbar()
plt.yticks(ytickarr, ytickvals)
plt.xticks(xtickarr, ['0', '72', '144', '216', '288', '360'])
plt.title('Log Apex Location Difference (ECEF-x km)')
plt.xlabel('Geodetic Longitude (Degrees)')
plt.ylabel('Geodetic Latitude (Degrees)')
plt.savefig('apex_loc_diff_x.pdf')
plt.close()
fig = plt.figure()
plt.imshow(np.log10(apex_lon), origin='lower')
plt.colorbar()
plt.yticks(ytickarr, ytickvals)
plt.xticks(xtickarr, ['0', '72', '144', '216', '288', '360'])
plt.title('Log Apex Location Difference (ECEF-y km)')
plt.xlabel('Geodetic Longitude (Degrees)')
plt.ylabel('Geodetic Latitude (Degrees)')
plt.savefig('apex_loc_diff_y.pdf')
plt.close()
fig = plt.figure()
plt.imshow(np.log10(apex_z), origin='lower')
plt.colorbar()
plt.yticks(ytickarr, ytickvals)
plt.xticks(xtickarr, ['0', '72', '144', '216', '288', '360'])
plt.title('Log Apex Location Difference (ECEF-z km)')
plt.xlabel('Geodetic Longitude (Degrees)')
plt.ylabel('Geodetic Latitude (Degrees)')
plt.savefig('apex_loc_diff_z.pdf')
plt.close()
fig = plt.figure()
plt.imshow(np.log10(apex_alt / norm_alt), origin='lower')
plt.colorbar()
plt.yticks(ytickarr, ytickvals)
plt.xticks(xtickarr, ['0', '72', '144', '216', '288', '360'])
plt.title('Log Apex Altitude Normalized Difference (km)')
plt.xlabel('Geodetic Longitude (Degrees)')
plt.ylabel('Geodetic Latitude (Degrees)')
plt.savefig('apex_norm_loc_diff_h.pdf')
plt.close()
fig = plt.figure()
plt.imshow(np.log10(apex_alt), origin='lower')
plt.colorbar()
plt.yticks(ytickarr, ytickvals)
plt.xticks(xtickarr, ['0', '72', '144', '216', '288', '360'])
plt.title('Log Apex Altitude Difference (km)')
plt.xlabel('Geodetic Longitude (Degrees)')
plt.ylabel('Geodetic Latitude (Degrees)')
plt.savefig('apex_loc_diff_h.pdf')
plt.close()
except:
pass
def test_apex_fine_max_step_diff_plots(self):
"""Test apex location info for sensitivity to fine_steps parameters"""
import matplotlib.pyplot as plt
# on_travis = os.environ.get('ONTRAVIS') == 'True'
p_lats, p_longs, p_alts = gen_plot_grid_fixed_alt(550.)
# data returned are the locations along each direction
# the full range of points obtained by iterating over all
# recasting alts into a more convenient form for later calculation
p_alts = [p_alts[0]] * len(p_longs)
# set the date
date = datetime.datetime(2000, 1, 1)
# memory for results
apex_lat = np.zeros((len(p_lats), len(p_longs) + 1))
apex_lon = np.zeros((len(p_lats), len(p_longs) + 1))
apex_alt = np.zeros((len(p_lats), len(p_longs) + 1))
apex_z = np.zeros((len(p_lats), len(p_longs) + 1))
norm_alt = np.zeros((len(p_lats), len(p_longs) + 1))
# set up multi
if self.dc is not None:
import itertools
targets = itertools.cycle(dc.ids)
pending = []
for i, p_lat in enumerate(p_lats):
print (i, p_lat)
# iterate through target cyclicly and run commands
dview.targets = next(targets)
pending.append(dview.apply_async(OMMBV.apex_location_info, [p_lat] * len(p_longs), p_longs,
p_alts, [date] * len(p_longs),
fine_max_steps=5,
return_geodetic=True))
pending.append(dview.apply_async(OMMBV.apex_location_info, [p_lat] * len(p_longs), p_longs,
p_alts, [date] * len(p_longs),
fine_max_steps=10,
return_geodetic=True))
for i, p_lat in enumerate(p_lats):
print ('collecting ', i, p_lat)
# collect output
x, y, z, _, _, h = pending.pop(0).get()
x2, y2, z2, _, _, h2 = pending.pop(0).get()
apex_lat[i, :-1] = np.abs(x2 - x)
apex_lon[i, :-1] = np.abs(y2 - y)
apex_z[i, :-1] = np.abs(z2 - z)
apex_alt[i, :-1] = np.abs(h2 - h)
else:
# single processor case
for i, p_lat in enumerate(p_lats):
print (i, p_lat)
x, y, z, _, _, h = OMMBV.apex_location_info([p_lat] * len(p_longs), p_longs,
p_alts, [date] * len(p_longs),
fine_max_steps=5, return_geodetic=True)
x2, y2, z2, _, _, h2 = OMMBV.apex_location_info([p_lat] * len(p_longs), p_longs,
p_alts, [date] * len(p_longs),
fine_max_steps=10, return_geodetic=True)
norm_alt[i, :-1] = h
apex_lat[i, :-1] = np.abs(x2 - x)
apex_lon[i, :-1] = np.abs(y2 - y)
apex_z[i, :-1] = np.abs(z2 - z)
apex_alt[i, :-1] = np.abs(h2 - h)
# account for periodicity
apex_lat[:, -1] = apex_lat[:, 0]
apex_lon[:, -1] = apex_lon[:, 0]
apex_z[:, -1] = apex_z[:, 0]
apex_alt[:, -1] = apex_alt[:, 0]
norm_alt[:, -1] = norm_alt[:, 0]
idx, idy, = np.where(apex_lat > 10.)
print('Locations with large apex x (ECEF) location differences.', p_lats[idx], p_longs[idx])
ytickarr = np.array([0, 0.25, 0.5, 0.75, 1]) * (len(p_lats) - 1)
xtickarr = np.array([0, 0.2, 0.4, 0.6, 0.8, 1]) * len(p_longs)
ytickvals = ['-50', '-25', '0', '25', '50']
try:
fig = plt.figure()
plt.imshow(np.log10(apex_lat), origin='lower')
plt.colorbar()
plt.yticks(ytickarr, ytickvals)
plt.xticks(xtickarr, ['0', '72', '144', '216', '288', '360'])
plt.title('Log Apex Location Difference (ECEF-x km)')
plt.xlabel('Geodetic Longitude (Degrees)')
plt.ylabel('Geodetic Latitude (Degrees)')
plt.savefig('apex_loc_max_steps_diff_x.pdf')
plt.close()
fig = plt.figure()
plt.imshow(np.log10(apex_lon), origin='lower')
plt.colorbar()
plt.yticks(ytickarr, ytickvals)
plt.xticks(xtickarr, ['0', '72', '144', '216', '288', '360'])
plt.title('Log Apex Location Difference (ECEF-y km)')
plt.xlabel('Geodetic Longitude (Degrees)')
plt.ylabel('Geodetic Latitude (Degrees)')
plt.savefig('apex_loc_max_steps_diff_y.pdf')
plt.close()
fig = plt.figure()
plt.imshow(np.log10(apex_z), origin='lower')
plt.colorbar()
plt.yticks(ytickarr, ytickvals)
plt.xticks(xtickarr, ['0', '72', '144', '216', '288', '360'])
plt.title('Log Apex Location Difference (ECEF-z km)')
plt.xlabel('Geodetic Longitude (Degrees)')
plt.ylabel('Geodetic Latitude (Degrees)')
plt.savefig('apex_loc_max_steps_diff_z.pdf')
plt.close()
fig = plt.figure()
plt.imshow(np.log10(apex_alt / norm_alt), origin='lower')
plt.colorbar()
plt.yticks(ytickarr, ytickvals)
plt.xticks(xtickarr, ['0', '72', '144', '216', '288', '360'])
plt.title('Log Apex Altitude Normalized Difference (km)')
plt.xlabel('Geodetic Longitude (Degrees)')
plt.ylabel('Geodetic Latitude (Degrees)')
plt.savefig('apex_norm_loc_max_steps_diff_h.pdf')
plt.close()
fig = plt.figure()
plt.imshow(np.log10(apex_alt), origin='lower')
plt.colorbar()
plt.yticks(ytickarr, ytickvals)
plt.xticks(xtickarr, ['0', '72', '144', '216', '288', '360'])
            plt.title('Log Apex Altitude Difference (km)')
plt.xlabel('Geodetic Longitude (Degrees)')
plt.ylabel('Geodetic Latitude (Degrees)')
plt.savefig('apex_loc_max_steps_diff_h.pdf')
plt.close()
except:
pass
def test_ecef_geodetic_apex_diff_plots(self):
"""Characterize uncertainty of ECEF and Geodetic transformations"""
import matplotlib.pyplot as plt
# on_travis = os.environ.get('ONTRAVIS') == 'True'
p_lats, p_longs, p_alts = gen_plot_grid_fixed_alt(550.)
# data returned are the locations along each direction
# the full range of points obtained by iterating over all
# recasting alts into a more convenient form for later calculation
p_alts = [p_alts[0]] * len(p_longs)
# set the date
date = datetime.datetime(2000, 1, 1)
# memory for results
apex_x = np.zeros((len(p_lats), len(p_longs) + 1))
apex_y = np.zeros((len(p_lats), len(p_longs) + 1))
apex_z = np.zeros((len(p_lats), len(p_longs) + 1))
apex_alt = np.zeros((len(p_lats), len(p_longs) + 1))
norm_alt = np.zeros((len(p_lats), len(p_longs) + 1))
# set up multi
if self.dc is not None:
import itertools
targets = itertools.cycle(dc.ids)
pending = []
for i, p_lat in enumerate(p_lats):
print (i, p_lat)
# iterate through target cyclicly and run commands
dview.targets = next(targets)
pending.append(dview.apply_async(OMMBV.geodetic_to_ecef, np.array([p_lat] * len(p_longs)), p_longs,
p_alts))
for i, p_lat in enumerate(p_lats):
print ('collecting ', i, p_lat)
# collect output
x, y, z = pending.pop(0).get()
# iterate through target cyclicly and run commands
dview.targets = next(targets)
pending.append(dview.apply_async(OMMBV.python_ecef_to_geodetic, x, y, z))
for i, p_lat in enumerate(p_lats):
print ('collecting 2', i, p_lat)
# collect output
lat2, lon2, alt2 = pending.pop(0).get()
# iterate through target cyclicly and run commands
dview.targets = next(targets)
pending.append(dview.apply_async(OMMBV.apex_location_info, np.array([p_lat] * len(p_longs)), p_longs,
p_alts, [date] * len(p_longs),
return_geodetic=True))
pending.append(dview.apply_async(OMMBV.apex_location_info, lat2, lon2, alt2,
[date] * len(p_longs),
return_geodetic=True))
for i, p_lat in enumerate(p_lats):
print ('collecting 3', i, p_lat)
x, y, z, _, _, h = pending.pop(0).get()
x2, y2, z2, _, _, h2 = pending.pop(0).get()
norm_alt[i, :-1] = np.abs(h)
apex_x[i, :-1] = np.abs(x2 - x)
apex_y[i, :-1] = np.abs(y2 - y)
apex_z[i, :-1] = np.abs(z2 - z)
apex_alt[i, :-1] = np.abs(h2 - h)
        else:
            # single processor case
            for i, p_lat in enumerate(p_lats):
                print (i, p_lat)
                x, y, z = OMMBV.geodetic_to_ecef([p_lat] * len(p_longs), p_longs, p_alts)
                lat2, lon2, alt2 = OMMBV.ecef_to_geodetic(x, y, z)
                # apex location from the original geodetic inputs
                x, y, z, _, _, h = OMMBV.apex_location_info([p_lat] * len(p_longs), p_longs,
                                                            p_alts, [date] * len(p_longs),
                                                            return_geodetic=True)
                # apex location after the ECEF -> geodetic round trip
                x2, y2, z2, _, _, h2 = OMMBV.apex_location_info(lat2, lon2, alt2,
                                                                [date] * len(p_longs),
                                                                return_geodetic=True)
                norm_alt[i, :-1] = np.abs(h)
                apex_x[i, :-1] = np.abs(x2 - x)
                apex_y[i, :-1] = np.abs(y2 - y)
                apex_z[i, :-1] = np.abs(z2 - z)
                apex_alt[i, :-1] = np.abs(h2 - h)
# account for periodicity
apex_x[:, -1] = apex_x[:, 0]
apex_y[:, -1] = apex_y[:, 0]
apex_z[:, -1] = apex_z[:, 0]
apex_alt[:, -1] = apex_alt[:, 0]
norm_alt[:, -1] = norm_alt[:, 0]
ytickarr = np.array([0, 0.25, 0.5, 0.75, 1]) * (len(p_lats) - 1)
xtickarr = np.array([0, 0.2, 0.4, 0.6, 0.8, 1]) * len(p_longs)
ytickvals = ['-50', '-25', '0', '25', '50']
try:
fig = plt.figure()
plt.imshow(np.log10(apex_x), origin='lower')
plt.colorbar()
plt.yticks(ytickarr, ytickvals)
plt.xticks(xtickarr, ['0', '72', '144', '216', '288', '360'])
plt.title('Log ECEF-Geodetic Apex Difference (ECEF-x km)')
plt.xlabel('Geodetic Longitude (Degrees)')
plt.ylabel('Geodetic Latitude (Degrees)')
plt.savefig('ecef_geodetic_apex_diff_x.pdf')
plt.close()
fig = plt.figure()
plt.imshow(np.log10(apex_y), origin='lower')
plt.colorbar()
plt.yticks(ytickarr, ytickvals)
plt.xticks(xtickarr, ['0', '72', '144', '216', '288', '360'])
plt.title('Log ECEF-Geodetic Apex Difference (ECEF-y km)')
plt.xlabel('Geodetic Longitude (Degrees)')
plt.ylabel('Geodetic Latitude (Degrees)')
plt.savefig('ecef_geodetic_apex_diff_y.pdf')
plt.close()
fig = plt.figure()
plt.imshow(np.log10(apex_z), origin='lower')
plt.colorbar()
plt.yticks(ytickarr, ytickvals)
plt.xticks(xtickarr, ['0', '72', '144', '216', '288', '360'])
plt.title('Log ECEF-Geodetic Apex Difference (ECEF-z km)')
plt.xlabel('Geodetic Longitude (Degrees)')
plt.ylabel('Geodetic Latitude (Degrees)')
plt.savefig('ecef_geodetic_apex_diff_z.pdf')
plt.close()
fig = plt.figure()
plt.imshow(np.log10(apex_alt), origin='lower')
plt.colorbar()
plt.yticks(ytickarr, ytickvals)
plt.xticks(xtickarr, ['0', '72', '144', '216', '288', '360'])
plt.title('Log ECEF-Geodetic Apex Altitude Difference (km)')
plt.xlabel('Geodetic Longitude (Degrees)')
plt.ylabel('Geodetic Latitude (Degrees)')
plt.savefig('ecef_geodetic_apex_diff_h.pdf')
plt.close()
fig = plt.figure()
plt.imshow(np.log10(apex_alt / norm_alt), origin='lower')
plt.colorbar()
plt.yticks(ytickarr, ytickvals)
plt.xticks(xtickarr, ['0', '72', '144', '216', '288', '360'])
plt.title('Log ECEF-Geodetic Apex Normalized Altitude Difference (km)')
plt.xlabel('Geodetic Longitude (Degrees)')
plt.ylabel('Geodetic Latitude (Degrees)')
plt.savefig('ecef_geodetic_apex_norm_diff_h.pdf')
plt.close()
except:
pass
|
py | b40083b16b89fdc410890cda2c3864e0d20bcd23 | import os
import json
import base64
import numpy as np
from collections import namedtuple
import tensorflow as tf
import tensorflowjs as tfjs
import tensorflowjs.converters.common as tfjs_common
from tensorflowjs.read_weights import read_weights
from google.protobuf.json_format import ParseDict, MessageToDict
TFJS_NODE_KEY = 'node'
TFJS_NODE_ATTR_KEY = 'attr'
TFJS_NODE_CONST_KEY = 'Const'
TFJS_NODE_PLACEHOLDER_KEY = 'Placeholder'
TFJS_ATTR_DTYPE_KEY = 'dtype'
TFJS_ATTR_SHAPE_KEY = 'shape'
TFJS_ATTR_VALUE_KEY = 'value'
TFJS_ATTR_STRING_VALUE_KEY = 's'
TFJS_ATTR_INT_VALUE_KEY = 'i'
TFJS_NAME_KEY = 'name'
TFJS_DATA_KEY = 'data'
def _parse_path_and_model_json(model_dir):
"""
Parse model directory name and return path and file name
Args:
model_dir: Model file path - either directory name or path + file name
Returns:
Tuple of directory name and model file name (without directory)
"""
if model_dir.endswith('.json'):
if not os.path.isfile(model_dir):
raise ValueError("Model not found: {}".format(model_dir))
return os.path.split(model_dir)
elif os.path.isdir(model_dir):
return model_dir, tfjs_common.ARTIFACT_MODEL_JSON_FILE_NAME
else:
raise ValueError("Model path is not a directory: {}".format(model_dir))
def _find_if_has_key(obj, key, of_type=None):
"""
Recursively find all objects with a given key in a dictionary
Args:
obj: Dictionary to search
key: Key to find
of_type: [optional] Type of the referenced item
Returns:
List of all objects that contain an item with the given key and matching type
"""
def children(item): return [
val for val in item.values() if isinstance(val, dict)]
found = []
stack = children(obj)
while len(stack) > 0:
item = stack.pop()
if key in item and (of_type is None or isinstance(item[key], of_type)):
found.append(item)
stack.extend(children(item))
return found
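# e.g. _find_if_has_key({'a': {'s': [104, 105]}, 'b': {'x': 1}}, key='s', of_type=list)
# returns [{'s': [104, 105]}]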
def _convert_string_attrs(node):
"""
Deep search string attributes (labelled "s" in GraphDef proto)
and convert ascii code lists to base64-encoded strings if necessary
"""
attr_key = TFJS_NODE_ATTR_KEY
str_key = TFJS_ATTR_STRING_VALUE_KEY
attrs = _find_if_has_key(node[attr_key], key=str_key, of_type=list)
for attr in attrs:
array = attr[str_key]
# check if conversion is actually necessary
if len(array) > 0 and isinstance(array, list) and isinstance(array[0], int):
string = ''.join(map(chr, array))
binary = string.encode('utf8')
attr[str_key] = base64.encodebytes(binary)
elif len(array) == 0:
attr[str_key] = None
return
def _fix_dilation_attrs(node):
"""
Search dilations-attribute and convert
    misaligned dilation rates if necessary; see
https://github.com/patlevin/tfjs-to-tf/issues/1
"""
path = ['attr', 'dilations', 'list']
values = node
for key in path:
if key in values:
values = values[key]
else:
values = None
break
# if dilations are present, they're stored in 'values' now
ints = TFJS_ATTR_INT_VALUE_KEY
if values is not None and ints in values and isinstance(values[ints], list):
v = values[ints]
if len(v) != 4:
# must be NCHW-formatted 4D tensor or else TF can't handle it
raise ValueError(
"Unsupported 'dilations'-attribute in node {}".format(node[
TFJS_NAME_KEY]))
# check for [>1,>1,1,1], which is likely a mistranslated [1,>1,>1,1]
if int(v[0], 10) > 1:
values[ints] = ['1', v[0], v[1], '1']
return
def _convert_attr_values(message_dict):
"""
Node attributes in deserialised JSON contain strings as lists of ascii codes.
The TF GraphDef proto expects these values to be base64 encoded so convert all
strings here.
"""
if TFJS_NODE_KEY in message_dict:
nodes = message_dict[TFJS_NODE_KEY]
for node in nodes:
_convert_string_attrs(node)
_fix_dilation_attrs(node)
return message_dict
def _convert_graph_def(message_dict):
"""
Convert JSON to TF GraphDef message
Args:
message_dict: deserialised JSON message
Returns:
TF GraphDef message
"""
message_dict = _convert_attr_values(message_dict)
return ParseDict(message_dict, tf.compat.v1.GraphDef())
def _convert_weight_list_to_dict(weight_list):
"""
Convert list of weight entries to dictionary
Args:
weight_list: List of numpy arrays or tensors formatted as
{'name': 'entry0', 'data': np.array([1,2,3], 'float32')}
Returns:
Dictionary that maps weight names to tensor data, e.g.
        {'entry0': np.array(...), 'entry1': np.array(...), ...}
"""
weight_dict = {}
for entry in weight_list:
weight_dict[entry[TFJS_NAME_KEY]] = entry[TFJS_DATA_KEY]
return weight_dict
def _create_graph(graph_def, weight_dict):
"""
Create a TF Graph from nodes
Args:
graph_def: TF GraphDef message containing the node graph
weight_dict: Dictionary from node names to tensor data
Returns:
TF Graph for inference or saving
"""
graph = tf.Graph()
with tf.compat.v1.Session(graph=graph):
for k, v in weight_dict.items():
weight_dict[k] = tf.convert_to_tensor(v)
tf.graph_util.import_graph_def(graph_def, weight_dict, name='')
return graph
def _convert_graph_model_to_graph(model_json, base_path):
"""
Convert TFJS JSON model to TF Graph
Args:
model_json: JSON dict from TFJS model file
base_path: Path to the model file (where to find the model weights)
Returns:
TF Graph for inference or saving
"""
if not tfjs_common.ARTIFACT_MODEL_TOPOLOGY_KEY in model_json:
raise ValueError("model_json is missing key '{}'".format(
tfjs_common.ARTIFACT_MODEL_TOPOLOGY_KEY))
topology = model_json[tfjs_common.ARTIFACT_MODEL_TOPOLOGY_KEY]
if not tfjs_common.ARTIFACT_WEIGHTS_MANIFEST_KEY in model_json:
raise ValueError("model_json is missing key '{}'".format(
tfjs_common.ARTIFACT_WEIGHTS_MANIFEST_KEY))
weights_manifest = model_json[tfjs_common.ARTIFACT_WEIGHTS_MANIFEST_KEY]
weight_list = read_weights(weights_manifest, base_path, flatten=True)
graph_def = _convert_graph_def(topology)
weight_dict = _convert_weight_list_to_dict(weight_list)
return _create_graph(graph_def, weight_dict)
def load_graph_model(model_dir):
"""
Load a TFJS Graph Model from a directory
Args:
model_dir: Directory that contains the tfjs model.json and weights;
alternatively name and path of the model.json if the name
differs from the default ("model.json")
Returns:
TF frozen graph for inference or saving
"""
model_path, model_name = _parse_path_and_model_json(model_dir)
model_file_path = os.path.join(model_path, model_name)
with open(model_file_path, "r") as f:
model_json = json.load(f)
return _convert_graph_model_to_graph(model_json, model_path)
_DTYPE_MAP = [
None,
np.float32,
np.float64,
np.int32,
np.uint8,
np.int16,
np.int8,
None,
np.complex64,
np.int64,
np.bool
]
NodeInfo = namedtuple('NodeInfo', 'name shape dtype tensor')
def _is_op_node(node):
return node.op not in (TFJS_NODE_CONST_KEY, TFJS_NODE_PLACEHOLDER_KEY)
def _op_nodes(graph_def):
return [node for node in graph_def.node if _is_op_node(node)]
def _map_type(type_id):
    if type_id < 0 or type_id >= len(_DTYPE_MAP):
raise ValueError("Unsupported data type: {}".format(type_id))
np_type = _DTYPE_MAP[type_id]
return np_type
def _get_shape(node):
def shape(attr): return attr.shape.dim
def size(dim): return dim.size if dim.size > 0 else None
return [size(dim) for dim in shape(node.attr[TFJS_ATTR_SHAPE_KEY])]
def _node_info(node):
def dtype(n): return _map_type(n.attr[TFJS_ATTR_DTYPE_KEY].type)
return NodeInfo(name=node.name, shape=_get_shape(node), dtype=dtype(node),
tensor=node.name + ':0')
def get_input_nodes(graph):
"""
Return information about a graph's inputs.
Arguments:
graph: Graph or GraphDef object
Returns:
List of NodeInfo objects holding name, shape, and type of the input
"""
if isinstance(graph, tf.Graph):
graph_def = graph.as_graph_def()
else:
graph_def = graph
    nodes = [n for n in graph_def.node if n.op in (
        TFJS_NODE_PLACEHOLDER_KEY,)]
return [_node_info(node) for node in nodes]
def get_output_nodes(graph):
"""
Return information about a graph's outputs.
Arguments:
graph: Graph or GraphDef object
Returns:
        List of NodeInfo objects holding name, shape, and type of the output;
shape will be left empty
"""
if isinstance(graph, tf.Graph):
graph_def = graph.as_graph_def()
else:
graph_def = graph
ops = _op_nodes(graph_def)
outputs = []
for i in range(0, len(ops)):
node = ops[i]
has_ref = False
for test in ops[i+1:]:
if node.name in test.input:
has_ref = True
break
if not has_ref:
outputs.append(node)
return [_node_info(node) for node in outputs]
def get_input_tensors(graph):
"""
Return the names of the graph's input tensors.
Arguments:
graph: Graph or GraphDef object
Returns:
List of tensor names
"""
return [node.tensor for node in get_input_nodes(graph)]
def get_output_tensors(graph):
"""
Return the names of the graph's output tensors.
Arguments:
graph: Graph or GraphDef object
Returns:
List of tensor names
"""
return [node.tensor for node in get_output_nodes(graph)]
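# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The model path
# and the input array below are placeholders, not real artifacts:
#
#   graph = load_graph_model('./models/my_model')      # dir containing model.json
#   inputs = get_input_tensors(graph)                  # e.g. ['image:0']
#   outputs = get_output_tensors(graph)                # e.g. ['Identity:0']
#   with tf.compat.v1.Session(graph=graph) as sess:
#       results = sess.run(outputs, feed_dict={inputs[0]: some_input_array})
# ---------------------------------------------------------------------------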
|
py | b4008462336e893bf8f0babfcc912477ae6a8435 | import typing
from qtpy import QtCore as QC
from qtpy import QtWidgets as QW
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusExceptions
from hydrus.core import HydrusGlobals as HG
from hydrus.client import ClientConstants as CC
from hydrus.client import ClientSearch
from hydrus.client.gui import ClientGUIRatings
from hydrus.client.gui import QtPorting as QP
from hydrus.client.gui.search import ClientGUIPredicatesSingle
from hydrus.client.gui.widgets import ClientGUICommon
from hydrus.client.metadata import ClientRatings
class PredicateSystemRatingLikeControl( QW.QWidget ):
def __init__( self, parent: QW.QWidget, service_key: bytes, predicate: typing.Optional[ ClientSearch.Predicate ] ):
QW.QWidget.__init__( self, parent )
self._service_key = service_key
service = HG.client_controller.services_manager.GetService( self._service_key )
name = service.GetName()
name_st = ClientGUICommon.BetterStaticText( self, name )
name_st.setAlignment( QC.Qt.AlignLeft | QC.Qt.AlignVCenter )
self._rated_checkbox = QW.QCheckBox( 'rated', self )
self._not_rated_checkbox = QW.QCheckBox( 'not rated', self )
self._rating_control = ClientGUIRatings.RatingLikeDialog( self, service_key )
#
if predicate is not None:
value = predicate.GetValue()
if value is not None:
( operator, rating, service_key ) = value
if rating == 'rated':
self._rated_checkbox.setChecked( True )
elif rating == 'not rated':
self._not_rated_checkbox.setChecked( True )
else:
if rating == 0:
self._rating_control.SetRatingState( ClientRatings.DISLIKE )
else:
self._rating_control.SetRatingState( ClientRatings.LIKE )
#
hbox = QP.HBoxLayout()
QP.AddToLayout( hbox, name_st, CC.FLAGS_EXPAND_BOTH_WAYS )
QP.AddToLayout( hbox, self._rated_checkbox, CC.FLAGS_CENTER_PERPENDICULAR )
QP.AddToLayout( hbox, self._not_rated_checkbox, CC.FLAGS_CENTER_PERPENDICULAR )
QP.AddToLayout( hbox, self._rating_control, CC.FLAGS_CENTER_PERPENDICULAR )
self.setLayout( hbox )
def GetPredicates( self ):
rating = None
if self._rated_checkbox.isChecked():
rating = 'rated'
elif self._not_rated_checkbox.isChecked():
rating = 'not rated'
else:
rating_state = self._rating_control.GetRatingState()
if rating_state == ClientRatings.LIKE:
rating = 1
elif rating_state == ClientRatings.DISLIKE:
rating = 0
if rating is None:
return []
else:
predicate = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_RATING, ( '=', rating, self._service_key ) )
return [ predicate ]
class PredicateSystemRatingNumericalControl( QW.QWidget ):
def __init__( self, parent: QW.QWidget, service_key: bytes, predicate: typing.Optional[ ClientSearch.Predicate ] ):
QW.QWidget.__init__( self, parent )
self._service_key = service_key
service = HG.client_controller.services_manager.GetService( self._service_key )
name = service.GetName()
name_st = ClientGUICommon.BetterStaticText( self, name )
name_st.setAlignment( QC.Qt.AlignLeft | QC.Qt.AlignVCenter )
self._rated_checkbox = QW.QCheckBox( 'rated', self )
self._not_rated_checkbox = QW.QCheckBox( 'not rated', self )
self._operator = QP.RadioBox( self, choices = [ '>', '<', '=', CC.UNICODE_ALMOST_EQUAL_TO ] )
self._rating_control = ClientGUIRatings.RatingNumericalDialog( self, service_key )
self._operator.Select( 2 )
#
if predicate is not None:
value = predicate.GetValue()
if value is not None:
( operator, rating, service_key ) = value
if rating == 'rated':
self._rated_checkbox.setChecked( True )
elif rating == 'not rated':
self._not_rated_checkbox.setChecked( True )
else:
self._operator.SetStringSelection( operator )
self._rating_control.SetRating( rating )
#
hbox = QP.HBoxLayout()
QP.AddToLayout( hbox, name_st, CC.FLAGS_EXPAND_BOTH_WAYS )
QP.AddToLayout( hbox, self._rated_checkbox, CC.FLAGS_CENTER_PERPENDICULAR )
QP.AddToLayout( hbox, self._not_rated_checkbox, CC.FLAGS_CENTER_PERPENDICULAR )
QP.AddToLayout( hbox, self._operator, CC.FLAGS_CENTER_PERPENDICULAR )
QP.AddToLayout( hbox, self._rating_control, CC.FLAGS_CENTER_PERPENDICULAR )
self.setLayout( hbox )
def GetPredicates( self ):
rating = None
if self._rated_checkbox.isChecked():
operator = '='
rating = 'rated'
elif self._not_rated_checkbox.isChecked():
operator = '='
rating = 'not rated'
elif self._rating_control.GetRatingState() != ClientRatings.NULL:
operator = self._operator.GetStringSelection()
rating = self._rating_control.GetRating()
if rating is None:
return []
else:
predicate = ClientSearch.Predicate( ClientSearch.PREDICATE_TYPE_SYSTEM_RATING, ( operator, rating, self._service_key ) )
return [ predicate ]
class PanelPredicateSystemMultiple( ClientGUIPredicatesSingle.PanelPredicateSystem ):
def _FilterWhatICanEdit( self, predicates: typing.Collection[ ClientSearch.Predicate ] ) -> typing.Collection[ ClientSearch.Predicate ]:
raise NotImplementedError()
def _GetPredicatesToInitialisePanelWith( self, predicates: typing.Collection[ ClientSearch.Predicate ] ) -> typing.Collection[ ClientSearch.Predicate ]:
raise NotImplementedError()
def ClearCustomDefault( self ):
raise NotImplementedError()
def GetDefaultPredicates( self ) -> typing.Collection[ ClientSearch.Predicate ]:
raise NotImplementedError()
def GetPredicates( self ):
raise NotImplementedError()
def SaveCustomDefault( self ):
raise NotImplementedError()
def SetPredicates( self, predicates: typing.Collection[ ClientSearch.Predicate ] ):
raise NotImplementedError()
def UsesCustomDefault( self ) -> bool:
raise NotImplementedError()
class PanelPredicateSystemRating( PanelPredicateSystemMultiple ):
def __init__( self, parent, predicates ):
PanelPredicateSystemMultiple.__init__( self, parent )
#
local_like_service_keys = HG.client_controller.services_manager.GetServiceKeys( ( HC.LOCAL_RATING_LIKE, ) )
self._like_checkboxes_to_info = {}
self._like_rating_ctrls = []
gridbox = QP.GridLayout( cols = 5 )
gridbox.setColumnStretch( 0, 1 )
predicates = self._GetPredicatesToInitialisePanelWith( predicates )
service_keys_to_predicates = { predicate.GetValue()[2] : predicate for predicate in predicates }
self._rating_panels = []
for service_key in local_like_service_keys:
if service_key in service_keys_to_predicates:
predicate = service_keys_to_predicates[ service_key ]
else:
predicate = None
panel = PredicateSystemRatingLikeControl( self, service_key, predicate )
self._rating_panels.append( panel )
#
local_numerical_service_keys = HG.client_controller.services_manager.GetServiceKeys( ( HC.LOCAL_RATING_NUMERICAL, ) )
self._numerical_checkboxes_to_info = {}
self._numerical_rating_ctrls_to_info = {}
for service_key in local_numerical_service_keys:
if service_key in service_keys_to_predicates:
predicate = service_keys_to_predicates[ service_key ]
else:
predicate = None
panel = PredicateSystemRatingNumericalControl( self, service_key, predicate )
self._rating_panels.append( panel )
#
vbox = QP.VBoxLayout()
for panel in self._rating_panels:
QP.AddToLayout( vbox, panel, CC.FLAGS_EXPAND_PERPENDICULAR )
self.setLayout( vbox )
def _FilterWhatICanEdit( self, predicates: typing.Collection[ ClientSearch.Predicate ] ) -> typing.Collection[ ClientSearch.Predicate ]:
local_rating_service_keys = HG.client_controller.services_manager.GetServiceKeys( ( HC.LOCAL_RATING_LIKE, HC.LOCAL_RATING_NUMERICAL ) )
good_predicates = []
for predicate in predicates:
value = predicate.GetValue()
if value is not None:
( operator, rating, service_key ) = value
if service_key in local_rating_service_keys:
good_predicates.append( predicate )
return good_predicates
def _GetPredicatesToInitialisePanelWith( self, predicates: typing.Collection[ ClientSearch.Predicate ] ) -> typing.Collection[ ClientSearch.Predicate ]:
predicates = self._FilterWhatICanEdit( predicates )
if len( predicates ) > 0:
return predicates
custom_default_predicates = HG.client_controller.new_options.GetCustomDefaultSystemPredicates( predicate_type = ClientSearch.PREDICATE_TYPE_SYSTEM_RATING )
custom_default_predicates = self._FilterWhatICanEdit( custom_default_predicates )
if len( custom_default_predicates ) > 0:
return custom_default_predicates
default_predicates = self.GetDefaultPredicates()
return default_predicates
def ClearCustomDefault( self ):
HG.client_controller.new_options.ClearCustomDefaultSystemPredicates( predicate_type = ClientSearch.PREDICATE_TYPE_SYSTEM_RATING )
def GetDefaultPredicates( self ):
return []
def GetPredicates( self ):
predicates = []
for panel in self._rating_panels:
predicates.extend( panel.GetPredicates() )
return predicates
def SaveCustomDefault( self ):
predicates = self.GetPredicates()
HG.client_controller.new_options.SetCustomDefaultSystemPredicates( predicate_type = ClientSearch.PREDICATE_TYPE_SYSTEM_RATING, predicates = predicates )
def UsesCustomDefault( self ) -> bool:
custom_default_predicates = HG.client_controller.new_options.GetCustomDefaultSystemPredicates( predicate_type = ClientSearch.PREDICATE_TYPE_SYSTEM_RATING )
custom_default_predicates = self._FilterWhatICanEdit( custom_default_predicates )
return len( custom_default_predicates ) > 0
|
py | b400849b2afaf4e5d5fe22b655392a54914772bf | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-12-17 15:48
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('trade', '0005_auto_20181217_1535'),
]
operations = [
migrations.RenameField(
model_name='ordergoods',
old_name='goods_num',
new_name='nums',
),
]
|
py | b4008532eefad021793c882524d45f256c856f72 | """
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.utils import DatabaseError as WrappedDatabaseError
from django.utils import six
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
try:
import psycopg2 as Database
import psycopg2.extensions
import psycopg2.extras
except ImportError as e:
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
def psycopg2_version():
version = psycopg2.__version__.split(' ', 1)[0]
return tuple(int(v) for v in version.split('.') if v.isdigit())
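# e.g. psycopg2.__version__ == '2.7.3.2 (dt dec pq3 ext lo64)' -> (2, 7, 3, 2)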
PSYCOPG2_VERSION = psycopg2_version()
if PSYCOPG2_VERSION < (2, 4, 5):
raise ImproperlyConfigured("psycopg2_version 2.4.5 or newer is required; you have %s" % psycopg2.__version__)
# Some of these import psycopg2, so import them after checking if it's installed.
from .client import DatabaseClient # NOQA isort:skip
from .creation import DatabaseCreation # NOQA isort:skip
from .features import DatabaseFeatures # NOQA isort:skip
from .introspection import DatabaseIntrospection # NOQA isort:skip
from .operations import DatabaseOperations # NOQA isort:skip
from .schema import DatabaseSchemaEditor # NOQA isort:skip
from .utils import utc_tzinfo_factory # NOQA isort:skip
from .version import get_version # NOQA isort:skip
if six.PY2:
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
psycopg2.extensions.register_adapter(SafeBytes, psycopg2.extensions.QuotedString)
psycopg2.extensions.register_adapter(SafeText, psycopg2.extensions.QuotedString)
psycopg2.extras.register_uuid()
# Register support for inet[] manually so we don't have to handle the Inet()
# object on load all the time.
INETARRAY_OID = 1041
INETARRAY = psycopg2.extensions.new_array_type(
(INETARRAY_OID,),
'INETARRAY',
psycopg2.extensions.UNICODE,
)
psycopg2.extensions.register_type(INETARRAY)
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'postgresql'
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'serial',
'BigAutoField': 'bigserial',
'BinaryField': 'bytea',
'BooleanField': 'boolean',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'timestamp with time zone',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'interval',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'inet',
'GenericIPAddressField': 'inet',
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer',
'PositiveSmallIntegerField': 'smallint',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
'UUIDField': 'uuid',
}
data_type_check_constraints = {
'PositiveIntegerField': '"%(column)s" >= 0',
'PositiveSmallIntegerField': '"%(column)s" >= 0',
}
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
pattern_ops = {
'contains': "LIKE '%%' || {} || '%%'",
'icontains': "LIKE '%%' || UPPER({}) || '%%'",
'startswith': "LIKE {} || '%%'",
'istartswith': "LIKE UPPER({}) || '%%'",
'endswith': "LIKE '%%' || {}",
'iendswith': "LIKE '%%' || UPPER({})",
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
def get_connection_params(self):
settings_dict = self.settings_dict
# None may be used to connect to the default 'postgres' db
if settings_dict['NAME'] == '':
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME value.")
conn_params = {
'database': settings_dict['NAME'] or 'postgres',
}
conn_params.update(settings_dict['OPTIONS'])
conn_params.pop('isolation_level', None)
if settings_dict['USER']:
conn_params['user'] = settings_dict['USER']
if settings_dict['PASSWORD']:
conn_params['password'] = force_str(settings_dict['PASSWORD'])
if settings_dict['HOST']:
conn_params['host'] = settings_dict['HOST']
if settings_dict['PORT']:
conn_params['port'] = settings_dict['PORT']
return conn_params
def get_new_connection(self, conn_params):
connection = Database.connect(**conn_params)
# self.isolation_level must be set:
# - after connecting to the database in order to obtain the database's
# default when no value is explicitly specified in options.
# - before calling _set_autocommit() because if autocommit is on, that
# will set connection.isolation_level to ISOLATION_LEVEL_AUTOCOMMIT.
options = self.settings_dict['OPTIONS']
try:
self.isolation_level = options['isolation_level']
except KeyError:
self.isolation_level = connection.isolation_level
else:
# Set the isolation level to the value from OPTIONS.
if self.isolation_level != connection.isolation_level:
connection.set_session(isolation_level=self.isolation_level)
return connection
def ensure_timezone(self):
self.ensure_connection()
conn_timezone_name = self.connection.get_parameter_status('TimeZone')
timezone_name = self.timezone_name
if timezone_name and conn_timezone_name != timezone_name:
with self.connection.cursor() as cursor:
cursor.execute(self.ops.set_time_zone_sql(), [timezone_name])
return True
return False
def init_connection_state(self):
self.connection.set_client_encoding('UTF8')
timezone_changed = self.ensure_timezone()
if timezone_changed:
# Commit after setting the time zone (see #17062)
if not self.get_autocommit():
self.connection.commit()
def create_cursor(self):
cursor = self.connection.cursor()
cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None
return cursor
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
        To check constraints, we set constraints to immediate. Then, when we're done,
        we must ensure they are returned to deferred.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
# Use a psycopg cursor directly, bypassing Django's utilities.
self.connection.cursor().execute("SELECT 1")
except Database.Error:
return False
else:
return True
@property
def _nodb_connection(self):
nodb_connection = super(DatabaseWrapper, self)._nodb_connection
try:
nodb_connection.ensure_connection()
except (Database.DatabaseError, WrappedDatabaseError):
warnings.warn(
"Normally Django will use a connection to the 'postgres' database "
"to avoid running initialization queries against the production "
"database when it's not needed (for example, when running tests). "
"Django was unable to create a connection to the 'postgres' database "
"and will use the default database instead.",
RuntimeWarning
)
settings_dict = self.settings_dict.copy()
settings_dict['NAME'] = settings.DATABASES[DEFAULT_DB_ALIAS]['NAME']
nodb_connection = self.__class__(
self.settings_dict.copy(),
alias=self.alias,
allow_thread_sharing=False)
return nodb_connection
@cached_property
def psycopg2_version(self):
return PSYCOPG2_VERSION
@cached_property
def pg_version(self):
with self.temporary_connection():
return get_version(self.connection)
|
py | b4008596ecdccf0829a844ff80e6ad465cb51ff8 | import os
import sys
import subprocess
from time import sleep
from os.path import exists, join, abspath
from shutil import rmtree
from tempfile import mkdtemp
from twisted.trial import unittest
from twisted.internet import defer
from scrapy.utils.python import retry_on_eintr
from scrapy.utils.test import get_testenv
from scrapy.utils.testsite import SiteTest
from scrapy.utils.testproc import ProcessTest
class ProjectTest(unittest.TestCase):
project_name = 'testproject'
def setUp(self):
self.temp_path = mkdtemp()
self.cwd = self.temp_path
self.proj_path = join(self.temp_path, self.project_name)
self.proj_mod_path = join(self.proj_path, self.project_name)
self.env = get_testenv()
def tearDown(self):
rmtree(self.temp_path)
def call(self, *new_args, **kwargs):
out = os.tmpfile()
args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
return subprocess.call(args, stdout=out, stderr=out, cwd=self.cwd, \
env=self.env, **kwargs)
def proc(self, *new_args, **kwargs):
args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
p = subprocess.Popen(args, cwd=self.cwd, env=self.env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
**kwargs)
waited = 0
interval = 0.2
while p.poll() is None:
sleep(interval)
waited += interval
if waited > 15:
p.kill()
assert False, 'Command took too much time to complete'
return p
class StartprojectTest(ProjectTest):
def test_startproject(self):
self.assertEqual(0, self.call('startproject', self.project_name))
assert exists(join(self.proj_path, 'scrapy.cfg'))
assert exists(join(self.proj_path, 'testproject'))
assert exists(join(self.proj_mod_path, '__init__.py'))
assert exists(join(self.proj_mod_path, 'items.py'))
assert exists(join(self.proj_mod_path, 'pipelines.py'))
assert exists(join(self.proj_mod_path, 'settings.py'))
assert exists(join(self.proj_mod_path, 'spiders', '__init__.py'))
self.assertEqual(1, self.call('startproject', self.project_name))
self.assertEqual(1, self.call('startproject', 'wrong---project---name'))
class CommandTest(ProjectTest):
def setUp(self):
super(CommandTest, self).setUp()
self.call('startproject', self.project_name)
self.cwd = join(self.temp_path, self.project_name)
self.env['SCRAPY_SETTINGS_MODULE'] = '%s.settings' % self.project_name
class GenspiderCommandTest(CommandTest):
def test_arguments(self):
# only pass one argument. spider script shouldn't be created
self.assertEqual(2, self.call('genspider', 'test_name'))
assert not exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
# pass two arguments <name> <domain>. spider script should be created
self.assertEqual(0, self.call('genspider', 'test_name', 'test.com'))
assert exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
def test_template(self, tplname='crawl'):
args = ['--template=%s' % tplname] if tplname else []
spname = 'test_spider'
p = self.proc('genspider', spname, 'test.com', *args)
out = retry_on_eintr(p.stdout.read)
self.assert_("Created spider %r using template %r in module" % (spname, tplname) in out)
self.assert_(exists(join(self.proj_mod_path, 'spiders', 'test_spider.py')))
p = self.proc('genspider', spname, 'test.com', *args)
out = retry_on_eintr(p.stdout.read)
self.assert_("Spider %r already exists in module" % spname in out)
def test_template_basic(self):
self.test_template('basic')
def test_template_csvfeed(self):
self.test_template('csvfeed')
def test_template_xmlfeed(self):
self.test_template('xmlfeed')
def test_list(self):
self.assertEqual(0, self.call('genspider', '--list'))
def test_dump(self):
self.assertEqual(0, self.call('genspider', '--dump=basic'))
self.assertEqual(0, self.call('genspider', '-d', 'basic'))
def test_same_name_as_project(self):
self.assertEqual(2, self.call('genspider', self.project_name))
assert not exists(join(self.proj_mod_path, 'spiders', '%s.py' % self.project_name))
class MiscCommandsTest(CommandTest):
def test_list(self):
self.assertEqual(0, self.call('list'))
class RunSpiderCommandTest(CommandTest):
def test_runspider(self):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
fname = abspath(join(tmpdir, 'myspider.py'))
with open(fname, 'w') as f:
f.write("""
from scrapy import log
from scrapy.spider import Spider
class MySpider(Spider):
name = 'myspider'
def start_requests(self):
self.log("It Works!")
return []
""")
p = self.proc('runspider', fname)
log = p.stderr.read()
self.assert_("[myspider] DEBUG: It Works!" in log, log)
self.assert_("[myspider] INFO: Spider opened" in log, log)
self.assert_("[myspider] INFO: Closing spider (finished)" in log, log)
self.assert_("[myspider] INFO: Spider closed (finished)" in log, log)
def test_runspider_no_spider_found(self):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
fname = abspath(join(tmpdir, 'myspider.py'))
with open(fname, 'w') as f:
f.write("""
from scrapy import log
from scrapy.spider import Spider
""")
p = self.proc('runspider', fname)
log = p.stderr.read()
self.assert_("No spider found in file" in log)
def test_runspider_file_not_found(self):
p = self.proc('runspider', 'some_non_existent_file')
log = p.stderr.read()
self.assert_("File not found: some_non_existent_file" in log)
def test_runspider_unable_to_load(self):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
fname = abspath(join(tmpdir, 'myspider.txt'))
with open(fname, 'w') as f:
f.write("")
p = self.proc('runspider', fname)
log = p.stderr.read()
self.assert_("Unable to load" in log)
class ParseCommandTest(ProcessTest, SiteTest, CommandTest):
command = 'parse'
def setUp(self):
super(ParseCommandTest, self).setUp()
self.spider_name = 'parse_spider'
fname = abspath(join(self.proj_mod_path, 'spiders', 'myspider.py'))
with open(fname, 'w') as f:
f.write("""
from scrapy import log
from scrapy.spider import Spider
from scrapy.item import Item
class MySpider(Spider):
name = '{0}'
def parse(self, response):
if getattr(self, 'test_arg', None):
self.log('It Works!')
return [Item()]
""".format(self.spider_name))
fname = abspath(join(self.proj_mod_path, 'pipelines.py'))
with open(fname, 'w') as f:
f.write("""
from scrapy import log
class MyPipeline(object):
component_name = 'my_pipeline'
def process_item(self, item, spider):
log.msg('It Works!')
return item
""")
fname = abspath(join(self.proj_mod_path, 'settings.py'))
with open(fname, 'a') as f:
f.write("""
ITEM_PIPELINES = {'%s.pipelines.MyPipeline': 1}
""" % self.project_name)
@defer.inlineCallbacks
def test_spider_arguments(self):
_, _, stderr = yield self.execute(['--spider', self.spider_name,
'-a', 'test_arg=1',
'-c', 'parse',
self.url('/html')])
self.assert_("[parse_spider] DEBUG: It Works!" in stderr, stderr)
@defer.inlineCallbacks
def test_pipelines(self):
_, _, stderr = yield self.execute(['--spider', self.spider_name,
'--pipelines',
'-c', 'parse',
self.url('/html')])
self.assert_("[scrapy] INFO: It Works!" in stderr, stderr)
class BenchCommandTest(CommandTest):
def test_run(self):
p = self.proc('bench', '-s', 'LOGSTATS_INTERVAL=0.001',
'-s', 'CLOSESPIDER_TIMEOUT=0.01')
log = p.stderr.read()
self.assert_('INFO: Crawled' in log, log)
|
py | b4008597bf6391e4328c19a3e44138c7a196b6e7 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-23 14:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('monolith', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='pkginfo',
name='requires',
field=models.ManyToManyField(related_name='_pkginfo_requires_+', to='monolith.PkgInfoName'),
),
migrations.AddField(
model_name='pkginfo',
name='update_for',
field=models.ManyToManyField(related_name='_pkginfo_update_for_+', to='monolith.PkgInfoName'),
),
]
|
py | b40085b00862b1328510d7ccb6c9507c18ad37e2 | #
# PySNMP MIB module QLGC-adapterInfo-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/QLGC-adapterInfo-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:35:13 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint")
InetAddressIPv6, = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressIPv6")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ObjectIdentity, iso, NotificationType, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, Unsigned32, MibIdentifier, enterprises, Counter64, ModuleIdentity, Gauge32, TimeTicks, Bits, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "iso", "NotificationType", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "Unsigned32", "MibIdentifier", "enterprises", "Counter64", "ModuleIdentity", "Gauge32", "TimeTicks", "Bits", "Integer32")
TextualConvention, DisplayString, PhysAddress = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "PhysAddress")
qlogic = MibIdentifier((1, 3, 6, 1, 4, 1, 3873))
enet = MibIdentifier((1, 3, 6, 1, 4, 1, 3873, 1))
qlasp = MibIdentifier((1, 3, 6, 1, 4, 1, 3873, 1, 2))
ifControllers = MibIdentifier((1, 3, 6, 1, 4, 1, 3873, 1, 3))
qlaspConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 3873, 1, 2, 1))
qlaspStat = MibIdentifier((1, 3, 6, 1, 4, 1, 3873, 1, 2, 2))
qlaspTrap = MibIdentifier((1, 3, 6, 1, 4, 1, 3873, 1, 2, 3))
ifiNumber = MibScalar((1, 3, 6, 1, 4, 1, 3873, 1, 3, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifiNumber.setStatus('mandatory')
ifiTable = MibTable((1, 3, 6, 1, 4, 1, 3873, 1, 3, 2), )
if mibBuilder.loadTexts: ifiTable.setStatus('mandatory')
ifiEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3873, 1, 3, 2, 1), ).setIndexNames((0, "QLGC-adapterInfo-MIB", "ifiIndex"))
if mibBuilder.loadTexts: ifiEntry.setStatus('mandatory')
ifiIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3873, 1, 3, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)))
if mibBuilder.loadTexts: ifiIndex.setStatus('mandatory')
ifName = MibTableColumn((1, 3, 6, 1, 4, 1, 3873, 1, 3, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifName.setStatus('mandatory')
ifiDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 3873, 1, 3, 2, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifiDescr.setStatus('mandatory')
ifNetworkAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 3873, 1, 3, 2, 1, 4), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifNetworkAddress.setStatus('mandatory')
ifSubnetMask = MibTableColumn((1, 3, 6, 1, 4, 1, 3873, 1, 3, 2, 1, 5), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifSubnetMask.setStatus('mandatory')
ifiPhysAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 3873, 1, 3, 2, 1, 6), PhysAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifiPhysAddress.setStatus('mandatory')
ifPermPhysAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 3873, 1, 3, 2, 1, 7), PhysAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifPermPhysAddress.setStatus('mandatory')
ifLinkStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 3873, 1, 3, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("link-up", 1), ("link-fail", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifLinkStatus.setStatus('mandatory')
ifState = MibTableColumn((1, 3, 6, 1, 4, 1, 3873, 1, 3, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("normal-mode", 1), ("diagnotic-mode", 2), ("adapter-removed", 3), ("lowpower-mode", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifState.setStatus('mandatory')
ifLineSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 3873, 1, 3, 2, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("unknown", 1), ("speed-10-Mbps", 2), ("speed-100-Mbps", 3), ("speed-1000-Mbps", 4), ("speed-2500-Mbps", 5), ("speed-10-Gbps", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifLineSpeed.setStatus('mandatory')
ifDuplexMode = MibTableColumn((1, 3, 6, 1, 4, 1, 3873, 1, 3, 2, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unknown", 1), ("half-duplex", 2), ("full-duplex", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifDuplexMode.setStatus('mandatory')
ifMemBaseLow = MibTableColumn((1, 3, 6, 1, 4, 1, 3873, 1, 3, 2, 1, 12), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifMemBaseLow.setStatus('mandatory')
ifMemBaseHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 3873, 1, 3, 2, 1, 13), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifMemBaseHigh.setStatus('mandatory')
ifInterrupt = MibTableColumn((1, 3, 6, 1, 4, 1, 3873, 1, 3, 2, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifInterrupt.setStatus('mandatory')
ifBusNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 3873, 1, 3, 2, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifBusNumber.setStatus('mandatory')
ifDeviceNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 3873, 1, 3, 2, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifDeviceNumber.setStatus('mandatory')
ifFunctionNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 3873, 1, 3, 2, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifFunctionNumber.setStatus('mandatory')
ifIpv6NetworkAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 3873, 1, 3, 2, 1, 18), InetAddressIPv6()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ifIpv6NetworkAddress.setStatus('mandatory')
mibBuilder.exportSymbols("QLGC-adapterInfo-MIB", ifInterrupt=ifInterrupt, ifIpv6NetworkAddress=ifIpv6NetworkAddress, qlaspStat=qlaspStat, ifiIndex=ifiIndex, ifMemBaseHigh=ifMemBaseHigh, ifiDescr=ifiDescr, ifiNumber=ifiNumber, ifLineSpeed=ifLineSpeed, ifiTable=ifiTable, ifiEntry=ifiEntry, ifState=ifState, ifiPhysAddress=ifiPhysAddress, ifFunctionNumber=ifFunctionNumber, ifName=ifName, ifControllers=ifControllers, qlasp=qlasp, ifNetworkAddress=ifNetworkAddress, qlogic=qlogic, ifDuplexMode=ifDuplexMode, ifBusNumber=ifBusNumber, ifLinkStatus=ifLinkStatus, enet=enet, ifMemBaseLow=ifMemBaseLow, ifPermPhysAddress=ifPermPhysAddress, qlaspTrap=qlaspTrap, ifSubnetMask=ifSubnetMask, ifDeviceNumber=ifDeviceNumber, qlaspConfig=qlaspConfig)
|
py | b400861a77cd651f90b6463e529c8cf975275012 | # Generated by Django 4.0.2 on 2022-03-16 16:12
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0003_rename_building_address_building_address'),
]
operations = [
migrations.RemoveField(
model_name='office',
name='layout',
),
migrations.AddField(
model_name='desk',
name='x_pos_px',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='desk',
name='x_size_m',
field=models.FloatField(default=0.0),
),
migrations.AddField(
model_name='desk',
name='y_pos_px',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='desk',
name='y_size_m',
field=models.FloatField(default=0.0),
),
migrations.AddField(
model_name='office',
name='desk_ids',
field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveIntegerField(), blank=True, null=True, size=None),
),
migrations.AddField(
model_name='office',
name='x_size_m',
field=models.FloatField(default=0.0),
),
migrations.AddField(
model_name='office',
name='y_size_m',
field=models.FloatField(default=0.0),
),
]
|
py | b40088076ff81fe84baa092055aaf84b08174468 | import tensorflow as tf
from tensorflow.python.training.summary_io import SummaryWriterCache
from luminoth.utils.image_vis import image_vis_summaries
class ImageVisHook(tf.train.SessionRunHook):
def __init__(self, prediction_dict, image, config=None, gt_bboxes=None,
every_n_steps=None, every_n_secs=None, output_dir=None,
summary_writer=None, image_visualization_mode=None):
super(ImageVisHook, self).__init__()
if (every_n_secs is None) == (every_n_steps is None):
raise ValueError(
'Only one of "every_n_secs" and "every_n_steps" must be '
'provided.')
if output_dir is None and summary_writer is None:
tf.logging.warning(
'ImageVisHook is not saving summaries. One of "output_dir" '
'and "summary_writer" must be provided')
self._timer = tf.train.SecondOrStepTimer(
every_steps=every_n_steps, every_secs=every_n_secs)
self._prediction_dict = prediction_dict
self._config = config
self._output_dir = output_dir
self._summary_writer = summary_writer
self._image_visualization_mode = image_visualization_mode
self._image = image
self._gt_bboxes = gt_bboxes
tf.logging.info('ImageVisHook was created with mode = "{}"'.format(
image_visualization_mode
))
def begin(self):
if self._summary_writer is None and self._output_dir:
self._summary_writer = SummaryWriterCache.get(self._output_dir)
self._next_step = None
self._global_step = tf.train.get_global_step()
if self._global_step is None:
raise RuntimeError('Global step must be created for ImageVisHook.')
def before_run(self, run_context):
fetches = {'global_step': self._global_step}
self._draw_images = (
self._next_step is None or
self._timer.should_trigger_for_step(self._next_step)
)
if self._draw_images:
fetches['prediction_dict'] = self._prediction_dict
fetches['image'] = self._image
if self._gt_bboxes is not None:
fetches['gt_bboxes'] = self._gt_bboxes
return tf.train.SessionRunArgs(fetches)
def after_run(self, run_context, run_values):
results = run_values.results
global_step = results.get('global_step')
if self._draw_images:
self._timer.update_last_triggered_step(global_step)
prediction_dict = results.get('prediction_dict')
if prediction_dict is not None:
summaries = image_vis_summaries(
prediction_dict, config=self._config,
image_visualization_mode=self._image_visualization_mode,
image=results.get('image'),
gt_bboxes=results.get('gt_bboxes')
)
if self._summary_writer is not None:
for summary in summaries:
self._summary_writer.add_summary(summary, global_step)
self._next_step = global_step + 1
def end(self, session=None):
if self._summary_writer:
self._summary_writer.flush()
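# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the hook is attached to
# training like any other tf.train.SessionRunHook. `prediction_dict`, `image`,
# `gt_bboxes`, `model_config`, `job_dir` and `train_op` are assumed to come
# from the surrounding training code, a global step must already exist, and
# the mode string is just an example value.
#
#   hook = ImageVisHook(
#       prediction_dict, image, gt_bboxes=gt_bboxes, config=model_config,
#       output_dir=job_dir, every_n_steps=100,
#       image_visualization_mode='debug',
#   )
#   with tf.train.MonitoredTrainingSession(hooks=[hook]) as sess:
#       while not sess.should_stop():
#           sess.run(train_op)
# ---------------------------------------------------------------------------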
|
py | b4008997d9043b5281a806ff0519cc17eb4dbcd3 | from django.db import models
class Topic(models.Model):
name = models.CharField(max_length=264, unique=True)
def __str__(self):
return self.name
class Webpage(models.Model):
topic = models.ForeignKey('Topic', on_delete=models.PROTECT)
name = models.CharField(max_length=264, unique=True)
url = models.URLField(unique=True)
def __str__(self):
return self.name
class AccessRecord(models.Model):
name = models.ForeignKey(Webpage, on_delete=models.PROTECT)
date = models.DateField()
def __str__(self):
return str(self.date)
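# Illustrative usage sketch (not part of the original file); the values are
# made-up examples:
#
#   topic = Topic.objects.create(name="Search Engines")
#   page = Webpage.objects.create(topic=topic, name="Example", url="https://example.com")
#   AccessRecord.objects.create(name=page, date="2024-01-01")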
|
py | b40089b4591ebf7c3633a1d21376939288091d73 | import logging
import sys
import traceback
from config import Config
from notifications import NotificationManager
import sites
logging.basicConfig(
format="%(asctime)s [%(levelname)s] %(message)s",
level=logging.INFO,
datefmt="%Y/%m/%d %H:%M:%S",
handlers=[
#logging.FileHandler(filename="enum.log", mode="a"),
logging.StreamHandler()
]
)
defang = lambda u: u.replace("http", "hxxp").replace(".onion", "[.]onion")
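# e.g. defang("http://example.onion/leak") -> "hxxp://example[.]onion/leak"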
def main(argv):
logging.info("Initializing")
sites_to_analyze = [
sites.Avaddon,
sites.Conti,
sites.DarkSide,
sites.REvil,
sites.Babuk,
sites.Ranzy,
sites.Astro,
sites.Pay2Key,
sites.Cuba,
sites.RansomEXX,
sites.Mount,
sites.Ragnarok,
sites.Ragnar,
sites.Suncrypt,
sites.Everest,
sites.Nefilim,
sites.Cl0p,
sites.Pysa
]
logging.info(f"Found {len(sites_to_analyze)} sites")
for site in sites_to_analyze:
logging.info(f"Starting process for {site.actor}")
if site.actor.lower() not in Config["sites"]:
logging.warning(f"No URL found in config for this actor, skipping")
continue
s = site(Config["sites"][site.actor.lower()])
if not s.is_up:
logging.warning(f"{site.actor} is down, notifying + skipping")
NotificationManager.send_site_down_notification(s.site)
continue
if s.first_run:
logging.info(f"This is the first scrape for {site.actor}, no victim notifications will be sent")
logging.info(f"Scraping victims")
try:
s.scrape_victims()
except:
logging.error(f"Got an error while scraping {site.actor}, notifying")
tb = traceback.format_exc()
# send error notifications
NotificationManager.send_error_notification(f"{site.actor} scraping", tb)
# log exception
logging.error(tb.strip()) # there is a trailing newline
# close db session
s.session.close()
# skip the rest of the site since the data may be messed up
continue
logging.info(f"There are {len(s.new_victims)} new victims")
# send notifications for new victims
if not s.first_run and len(s.new_victims) > 0:
logging.info("Notifying for new victims")
for v in s.new_victims:
NotificationManager.send_new_victim_notification(v)
logging.info(f"Identifying removed victims")
removed = s.identify_removed_victims()
logging.info(f"There are {len(removed)} removed victims")
# send notifications for removed victims
if not s.first_run and len(removed) > 0:
logging.info("Notifying for removed victims")
for v in removed:
NotificationManager.send_victim_removed_notification(v)
# close db session
s.session.close()
logging.info(f"Finished {site.actor}")
logging.info("Finished all sites, exiting")
if __name__ == "__main__":
try:
main(sys.argv)
except:
logging.error(f"Got a fatal error, notifying + aborting")
tb = traceback.format_exc()
# send slack error notifications
NotificationManager.send_error_notification(f"Non-scraping failure", tb, fatal=True)
# log exception
logging.error(tb.strip()) # there is a trailing newline
|
py | b4008a02bd38aaa8c1349f15f5e5d8160063071f | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from .discrete_c51dqn import DiscreteC51DQN
from .discrete_dqn import DiscreteDQN
from .discrete_qrdqn import DiscreteQRDQN
__all__ = ["DiscreteC51DQN", "DiscreteDQN", "DiscreteQRDQN"]
|
py | b4008b09a6821b05b6fbfde9f679db5f9cb4cde3 | from flaky import (
flaky,
)
@flaky(max_runs=3)
def test_miner_hashrate(webu_empty, wait_for_miner_start):
webu = webu_empty
hashrate = webu.miner.hashrate
assert hashrate > 0
|
py | b4008c33f7b46d7123c2a4f230358f5fc4341236 | import win32com
import win32gui
import win32process
hwnd = win32gui.GetForegroundWindow()
tid, pid = win32process.GetWindowThreadProcessId(hwnd)  # returns (thread id, process id)
print(pid)
|
py | b4008c773c6db7f87fb79ff9590b346bb6e62a79 | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# service type constants:
CORE = "CORE"
DUMMY = "DUMMY"
VNFM = "VNFM"
NFVO = "NFVO"
COMMONSERVICES = "COMMONSERVICES"
COMMON_PREFIXES = {
CORE: "",
DUMMY: "/dummy_svc",
VNFM: "",
NFVO: "",
COMMONSERVICES: ""
}
# Service operation status constants
ACTIVE = "ACTIVE"
DOWN = "DOWN"
PENDING_CREATE = "PENDING_CREATE"
PENDING_UPDATE = "PENDING_UPDATE"
PENDING_DELETE = "PENDING_DELETE"
PENDING_SCALE_IN = "PENDING_SCALE_IN"
PENDING_SCALE_OUT = "PENDING_SCALE_OUT"
INACTIVE = "INACTIVE"
DEAD = "DEAD"
ERROR = "ERROR"
ACTIVE_PENDING_STATUSES = (
ACTIVE,
PENDING_CREATE,
PENDING_UPDATE
)
POLICY_SCALING = 'tosca.policies.tacker.Scaling'
POLICY_SCALING_ACTIONS = (ACTION_SCALE_OUT,
ACTION_SCALE_IN) = ('out', 'in')
POLICY_ACTIONS = {POLICY_SCALING: POLICY_SCALING_ACTIONS}
POLICY_ALARMING = 'tosca.policies.tacker.Alarming'
DEFAULT_ALARM_ACTIONS = ['respawn', 'log', 'log_and_kill', 'notify']
RES_TYPE_VNFD = "vnfd"
RES_TYPE_NSD = "nsd"
RES_TYPE_NS = "ns"
RES_TYPE_VNF = "vnf"
RES_TYPE_VIM = "vim"
RES_EVT_CREATE = "CREATE"
RES_EVT_DELETE = "DELETE"
RES_EVT_UPDATE = "UPDATE"
RES_EVT_MONITOR = "MONITOR"
RES_EVT_SCALE = "SCALE"
RES_EVT_NA_STATE = "Not Applicable"
RES_EVT_ONBOARDED = "OnBoarded"
RES_EVT_CREATED_FLD = "created_at"
RES_EVT_DELETED_FLD = "deleted_at"
RES_EVT_UPDATED_FLD = "updated_at"
|
py | b4008dc1662b263140d8df5f113822a0b454496c | from dataclasses import dataclass
from typing import Optional
@dataclass
class awg_slave:
awg_name: str
marker_name: str
    sync_latency: Optional[float] = None
|
py | b4008e6afbdcc25c23faa0dc62da082cd6248bba | import enum
import logging
from typing import Dict, List, TypedDict
import base58
logger = logging.getLogger(__name__)
class SolanaInstructionType(str, enum.Enum):
u64 = "u64"
string = "string"
EthereumAddress = "EthereumAddress"
UnixTimestamp = "UnixTimestamp"
solanaInstructionSpace = {
SolanaInstructionType.u64: 8,
SolanaInstructionType.string: 4,
SolanaInstructionType.EthereumAddress: 20,
SolanaInstructionType.UnixTimestamp: 8,
}
class InstructionFormat(TypedDict):
name: str
type: SolanaInstructionType
def parse_instruction_data(data: str, instructionFormat: List[InstructionFormat]):
"""Parses encoded instruction data into a dictionary based on instruction format"""
decoded = base58.b58decode(data)[1:]
last_end = 0
decoded_params: Dict = {}
for intr in instructionFormat:
name = intr["name"]
type = intr["type"]
if type == SolanaInstructionType.u64:
type_len = solanaInstructionSpace[type]
decoded_params[name] = int.from_bytes(
decoded[last_end : last_end + type_len], "little"
)
last_end = last_end + type_len
elif type == SolanaInstructionType.string:
type_len = solanaInstructionSpace[type]
instr_len = int.from_bytes(
decoded[last_end : last_end + type_len], "little"
)
start, end = last_end + type_len, last_end + type_len + instr_len
decoded_value: bytes = decoded[start:end]
decoded_params[name] = str(decoded_value, "utf-8")
last_end = end
elif type == SolanaInstructionType.EthereumAddress:
type_len = solanaInstructionSpace[type]
decoded_int = int.from_bytes(decoded[last_end : last_end + type_len], "big")
# Ensure stored address is of length 40 characters
# Pads zeros if present at start of string
# https://stackoverflow.com/a/12638477
decoded_params[name] = f"0x{decoded_int:040x}"
last_end = last_end + type_len
elif type == SolanaInstructionType.UnixTimestamp:
type_len = solanaInstructionSpace[type]
decoded_params[name] = int.from_bytes(
decoded[last_end : last_end + type_len], "little"
)
last_end = last_end + type_len
return decoded_params
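if __name__ == "__main__":
    # Illustrative self-check (not part of the original module): describe a
    # tiny instruction with a single u64 field and round-trip it through
    # parse_instruction_data. The field name "track_id" is a made-up example.
    example_format: List[InstructionFormat] = [
        {"name": "track_id", "type": SolanaInstructionType.u64},
    ]
    # one leading byte (skipped by the parser) + an 8-byte little-endian integer
    raw = base58.b58encode(bytes([0]) + (42).to_bytes(8, "little")).decode()
    print(parse_instruction_data(raw, example_format))  # -> {'track_id': 42}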
|
py | b4008f0d6f91ff0886031da02d5f4d7f3f18b31b | #!/usr/bin/env python
# coding: utf8
from crm import CRM
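# NOTE: `authToken` (the Zoho auth token) and `data` (the XML record payload
# passed as xmlData below) are assumed to be defined elsewhere; they are not
# part of this snippet.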
crm = CRM(authToken=authToken, scope="ZohoCRM/crmapi")
crm_insert = crm.insertRecords(
module="CustomModule3",
xmlData=data,
version=2,
duplicateCheck=2
)
|
py | b40090ab4bd2dddaa34c665e22a7acc5b3f3567a | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorflow_addons.utils import types
from typeguard import typechecked
@tf.keras.utils.register_keras_serializable(package="Addons")
class Lookahead(tf.keras.optimizers.Optimizer):
"""This class allows to extend optimizers with the lookahead mechanism.
The mechanism is proposed by Michael R. Zhang et.al in the paper
[Lookahead Optimizer: k steps forward, 1 step back]
(https://arxiv.org/abs/1907.08610v1). The optimizer iteratively updates two
sets of weights: the search directions for weights are chosen by the inner
optimizer, while the "slow weights" are updated each `k` steps based on the
directions of the "fast weights" and the two sets of weights are
synchronized. This method improves the learning stability and lowers the
variance of its inner optimizer.
Example of usage:
```python
opt = tf.keras.optimizers.SGD(learning_rate)
opt = tfa.optimizers.Lookahead(opt)
```
"""
@typechecked
def __init__(
self,
optimizer: types.Optimizer,
sync_period: int = 6,
slow_step_size: types.FloatTensorLike = 0.5,
name: str = "Lookahead",
**kwargs
):
r"""Wrap optimizer with the lookahead mechanism.
Args:
optimizer: The original optimizer that will be used to compute
and apply the gradients.
sync_period: An integer. The synchronization period of lookahead.
Enable lookahead mechanism by setting it with a positive value.
slow_step_size: A floating point value.
The ratio for updating the slow weights.
name: Optional name for the operations created when applying
gradients. Defaults to "Lookahead".
**kwargs: keyword arguments. Allowed to be {`clipnorm`,
`clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients
by norm; `clipvalue` is clip gradients by value, `decay` is
included for backward compatibility to allow time inverse
decay of learning rate. `lr` is included for backward
compatibility, recommended to use `learning_rate` instead.
"""
super().__init__(name, **kwargs)
if isinstance(optimizer, str):
optimizer = tf.keras.optimizers.get(optimizer)
if not isinstance(optimizer, tf.keras.optimizers.Optimizer):
raise TypeError(
"optimizer is not an object of tf.keras.optimizers.Optimizer"
)
self._optimizer = optimizer
self._set_hyper("sync_period", sync_period)
self._set_hyper("slow_step_size", slow_step_size)
self._initialized = False
def _create_slots(self, var_list):
self._optimizer._create_slots(
var_list=var_list
) # pylint: disable=protected-access
for var in var_list:
self.add_slot(var, "slow")
def _create_hypers(self):
self._optimizer._create_hypers() # pylint: disable=protected-access
def _prepare(self, var_list):
return self._optimizer._prepare(
var_list=var_list
) # pylint: disable=protected-access
def apply_gradients(self, grads_and_vars, name=None):
self._optimizer._iterations = (
self.iterations
) # pylint: disable=protected-access
return super().apply_gradients(grads_and_vars, name)
def _init_op(self, var):
slow_var = self.get_slot(var, "slow")
return slow_var.assign(
tf.where(
tf.equal(self.iterations, tf.constant(0, dtype=self.iterations.dtype)),
var,
slow_var,
),
use_locking=self._use_locking,
)
def _look_ahead_op(self, var):
var_dtype = var.dtype.base_dtype
slow_var = self.get_slot(var, "slow")
local_step = tf.cast(self.iterations + 1, tf.dtypes.int64)
sync_period = self._get_hyper("sync_period", tf.dtypes.int64)
slow_step_size = self._get_hyper("slow_step_size", var_dtype)
step_back = slow_var + slow_step_size * (var - slow_var)
sync_cond = tf.equal(
tf.math.floordiv(local_step, sync_period) * sync_period, local_step
)
with tf.control_dependencies([step_back]):
slow_update = slow_var.assign(
tf.where(sync_cond, step_back, slow_var,), use_locking=self._use_locking
)
var_update = var.assign(
tf.where(sync_cond, step_back, var,), use_locking=self._use_locking
)
return tf.group(slow_update, var_update)
@property
def weights(self):
return self._weights + self._optimizer.weights
def _resource_apply_dense(self, grad, var):
init_op = self._init_op(var)
with tf.control_dependencies([init_op]):
train_op = self._optimizer._resource_apply_dense(
grad, var
) # pylint: disable=protected-access
with tf.control_dependencies([train_op]):
look_ahead_op = self._look_ahead_op(var)
return tf.group(init_op, train_op, look_ahead_op)
def _resource_apply_sparse(self, grad, var, indices):
init_op = self._init_op(var)
with tf.control_dependencies([init_op]):
train_op = self._optimizer._resource_apply_sparse( # pylint: disable=protected-access
grad, var, indices
)
with tf.control_dependencies([train_op]):
look_ahead_op = self._look_ahead_op(var)
return tf.group(init_op, train_op, look_ahead_op)
def get_config(self):
config = {
"optimizer": tf.keras.optimizers.serialize(self._optimizer),
"sync_period": self._serialize_hyperparameter("sync_period"),
"slow_step_size": self._serialize_hyperparameter("slow_step_size"),
}
base_config = super().get_config()
return {**base_config, **config}
@property
def learning_rate(self):
return self._optimizer._get_hyper("learning_rate")
@learning_rate.setter
def learning_rate(self, learning_rate):
self._optimizer._set_hyper("learning_rate", learning_rate)
@property
def lr(self):
return self.learning_rate
@lr.setter
def lr(self, lr):
self.learning_rate = lr
@classmethod
def from_config(cls, config, custom_objects=None):
optimizer = tf.keras.optimizers.deserialize(
config.pop("optimizer"), custom_objects=custom_objects,
)
return cls(optimizer, **config)
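# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition, not part of the upstream module): a
# minimal get_config()/from_config() round trip, assuming TensorFlow is
# available and this class is importable as `Lookahead`. The SGD settings are
# illustrative only.
if __name__ == "__main__":
    inner = tf.keras.optimizers.SGD(learning_rate=0.1)
    opt = Lookahead(inner, sync_period=6, slow_step_size=0.5)
    restored = Lookahead.from_config(opt.get_config())
    print(restored.get_config()["sync_period"])  # expected: 6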
|
py | b400915e7fb54a785ee47fc4669c4264e7cc3d91 | import os
import sys
import time
from dotenv import load_dotenv
from telethon import TelegramClient, events
import logging
from webdav3.client import Client
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
load_dotenv()
logger.info("Creating Client...")
client = TelegramClient("SESSION", os.environ["API_ID"], os.environ["API_HASH"])
def webdavhandle(filename):
options = {
"webdav_hostname": os.environ["WEBDAV_HOSTNAME"],
"webdav_login": os.environ["WEBDAV_LOGIN"],
"webdav_password": os.environ["WEBDAV_PASSWORD"],
}
webdav_folder_path = os.environ["WEBDAV_FOLDER_PATH"]
client = Client(options)
client.verify = False
logger.info("Sending to remote")
client.upload_sync(
remote_path=f"{webdav_folder_path}/{filename}".replace(
os.environ["FOLDER_PATH"] + "/", ""
),
local_path=filename,
)
logger.info("Package delievered")
async def handle_file_event(event):
r = await event.message.download_media(os.environ["FOLDER_PATH"])
webdavhandle(r)
os.remove(r)
# This is our update handler. It is called when a new update arrives.
@client.on(events.NewMessage)
async def handler(event):
if event.message.out:
return
if event.message.photo:
logger.info("Saving Photo")
await handle_file_event(event)
elif event.message.video:
logger.info("Saving Video")
await handle_file_event(event)
elif event.message.media and not (event.message.sticker):
logger.info("Saving File")
await handle_file_event(event)
else:
logger.info("not doing anything")
logger.info("Starting...")
client.start(
phone=os.environ["TELEGRAM_PHONE"], password=os.environ["TELEGRAM_PASSWORD"]
)
client.run_until_disconnected()
|
py | b400920e3ffa513d8360078eeb59407d9408948c | #################################################################################
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
#################################################################################
#
# Contributors :
# Copyright (c) 2020 slavi010 [email protected]
#
import tkinter as tk
from typing import Optional
from src.view.Deck import Deck
from src.view.Observer import Observer, Observable
class ListDice(Observer):
"""
A widget list where you can select dices from a specific Deck
"""
def __init__(self, root, deck: Deck, lbl_text: str, dices_index_selected: Optional[list]):
self.root = root
self.deck = deck
self.lbl_text = lbl_text
# Frame
self.frm = tk.Frame(self.root, width=2, height=60)
# widgets
self.lbl = tk.Label(self.frm, text=self.lbl_text, anchor="w")
self.lst_dices = tk.Listbox(self.frm, selectmode=tk.MULTIPLE, exportselection=0, width=20, height=5)
self.update(deck)
        if dices_index_selected:
            for idx_dices in dices_index_selected:
                self.lst_dices.select_set(idx_dices)
# widgets layouts
self.lbl.pack()
self.lst_dices.pack()
def get_selected_dices(self):
return [self.deck.dices[idx_dice_deck] for idx_dice_deck in self.lst_dices.curselection()]
def update(self, observable: Observable) -> None:
assert isinstance(observable, Deck)
self.lst_dices.delete(0, tk.END)
for dice in observable.dices:
self.lst_dices.insert(tk.END, dice.name)
class FieldInt:
"""
A int field + label with min/max.
"""
def __init__(self, root, lbl_text: str, min_value: int, default_value: int, max_value: int):
assert min_value <= default_value <= max_value
self.root = root
self.lbl_text = lbl_text
self.min_value = min_value
self.default_value = default_value
self.max_value = max_value
# Frame
self.frm = tk.Frame(self.root)
# widgets
self.lbl = tk.Label(self.frm, text=self.lbl_text, anchor="w")
self.entry_var = tk.IntVar(self.frm, default_value)
self.entry = tk.Entry(self.frm, textvariable=self.entry_var)
# widgets layouts
self.lbl.grid(row=0, column=0)
self.entry.grid(row=0, column=1)
def get_value(self):
value = self.entry_var.get()
if value < self.min_value:
return self.min_value
elif value > self.max_value:
return self.max_value
return value
class FieldString:
"""
A str field + label.
"""
def __init__(self, root, lbl_text: str, default_value: str):
self.root = root
self.lbl_text = lbl_text
self.default_value = default_value
# Frame
self.frm = tk.Frame(self.root)
# widgets
self.lbl = tk.Label(self.frm, text=self.lbl_text, anchor="w")
self.entry_var = tk.StringVar(self.frm, default_value)
self.entry = tk.Entry(self.frm, textvariable=self.entry_var)
# widgets layouts
self.lbl.grid(row=0, column=0)
self.entry.grid(row=0, column=1)
def get_value(self):
value = self.entry_var.get()
return value
class FieldRadioBinary:
"""
2 radio buttons, like a check box
"""
def __init__(self, root, lbl_main: str, lbl1: str, lbl2: str, default: int):
self.root = root
self.lbl_main_text = lbl_main
self.lbl1_text = lbl1
self.lbl2_text = lbl2
# Frame
self.frm = tk.Frame(self.root)
# widgets
self.lbl_main = tk.Label(self.frm, text=self.lbl_main_text, anchor="w")
self.radio_val = range(2) # 0 = first, 1 = second
self.radio_etiqs = [self.lbl1_text, self.lbl2_text]
self.radio_varGr = tk.IntVar(self.frm, self.radio_val[default-1])
self.radios = []
for i in range(2):
b = tk.Radiobutton(self.frm,
variable=self.radio_varGr,
text=self.radio_etiqs[i],
value=self.radio_val[i])
self.radios.append(b)
# widgets layouts
self.lbl_main.grid(row=0, column=0)
self.radios[0].grid(row=0, column=2)
self.radios[1].grid(row=0, column=4)
def get_value(self):
"""
:return: 1 or 2
"""
return self.radio_varGr.get() + 1
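# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition, not part of the original module): one
# way the simple field widgets might be composed in a window. ListDice is left
# out because it also needs a populated Deck; labels and values are
# illustrative only.
if __name__ == '__main__':
    root = tk.Tk()
    rolls = FieldInt(root, 'Number of rolls', min_value=1, default_value=3, max_value=10)
    session = FieldString(root, 'Session name', 'default')
    mode = FieldRadioBinary(root, 'Difficulty', 'Easy', 'Hard', default=1)
    for field in (rolls, session, mode):
        field.frm.pack()
    root.mainloop()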
|
py | b4009225ab9c09d9ab0bf0a356c801ae179a45da | # A modification version from chainercv repository.
# (See https://github.com/chainer/chainercv/blob/master/chainercv/evaluations/eval_detection_voc.py)
from __future__ import division
import os
from collections import defaultdict
import numpy as np
from mrcnn.structures.bounding_box import BoxList
from mrcnn.structures.boxlist_ops import boxlist_iou
def do_voc_evaluation(dataset, predictions, output_folder, logger):
# TODO need to make the use_07_metric format available
# for the user to choose
pred_boxlists = []
gt_boxlists = []
for image_id, prediction in enumerate(predictions):
img_info = dataset.get_img_info(image_id)
image_width = img_info["width"]
image_height = img_info["height"]
prediction = prediction.resize((image_width, image_height))
pred_boxlists.append(prediction)
gt_boxlist = dataset.get_groundtruth(image_id)
gt_boxlists.append(gt_boxlist)
result = eval_detection_voc(
pred_boxlists=pred_boxlists,
gt_boxlists=gt_boxlists,
iou_thresh=0.85,
use_07_metric=True,
)
result_str = "mAP: {:.4f}, recall:{}\n".format(result["map"],len(result["rec"][1]))
for i, ap in enumerate(result["ap"]):
if i == 0: # skip background
continue
result_str += "{:<16}: {:.4f}\n".format(
dataset.map_class_id_to_class_name(i), ap
)
logger.info(result_str)
if output_folder:
with open(os.path.join(output_folder, "result.txt"), "w") as fid:
fid.write(result_str)
return result
def eval_detection_voc(pred_boxlists, gt_boxlists, iou_thresh=0.5, use_07_metric=False):
"""Evaluate on voc dataset.
Args:
pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields.
gt_boxlists(list[BoxList]): ground truth boxlist, has labels field.
iou_thresh: iou thresh
use_07_metric: boolean
Returns:
dict represents the results
"""
assert len(gt_boxlists) == len(
pred_boxlists
), "Length of gt and pred lists need to be same."
prec, rec = calc_detection_voc_prec_rec(
pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, iou_thresh=iou_thresh
)
ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric)
return {"ap": ap, "map": np.nanmean(ap),
"rec":rec}
def calc_detection_voc_prec_rec(gt_boxlists, pred_boxlists, iou_thresh=0.5):
"""Calculate precision and recall based on evaluation code of PASCAL VOC.
This function calculates precision and recall of
predicted bounding boxes obtained from a dataset which has :math:`N`
images.
The code is based on the evaluation code used in PASCAL VOC Challenge.
"""
n_pos = defaultdict(int)
score = defaultdict(list)
match = defaultdict(list)
for gt_boxlist, pred_boxlist in zip(gt_boxlists, pred_boxlists):
pred_bbox = pred_boxlist.bbox.numpy()
pred_label = pred_boxlist.get_field("labels").numpy()
pred_score = pred_boxlist.get_field("scores").numpy()
gt_bbox = gt_boxlist.bbox.numpy()
gt_label = gt_boxlist.get_field("labels").numpy()
#gt_difficult = gt_boxlist.get_field("difficult").numpy()
#print('labe shapes {}'.format(gt_label.shape))
gt_difficult = np.zeros(gt_label.shape)
for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):
pred_mask_l = pred_label == l
pred_bbox_l = pred_bbox[pred_mask_l]
pred_score_l = pred_score[pred_mask_l]
# sort by score
order = pred_score_l.argsort()[::-1]
pred_bbox_l = pred_bbox_l[order]
pred_score_l = pred_score_l[order]
gt_mask_l = gt_label == l
gt_bbox_l = gt_bbox[gt_mask_l]
gt_difficult_l = gt_difficult[gt_mask_l]
n_pos[l] += np.logical_not(gt_difficult_l).sum()
score[l].extend(pred_score_l)
if len(pred_bbox_l) == 0:
continue
if len(gt_bbox_l) == 0:
match[l].extend((0,) * pred_bbox_l.shape[0])
continue
# VOC evaluation follows integer typed bounding boxes.
pred_bbox_l = pred_bbox_l.copy()
pred_bbox_l[:, 2:] += 1
gt_bbox_l = gt_bbox_l.copy()
gt_bbox_l[:, 2:] += 1
iou = boxlist_iou(
BoxList(pred_bbox_l, gt_boxlist.size),
BoxList(gt_bbox_l, gt_boxlist.size),
).numpy()
gt_index = iou.argmax(axis=1)
# set -1 if there is no matching ground truth
gt_index[iou.max(axis=1) < iou_thresh] = -1
del iou
selec = np.zeros(gt_bbox_l.shape[0], dtype=bool)
for gt_idx in gt_index:
if gt_idx >= 0:
#if gt_difficult_l[gt_idx]:
# match[l].append(-1)
#else:
if not selec[gt_idx]:
match[l].append(1)
else:
match[l].append(0)
selec[gt_idx] = True
else:
match[l].append(0)
n_fg_class = max(n_pos.keys()) + 1
prec = [None] * n_fg_class
rec = [None] * n_fg_class
for l in n_pos.keys():
score_l = np.array(score[l])
match_l = np.array(match[l], dtype=np.int8)
order = score_l.argsort()[::-1]
match_l = match_l[order]
tp = np.cumsum(match_l == 1)
fp = np.cumsum(match_l == 0)
# If an element of fp + tp is 0,
# the corresponding element of prec[l] is nan.
prec[l] = tp / (fp + tp)
# If n_pos[l] is 0, rec[l] is None.
if n_pos[l] > 0:
rec[l] = tp / n_pos[l]
return prec, rec
def calc_detection_voc_ap(prec, rec, use_07_metric=False):
"""Calculate average precisions based on evaluation code of PASCAL VOC.
This function calculates average precisions
from given precisions and recalls.
The code is based on the evaluation code used in PASCAL VOC Challenge.
Args:
prec (list of numpy.array): A list of arrays.
:obj:`prec[l]` indicates precision for class :math:`l`.
If :obj:`prec[l]` is :obj:`None`, this function returns
:obj:`numpy.nan` for class :math:`l`.
rec (list of numpy.array): A list of arrays.
:obj:`rec[l]` indicates recall for class :math:`l`.
If :obj:`rec[l]` is :obj:`None`, this function returns
:obj:`numpy.nan` for class :math:`l`.
use_07_metric (bool): Whether to use PASCAL VOC 2007 evaluation metric
for calculating average precision. The default value is
:obj:`False`.
Returns:
~numpy.ndarray:
This function returns an array of average precisions.
The :math:`l`-th value corresponds to the average precision
for class :math:`l`. If :obj:`prec[l]` or :obj:`rec[l]` is
:obj:`None`, the corresponding value is set to :obj:`numpy.nan`.
"""
n_fg_class = len(prec)
ap = np.empty(n_fg_class)
for l in range(n_fg_class):
if prec[l] is None or rec[l] is None:
ap[l] = np.nan
continue
if use_07_metric:
# 11 point metric
ap[l] = 0
for t in np.arange(0.0, 1.1, 0.1):
if np.sum(rec[l] >= t) == 0:
p = 0
else:
p = np.max(np.nan_to_num(prec[l])[rec[l] >= t])
ap[l] += p / 11
else:
# correct AP calculation
# first append sentinel values at the end
mpre = np.concatenate(([0], np.nan_to_num(prec[l]), [0]))
mrec = np.concatenate(([0], rec[l], [1]))
mpre = np.maximum.accumulate(mpre[::-1])[::-1]
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap[l] = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
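# ---------------------------------------------------------------------------
# Hedged worked example (editor addition, not part of the original module):
# calc_detection_voc_ap on a hand-made precision/recall curve for one class,
# contrasting the 11-point VOC2007 metric with the exact area under the PR
# curve. The numbers are illustrative only.
if __name__ == "__main__":
    toy_prec = [np.array([1.0, 0.5, 2.0 / 3.0])]
    toy_rec = [np.array([0.5, 0.5, 1.0])]
    print(calc_detection_voc_ap(toy_prec, toy_rec, use_07_metric=True))   # ~0.85
    print(calc_detection_voc_ap(toy_prec, toy_rec, use_07_metric=False))  # ~0.83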
|
py | b400935b185ddb77fd2fcc88028f03c1dd79f53d | # --------------------------------------------------------
# SiamMask
# Licensed under The MIT License
# Written by Qiang Wang (wangqiang2015 at ia.ac.cn)
# --------------------------------------------------------
import argparse
import logging
import os
import cv2
import shutil
import time
import json
import math
import torch
from torch.utils.data import DataLoader
from utils.log_helper import init_log, print_speed, add_file_handler, Dummy
from utils.load_helper import load_pretrain, restore_from
from utils.average_meter_helper import AverageMeter
from datasets.siam_mask_dataset import DataSets
from utils.lr_helper import build_lr_scheduler
from tensorboardX import SummaryWriter
from utils.config_helper import load_config
from torch.utils.collect_env import get_pretty_env_info
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(description='PyTorch Tracking SiamMask Training')
parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
help='number of data loading workers (default: 16)')
parser.add_argument('--epochs', default=50, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch', default=64, type=int,
metavar='N', help='mini-batch size (default: 64)')
parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--clip', default=10.0, type=float,
help='gradient clip value')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained', default='',
help='use pre-trained model')
parser.add_argument('--config', dest='config', required=True,
help='hyperparameter of SiamMask in json format')
parser.add_argument('--arch', dest='arch', default='', choices=['Custom',],
help='architecture of pretrained model')
parser.add_argument('-l', '--log', default="log.txt", type=str,
help='log file')
parser.add_argument('-s', '--save_dir', default='snapshot', type=str,
help='save dir')
parser.add_argument('--log-dir', default='board', help='TensorBoard log dir')
best_acc = 0.
def collect_env_info():
env_str = get_pretty_env_info()
env_str += "\n OpenCV ({})".format(cv2.__version__)
return env_str
def build_data_loader(cfg):
logger = logging.getLogger('global')
logger.info("build train dataset") # train_dataset
train_set = DataSets(cfg['train_datasets'], cfg['anchors'], args.epochs)
train_set.shuffle()
logger.info("build val dataset") # val_dataset
if not 'val_datasets' in cfg.keys():
cfg['val_datasets'] = cfg['train_datasets']
val_set = DataSets(cfg['val_datasets'], cfg['anchors'])
val_set.shuffle()
train_loader = DataLoader(train_set, batch_size=args.batch, num_workers=args.workers,
pin_memory=True, sampler=None)
val_loader = DataLoader(val_set, batch_size=args.batch, num_workers=args.workers,
pin_memory=True, sampler=None)
logger.info('build dataset done')
return train_loader, val_loader
def build_opt_lr(model, cfg, args, epoch):
backbone_feature = model.features.param_groups(cfg['lr']['start_lr'], cfg['lr']['feature_lr_mult'])
if len(backbone_feature) == 0:
trainable_params = model.rpn_model.param_groups(cfg['lr']['start_lr'], cfg['lr']['rpn_lr_mult'], 'mask')
else:
trainable_params = backbone_feature + \
model.rpn_model.param_groups(cfg['lr']['start_lr'], cfg['lr']['rpn_lr_mult']) + \
model.mask_model.param_groups(cfg['lr']['start_lr'], cfg['lr']['mask_lr_mult'])
optimizer = torch.optim.SGD(trainable_params, args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
lr_scheduler = build_lr_scheduler(optimizer, cfg['lr'], epochs=args.epochs)
lr_scheduler.step(epoch)
return optimizer, lr_scheduler
def main():
global args, best_acc, tb_writer, logger
args = parser.parse_args()
init_log('global', logging.INFO)
if args.log != "":
add_file_handler('global', args.log, logging.INFO)
print("Init logger")
logger = logging.getLogger('global')
print(44)
#logger.info("\n" + collect_env_info())
print(99)
logger.info(args)
cfg = load_config(args)
logger.info("config \n{}".format(json.dumps(cfg, indent=4)))
print(2)
if args.log_dir:
tb_writer = SummaryWriter(args.log_dir)
else:
tb_writer = Dummy()
# build dataset
train_loader, val_loader = build_data_loader(cfg)
print(3)
path = "/usr4/alg504/cliao25/siammask/experiments/siammask_base/snapshot/checkpoint_e{}.pth"
for epoch in range(1,21):
if args.arch == 'Custom':
from custom import Custom
model = Custom(pretrain=True, anchors=cfg['anchors'])
else:
exit()
print(4)
if args.pretrained:
model = load_pretrain(model, args.pretrained)
model = model.cuda()
#model.features.unfix((epoch - 1) / 20)
optimizer, lr_scheduler = build_opt_lr(model, cfg, args, epoch)
filepath = path.format(epoch)
assert os.path.isfile(filepath)
model, _, _, _, _ = restore_from(model, optimizer, filepath)
#model = load_pretrain(model, filepath)
model = torch.nn.DataParallel(model, list(range(torch.cuda.device_count()))).cuda()
model.train()
device = torch.device('cuda')
model = model.to(device)
valid(val_loader, model, cfg)
print("Done")
def valid(val_loader, model, cfg):
#model.train()
#model = model.cuda()
for iter, input in enumerate(val_loader):
x = {
'cfg': cfg,
'template': torch.autograd.Variable(input[0]).cuda(),
'search': torch.autograd.Variable(input[1]).cuda(),
'label_cls': torch.autograd.Variable(input[2]).cuda(),
'label_loc': torch.autograd.Variable(input[3]).cuda(),
'label_loc_weight': torch.autograd.Variable(input[4]).cuda(),
'label_mask': torch.autograd.Variable(input[6]).cuda(),
'label_mask_weight': torch.autograd.Variable(input[7]).cuda(),
}
outputs = model(x)
# can't calculate rpn_loc_loss on validation set
rpn_cls_loss, rpn_mask_loss = torch.mean(outputs['losses'][0]), torch.mean(outputs['losses'][1])
mask_iou_mean, mask_iou_at_5, mask_iou_at_7 = torch.mean(outputs['accuracy'][0]), torch.mean(outputs['accuracy'][1]), torch.mean(outputs['accuracy'][2])
cls_weight, reg_weight, mask_weight = cfg['loss']['weight']
print(rpn_cls_loss)
print(mask_iou_mean)
#loss = rpn_cls_loss * cls_weight + rpn_loc_loss * reg_weight + rpn_mask_loss * mask_weight
break
if __name__ == '__main__':
main()
|
py | b40093ddab78c0c5106b459e47f3ca8440205efa | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from PIL import Image
from .error import TemplateInputError
def generate_result(middle_point, pypts, confi):
"""Format the result: 定义图像识别结果格式."""
ret = dict(result=middle_point,
rectangle=pypts,
confidence=confi)
return ret
def check_image_valid(im_source, im_search):
"""Check if the input images valid or not."""
if im_source is not None and im_source.any() and im_search is not None and im_search.any():
return True
else:
return False
def check_source_larger_than_search(im_source, im_search):
"""检查图像识别的输入."""
# 图像格式, 确保输入图像为指定的矩阵格式:
# 图像大小, 检查截图宽、高是否大于了截屏的宽、高:
h_search, w_search = im_search.shape[:2]
h_source, w_source = im_source.shape[:2]
if h_search > h_source or w_search > w_source:
raise TemplateInputError("error: in template match, found im_search bigger than im_source.")
def img_mat_rgb_2_gray(img_mat):
"""
    Turn img_mat into gray scale so that the template match can process the image data.
    print(type(im_search[0][0])) can be used to check the pixel type.
"""
assert isinstance(img_mat[0][0], np.ndarray), "input must be instance of np.ndarray"
return cv2.cvtColor(img_mat, cv2.COLOR_BGR2GRAY)
def img_2_string(img):
_, png = cv2.imencode('.png', img)
return png.tostring()
def string_2_img(pngstr):
nparr = np.fromstring(pngstr, np.uint8)
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
return img
def pil_2_cv2(pil_image):
open_cv_image = np.array(pil_image)
# Convert RGB to BGR (method-1):
open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_RGB2BGR)
# Convert RGB to BGR (method-2):
# b, g, r = cv2.split(open_cv_image)
# open_cv_image = cv2.merge([r, g, b])
return open_cv_image
def cv2_2_pil(cv2_image):
cv2_im = cv2.cvtColor(cv2_image, cv2.COLOR_BGR2RGB)
pil_im = Image.fromarray(cv2_im)
return pil_im
def compress_image(pil_img, path, quality, max_width=300, max_height=300):
'''
    Generate a thumbnail of the image.
'''
pil_img.thumbnail((max_width, max_height), Image.ANTIALIAS)
quality = int(round(quality))
if quality <= 0 or quality >= 100:
raise Exception("SNAPSHOT_QUALITY (" + str(quality) + ") should be an integer in the range [1,99]")
pil_img.save(path, quality=quality, optimize=True)
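# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition, not part of the original module):
# round-trips a synthetic image through PNG bytes and the OpenCV/PIL helpers
# above. The output file name is a placeholder.
if __name__ == '__main__':
    img = np.full((64, 64, 3), 255, dtype=np.uint8)      # plain white BGR image
    assert check_image_valid(img, img)
    restored = string_2_img(img_2_string(img))           # PNG encode, then decode
    thumb = cv2_2_pil(restored)
    compress_image(thumb, 'thumbnail.png', quality=75)   # writes a small thumbnail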
|
py | b40094071e79e3e2ddc82e644fba44514a044b81 | """
Credits:
Copyright (c) 2017-2022 Matej Aleksandrov, Matej Batič, Grega Milčinski, Domagoj Korais, Matic Lubej (Sinergise)
Copyright (c) 2017-2022 Žiga Lukšič, Devis Peressutti, Nejc Vesel, Jovan Višnjić, Anže Zupanc (Sinergise)
Copyright (c) 2019-2020 Jernej Puc, Lojze Žust (Sinergise)
Copyright (c) 2017-2019 Blaž Sovdat, Andrej Burja (Sinergise)
This source code is licensed under the MIT license found in the LICENSE
file in the root directory of this source tree.
"""
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from eolearn.core import EOPatch, FeatureType
from eolearn.geometry import ErosionTask, MorphologicalFilterTask, MorphologicalOperations, MorphologicalStructFactory
CLASSES = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
MASK_FEATURE = FeatureType.MASK, "mask"
MASK_TIMELESS_FEATURE = FeatureType.MASK_TIMELESS, "timeless_mask"
@pytest.mark.parametrize("invalid_input", [None, 0, "a"])
def test_erosion_value_error(invalid_input):
with pytest.raises(ValueError):
ErosionTask((FeatureType.MASK_TIMELESS, "LULC", "TEST"), disk_radius=invalid_input)
def test_erosion_full(test_eopatch):
mask_before = test_eopatch.mask_timeless["LULC"].copy()
erosion_task = ErosionTask((FeatureType.MASK_TIMELESS, "LULC", "LULC_ERODED"), 1)
eopatch = erosion_task.execute(test_eopatch)
mask_after = eopatch.mask_timeless["LULC_ERODED"].copy()
assert not np.all(mask_before == mask_after)
for label in CLASSES:
if label == 0:
assert np.sum(mask_after == label) >= np.sum(mask_before == label), "Error in the erosion process"
else:
assert np.sum(mask_after == label) <= np.sum(mask_before == label), "Error in the erosion process"
def test_erosion_partial(test_eopatch):
mask_before = test_eopatch.mask_timeless["LULC"].copy()
# skip forest and artificial surface
specific_labels = [0, 1, 3, 4]
erosion_task = ErosionTask(
mask_feature=(FeatureType.MASK_TIMELESS, "LULC", "LULC_ERODED"), disk_radius=1, erode_labels=specific_labels
)
eopatch = erosion_task.execute(test_eopatch)
mask_after = eopatch.mask_timeless["LULC_ERODED"].copy()
assert not np.all(mask_before == mask_after)
for label in CLASSES:
if label == 0:
assert np.sum(mask_after == label) >= np.sum(mask_before == label), "Error in the erosion process"
elif label in specific_labels:
assert np.sum(mask_after == label) <= np.sum(mask_before == label), "Error in the erosion process"
else:
assert_array_equal(mask_after == label, mask_before == label, err_msg="Error in the erosion process")
@pytest.mark.parametrize("morph_operation", MorphologicalOperations)
@pytest.mark.parametrize(
"struct_element", [None, MorphologicalStructFactory.get_disk(5), MorphologicalStructFactory.get_rectangle(5, 6)]
)
def test_morphological_filter(morph_operation, struct_element):
eopatch = EOPatch()
eopatch[MASK_FEATURE] = np.random.randint(20, size=(10, 100, 100, 3), dtype=np.uint8)
eopatch[MASK_TIMELESS_FEATURE] = np.random.randint(20, 50, size=(100, 100, 5), dtype=np.uint8)
task = MorphologicalFilterTask(
[MASK_FEATURE, MASK_TIMELESS_FEATURE], morph_operation=morph_operation, struct_elem=struct_element
)
task.execute(eopatch)
assert eopatch[MASK_FEATURE].shape == (10, 100, 100, 3)
assert eopatch[MASK_TIMELESS_FEATURE].shape == (100, 100, 5)
|
py | b400965b3f066d1b3dd3d4936a2e1ec0279bc446 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
from spack import architecture
class Sqlite(AutotoolsPackage):
"""SQLite3 is an SQL database engine in a C library. Programs that
link the SQLite3 library can have SQL database access without
running a separate RDBMS process.
"""
homepage = "https://www.sqlite.org"
version('3.33.0', sha256='106a2c48c7f75a298a7557bcc0d5f4f454e5b43811cc738b7ca294d6956bbb15')
version('3.31.1', sha256='62284efebc05a76f909c580ffa5c008a7d22a1287285d68b7825a2b6b51949ae')
version('3.30.1', sha256='8c5a50db089bd2a1b08dbc5b00d2027602ca7ff238ba7658fabca454d4298e60')
version('3.30.0', sha256='e0a8cf4c7a87455e55e10413d16f358ca121ccec687fe1301eac95e2d340fc58')
version('3.29.0', sha256='8e7c1e2950b5b04c5944a981cb31fffbf9d2ddda939d536838ebc854481afd5b')
version('3.28.0', sha256='d61b5286f062adfce5125eaf544d495300656908e61fca143517afcc0a89b7c3')
version('3.27.2', sha256='50c39e85ea28b5ecfdb3f9e860afe9ba606381e21836b2849efca6a0bfe6ef6e')
version('3.27.1', sha256='54a92b8ff73ff6181f89b9b0c08949119b99e8cccef93dbef90e852a8b10f4f8')
version('3.27.0', sha256='dbfb0fb4fc32569fa427d3658e888f5e3b84a0952f706ccab1fd7c62a54f10f0')
version('3.26.0', sha256='5daa6a3fb7d1e8c767cd59c4ded8da6e4b00c61d3b466d0685e35c4dd6d7bf5d')
# All versions prior to 3.26.0 are vulnerable to Magellan when FTS
# is enabled, see https://blade.tencent.com/magellan/index_en.html
depends_on('readline')
depends_on('zlib')
variant('functions', default=False,
description='Provide mathematical and string extension functions '
'for SQL queries using the loadable extensions '
'mechanism.')
variant('fts', default=True,
description='Enable FTS support '
'(unsafe for <3.26.0.0 due to Magellan).')
variant('rtree', default=False, description='Build with Rtree module')
variant('column_metadata', default=True, description="Build with COLUMN_METADATA")
# See https://blade.tencent.com/magellan/index_en.html
conflicts('+fts', when='@:3.25.99.99')
resource(name='extension-functions',
url='https://sqlite.org/contrib/download/extension-functions.c/download/extension-functions.c?get=25',
sha256='991b40fe8b2799edc215f7260b890f14a833512c9d9896aa080891330ffe4052',
expand=False,
placement={'extension-functions.c?get=25':
'extension-functions.c'},
when='+functions')
# On some platforms (e.g., PPC) the include chain includes termios.h which
# defines a macro B0. Sqlite has a shell.c source file that declares a
# variable named B0 and will fail to compile when the macro is found. The
# following patch undefines the macro in shell.c
patch('sqlite_b0.patch', when='@3.18.0:3.21.0')
# Starting version 3.17.0, SQLite uses compiler built-ins
# __builtin_sub_overflow(), __builtin_add_overflow(), and
# __builtin_mul_overflow(), which are not supported by Intel compiler.
# Starting version 3.21.0 SQLite doesn't use the built-ins if Intel
# compiler is used.
patch('remove_overflow_builtins.patch', when='@3.17.0:3.20%intel')
def url_for_version(self, version):
        # Pad the version to four components before building the URL string.
        full_version = list(version.version) + [0] * (4 - len(version.version))
        version_string = str(full_version[0]) + \
            ''.join(['%02d' % v for v in full_version[1:]])
# See https://sqlite.org/chronology.html for version -> year
# correspondence.
if version >= Version('3.31.0'):
year = '2020'
elif version >= Version('3.27.0'):
year = '2019'
elif version >= Version('3.22.0'):
year = '2018'
elif version >= Version('3.16.0'):
year = '2017'
elif version >= Version('3.10.0'):
year = '2016'
elif version >= Version('3.8.8'):
year = '2015'
elif version >= Version('3.8.3'):
year = '2014'
elif version >= Version('3.7.16'):
year = '2013'
else:
raise ValueError('Unsupported version {0}'.format(version))
return 'https://sqlite.org/{0}/sqlite-autoconf-{1}.tar.gz'.format(year, version_string)
@property
def libs(self):
return find_libraries('libsqlite3', root=self.prefix.lib)
def get_arch(self):
arch = architecture.Arch()
arch.platform = architecture.platform()
return str(arch.platform.target('default_target'))
def configure_args(self):
args = []
if self.get_arch() == 'ppc64le':
args.append('--build=powerpc64le-redhat-linux-gnu')
if '+fts' not in self.spec:
args.extend(['--disable-fts4', '--disable-fts5'])
# Ref: https://sqlite.org/rtree.html
if '+rtree' in self.spec:
args.append('CPPFLAGS=-DSQLITE_ENABLE_RTREE=1')
# Ref: https://sqlite.org/compile.html
if '+column_metadata' in self.spec:
args.append('CPPFLAGS=-DSQLITE_ENABLE_COLUMN_METADATA=1')
return args
@run_after('install')
def build_libsqlitefunctions(self):
if '+functions' in self.spec:
libraryname = 'libsqlitefunctions.' + dso_suffix
cc = Executable(spack_cc)
cc(self.compiler.cc_pic_flag, '-lm', '-shared',
'extension-functions.c', '-o', libraryname)
install(libraryname, self.prefix.lib)
|
py | b400966b832d87d818d43f826be24401d1b9b9c4 | #!/usr/bin/env python
"""
__init__.py
Represents the constraint satisfaction problem (CSP) interface.
Full import: `from cspy import Variable, Constraint, CSP`
"""
import copy
import pprint
from cspy.solver import Solver
class Variable(object):
"""A variable.
If involved in a CSP, the goal will be to assign this variable a value that satisfies the constraints
and possibly works together with other variables to maximize or minimize an objective value.
"""
def __init__(self, name, domain=(), value=None):
self.name = name
self.domain = domain
self.init_domain = copy.deepcopy(domain)
self.value = value # in theory, only the solver should assign this attribute
@staticmethod
def parse_value(var):
return var.value if isinstance(var, Variable) else var
def __repr__(self):
return 'cspy.Variable(%r)' % self.__dict__
def __str__(self):
return 'a CSPy Variable with attributes %r' % self.__dict__
def __lt__(self, other):
return self.value < Variable.parse_value(other)
def __le__(self, other):
return self.value <= Variable.parse_value(other)
def __eq__(self, other):
return self.value == Variable.parse_value(other)
def __ne__(self, other):
return self.value != Variable.parse_value(other)
def __gt__(self, other):
return self.value > Variable.parse_value(other)
def __ge__(self, other):
return self.value >= Variable.parse_value(other)
def __contains__(self, value):
return value in self.domain
def __len__(self):
return len(self.domain)
def __abs__(self):
return abs(self.value)
def __add__(self, other):
return self.value + Variable.parse_value(other)
def __and__(self, other):
return self.value and Variable.parse_value(other)
def __bool__(self):
return bool(self.value)
def __int__(self):
return int(self.value)
def __mod__(self, other):
return self.value % Variable.parse_value(other)
def __mul__(self, other):
return self.value * Variable.parse_value(other)
def __neg__(self):
return -self.value
def __nonzero__(self):
return bool(self.value)
def __or__(self, other):
return self.value or Variable.parse_value(other)
def __sub__(self, other):
return self.value - Variable.parse_value(other)
def __truediv__(self, other):
return self.value / Variable.parse_value(other)
class Constraint(object):
"""A constraint.
A constraint specifies allowable combinations of values for a subset of variables;
in `CSPy`, every constraint is represented by (a) an ordered tuple of variable names
and (b) a function which takes in the variables associated with those names
and returns True or False depending on whether or not the constraint has been met.
"""
def __init__(self, var_names, satisfied, name=None):
try:
self.var_names = tuple(var_names) # names of variables involved in the constraint
except TypeError:
self.var_names = (var_names,)
print('WARNING: `var_names` is not a collection; casting it to one automatically')
self.satisfied = satisfied # fn: (vars, in order specified by `var_names`) -> True/False
self.name = name
def __contains__(self, value):
"""Check whether or not a variable (identified by its name) is involved in the constraint."""
return value in self.var_names
class CSP(object):
"""A constraint satisfaction problem (CSP).
A CSP is defined over a set of variables and constraints, and involves assigning a value to each variable
s.t. all of the constraints are satisfied and an objective function, if one exists, is maximized.
"""
def __init__(self, variables=(), constraints=(), objective_fn=None):
self.var_list = list(variables)
self.var_dict = {var.name: var for var in variables}
self.constraints = list(constraints)
self.objective_fn = objective_fn
def reset(self):
"""Unassigns all variables and re-initializes their domains."""
for var in self.var_list:
var.value = None
var.domain = copy.deepcopy(var.init_domain)
def add_variable(self, var):
"""Adds a variable to the registry of the CSP."""
self.var_list.append(var)
self.var_dict[var.name] = var
def add_constraint(self, constraint):
"""Adds a constraint to the registry of the CSP."""
self.constraints.append(constraint)
def set_objective_fn(self, objective_fn):
"""Assigns an objective function to the CSP.
An objective function should take in all variables
and return a scalar representing the quantity to be maximized."""
self.objective_fn = objective_fn
def get_solution(self, algorithm='backtracking', **kwargs):
"""Returns the optimal solution as defined by the constraints and the objective function.
If no objective function exists, returns an arbitrary valid solution.
If no solution exists (i.e. the feasible set is empty), returns None.
"""
return Solver(self).solve(algorithm=algorithm, take_first=True, **kwargs)
def get_all_solutions(self, algorithm='backtracking', **kwargs):
"""Returns all solutions to the CSP.
If an objective function exists, this will return all optimal solutions.
If no objective function exists, this will return all valid solutions.
"""
return Solver(self).solve(algorithm=algorithm, take_first=False, **kwargs)
def all_variables_assigned(self):
return all(var.value is not None for var in self.var_list)
def get_unassigned_vars(self):
return [v for v in self.var_list if v.value is None]
def get_unassigned_var_names(self):
return [v.name for v in self.var_list if v.value is None]
def get_assigned_vars(self):
return [v for v in self.var_list if v.value is not None]
def get_assigned_var_names(self):
return [v.name for v in self.var_list if v.value is not None]
def get_constraints_with(self, var):
"""Return all constraints involving VAR."""
return [c for c in self.constraints if var.name in c.var_names]
def solved(self):
"""Return True if all of the variables have been assigned a value
and no constraints are currently being violated. Otherwise return False.
"""
if not self.all_variables_assigned():
return False
for constraint in self.constraints:
if not constraint.satisfied(*[self.var_dict[name] for name in constraint.var_names]):
return False
return True
def num_constraints_violated(self):
return len([c for c in self.constraints if not c.satisfied(*[self.var_dict[name] for name in c.var_names])])
def print_current_assignment(self):
pprint.pprint({var.name: var.value for var in self.var_list if var.value is not None})
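# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition, not part of the original module): a
# two-variable CSP whose only constraint is that the values differ. It assumes
# the backtracking solver assigns values to the variables as documented above.
if __name__ == '__main__':
    a = Variable('a', domain=(1, 2))
    b = Variable('b', domain=(1, 2))
    differ = Constraint(('a', 'b'), lambda a, b: a != b, name='a != b')
    problem = CSP(variables=(a, b), constraints=(differ,))
    solution = problem.get_solution()      # runs the backtracking solver
    problem.print_current_assignment()     # e.g. {'a': 1, 'b': 2}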
|
py | b400966e2aa6e589bf70488f4b772ab5b3f9000d | import os
import unittest
import shutil
from invest_natcap.testing import GISTest
from invest_natcap import raster_utils
from natcap.opal import static_maps
DATA = os.path.join(os.path.dirname(__file__), '..', 'data')
CLIPPED_DATA = os.path.join(DATA, 'colombia_clipped')
FULL_DATA = os.path.join(DATA, 'colombia_tool_data')
INVEST_DATA = os.path.join(os.path.dirname(__file__), '..', '..',
'invest-natcap.invest-3', 'test', 'invest-data')
class SedimentStaticMapTest(GISTest):
def setUp(self):
self.config = {
"workspace_dir": "",
"dem_uri": os.path.join(CLIPPED_DATA, 'dem.tif'),
"erosivity_uri": os.path.join(CLIPPED_DATA, "erosivity.tif"),
"erodibility_uri": os.path.join(CLIPPED_DATA, "erodibility.tif"),
"landuse_uri": "",
"watersheds_uri": os.path.join(CLIPPED_DATA, "servicesheds_col.shp"),
"reservoir_locations_uri": os.path.join(CLIPPED_DATA, "reservoirs.shp"),
"reservoir_properties_uri": "",
"biophysical_table_uri": os.path.join(FULL_DATA, "Biophysical_Colombia.csv"),
"threshold_flow_accumulation": 40,
"slope_threshold": "5",
"sediment_threshold_table_uri": os.path.join(FULL_DATA, "sediment_threshold.csv"),
}
def test_execute_sediment_smoke(self):
lulc_uri = os.path.join(CLIPPED_DATA, 'ecosystems.tif')
workspace = 'test_workspace'
static_maps.execute_model('sediment', lulc_uri, workspace,
config=self.config)
def test_sediment_static_map(self):
lulc_uri = os.path.join(CLIPPED_DATA, 'ecosystems.tif')
target_lucode = 124
static_map_uri = 'sediment_static_map.tif'
static_maps.build_static_map('sediment', lulc_uri, target_lucode,
static_map_uri, base_run=lulc_uri, config=self.config)
def test_sediment_static_map_quality_sims(self):
lulc_uri = os.path.join(CLIPPED_DATA, 'ecosystems.tif')
target_lucode = 124
static_map_uri = 'sediment_static_map.tif'
workspace = 'simulations_workspace'
static_maps.build_static_map('sediment', lulc_uri, target_lucode,
static_map_uri, base_run=lulc_uri, workspace=workspace, config=self.config, num_simulations=5)
@unittest.skip("This takes 13 hours to run.")
def test_sediment_static_map_full(self):
lulc_uri = os.path.join(FULL_DATA, 'ecosystems.tif')
target_lucode = 124
static_map_uri = 'sediment_static_map_full.tif'
static_maps.build_static_map('sediment', lulc_uri, target_lucode,
static_map_uri)
def test_execute(self):
self.config['workspace_dir'] = os.path.join(os.getcwd(),
'sed_execute_test')
self.config['model_name'] = 'sediment'
self.config['paved_landcover_code'] = 60
self.config['bare_landcover_code'] = 80
self.config['landuse_uri'] = os.path.join(CLIPPED_DATA,
'ecosystems.tif')
self.config['do_parallelism'] = True
static_maps.execute(self.config)
def test_static_map_quality(self):
lulc_uri = os.path.join(FULL_DATA, 'ecosystems.tif')
impact_lucode = 60 # paved landcover code
model_name = 'sediment'
num_iterations = 10
workspace = os.path.join(os.getcwd(), 'static_map_quality')
impact_region = os.path.join(FULL_DATA, 'servicesheds_col.shp')
if os.path.exists(workspace):
shutil.rmtree(workspace)
os.makedirs(workspace)
# tweak the config for running on full datasets
self.config = {
"workspace_dir": "",
"dem_uri": os.path.join(FULL_DATA, 'DEM.tif'),
"erosivity_uri": os.path.join(FULL_DATA, "Erosivity.tif"),
"erodibility_uri": os.path.join(FULL_DATA, "Erodability.tif"),
"landuse_uri": "",
# "watersheds_uri": os.path.join(FULL_DATA, "Servicesheds_Col.shp"),
"watersheds_uri": os.path.join(FULL_DATA, "watersheds_cuencas.shp"),
"reservoir_locations_uri": os.path.join(FULL_DATA, "Reservoirs.shp"),
"reservoir_properties_uri": "",
"biophysical_table_uri": os.path.join(FULL_DATA, "Biophysical_Colombia.csv"),
"threshold_flow_accumulation": 40,
"slope_threshold": "5",
"sediment_threshold_table_uri": os.path.join(FULL_DATA, "sediment_threshold.csv"),
}
watersheds_uri = self.config['watersheds_uri']
#base_workspace = os.path.join(workspace, 'base_run')
#static_maps.execute_model(model_name, lulc_uri, base_workspace, self.config)
# assume that I've generated the static map before
base_workspace = os.path.join(os.path.dirname(__file__), '..',
'sediment_static_maps', 'sediment_base')
base_run = os.path.join(base_workspace, 'output', 'sed_export.tif')
base_static_map = os.path.join(base_workspace, '..',
'sediment_paved_static_map.tif')
if not os.path.exists(base_run):
raise IOError(('You must generate a sediment static map. Its '
'base export must be located here: %s' % os.path.abspath(base_run)))
if not os.path.exists(base_static_map):
raise IOError(('You must generate a sediment static map in its '
'usual place: %s' % os.path.abspath(base_static_map)))
static_maps.test_static_map_quality(base_run, base_static_map,
lulc_uri, impact_lucode, watersheds_uri, model_name, workspace,
self.config)
csv_path = os.path.join(workspace, 'impact_site_simulation.csv')
static_maps.graph_it(csv_path, os.path.join(workspace,
'results_plot.png'))
def test_static_map_quality_willamette(self):
TERRESTRIAL = os.path.join(INVEST_DATA, 'Base_Data', 'Terrestrial')
FRESHWATER = os.path.join(INVEST_DATA, 'Base_Data', 'Freshwater')
lulc_uri = os.path.join(INVEST_DATA, 'Base_Data', 'Terrestrial',
'lulc_samp_cur')
impact_lucode = 88
model_name = 'sediment'
        num_iterations = 50  # not used unless explicitly passed to function
workspace = os.path.join(os.getcwd(), 'static_map_quality')
watersheds = os.path.join(FRESHWATER, 'watersheds.shp')
self.config['dem_uri'] = os.path.join(FRESHWATER, 'dem')
self.config['erosivity_uri'] = os.path.join(FRESHWATER, 'erosivity')
self.config['erodibility_uri'] = os.path.join(FRESHWATER, 'erodibility')
self.config['watersheds_uri'] = os.path.join(FRESHWATER,
'watersheds.shp')
self.config['biophysical_table_uri'] = os.path.join(FRESHWATER,
'biophysical_table.csv')
self.config['threshold_flow_accumulation'] = 400
self.config['slope_threshold'] = "5",
self.config['k_param'] = 2
self.config['sdr_max'] = 0.8
self.config['ic_0_param'] = 0.5
self.config['sediment_threshold_table_uri'] = os.path.join(INVEST_DATA,
'Sedimentation', 'input', 'sediment_threshold_table.csv')
if os.path.exists(workspace):
shutil.rmtree(workspace)
os.makedirs(workspace)
base_workspace = os.path.join(workspace, 'base_run')
static_maps.execute_model(model_name, lulc_uri, base_workspace, self.config)
base_run = os.path.join(base_workspace, 'output', 'sed_export.tif')
static_map_uri = os.path.join(workspace, 'base_static_map.tif')
static_map_workspace = os.path.join(workspace, 'static_map')
static_maps.build_static_map(model_name, lulc_uri, impact_lucode,
static_map_uri, base_run, self.config,
workspace=static_map_workspace)
# difference_raster = os.path.join(workspace, 'difference.tif')
# converted_raster = os.path.join(static_map_workspace,
# 'sediment_converted', 'output', 'sed_export.tif')
# static_maps.subtract_rasters(base_run, converted_raster,
# difference_raster)
# invert is False here because we're running sediment on the paved
# scenario.
static_maps.test_static_map_quality(base_run, static_map_uri,
lulc_uri, impact_lucode, watersheds, model_name, workspace,
self.config, num_iterations=num_iterations, invert=False)
print 'graphing'
log_file = os.path.join(workspace, 'impact_site_simulation.csv')
graph_file = os.path.join(workspace, 'results_plot.png')
static_maps.graph_it(log_file, graph_file)
def test_graphing(self):
workspace = os.path.join(os.getcwd(), 'static_map_quality')
csv_path = os.path.join(workspace, 'impact_site_simulation.csv')
graph_file = os.path.join(workspace, 'results_plot.png')
static_maps.graph_it(csv_path, graph_file)
class CarbonStaticMapTest(GISTest):
def setUp(self):
self.config = {
"workspace_dir": "",
"do_biophysical": True,
"do_uncertainty": False,
"do_valuation": False,
"lulc_cur_uri": "",
"carbon_pools_uri": os.path.join(FULL_DATA, "Carbon_pools_Colombia.csv")
}
def test_execute_carbon_smoke(self):
lulc_uri = os.path.join(CLIPPED_DATA, 'ecosystems.tif')
workspace = 'test_workspace_carbon'
static_maps.execute_model('carbon', lulc_uri, workspace,
config=self.config)
def test_carbon_static_map(self):
lulc_uri = os.path.join(CLIPPED_DATA, 'ecosystems.tif')
target_lucode = 124
static_map_uri = 'carbon_static_map.tif'
static_maps.build_static_map('carbon', lulc_uri, target_lucode,
static_map_uri, config=self.config)
@unittest.skip("This takes a long time to run")
def test_carbon_static_map_full(self):
lulc_uri = os.path.join(FULL_DATA, 'ecosystems.tif')
target_lucode = 124
static_map_uri = 'carbon_static_map_full.tif'
static_maps.build_static_map('carbon', lulc_uri, target_lucode,
static_map_uri)
def test_carbon_static_map_quality(self):
TERRESTRIAL = os.path.join(INVEST_DATA, 'Base_Data', 'Terrestrial')
FRESHWATER = os.path.join(INVEST_DATA, 'Base_Data', 'Freshwater')
lulc_uri = os.path.join(INVEST_DATA, 'Base_Data', 'Terrestrial',
'lulc_samp_cur')
impact_lucode = 88
model_name = 'carbon'
        num_iterations = 15  # not used unless explicitly passed to function
workspace = os.path.join(os.getcwd(), 'static_map_quality_carbon')
watersheds = os.path.join(FRESHWATER, 'watersheds.shp')
self.config['carbon_pools_uri'] = os.path.join(INVEST_DATA, 'Carbon',
'Input', 'carbon_pools_samp.csv')
if os.path.exists(workspace):
shutil.rmtree(workspace)
os.makedirs(workspace)
base_workspace = os.path.join(workspace, 'base_run')
static_maps.execute_model(model_name, lulc_uri, base_workspace, self.config)
base_run = os.path.join(base_workspace, 'output', 'tot_C_cur.tif')
static_map_uri = os.path.join(workspace, 'base_static_map.tif')
static_map_workspace = os.path.join(workspace, 'static_map')
static_maps.build_static_map(model_name, lulc_uri, impact_lucode,
static_map_uri, base_run, self.config,
workspace=static_map_workspace)
static_maps.test_static_map_quality(base_run, static_map_uri,
lulc_uri, impact_lucode, watersheds, model_name, workspace,
self.config, num_iterations=num_iterations)
print 'graphing'
log_file = os.path.join(workspace, 'impact_site_simulation.csv')
graph_file = os.path.join(workspace, 'results_plot.png')
static_maps.graph_it(log_file, graph_file)
class RasterMathTest(GISTest):
def setUp(self):
self.workspace = os.path.join(os.path.dirname(__file__), 'raster_math')
if os.path.exists(self.workspace):
shutil.rmtree(self.workspace)
os.makedirs(self.workspace)
def test_raster_math_smoke(self):
sample_raster = os.path.join(FULL_DATA, 'ecosystems.tif')
args = {
'workspace': self.workspace,
'name': 'sample',
'base_uri': sample_raster,
'paved_uri': sample_raster,
'bare_uri': sample_raster,
'protection_uri': sample_raster,
}
static_maps.raster_math(args)
for filename_base in ['bare', 'protection', 'paved']:
filename = os.path.join(self.workspace, '%s_%s_static_map.tif' % (
args['name'], filename_base))
self.assertEqual(os.path.exists(filename), True)
class NutrientStaticMapTest(GISTest):
def setUp(self):
self.config = {
"workspace_dir": "",
"dem_uri": os.path.join(CLIPPED_DATA, 'dem.tif'),
"lulc_uri": "",
"watersheds_uri": os.path.join(CLIPPED_DATA, "servicesheds_col.shp"),
"biophysical_table_uri": os.path.join(FULL_DATA, "Biophysical_Colombia.csv"),
"soil_depth_uri": os.path.join(FULL_DATA, 'Soil_depth.tif'),
"precipitation": os.path.join(FULL_DATA, 'Precipitation.tif'),
"pawc_uri": os.path.join(FULL_DATA, 'Plant_available_water_content.tif'),
"eto_uri": os.path.join(FULL_DATA, 'Ref_evapotranspiration.tif'),
"seasonality_constant": 5,
"calc_p": False,
"calc_n": True,
"water_purification_threshold_table_uri": os.path.join(FULL_DATA,
'sediment_threshold.csv'),
"accum_threshold": 1000,
"depth_to_root_rest_layer_uri": os.path.join(FULL_DATA,
'Soil_depth.tif'),
"valuation_enabled": False,
}
def test_execute_sediment_smoke(self):
lulc_uri = os.path.join(CLIPPED_DATA, 'ecosystems.tif')
workspace = 'test_workspace'
static_maps.execute_model('sediment', lulc_uri, workspace,
config=self.config)
def test_sediment_static_map(self):
lulc_uri = os.path.join(CLIPPED_DATA, 'ecosystems.tif')
target_lucode = 124
static_map_uri = 'sediment_static_map.tif'
static_maps.build_static_map('sediment', lulc_uri, target_lucode,
static_map_uri, config=self.config)
def test_execute(self):
self.config['workspace_dir'] = os.path.join(os.getcwd(),
'nut_execute_test')
self.config['model_name'] = 'nutrient'
self.config['paved_landcover_code'] = 60
self.config['bare_landcover_code'] = 80
self.config['landuse_uri'] = os.path.join(CLIPPED_DATA,
'ecosystems.tif')
self.config['do_parallelism'] = True
self.config['fut_landuse_uri'] = os.path.join(CLIPPED_DATA,
'ecosystems.tif') # just to ensure it runs.
static_maps.execute(self.config)
def test_execute_quality(self):
self.config['workspace_dir'] = os.path.join(os.getcwd(),
'nut_execute_test')
self.config['model_name'] = 'nutrient'
self.config['paved_landcover_code'] = 60
self.config['bare_landcover_code'] = 80
self.config['landuse_uri'] = os.path.join(CLIPPED_DATA,
'ecosystems.tif')
self.config['do_parallelism'] = True
self.config['fut_landuse_uri'] = os.path.join(CLIPPED_DATA,
'ecosystems.tif') # just to ensure it runs.
self.config['num_simulations'] = 5
if os.path.exists(self.config['workspace_dir']):
shutil.rmtree(self.config['workspace_dir'])
static_maps.execute(self.config)
|
py | b40096d410aeb1ea9030439081afad9a4b44424a | # -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2018 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA Workflow Engine Serial config."""
import logging
import os
SHARED_VOLUME_PATH = os.getenv('SHARED_VOLUME_PATH', '/var/reana')
"""Path to the mounted REANA shared volume."""
BROKER_URL = os.getenv('RABBIT_MQ_URL',
'message-broker.default.svc.cluster.local')
BROKER_USER = os.getenv('RABBIT_MQ_USER', 'test')
BROKER_PASS = os.getenv('RABBIT_MQ_PASS', '1234')
BROKER_PORT = os.getenv('RABBIT_MQ_PORT', 5672)
BROKER = os.getenv('RABBIT_MQ', 'amqp://{0}:{1}@{2}//'.format(BROKER_USER,
BROKER_PASS,
BROKER_URL))
MOUNT_CVMFS = os.getenv('REANA_MOUNT_CVMFS', 'false')
JOB_STATUS_POLLING_INTERVAL = os.getenv('POLLING_INTERVAL', 3)
"""Polling interval in seconds for status of running jobs."""
CACHE_ENABLED = False
"""Determines if jobs caching is enabled."""
|
py | b40096eefe95435424c5c1060476203f0af59703 | import json
registry = {}
def register(cls):
registry[cls.__name__] = cls
class Meta(type):
    def __new__(meta, name, bases, class_dict):  # build the class, then register it by name
cls = type.__new__(meta, name, bases, class_dict)
register(cls)
return cls
class Serializable(object, metaclass=Meta):
"""Provide method to serialize the args of a class"""
def __init__(self, *args):
self.args = args
def serialize(self):
return json.dumps({'args': self.args,
'class': self.__class__.__name__
})
def deserialize(serialized):
params = json.loads(serialized)
cls = registry[params['class']]
return cls(*params['args'])
class Point3Db(Serializable):
def __init__(self, x, y, z):
super().__init__(x, y, z)
self.x = x
self.y = y
self.z = z
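# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition, not part of the original module):
# Point3Db was added to `registry` automatically by the Meta metaclass, so a
# serialized instance can be rebuilt from its JSON form.
if __name__ == '__main__':
    point = Point3Db(1, 2, 3)
    data = point.serialize()     # '{"args": [1, 2, 3], "class": "Point3Db"}'
    restored = deserialize(data)
    print(restored.x, restored.y, restored.z)  # 1 2 3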
|
py | b40097d1235843a7752374395edcc5e9fe7291cc | import info
class subinfo(info.infoclass):
def setTargets(self):
for v in ['3.5.1', '3.4.3']:
self.targets[v] = 'https://github.com/libarchive/libarchive/archive/v' + v + '.tar.gz'
self.targetInstSrc[v] = 'libarchive-' + v
self.targetDigests['3.4.3'] = (['19556c1c67aacdff547fd719729630444dbc7161c63eca661a310676a022bb01'], CraftHash.HashAlgorithm.SHA256)
self.targetDigests['3.5.1'] = (['6d92e669e259a55a0119c135873469778f2718acbe605717f9d341487b4d0cba'], CraftHash.HashAlgorithm.SHA256)
self.description = "C library and command-line tools for reading and writing tar, cpio, zip, ISO, and other archive formats"
if CraftCore.compiler.isAndroid:
self.defaultTarget = '3.5.1'
else:
self.defaultTarget = '3.4.3'
def setDependencies(self):
self.buildDependencies["libs/liblzma"] = None
self.buildDependencies["libs/libbzip2"] = None
self.buildDependencies["libs/zlib"] = None
self.buildDependencies["libs/openssl"] = None
self.buildDependencies["libs/libxml2"] = None
self.buildDependencies["libs/pcre"] = None
self.buildDependencies["libs/iconv"] = None
self.runtimeDependencies["virtual/base"] = None
# self.runtimeDependencies["libs/expat"] = None
from Package.CMakePackageBase import *
class Package(CMakePackageBase):
def __init__(self):
CMakePackageBase.__init__(self)
# use openssl for encryption
self.subinfo.options.configure.args += "-DENABLE_OPENSSL=ON -DENABLE_CNG=OFF -DENABLE_NETTLE=OFF -DENABLE_WERROR=OFF"
if CraftCore.compiler.isAndroid:
self.subinfo.options.configure.args += f" -DCMAKE_C_FLAGS='-I {self.sourceDir()}/contrib/android/include'"
|
py | b40097e78788e0e60383be962d302adf34776615 | ############################################################################
#
# Copyright 2013 Lee Smith
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
import os
import sys
import re
from collections import OrderedDict
from functools import partial
import xbmc, xbmcgui
import utils
ARCH = utils.get_arch()
if not ARCH.startswith('RPi'):
sys.exit(1)
class Main(object):
def __init__(self):
utils.log("Started service")
revision = utils.get_revision()
utils.log("Board revision: {}".format(revision))
if revision is not None:
utils.set_property_setting('revision', revision)
max_ram = utils.get_max_ram()
utils.log("RAM size: {}MB".format(max_ram))
utils.set_property_setting('max_ram', max_ram)
board_type = utils.get_type()
utils.log("Board type: {}".format(board_type))
if board_type is not None:
utils.set_property_setting('type', board_type)
try:
utils.maybe_init_settings()
except IOError:
utils.log_exception()
self.monitor = MyMonitor(updated_settings_callback=self.apply_config)
while (not xbmc.abortRequested):
xbmc.sleep(1000)
def apply_config(self):
utils.log("Applying settings to {}".format(utils.CONFIG_PATH))
config = OrderedDict()
overclock_preset = utils.get_setting('overclock_preset')
utils.log("Using {} overclock settings".format(overclock_preset))
if overclock_preset == 'Custom':
for prop in utils.OVERCLOCK_PRESET_PROPERTIES:
config[prop] = utils.get_property_setting(prop)
elif overclock_preset in utils.OVERCLOCK_PRESETS:
config = OrderedDict(zip(utils.OVERCLOCK_PRESET_PROPERTIES,
utils.OVERCLOCK_PRESETS[overclock_preset]))
for prop in utils.OTHER_PROPERTIES:
value = utils.get_property_setting(prop)
if value is not None:
config[prop] = value
if ('force_turbo' in config and config['force_turbo'] == 1 and
'over_voltage' in config and config['over_voltage'] > 0):
if not xbmcgui.Dialog().yesno("OpenELEC RPi Config WARNING!!",
"Overvolting with dynamic overclock disabled",
"will void your warranty!!",
"Continue, or fix by enabling dynamic overclock?",
"Fix",
"Continue"):
utils.log("Enabling dynamic overclock")
config['force_turbo'] = 0
else:
utils.log("Warranty warning was ignored")
if 'max_usb_current' in config and config['max_usb_current'] == 1:
if not xbmcgui.Dialog().yesno("OpenELEC RPi Config WARNING!",
"To output 1.2A from the USB ports",
"you will need to use a good 2A power supply.",
"Are you sure you want to set max_usb_current?"):
config['max_usb_current'] = 0
updated = False
if os.path.isfile(utils.CONFIG_PATH):
with open(utils.CONFIG_PATH, 'r') as f:
config_txt = f.read()
config_txt_new = config_txt
for prop, value in config.iteritems():
utils.log("==== {} ====".format(prop))
config_property_re = re.compile(utils.CONFIG_SUB_RE_STR.format(prop), re.MULTILINE)
match = config_property_re.search(config_txt)
if match:
comment = bool(match.group(1))
old_value = match.group(3)
if value is None:
utils.log(" Commenting out")
config_txt_new = config_property_re.sub(utils.comment_out, config_txt_new)
updated = True
elif comment or str(value) != old_value:
utils.log(" Setting to {}".format(value))
config_txt_new = config_property_re.sub(partial(utils.replace_value, value),
config_txt_new)
updated = True
else:
utils.log(" Unchanged ({})".format(value))
elif value is not None:
utils.log(" Appending {}={}".format(prop, value))
config_txt_new += utils.property_value_str(prop, value) + '\n'
updated = True
else:
utils.log("A new {} will be created".format(utils.CONFIG_PATH))
config_txt_new = utils.add_property_values(config)
updated = True
reboot_needed = False
if updated:
reboot_needed = True
with utils.remount():
try:
utils.write_config(config_txt_new)
except (OSError, IOError) as e:
reboot_needed = False
utils.write_error(utils.CONFIG_PATH, str(e))
if reboot_needed:
if utils.restart_countdown("Ready to reboot to apply changes in config.txt"):
xbmc.restart()
else:
utils.log("Cancelled reboot")
else:
utils.log("No changes made")
class MyMonitor(xbmc.Monitor):
def __init__(self, updated_settings_callback):
xbmc.Monitor.__init__(self)
self.updated_settings_callback = updated_settings_callback
def onSettingsChanged(self):
self.updated_settings_callback()
Main()
|
py | b400981720d6b8715248453b35e88b09a4173088 | #!/usr/local/bin/python3
# coding: utf-8
# ytdlbot - downloader.py
# 8/14/21 16:53
#
__author__ = "Benny <[email protected]>"
import logging
import os
import pathlib
import random
import re
import subprocess
import time
from io import StringIO
from unittest.mock import MagicMock
import fakeredis
import ffmpeg
import filetype
import yt_dlp as ytdl
from tqdm import tqdm
from yt_dlp import DownloadError
from config import AUDIO_FORMAT, ENABLE_VIP, MAX_DURATION, TG_MAX_SIZE
from db import Redis
from limit import VIP
from utils import (adjust_formats, apply_log_formatter, current_time,
get_user_settings)
r = fakeredis.FakeStrictRedis()
apply_log_formatter()
def sizeof_fmt(num: int, suffix='B'):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def edit_text(bot_msg, text):
key = f"{bot_msg.chat.id}-{bot_msg.message_id}"
# if the key exists, we shouldn't send edit message
if not r.exists(key):
time.sleep(random.random())
r.set(key, "ok", ex=3)
bot_msg.edit_text(text)
def tqdm_progress(desc, total, finished, speed="", eta=""):
def more(title, initial):
if initial:
return f"{title} {initial}"
else:
return ""
f = StringIO()
tqdm(total=total, initial=finished, file=f, ascii=False, unit_scale=True, ncols=30,
bar_format="{l_bar}{bar} |{n_fmt}/{total_fmt} "
)
raw_output = f.getvalue()
tqdm_output = raw_output.split("|")
progress = f"`[{tqdm_output[1]}]`"
detail = tqdm_output[2]
text = f"""
{desc}
{progress}
{detail}
{more("Speed:", speed)}
{more("ETA:", eta)}
"""
f.close()
return text
def remove_bash_color(text):
return re.sub(r'\u001b|\[0;94m|\u001b\[0m|\[0;32m|\[0m|\[0;33m', "", text)
def download_hook(d: dict, bot_msg):
    # since we're using celery, the server may be located on a different continent,
    # so we can't trigger the hook very often.
# the key is user_id + download_link
original_url = d["info_dict"]["original_url"]
key = f"{bot_msg.chat.id}-{original_url}"
if d['status'] == 'downloading':
downloaded = d.get("downloaded_bytes", 0)
total = d.get("total_bytes") or d.get("total_bytes_estimate", 0)
# percent = remove_bash_color(d.get("_percent_str", "N/A"))
speed = remove_bash_color(d.get("_speed_str", "N/A"))
if ENABLE_VIP and not r.exists(key):
result, err_msg = check_quota(total, bot_msg.chat.id)
if result is False:
raise ValueError(err_msg)
eta = remove_bash_color(d.get("_eta_str", d.get("eta")))
text = tqdm_progress("Downloading...", total, downloaded, speed, eta)
edit_text(bot_msg, text)
r.set(key, "ok", ex=5)
def upload_hook(current, total, bot_msg):
# filesize = sizeof_fmt(total)
text = tqdm_progress("Uploading...", total, current)
edit_text(bot_msg, text)
def check_quota(file_size, chat_id) -> ("bool", "str"):
remain, _, ttl = VIP().check_remaining_quota(chat_id)
if file_size > remain:
refresh_time = current_time(ttl + time.time())
err = f"Quota exceed, you have {sizeof_fmt(remain)} remaining, " \
f"but you want to download a video with {sizeof_fmt(file_size)} in size. \n" \
f"Try again in {ttl} seconds({refresh_time})"
logging.warning(err)
Redis().update_metrics("quota_exceed")
return False, err
else:
return True, ""
def convert_to_mp4(resp: dict, bot_msg):
default_type = ["video/x-flv", "video/webm"]
if resp["status"]:
# all_converted = []
for path in resp["filepath"]:
# if we can't guess file type, we assume it's video/mp4
mime = getattr(filetype.guess(path), "mime", "video/mp4")
if mime in default_type:
if not can_convert_mp4(path, bot_msg.chat.id):
logging.warning("Conversion abort for non VIP %s", bot_msg.chat.id)
bot_msg._client.send_message(
bot_msg.chat.id,
"You're not VIP, so you can't convert longer video to streaming formats.")
break
edit_text(bot_msg, f"{current_time()}: Converting {path.name} to mp4. Please wait.")
new_file_path = path.with_suffix(".mp4")
logging.info("Detected %s, converting to mp4...", mime)
subprocess.check_output(["ffmpeg", "-y", "-i", path, new_file_path])
index = resp["filepath"].index(path)
resp["filepath"][index] = new_file_path
return resp
def can_convert_mp4(video_path, uid):
if not ENABLE_VIP:
return True
video_streams = ffmpeg.probe(video_path, select_streams="v")
try:
duration = int(float(video_streams["format"]["duration"]))
except Exception:
duration = 0
if duration > MAX_DURATION and not VIP().check_vip(uid):
logging.info("Video duration: %s, not vip, can't convert", duration)
return False
else:
return True
def ytdl_download(url, tempdir, bm) -> dict:
chat_id = bm.chat.id
response = {"status": True, "error": "", "filepath": []}
output = pathlib.Path(tempdir, "%(title).70s.%(ext)s").as_posix()
ydl_opts = {
'progress_hooks': [lambda d: download_hook(d, bm)],
'outtmpl': output,
'restrictfilenames': False,
'quiet': True
}
formats = [
"bestvideo[ext=mp4]+bestaudio[ext=m4a]/bestvideo+bestaudio",
"bestvideo[vcodec^=avc]+bestaudio[acodec^=mp4a]/best[vcodec^=avc]/best",
""
]
adjust_formats(chat_id, url, formats)
add_instagram_cookies(url, ydl_opts)
for f in formats:
if f:
ydl_opts["format"] = f
try:
logging.info("Downloading for %s with format %s", url, f)
with ytdl.YoutubeDL(ydl_opts) as ydl:
ydl.download([url])
response["status"] = True
response["error"] = ""
break
except (ValueError, DownloadError) as e:
logging.error("Download failed for %s ", url)
response["status"] = False
response["error"] = str(e)
except Exception as e:
logging.error("UNKNOWN EXCEPTION: %s", e)
logging.info("%s - %s", url, response)
if response["status"] is False:
return response
for i in os.listdir(tempdir):
p = pathlib.Path(tempdir, i)
file_size = os.stat(p).st_size
if ENABLE_VIP:
remain, _, ttl = VIP().check_remaining_quota(chat_id)
result, err_msg = check_quota(file_size, chat_id)
else:
result, err_msg = True, ""
if result is False:
response["status"] = False
response["error"] = err_msg
else:
VIP().use_quota(bm.chat.id, file_size)
response["status"] = True
response["filepath"].append(p)
# convert format if necessary
settings = get_user_settings(str(chat_id))
if settings[2] == "video" or isinstance(settings[2], MagicMock):
# only convert if send type is video
convert_to_mp4(response, bm)
if settings[2] == "audio":
check_audio_format(response)
# disable it for now
# split_large_video(response)
return response
def check_audio_format(resp: "dict"):
if resp["status"]:
# all_converted = []
path: pathlib.PosixPath
for path in resp["filepath"]:
if path.suffix != f".{AUDIO_FORMAT}":
new_path = path.with_suffix(f".{AUDIO_FORMAT}")
subprocess.check_output(["ffmpeg", "-y", "-i", path, new_path])
path.unlink()
index = resp["filepath"].index(path)
resp["filepath"][index] = new_path
def add_instagram_cookies(url: "str", opt: "dict"):
if url.startswith("https://www.instagram.com"):
opt["cookiefi22"] = pathlib.Path(__file__).parent.joinpath("instagram.com_cookies.txt").as_posix()
def run_splitter(video_path: "str"):
subprocess.check_output(f"sh split-video.sh {video_path} {TG_MAX_SIZE} ".split())
os.remove(video_path)
def split_large_video(response: "dict"):
original_video = None
split = False
for original_video in response.get("filepath", []):
size = os.stat(original_video).st_size
if size > TG_MAX_SIZE:
split = True
logging.warning("file is too large %s, splitting...", size)
run_splitter(original_video)
if split and original_video:
response["filepath"] = [i.as_posix() for i in pathlib.Path(original_video).parent.glob("*")]
|
py | b400987b547fbf21c50d0cac415e01c4d9c47edd | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class RecognizeBusinessLicenseResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'result': 'BusinessLicenseResult'
}
attribute_map = {
'result': 'result'
}
def __init__(self, result=None):
"""RecognizeBusinessLicenseResponse - a model defined in huaweicloud sdk"""
super(RecognizeBusinessLicenseResponse, self).__init__()
self._result = None
self.discriminator = None
if result is not None:
self.result = result
@property
def result(self):
"""Gets the result of this RecognizeBusinessLicenseResponse.
:return: The result of this RecognizeBusinessLicenseResponse.
:rtype: BusinessLicenseResult
"""
return self._result
@result.setter
def result(self, result):
"""Sets the result of this RecognizeBusinessLicenseResponse.
:param result: The result of this RecognizeBusinessLicenseResponse.
:type: BusinessLicenseResult
"""
self._result = result
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RecognizeBusinessLicenseResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | b400996b85f67a125531564696b40a87657ae693 | import os
# import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ["MOLECULE_INVENTORY_FILE"]
).get_hosts("all")
def test_csstudio_version(host):
version = host.file("/opt/cs-studio/ess-version.txt").content_string
assert version.strip() == "4.6.1.25"
def test_csstudio_startup_script(host):
script = host.file("/usr/local/bin/cs-studio")
assert script.is_file
assert script.contains("/opt/cs-studio/ESS CS-Studio")
def test_csstudio_binary(host):
assert host.file("/opt/cs-studio/ESS CS-Studio").exists
def test_fonts(host):
cmd = host.run('fc-match -s "monospace"')
# assert cmd.stdout.startswith('n022003l.pfb: "Nimbus Mono L" "Regular"')
assert "Source Sans Pro" in cmd.stdout
# assert 'Titillium' in cmd.stdout
cmd = host.run("fc-list")
assert "Open Sans" in cmd.stdout
assert "Roboto" in cmd.stdout
def test_xulrunner(host):
cmd = host.run("/opt/xulrunner/xulrunner -v 2>&1")
assert cmd.stdout.strip() == "Mozilla XULRunner 1.9.2.29pre - 20120513033204"
|
py | b4009af38aa65258c3cc5940ee3b20784c7f606c | # coding: utf-8
"""
Customer Journey as a Service (CJaaS)
Something amazing, something special - the Customer Journey as a Service (CJaaS) is a core data layer to enable Journeys across products built upon serverless multi-cloud architecture, to be available as a SaaS service for applications inside and outside of Cisco. [**Cisco Experimental - Not For Production Use**] # noqa: E501
OpenAPI spec version: 0.5.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.profile_view_builder_template import ProfileViewBuilderTemplate # noqa: E501
from swagger_client.rest import ApiException
class TestProfileViewBuilderTemplate(unittest.TestCase):
"""ProfileViewBuilderTemplate unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testProfileViewBuilderTemplate(self):
"""Test ProfileViewBuilderTemplate"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.profile_view_builder_template.ProfileViewBuilderTemplate() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | b4009b1773036e9461c717630a65c3313786fdd7 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
UL call demonstrated: DaqoDevice.daq_out_scan()
Purpose: Synchronous output on analog and
digital output channels
Demonstration: Continuously outputs a user specified
waveform on an analog output channel
and/or a digital output channel
Steps:
1. Call get_daq_device_inventory() to get the list of available DAQ devices
2. Call DaqDevice() to create a DaqDevice object
3. Call DaqDevice.get_daqo_device() to get the DaqoDevice object for the DAQ
output subsystem
4. Verify the DaqoDevice object is valid
5. Get the channel types supported by the DAQ output subsystem
6. Call DaqDevice.connect() to connect to the device
7. Configure the analog and digital channels
8. Call DaqoDevice.daq_out_scan() to output the waveforms
9. Call DaqoDevice.get_scan_status() to get the scan status and display the
status.
10. Call DaqoDevice.scan_stop() to stop the scan
11. Call DaqDevice.disconnect() and DaqDevice.release() before exiting the
process
"""
from __future__ import print_function
from math import pi, sin
from time import sleep
from sys import stdout
from os import system
from uldaq import (get_daq_device_inventory, DaqDevice, create_float_buffer,
InterfaceType, DaqOutScanFlag, Range, ScanOption,
DigitalDirection, DigitalPortType, ScanStatus,
DaqOutChanType, DaqOutChanDescriptor)
# Constants
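# ANSI escape sequences used to move the cursor up one line and erase it,
# so the scan status printed below can be redrawn in place.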
CURSOR_UP = '\x1b[1A'
ERASE_LINE = '\x1b[2K'
def main():
"""Multi-subsystem simultaneous output scan example."""
# Parameters for DaqoDevice.daq_out_scan
channel_descriptors = []
samples_per_channel = 2000 # Two second buffer (sample_rate * 2)
sample_rate = 1000 # Hz
scan_options = ScanOption.CONTINUOUS
scan_flags = DaqOutScanFlag.DEFAULT
# Parameters used when creating channel_descriptors list
analog_low_channel = 0
analog_high_channel = 0
analog_range_index = 0
digital_low_port_index = 0
digital_high_port_index = 0
interface_type = InterfaceType.ANY
daq_device = None
daqo_device = None
try:
# Get descriptors for all of the available DAQ devices.
devices = get_daq_device_inventory(interface_type)
number_of_devices = len(devices)
# Verify at least one DAQ device is detected.
if number_of_devices == 0:
raise RuntimeError('Error: No DAQ devices found')
print('Found', number_of_devices, 'DAQ device(s):')
for i in range(number_of_devices):
print(' [', i, '] ', devices[i].product_name, ' (',
devices[i].unique_id, ')', sep='')
descriptor_index = input('\nPlease select a DAQ device, enter a number'
+ ' between 0 and '
+ str(number_of_devices - 1) + ': ')
descriptor_index = int(descriptor_index)
if descriptor_index not in range(number_of_devices):
raise RuntimeError('Error: Invalid descriptor index')
# Create the DAQ device from the descriptor at the specified index.
daq_device = DaqDevice(devices[descriptor_index])
daqo_device = daq_device.get_daqo_device()
# Verify the specified DAQ device supports DAQ output.
if daqo_device is None:
raise RuntimeError('Error: The DAQ device does not support DAQ '
'output')
daqo_info = daqo_device.get_info()
# Establish a connection to the device.
descriptor = daq_device.get_descriptor()
print('\nConnecting to', descriptor.dev_string, '- please wait...')
# For Ethernet devices using a connection_code other than the default
# value of zero, change the line below to enter the desired code.
daq_device.connect(connection_code=0)
# Configure supported analog input and digital input channels
amplitudes = []
samples_per_cycle = int(sample_rate / 10.0) # 10 Hz sine wave
supported_channel_types = daqo_info.get_channel_types()
if DaqOutChanType.ANALOG in supported_channel_types:
configure_analog_channels(daq_device, analog_low_channel,
analog_high_channel, analog_range_index,
channel_descriptors, amplitudes)
if DaqOutChanType.DIGITAL in supported_channel_types:
configure_digital_channels(daq_device, digital_low_port_index,
digital_high_port_index,
channel_descriptors, amplitudes)
num_channels = len(channel_descriptors)
# Create a buffer for output data.
out_buffer = create_float_buffer(num_channels, samples_per_channel)
# Fill the output buffer with data.
create_output_data(channel_descriptors, samples_per_channel,
samples_per_cycle, amplitudes, out_buffer)
print('\n', descriptor.dev_string, 'ready')
print(' Function demonstrated: DaqoDevice.daq_out_scan')
print(' Number of Scan Channels:', num_channels)
for chan in range(num_channels):
chan_descriptor = channel_descriptors[chan] # type: DaqOutChanDescriptor
print(' Scan Channel', chan, end='')
print(': type =', DaqOutChanType(chan_descriptor.type).name, end='')
if chan_descriptor.type == DaqOutChanType.ANALOG:
print(', channel =', chan_descriptor.channel, end='')
print(', range =', Range(chan_descriptor.range).name, end='')
else:
print(', port =', DigitalPortType(chan_descriptor.channel).name,
end='')
print('')
print(' Samples per channel:', samples_per_channel)
print(' Rate:', sample_rate, 'Hz')
print(' Scan options:', display_scan_options(scan_options))
try:
input('\nHit ENTER to continue')
except (NameError, SyntaxError):
pass
system('clear')
# Start the output scan.
sample_rate = daqo_device.daq_out_scan(channel_descriptors,
samples_per_channel, sample_rate,
scan_options, scan_flags,
out_buffer)
print('Please enter CTRL + C to terminate the process\n')
print('Active DAQ device: ', descriptor.dev_string, ' (',
descriptor.unique_id, ')\n', sep='')
print(' Actual scan rate: ', sample_rate, 'Hz')
try:
while True:
# Get and display the scan status.
scan_status, transfer_status = daqo_device.get_scan_status()
if scan_status != ScanStatus.RUNNING:
break
print(' Current scan count: ',
transfer_status.current_scan_count)
print(' Current total count:',
transfer_status.current_total_count)
print(' Current index: ',
transfer_status.current_index)
stdout.flush()
sleep(0.1)
# Clear the previous status before displaying the next status.
for _line in range(3):
stdout.write(CURSOR_UP + ERASE_LINE)
except KeyboardInterrupt:
pass
except RuntimeError as error:
print('\n', error)
finally:
if daq_device:
# Stop the scan.
if daqo_device:
daqo_device.scan_stop()
# before disconnecting, set digital ports back to input
dio_device = daq_device.get_dio_device()
for chan in channel_descriptors:
if chan.type == DaqOutChanType.DIGITAL:
dio_device.d_config_port(chan.channel,
DigitalDirection.INPUT)
# Disconnect from the DAQ device.
if daq_device.is_connected():
daq_device.disconnect()
# Release the DAQ device resource.
daq_device.release()
def configure_analog_channels(daq_device, low_channel, high_channel,
range_index, channel_descriptors, amplitudes):
"""
Add analog output channels to the channel_descriptors list.
Raises:
RuntimeError if a channel is not in range.
"""
ao_device = daq_device.get_ao_device()
ao_info = ao_device.get_info()
# Validate the low_channel and high_channel values
num_channels = ao_info.get_num_chans()
valid_channels_string = ('valid channels are 0 - '
'{0:d}'.format(num_channels - 1))
if low_channel < 0 or low_channel >= num_channels:
error_message = ' '.join([('Error: Invalid analog_low_channel '
'selection,'), valid_channels_string])
raise RuntimeError(error_message)
if high_channel < 0 or high_channel >= num_channels:
error_message = ' '.join([('Error: Invalid analog_high_channel '
'selection,'), valid_channels_string])
raise RuntimeError(error_message)
# Validate the range_index value
voltage_ranges = ao_info.get_ranges()
if range_index < 0:
range_index = 0
elif range_index >= len(voltage_ranges):
range_index = len(voltage_ranges) - 1
voltage_range = voltage_ranges[range_index]
# Create a channel descriptor for each channel and add it to the list
for channel in range(low_channel, high_channel + 1):
descriptor = DaqOutChanDescriptor(channel, DaqOutChanType.ANALOG,
voltage_range)
channel_descriptors.append(descriptor)
amplitudes.append(1.0) # Volts peak
def configure_digital_channels(daq_device, low_port_index, high_port_index,
channel_descriptors, amplitudes):
"""
Add digital output ports to the channel_descriptors list.
Raises:
RuntimeError if a port index is not in range
"""
dio_device = daq_device.get_dio_device()
dio_info = dio_device.get_info()
port_types = dio_info.get_port_types()
# Validate the low_port_index and high_port_index values
number_of_ports = len(port_types)
valid_ports_string = ('valid digital port index values are 0 - '
'{0:d}'.format(number_of_ports - 1))
if low_port_index < 0 or low_port_index >= number_of_ports:
error_message = ' '.join([('Error: Invalid digital_low_port_index '
'selection,'), valid_ports_string])
raise RuntimeError(error_message)
if high_port_index < 0 or high_port_index >= number_of_ports:
error_message = ' '.join([('Error: Invalid digital_high_port_index '
'selection,'), valid_ports_string])
raise RuntimeError(error_message)
# Create a channel descriptor for each port and add it to the list
# Also calculate the amplitude to be used for the digital port waveform
for port_index in range(low_port_index, high_port_index + 1):
port = port_types[port_index]
dio_device.d_config_port(port, DigitalDirection.OUTPUT)
descriptor = DaqOutChanDescriptor(port, DaqOutChanType.DIGITAL)
channel_descriptors.append(descriptor)
port_info = dio_info.get_port_info(port)
amplitudes.append((pow(2, port_info.number_of_bits) - 1) / 2)
def create_output_data(channel_descriptors, samples_per_channel,
samples_per_cycle, amplitudes, data_buffer):
"""Populate the buffer with sine wave data."""
cycles_per_buffer = int(samples_per_channel / samples_per_cycle)
i = 0
for _cycle in range(cycles_per_buffer):
for sample in range(samples_per_cycle):
for chan in channel_descriptors:
sin_val = sin(2 * pi * sample / samples_per_cycle)
if chan.type == DaqOutChanType.ANALOG:
offset = amplitudes[0] if chan.range > 1000 else 0.0
data_buffer[i] = amplitudes[0] * sin_val + offset
else:
offset = amplitudes[1]
data_buffer[i] = round(amplitudes[1] * sin_val + offset)
i += 1
def display_scan_options(bit_mask):
"""Create a displays string for all scan options."""
options = []
if bit_mask == ScanOption.DEFAULTIO:
options.append(ScanOption.DEFAULTIO.name)
for option in ScanOption:
if option & bit_mask:
options.append(option.name)
return ', '.join(options)
if __name__ == '__main__':
main()
|
py | b4009bb0d9adf3fb2bea7f97a61eec5867efb294 | '''
Function Name : main()
Description : Open a file and write data to it using open() and write()
Function Date : 15 Mar 2021
Function Author : Prasad Dangare
Input : String (file name and the data to write)
Output : None (the data is written to the file)
'''
def main():
name = input("Enter the file name that you want to Write : ")
fobj = open(name, "w") # create new file
    data = input("Enter The Data That You Want To Write In The File : ")
    fobj.write(data)
    fobj.close()  # flush the buffer and release the file handle
if __name__ == "__main__":
main() |
py | b4009c11538947bbcbaf7b1320aa46613bba6a8f | # 'abcde'
# 'abfde'
# => True
# 'abcde'
# 'abde'
# => True
# 'abde'
# 'abfde'
# => True
def handle_different_sized_strings(s1, s2):
    # s1 is the longer string (by exactly one character, checked by the caller)
    indice1, indice2, number_of_differences = 0, 0, 0
    while indice2 < len(s2):
        if number_of_differences > 1:
            return False
        if s1[indice1] != s2[indice2]:
            # skip the extra character in the longer string and re-compare
            number_of_differences += 1
            indice1 += 1
            continue
        indice1 += 1
        indice2 += 1
    return True
def handle_same_sized_strings(s1, s2):
number_of_differences = 0
for indice in range(len(s1)):
if s1[indice] != s2[indice]:
number_of_differences += 1
return number_of_differences == 1
def is_one_away(s1, s2):
if len(s1) - len(s2) > 1 or len(s2) - len(s1) > 1:
return False
if s1 == s2:
return True
if len(s1) > len(s2):
return handle_different_sized_strings(s1, s2)
if len(s2) > len(s1):
return handle_different_sized_strings(s2, s1)
if len(s1) == len(s2):
return handle_same_sized_strings(s1, s2)
print(is_one_away('abcde', 'abfde'))
print(is_one_away('abcde', 'abde'))
print(is_one_away('abde', 'abfde'))
print(is_one_away('', ''))
print(is_one_away('', 'a'))
print(is_one_away('a', ''))
print(is_one_away('aabb', 'aacc'))
print()
print(is_one_away("abcde", "abcd")) # should return True
print(is_one_away("abde", "abcde")) # should return True
print(is_one_away("a", "a")) # should return True
print(is_one_away("abcdef", "abqdef")) # should return True
print(is_one_away("abcdef", "abccef")) # should return True
print(is_one_away("abcdef", "abcde")) # should return True
print(is_one_away("aaa", "abc")) # should return False
print(is_one_away("abcde", "abc")) # should return False
print(is_one_away("abc", "abcde")) # should return False
print(is_one_away("abc", "bcc")) # should return False
|
py | b4009e469ffa18604437bdb85d5cafc3b6ca7f1a | from contextlib import contextmanager
import logging
@contextmanager
def mute_logging():
    logging.disable()  # suppress all log records (up to CRITICAL) while inside the context
try:
yield
finally:
logging.disable(0)
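# Usage sketch (hypothetical caller):
#   with mute_logging():
#       run_noisy_third_party_code()   # nothing is logged inside this block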
class DataFrameBatchIterator:
def __init__(self, dataframe, batch_size):
self.df = dataframe
self.index = 0
self.batch_size = batch_size
def __iter__(self):
return self
def __next__(self):
if self.index < len(self.df):
self.index += self.batch_size
return self.df[self.index - self.batch_size: self.index].copy()
else:
raise StopIteration
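# Usage sketch (assuming an existing pandas DataFrame `df`):
#   for batch in DataFrameBatchIterator(df, batch_size=256):
#       handle(batch)   # `handle` is a placeholder for the caller's own processing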
# Timezone and log
from datetime import datetime,tzinfo,timedelta
class Zone(tzinfo):
def __init__(self,offset,isdst,name):
self.offset = offset
self.isdst = isdst
self.name = name
def utcoffset(self, dt):
return timedelta(hours=self.offset) + self.dst(dt)
def dst(self, dt):
return timedelta(hours=1) if self.isdst else timedelta(0)
def tzname(self,dt):
return self.name
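# Zone(8, False, 'GMT') below represents a fixed UTC+8 offset with DST disabled.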
d = datetime.now(tz=Zone(8,False,'GMT'))
now_time_string = d.strftime("%m_%d_%H:%M:%S")
log_file_name = f"{now_time_string}.log"
print(f"{log_file_name=}")
def lprint(text, *args, **kwargs):
texts = [text] + list(args)
texts = map(lambda x: str(x), texts)
text = ' '.join(texts)
print(text)
with open(f'./log/{log_file_name}', 'a') as f:
f.write(datetime.now(tz=Zone(8,False,'GMT')).strftime("[%m_%d_%H:%M:%S] \t") + text + "\n")
def change_log_file_name(text):
global log_file_name
log_file_name = text |
py | b4009f1ffc074fc76a2d9d7fb64e5ecf09de8c26 | import numpy as np
import pandas as pd
import os
import random
import pickle
import scipy.io as sio
from keras.datasets import mnist
from scipy import linalg
from sklearn.utils import shuffle
from scipy import ndimage
import cv2
import imageio
from skimage.color import rgb2hsv
import matplotlib.pyplot as plt
##############################################
########### PREPROCESSING STEPS ##############
##############################################
# given training data, return ZCA transformation properties
# as well as transformed training data
def ZCA(data_flat, eps=1e-5):
# flatten data array and convert to zero mean
data_var = (np.var(data_flat, axis = 1) + 10)**(1/2)
data_flat = (data_flat - np.mean(data_flat, axis = 1)[:,None]) / data_var[:, None]
# calculate covariance matrix
mean_zca = np.mean(data_flat, axis = 0)
cov = np.dot(data_flat.T, data_flat) / data_flat.shape[0]
U,S,V = np.linalg.svd(cov)
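    # whitening matrix: W = U * diag(1 / sqrt(S + eps)) * U^T, so the whitened data
    # has approximately identity covariance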
W_zca = np.dot(np.dot(U,np.diag(np.sqrt(1.0/(S + eps)))),U.T)
data_zca = np.dot(data_flat - mean_zca, W_zca)
return W_zca, mean_zca, data_zca
# transform data with pre-existing zca parameters
def white_data(data_flat, mean_zca, W_zca, norm_f):
data_var = (np.var(data_flat, axis = 1) + 10)**(1/2)
data_flat = (data_flat - np.mean(data_flat, axis = 1)[:,None]) / data_var[:, None]
if norm_f:
data_out = np.dot(data_flat - mean_zca, W_zca)
else:
data_out = data_flat
return data_out
def normalization(configs, args, x_, x_eval):
# no normalization
if args.norm_flag == 0:
configs['im_scale'] = [0, 255]
# per image standardization
elif args.norm_flag == 1:
configs['im_scale'] = [-3, 3]
x_ = (x_ - np.mean(x_, axis=(1,2,3))[:,None,None,None]) / np.std(x_, axis=(1,2,3))[:,None,None,None]
x_eval = (x_eval - np.mean(x_eval, axis=(1,2,3))[:,None,None,None]) / np.std(x_eval, axis=(1,2,3))[:,None,None,None]
# per image zca whitening
elif args.norm_flag == 2:
configs['im_scale'] = [-3, 3]
# zca
eps = 1e-5
# train
shape = x_.shape
data_flat = x_.reshape((shape[0], -1))
# flatten data array and convert to zero mean
data_var = (np.var(data_flat, axis = 1) + 10)**(1/2)
data_flat = (data_flat - np.mean(data_flat, axis = 1)[:,None]) / data_var[:, None]
# calculate covariance matrix
mean_zca = np.mean(data_flat, axis = 0)
cov = np.dot(data_flat.T, data_flat) / data_flat.shape[0]
U,S,V = np.linalg.svd(cov)
W_zca = np.dot(np.dot(U,np.diag(np.sqrt(1.0/(S + eps)))),U.T)
data_zca = np.dot(data_flat - mean_zca, W_zca)
x_ = data_zca.reshape(shape)
# val
shape = x_eval.shape
data_flat = x_eval.reshape((shape[0], -1))
# flatten data array and convert to zero mean
data_var = (np.var(data_flat, axis = 1) + 10)**(1/2)
data_flat = (data_flat - np.mean(data_flat, axis = 1)[:,None]) / data_var[:, None]
# calculate covariance matrix
mean_zca = np.mean(data_flat, axis = 0)
cov = np.dot(data_flat.T, data_flat) / data_flat.shape[0]
U,S,V = np.linalg.svd(cov)
W_zca = np.dot(np.dot(U,np.diag(np.sqrt(1.0/(S + eps)))),U.T)
data_zca = np.dot(data_flat - mean_zca, W_zca)
x_eval = data_zca.reshape(shape)
# per image sobel filtering
elif args.norm_flag == 3:
configs['im_scale'] = [0, 1]
# normalization
for i in range(len(x_)):
dx = ndimage.sobel(x_[i], 0) # horizontal derivative
dy = ndimage.sobel(x_[i], 1) # vertical derivative
mag = np.hypot(dx, dy) # magnitude
x_[i] = mag / np.max(mag)
for i in range(len(x_eval)):
dx = ndimage.sobel(x_eval[i], 0) # horizontal derivative
dy = ndimage.sobel(x_eval[i], 1) # vertical derivative
mag = np.hypot(dx, dy) # magnitude
x_eval[i] = mag / np.max(mag)
# put into -1,1 range
elif args.norm_flag == 4:
configs['im_scale'] = [-1, 1]
x_ = (x_ * 2.0 / 255.0 - 1.0)
x_eval = (x_eval * 2.0 / 255.0 - 1.0)
# put into 0,1 range
elif args.norm_flag == 5:
configs['im_scale'] = [0, 1]
x_ = (x_ - np.mean(x_, axis=(1,2,3))[:,None,None,None]) / (np.std(x_, axis=(1,2,3)))[:,None,None,None]
x_eval = (x_eval - np.mean(x_eval, axis=(1,2,3))[:,None,None,None]) / (np.std(x_eval, axis=(1,2,3)))[:,None,None,None]
# put into -1,1 range
elif args.norm_flag == 6:
configs['im_scale'] = [-1, 1]
x_ = (x_ - np.mean(x_, axis=(1,2,3))[:,None,None,None]) / (3*np.std(x_, axis=(1,2,3)))[:,None,None,None]
x_eval = (x_eval - np.mean(x_eval, axis=(1,2,3))[:,None,None,None]) / (3*np.std(x_eval, axis=(1,2,3)))[:,None,None,None]
return configs, x_, x_eval
# function to transform RGB to grayscale
def rgb2grey(rgb):
return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])
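# e.g. rgb2grey on an array of shape (N, H, W, 3) returns shape (N, H, W),
# using the ITU-R 601 luma weights (0.2989, 0.5870, 0.1140).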
###################################
######### LOAD DATASETS ###########
###################################
# load cifar-10 dataset
def load_cifar10(color_format='rgb'):
data_dir = os.path.join(os.getcwd(), 'datasets/cifar-10')
# load train data
for i in range(1, 6):
train_batch = os.path.join(data_dir, 'data_batch_' + str(i))
with open(train_batch, 'rb') as fo:
dict = pickle.load(fo, encoding='latin1')
x_batch = np.transpose(np.reshape(dict['data'], (10000, 3, 32, 32)), (0, 2, 3, 1))
y_batch = np.array(dict['labels'])
if i == 1:
x_train = x_batch
y_train = y_batch
else:
x_train = np.concatenate((x_train, x_batch))
y_train = np.concatenate((y_train, y_batch))
# load test data
test_batch = os.path.join(data_dir, 'test_batch')
with open(test_batch, 'rb') as fo:
dict = pickle.load(fo, encoding='latin1')
x_test = np.transpose(np.reshape(dict['data'], (10000, 3, 32, 32)), (0, 2, 3, 1))
y_test = np.array(dict['labels'])
# cast
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# color format
if color_format == 'gray':
x_train = rgb2grey(x_train)[:,:,:,None]
x_test = rgb2grey(x_test)[:,:,:,None]
elif color_format == 'hsv':
x_train = rgb2hsv(x_train)
x_test = rgb2hsv(x_test)
elif color_format == 'hv':
x_train = rgb2hsv(x_train)[:, :, :, [0,2]]
x_test = rgb2hsv(x_test)[:, :, :, [0,2]]
# labels
class_labels = ['airplane', 'auto.', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
return (x_train, y_train), (x_test, y_test), class_labels
# load cifar-100 dataset
def load_cifar100(color_format='rgb'):
data_dir = os.path.join(os.getcwd(), 'datasets/cifar-100')
# load train data
train_batch = os.path.join(data_dir, 'train')
with open(train_batch, 'rb') as fo:
dict = pickle.load(fo, encoding='latin1')
x_train = np.transpose(np.reshape(dict['data'], (-1, 3, 32, 32)), (0, 2, 3, 1))
y_train = np.array(dict['coarse_labels'])
# load test data
test_batch = os.path.join(data_dir, 'test')
with open(test_batch, 'rb') as fo:
dict = pickle.load(fo, encoding='latin1')
x_test = np.transpose(np.reshape(dict['data'], (-1, 3, 32, 32)), (0, 2, 3, 1))
y_test = np.array(dict['coarse_labels'])
# cast
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# color format
if color_format == 'gray':
x_train = rgb2grey(x_train)[:,:,:,None]
x_test = rgb2grey(x_test)[:,:,:,None]
elif color_format == 'hsv':
x_train = rgb2hsv(x_train)
x_test = rgb2hsv(x_test)
elif color_format == 'hv':
x_train = rgb2hsv(x_train)[:, :, :, [0,2]]
x_test = rgb2hsv(x_test)[:, :, :, [0,2]]
# labels
    class_labels = ['aquatic m', 'fish', 'flowers', 'food containers', 'fruit/veggies', 'electric', 'furniture', 'insects', 'carnivores', 'man made', 'natural scenes', 'omnivores', 'mammals', 'invertebrates', 'people', 'reptiles', 'sm mammals', 'trees', 'vehicles 1', 'vehicles 2']
return (x_train, y_train), (x_test, y_test), class_labels
# load emnist dataset
def load_emnist():
# load training data
train = pd.read_csv("datasets/emnist-balanced-train.csv")
x_train = train.values[:,1:].reshape(-1, 28, 28)
x_train = np.transpose(x_train, (0,2,1))
y_train = train.values[:,0]
# load testing data
test = pd.read_csv("datasets/emnist-balanced-test.csv")
x_test = test.values[:,1:].reshape(-1, 28, 28)
x_test = np.transpose(x_test, (0,2,1))
y_test = test.values[:,0]
# cast
x_train = x_train.astype('float32')[:,:,:,None]
x_test = x_test.astype('float32')[:,:,:,None]
# labels
class_labels = ['0','1','2','3','4','5','6','7','8','9',
'a','b','c','d','e','f','g','h','i','j',
'k','l','m','n','o','p','q','r','s','t',
'u','v','w','x','y','z','A','B','D','E',
'F','G','H','N','Q','R','T']
return (x_train, y_train), (x_test, y_test), class_labels
# load mnist dataset
def load_mnist():
# load from already required keras
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# cast
x_train = x_train.astype('float32')[:,:,:,None]
x_test = x_test.astype('float32')[:,:,:,None]
# labels
class_labels = ['0','1','2','3','4','5','6','7','8','9']
return (x_train, y_train), (x_test, y_test), class_labels
# load svhn dataset
def load_svhn(color_format='rgb'):
# load
train_data = sio.loadmat('datasets/svhn/train_32x32.mat')
x_train = np.transpose(train_data['X'], (3,0,1,2))
y_train = np.squeeze(train_data['y']) - 1
test_data = sio.loadmat('datasets/svhn/test_32x32.mat')
x_test = np.transpose(test_data['X'], (3,0,1,2))
y_test = np.squeeze(test_data['y']) - 1
extra_data = sio.loadmat('datasets/svhn/extra_32x32.mat')
x_extra = np.transpose(extra_data['X'], (3,0,1,2))
y_extra = np.squeeze(extra_data['y']) - 1
x_train = np.concatenate((x_train, x_extra[:10000]), axis=0)
y_train = np.concatenate((y_train, y_extra[:10000]), axis=0)
# cast
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# color format
if color_format == 'gray':
x_train = rgb2grey(x_train)[:,:,:,None]
x_test = rgb2grey(x_test)[:,:,:,None]
elif color_format == 'hsv':
x_train = rgb2hsv(x_train)
x_test = rgb2hsv(x_test)
elif color_format == 'hv':
x_train = rgb2hsv(x_train)[:, :, :, [0,2]]
x_test = rgb2hsv(x_test)[:, :, :, [0,2]]
# want class to match digit
y_train += 1
y_test += 1
y_train[y_train == 10] = 0
y_test[y_test == 10] = 0
#label
#class_labels = ['one','two','three','four','five','six','seven','eight','nine','zero']
class_labels = ['0','1','2','3','4','5','6','7','8','9']
#class_labels = ['\'1\'','\'2\'','\'3\'','\'4\'','\'5\'','\'6\'','\'7\'','\'8\'','\'9\'','\'0\'']
return (x_train, y_train), (x_test, y_test), class_labels
def load_core50(schedule_flag, configs, args, color_format='rgb'):
pkl_file = open('datasets/core50.p', 'rb')
data = pickle.load(pkl_file)
x_train = []
y_train = []
x_test = []
y_test = []
class_labels = ['plug', 'phone', 'sciccors', 'light_bulb', 'can', 'sun_glasses', 'ball', 'marker', 'cup', 'remote']
if schedule_flag == 3:
train_sessions = [1,2,4,5,6,8,9,11]
test_sessions = [3,7,10]
#object_order = [1, 2, 4, 10, 8, 3, 5, 6, 7, 9,
# 11, 12, 14, 20, 18, 13, 15, 16, 17, 19,
# 21, 22, 24, 30, 28, 23, 25, 26, 27, 29,
# 31, 32, 34, 40, 38, 33, 35, 36, 37, 39,
# 41, 42, 44, 50, 48, 43, 45, 46, 47, 49]
object_order = [1, 6, 16, 46, 36, 11, 21, 26, 31, 40,
2, 7, 17, 47, 37, 12, 22, 27, 32, 41,
3, 8, 18, 48, 38, 13, 23, 28, 33, 42,
4, 9, 19, 49, 39, 14, 24, 29, 34, 43,
5, 10, 20, 50, 40, 15, 25, 30, 35, 44]
x_train = {}
x_test = {}
y_train = {}
y_test = {}
for s, session in enumerate(train_sessions):
x_train[s] = {}
y_train[s] = {}
for i, obj in enumerate(object_order):
temp = []
for j in range(len(data[session][obj])):
temp.append(cv2.resize(data[session][obj][j], (64, 64)))
x_train[s][i] = np.array(temp)
y_train[s][i] = np.array([i for x in range(len(data[session][obj]))])
for s, session in enumerate(test_sessions):
x_test[s] = {}
y_test[s] = {}
for i, obj in enumerate(object_order):
temp = []
for j in range(len(data[session][obj])):
temp.append(cv2.resize(data[session][obj][j], (64, 64)))
x_test[s][i] = np.array(temp)
y_test[s][i] = np.array([i for x in range(len(data[session][obj]))])
if color_format == 'gray':
for session in range(len(train_sessions)):
for i in range(50):
x_train[session][i] = rgb2grey(x_train[session][i])[:,:,:,None]
for session in range(len(test_sessions)):
for i in range(50):
x_test[session][i] = rgb2grey(x_test[session][i])[:,:,:,None]
elif color_format == 'hsv':
for session in range(len(train_sessions)):
for i in range(50):
x_train[session][i] = rgb2hsv(x_train[session][i])
for session in range(len(test_sessions)):
for i in range(50):
x_test[session][i] = rgb2hsv(x_test[session][i])
elif color_format == 'hv':
for session in range(len(train_sessions)):
for i in range(50):
x_train[session][i] = rgb2hsv(x_train[session][i])[:, :, :, [0,2]]
for session in range(len(test_sessions)):
for i in range(50):
x_test[session][i] = rgb2hsv(x_test[session][i])[:, :, :, [0,2]]
for session in range(len(train_sessions)):
for i in range(50):
configs, x_train[session][i], _ = normalization(configs, args, x_train[session][i], x_train[session][i])
for session in range(len(test_sessions)):
for i in range(50):
configs, _, x_test[session][i] = normalization(configs, args, x_test[session][i], x_test[session][i])
return (x_train, y_train), (x_test, y_test), class_labels
def load_tiny_imagenet(color_format='rgb'):
x_train = np.zeros((100000, 64, 64, 3))
x_test = np.zeros((10000, 64, 64, 3))
y_train = []
y_test = []
label = 0
i = 0
# Load Training Data
for d in os.listdir(os.getcwd() + '/datasets/tiny-imagenet-200/train'):
for im in os.listdir(os.getcwd() + '/datasets/tiny-imagenet-200/train/' + d + '/images'):
image = imageio.imread(os.getcwd() + '/datasets/tiny-imagenet-200/train/' + d + '/images/' + im)
if image.shape != (64, 64, 3):
image = np.repeat(image[:, :, np.newaxis], 3, axis=2)
x_train[i] = image
y_train.append(label)
i += 1
label += 1
# Load Validation Data
match_dict = {}
label_dict = {}
counter = 0
f = open(os.getcwd() + '/datasets/tiny-imagenet-200/val/val_annotations.txt', 'r')
line = f.readline()
while line:
im_file = line.split('\t')[0]
code = line.split('\t')[1]
if code in match_dict:
label_dict[im_file] = match_dict[code]
else:
match_dict[code] = counter
label_dict[im_file] = match_dict[code]
counter += 1
line = f.readline()
for i, im in enumerate(os.listdir(os.getcwd() + '/datasets/tiny-imagenet-200/val/images')):
image = imageio.imread(os.getcwd() + '/datasets/tiny-imagenet-200/val/images/' + im)
if image.shape != (64, 64, 3):
image = np.repeat(image[:, :, np.newaxis], 3, axis=2)
label = label_dict[im]
x_test[i] = image
y_test.append(label)
y_train = np.array(y_train)
y_test = np.array(y_test)
# color format
if color_format == 'gray':
x_train = rgb2grey(x_train)[:,:,:,None]
x_test = rgb2grey(x_test)[:,:,:,None]
elif color_format == 'hsv':
x_train = rgb2hsv(x_train)
x_test = rgb2hsv(x_test)
elif color_format == 'hv':
x_train = rgb2hsv(x_train)[:, :, :, [0,2]]
x_test = rgb2hsv(x_test)[:, :, :, [0,2]]
return (x_train, y_train), (x_test, y_test), np.arange(200)
# loads the dataset depending on experiment arguments
# includes dataset normalization
def load_dataset(configs, args):
color_format = args.color_format
# load dataset
if args.dataset == 'mnist':
(x_, y_), (x_2, y_2), class_labels = load_mnist()
configs['im_size'] = 28
configs['channels'] = 1
configs['num_phases'] = 5
configs['class_labels'] = class_labels
num_classes = 10
if args.dataset == 'emnist':
(x_, y_), (x_2, y_2), class_labels = load_emnist()
configs['im_size'] = 28
configs['channels'] = 1
configs['num_phases'] = 23
configs['class_labels'] = class_labels
num_classes = 47
if args.dataset == 'cifar-10':
(x_, y_), (x_2, y_2), class_labels = load_cifar10(color_format=args.color_format)
configs['im_size'] = 32
if color_format == 'gray':
configs['channels'] = 1
elif color_format == 'hv':
configs['channels'] = 2
else:
configs['channels'] = 3
configs['num_phases'] = 5
configs['class_labels'] = class_labels
num_classes = 10
if args.dataset == 'cifar-100':
(x_, y_), (x_2, y_2), class_labels = load_cifar100(color_format=args.color_format)
configs['im_size'] = 32
if color_format == 'gray':
configs['channels'] = 1
elif color_format == 'hv':
configs['channels'] = 2
else:
configs['channels'] = 3
configs['num_phases'] = 10
configs['class_labels'] = class_labels
num_classes = 20
if args.dataset == 'svhn':
(x_, y_), (x_2, y_2), class_labels = load_svhn(color_format=args.color_format)
configs['im_size'] = 32
if color_format == 'gray':
configs['channels'] = 1
elif color_format == 'hv':
configs['channels'] = 2
else:
configs['channels'] = 3
configs['num_phases'] = 5
configs['class_labels'] = class_labels
num_classes = 10
if args.dataset == 'core50':
(x_, y_), (x_2, y_2), class_labels = load_core50(args.schedule_flag, configs, args, color_format=args.color_format)
configs['im_size'] = 64
if color_format == 'gray':
configs['channels'] = 1
elif color_format == 'hv':
configs['channels'] = 2
else:
configs['channels'] = 3
if args.schedule_flag == 3:
configs['num_phases'] = 10
num_classes = 50
configs['class_labels'] = class_labels
if args.dataset == 'tinyimagenet':
(x_, y_), (x_2, y_2), class_labels = load_tiny_imagenet(color_format=args.color_format)
configs['im_size'] = 64
if color_format == 'gray':
configs['channels'] = 1
elif color_format == 'hv':
configs['channels'] = 2
else:
configs['channels'] = 3
configs['num_phases'] = 20
configs['class_labels'] = class_labels
num_classes = 200
#############################
# split dataset (testing vs validation)
x_eval = x_2
y_eval = y_2
if args.dataset != 'core50':
configs, x_, x_eval = normalization(configs, args, x_, x_eval)
# add info to configs
configs['num_classes'] = num_classes
configs['class_labels'] = class_labels
configs['scale_flag'] = args.scale_flag
configs['transfer'] = args.transfer
return (x_, y_), (x_eval, y_eval), configs
if __name__ == "__main__":
    (x, y), (xx, yy), labels = load_tiny_imagenet(color_format='gray')
py | b4009f47ca410d8a6a4c929a4bac293085b680d9 | import torch.nn as nn
import torch.nn.functional as F
class Upsample(nn.Module):
def __init__(self, scale_factor, mode='nearest'):
super(Upsample, self).__init__()
self.scale_factor = scale_factor
self.mode = mode
def forward(self, x):
x = F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)
return x
class SamePadConv2d(nn.Conv2d):
"""
Conv with TF padding='same'
https://github.com/pytorch/pytorch/issues/3867#issuecomment-349279036
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True, padding_mode="zeros"):
super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias, padding_mode)
def get_pad_odd(self, in_, weight, stride, dilation):
effective_filter_size_rows = (weight - 1) * dilation + 1
out_rows = (in_ + stride - 1) // stride
padding_needed = max(0, (out_rows - 1) * stride + effective_filter_size_rows - in_)
padding_rows = max(0, (out_rows - 1) * stride + (weight - 1) * dilation + 1 - in_)
rows_odd = (padding_rows % 2 != 0)
return padding_rows, rows_odd
def forward(self, x):
padding_rows, rows_odd = self.get_pad_odd(x.shape[2], self.weight.shape[2], self.stride[0], self.dilation[0])
padding_cols, cols_odd = self.get_pad_odd(x.shape[3], self.weight.shape[3], self.stride[1], self.dilation[1])
if rows_odd or cols_odd:
x = F.pad(x, [0, int(cols_odd), 0, int(rows_odd)])
return F.conv2d(x, self.weight, self.bias, self.stride,
padding=(padding_rows // 2, padding_cols // 2),
dilation=self.dilation, groups=self.groups)
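# Usage sketch (illustrative, not part of the original module): with stride=1 the output
# keeps the input's spatial size, mirroring TF's padding='same', e.g.
#   SamePadConv2d(3, 8, kernel_size=3)(torch.randn(1, 3, 32, 32))  # -> shape (1, 8, 32, 32)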
class Conv_bn_relu(nn.Module):
def __init__(self, in_planes, planes,
kernel_size, stride=1, padding=0, bias=True, leaky=False):
super(Conv_bn_relu, self).__init__()
if padding == 'SAME':
self.conv = SamePadConv2d(in_planes, planes,
stride=stride, kernel_size=kernel_size, bias=bias)
else:
self.conv = nn.Conv2d(in_planes, planes,
stride=stride, kernel_size=kernel_size,
padding=padding, bias=bias)
self.bn = nn.BatchNorm2d(planes)
if leaky:
self.relu = nn.LeakyReLU(0.1)
else:
self.relu = nn.ReLU()
def forward(self, x):
return self.relu(self.bn(self.conv(x)))
class Conv_bn(nn.Module):
def __init__(self, in_planes, planes,
kernel_size, stride=1, padding=0, bias=True):
super(Conv_bn, self).__init__()
self.conv = nn.Conv2d(in_planes, planes,
kernel_size=kernel_size, stride=stride,
padding=padding, bias=bias)
self.bn = nn.BatchNorm2d(planes)
def forward(self, x):
return self.bn(self.conv(x))
class Conv_dw(nn.Module):
def __init__(self, in_planes, planes,
stride, kernel_size=3, padding=1, bias=False):
super(Conv_dw, self).__init__()
self.dw_conv = nn.Conv2d(in_planes, in_planes,
kernel_size=kernel_size, stride=stride,
padding=padding, bias=bias)
self.bn1 = nn.BatchNorm2d(in_planes)
self.relu1 = nn.ReLU()
self.pw_conv = nn.Conv2d(in_planes, planes,
kernel_size=1, stride=1,
padding=0, bias=bias)
self.bn2 = nn.BatchNorm2d(planes)
self.relu2 = nn.ReLU()
def forward(self, x):
x = self.dw_conv(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.pw_conv(x)
x = self.bn2(x)
x = self.relu2(x)
return x
|
py | b400a017c8e193f8a7e9b351a321e95c72a81f37 | """The pyodine logging system.
Mainly consisting of
- stderr (usually passed to systemd logging)
- application debug log
- logging of measurements
This acts like a singleton class. It does all the initialization on import and
the module's methods will act on module-level ("static") variables.
"""
import asyncio
import logging
from logging.handlers import (BufferingHandler, MemoryHandler,
TimedRotatingFileHandler)
import os
import re
from typing import Dict, List, Union # pylint: disable=unused-import
from .util import asyncio_tools as tools
PRIMARY_LOG_LOCATION = 'log/'
"""The main log location. Must be writable or creatable.
OSError will be raised if this isn't writeable or createable.
"""
SECONDARY_LOG_LOCATION = '/media/sdcard/pyodine_log/'
"""The location of the redundant logs.
If this isn't writeable or createable, a non-fatal Error will be displayed.
"""
PROGRAM_LOG_DIR = 'messages/' # Log program/debug messages here.
PROGRAM_LOG_FILE = 'pyodine.log' # Log program/debug messages here.
QTY_LOG_DIR = 'quantities/' # Log readings ("quantities") here.
# We need to avoid name clashes with existing loggers.
QTY_LOGGER_PREFIX = 'qty_logger.'
# We will use these module-scope globals here to make our module behave like a
# singleton class. Pylint doesn't like that.
# pylint: disable=global-statement
_LOGGERS = {} # type: Dict[str, logging.Logger]
# Those two are not constants but actually keep track of the current state of
# the loaded module. Pylint doesn't like that either.
# pylint: disable=invalid-name
_is_inited = False
_is_flushing = False # A task for flushing buffers to disk is running.
_VALID_LOG_LOCATIONS = [] # type: List[str]
"""List of writeable logging directories to use."""
def init() -> None:
"""Call this on first import. Don't call it again later.
This sets up the default loggers and logging locations.
"""
global _is_inited
if _is_inited:
raise RuntimeError('This is a "singleton module". Only init() once.')
_is_inited = True
root_logger = logging.getLogger()
# We need to default to DEBUG in order to be able to filter downstream.
root_logger.level = logging.DEBUG
root_logger.name = 'pyodine'
# Log to files in two separate locations.
_setup_log_dir(PRIMARY_LOG_LOCATION) # Will raise if primary logging can't work.
_VALID_LOG_LOCATIONS.append(PRIMARY_LOG_LOCATION)
try:
_setup_log_dir(SECONDARY_LOG_LOCATION)
except OSError:
logging.error("Can't set up secondary log location!")
else:
_VALID_LOG_LOCATIONS.append(SECONDARY_LOG_LOCATION)
# We need to specify 3600 seconds here instead of one hour, to force
# detailed file name suffixes for manual log rotation. This may lead to
# problems if the program is started/stopped multiple times per second.
writers = [TimedRotatingFileHandler(directory + PROGRAM_LOG_DIR + PROGRAM_LOG_FILE,
when='s', interval=3600)
for directory in _VALID_LOG_LOCATIONS]
for writer in writers:
writer.doRollover() # Start a new file every time pyodine is run.
writer.formatter = logging.Formatter(
"{asctime} {name} {levelname} - {message} [{module}.{funcName}]",
style='{')
buffers = [MemoryHandler(200, target=writer) for writer in writers]
for log_buffer in buffers:
root_logger.addHandler(log_buffer)
# Log to stderr.
stderr = logging.StreamHandler()
stderr.setLevel(logging.INFO)
stderr.formatter = logging.Formatter(
"{levelname:<7} {message} "
"[{module}:{lineno}] ({name})", style='{')
root_logger.addHandler(stderr)
def log_quantity(qty_id: str, value: Union[float, str], time: float = None) -> None:
"""Append "value" to the logfile of given name.
    :param qty_id: This distinguishes logfiles from each other.
:param time: Unix time of when the passed "value" was measured. If passed,
this will be printed in addition to the current time.
:param value: Value to log. None is fine as well.
"""
logger = _get_qty_logger(qty_id)
if time:
logger.info('%s\t%s', time, value)
else:
logger.info('%s', value)
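# Usage sketch (hypothetical quantity name): after init(),
#   log_quantity('laser_current', 0.742, time=1234567890.0)
# appends a timestamped line to <log location>/quantities/laser_current.log.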
def flush_to_disk() -> None:
"""Flush all log entries from buffer memory to disk."""
global _LOGGERS
# Act the root logger and our quantity loggers.
loggers = [logging.getLogger()] + list(_LOGGERS.values())
handlers = [h for l in loggers for h in l.handlers
if isinstance(h, BufferingHandler)]
for handler in handlers:
handler.flush()
def start_flushing_regularly(seconds: float) -> None:
"""Schedule regular flushing of the the buffered data to disk.
This needs a running asyncio event loop to work. Make sure one is running,
otherwise a warning is issued and the flushing is scheduled anyway.
Specifying a long interval does not reliably avoid frequent writes, as the
buffers will flush automatically if necessary to prevent overflow.
:param seconds: Interval for flushing. See note on flushing interval above.
"""
global _is_flushing
if _is_flushing:
logging.error("Flushing was already scheduled already. Ignoring.")
return
_is_flushing = True
if not seconds > .5:
raise ValueError("Choose a flushing interval larger than 0.5s.")
asyncio.ensure_future(tools.repeat_task(flush_to_disk, seconds))
def start_new_files() -> None:
"""Start new log files now. Don't wait for the usual period."""
global _LOGGERS
# Act the root logger and our quantity loggers.
loggers = [logging.getLogger()] + list(_LOGGERS.values())
handlers = [h for l in loggers for h in l.handlers
if isinstance(h, TimedRotatingFileHandler)]
for handler in handlers:
handler.doRollover()
def ellipsicate(message: str, max_length: int = 40, strip: bool = True) -> str:
"""Return a shortened version of a string if it exceeds max_length.
This will turn 'bizbazfrobnicator' into "biz ... tor".
"""
msg = re.sub(r'\s+', ' ', str(message)) # only allow ' ' for whitespace
if strip:
msg = msg.strip()
if len(msg) <= max_length:
return msg
snip_length = int((max_length - 5) / 2) # ellipsis padded with spaces
return str(msg[:snip_length] + ' ... ' + msg[-snip_length:])
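# e.g. ellipsicate('bizbazfrobnicator', max_length=11) == 'biz ... tor'
# (the snip length is (11 - 5) // 2 == 3 characters on each side)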
def _get_qty_logger(name: str) -> logging.Logger:
name = str(name)
if not name.isidentifier():
raise ValueError("Invalid log ID \"{}\". Only valid python "
"identifiers are allowed for log IDs.".format(name))
logger_name = QTY_LOGGER_PREFIX + name
# Actually the logging class provides a singleton behaviour of Logger
# objects. We keep our own list however, as we need some specific
# configuration and handlers attached.
global _LOGGERS
try:
return _LOGGERS[logger_name]
except KeyError:
# Create the logger.
# We need to specify 3600 seconds here instead of one hour, to force
# detailed file name suffixes for manual log rotation.
writers = [TimedRotatingFileHandler(directory + QTY_LOG_DIR + str(name) + '.log',
when='s', interval=3600)
for directory in _VALID_LOG_LOCATIONS]
for writer in writers:
writer.formatter = logging.Formatter("{asctime}\t{message}", style='{')
# Start a new file for each pyodine run.
writer.doRollover()
# Buffer file writes to keep I/O down. We will flush the buffer at
# given time intervals. If that flushing should fail, however, we'll
# flush at 100 entries (which is about 4kB of data).
buffers = [MemoryHandler(100, target=writer) for writer in writers]
logger = logging.getLogger(logger_name)
for log_buffer in buffers:
logger.addHandler(log_buffer)
logger.propagate = False # Don't pass messages to root logger.
_LOGGERS[logger_name] = logger
return logger
def _setup_log_dir(path: str) -> None:
"""Check / prepare the passed folder to accept log files.
Needs to exist and be writable, basically.
:raises OSError: Didn't succeed.
"""
log_dirs = [path + sub for sub in [PROGRAM_LOG_DIR, QTY_LOG_DIR]]
for directory in log_dirs:
os.makedirs(directory, exist_ok=True) # Raises OSError
if not os.access(directory, os.W_OK):
raise OSError("Couldn't write log location {}.".format(directory))
|
py | b400a0ca33ef46d5a6d3c24f55201702acd6deb3 | import pandas as pd
from scipy.stats import ttest_rel
def t_test_report(perf_df_a, tag_a, perf_df_b, tag_b, metric_cols):
for col in metric_cols:
report = dict(A=tag_a, B=tag_b, metric=col,
mean_A=perf_df_a[col].mean(),
std_A=perf_df_a[col].std(),
mean_B=perf_df_b[col].mean(),
std_B=perf_df_b[col].std())
t, p = ttest_rel(perf_df_a[col], perf_df_b[col])
report["t-statistic"] = t
report["p-value"] = p
yield report
if __name__ == "__main__":
metric_cols = ["test_AUPRC", "test_AUROC", "test_AVGRANK"]
gwava_perf = pd.read_csv("./experiment_result/gwava_performance_wo_chr5_30_CERENKOV2_1337.tsv", sep="\t", usecols=metric_cols)
c1_perf = pd.read_csv("./experiment_result/c1_cross_validate_xv_report.tsv", sep="\t", usecols=metric_cols)
c2_perf = pd.read_csv("./experiment_result/c2_performance_wo_chr5_30_CERENKOV2_1337.tsv", sep="\t", usecols=metric_cols)
c3_perf = pd.read_csv("./experiment_result/c3_cross_validate_xv_report.tsv", sep="\t", usecols=metric_cols) # C3 = C1 + LS + N2V
c1_perf.loc[:, "test_AVGRANK"] = -1 * c1_perf.loc[:, "test_AVGRANK"]
c3_perf.loc[:, "test_AVGRANK"] = -1 * c3_perf.loc[:, "test_AVGRANK"]
def generate_report():
quadruples = [(gwava_perf, "GWAVA", c1_perf, "C1"),
(gwava_perf, "GWAVA", c2_perf, "C2"),
(gwava_perf, "GWAVA", c3_perf, "C3"),
(c1_perf, "C1", c2_perf, "C2"),
(c1_perf, "C1", c3_perf, "C3"),
(c2_perf, "C2", c3_perf, "C3")]
for q in quadruples:
report_list = list(t_test_report(*q, metric_cols))
yield pd.DataFrame(report_list)
report_df = pd.concat(list(generate_report()), axis=0)
report_df.to_csv("t_test_results.tsv", sep="\t", index=False)
|
py | b400a133855c990673f96537b8950b20141c9cc3 | #!/usr/bin/env python
# coding: utf-8
#
# Load a previously saved model and make predictions on the test data set
#
# ## Import required libraries
import pandas as pd
from ludwig.api import LudwigModel
import os
import os.path
from sklearn.metrics import accuracy_score
# create data set for predictions
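# assumes the mnist_png export layout data/mnist_png/testing/<label>/<image>.png,
# i.e. one sub-directory per digit class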
test_data = {'image_path': [], 'label': []}
current_dir = os.getcwd()
test_dir = os.path.join(current_dir, 'data', 'mnist_png', 'testing')
for label in os.listdir(test_dir):
files = os.listdir(os.path.join(test_dir, label))
test_data['image_path'] += [os.path.join(test_dir, label, f) for f in files]
test_data['label'] += len(files) * [label]
# collect data into a data frame
test_df = pd.DataFrame(test_data)
print(test_df.head())
# retrieve a trained model
model = LudwigModel.load('./results/multiple_experiment_Option3/model')
# make predictions
pred_df = model.predict(data_df=test_df)
print(pred_df.head())
# print accuracy on test data set
print('predicted accuracy', accuracy_score(test_df['label'], pred_df['label_predictions']))
|
py | b400a28766c5a8dd6f9b3a3d7dc6e6463efbf2cc | """ Functionality to test analyse random telegraph signals."""
import unittest
import numpy as np
import matplotlib.pyplot as plt
from qtt.algorithms.random_telegraph_signal import tunnelrates_RTS, generate_RTS_signal
import warnings
class TestRandomTelegraphSignal(unittest.TestCase):
def test_RTS(self, fig=100, verbose=2):
data = generate_RTS_signal(100, std_gaussian_noise=0.1, uniform_noise=.1)
samplerate = 2e6
data = generate_RTS_signal(100000, std_gaussian_noise=0.1, rate_up=10e3, rate_down=20e3, samplerate=samplerate)
        with warnings.catch_warnings():  # suppress warnings raised during the analysis
warnings.simplefilter("ignore")
tunnelrate_dn, tunnelrate_up, parameters = tunnelrates_RTS(data, samplerate=samplerate, fig=fig,
verbose=verbose)
self.assertTrue(parameters['up_segments']['mean'] > 0)
self.assertTrue(parameters['down_segments']['mean'] > 0)
samplerate = 1e6
rate_up = 200e3
rate_down = 20e3
data = generate_RTS_signal(100000, std_gaussian_noise=0.01, rate_up=rate_up,
rate_down=rate_down, samplerate=samplerate)
tunnelrate_dn, tunnelrate_up, _ = tunnelrates_RTS(data, samplerate=samplerate, min_sep=1.0, max_sep=2222,
min_duration=1, num_bins=40, fig=fig, verbose=verbose)
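        # tunnelrates_RTS appears to report rates in kHz, hence the 1e-3 factor
        # applied to the Hz inputs in the assertions below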
self.assertTrue(np.abs(tunnelrate_dn - rate_up * 1e-3) < 100)
self.assertTrue(np.abs(tunnelrate_up - rate_down * 1e-3) < 10)
plt.close('all') |
py | b400a3054f87935cabe0120071e339a20cdb54d7 | #!/usr/bin/env python3
import datetime
import os
import time
from pathlib import Path
from typing import Dict, Optional, Tuple
from collections import namedtuple, OrderedDict
import psutil
from smbus2 import SMBus
import cereal.messaging as messaging
from cereal import log
from common.filter_simple import FirstOrderFilter
from common.numpy_fast import interp
from common.params import Params, ParamKeyType
from common.realtime import DT_TRML, sec_since_boot
# from common.dict_helpers import strip_deprecated_keys
from selfdrive.controls.lib.alertmanager import set_offroad_alert
from selfdrive.controls.lib.pid import PIController
from selfdrive.hardware import EON, TICI, PC, HARDWARE, JETSON
from selfdrive.loggerd.config import get_available_percent
from selfdrive.pandad import get_expected_signature
from selfdrive.swaglog import cloudlog
from selfdrive.thermald.power_monitoring import PowerMonitoring
from selfdrive.version import tested_branch, terms_version, training_version
FW_SIGNATURE = get_expected_signature()
ThermalStatus = log.DeviceState.ThermalStatus
NetworkType = log.DeviceState.NetworkType
NetworkStrength = log.DeviceState.NetworkStrength
CURRENT_TAU = 15. # 15s time constant
TEMP_TAU = 5. # 5s time constant
DAYS_NO_CONNECTIVITY_MAX = 14 # do not allow to engage after this many days
DAYS_NO_CONNECTIVITY_PROMPT = 10 # send an offroad prompt after this many days
DISCONNECT_TIMEOUT = 5. # wait 5 seconds before going offroad after disconnect so you get an alert
ThermalBand = namedtuple("ThermalBand", ['min_temp', 'max_temp'])
# List of thermal bands. We stay in the current band while the temperature is
# within its bounds; when it leaves the bounds we step to the next lower or
# higher band. Bands are ordered in the dict.
THERMAL_BANDS = OrderedDict({
ThermalStatus.green: ThermalBand(None, 80.0),
ThermalStatus.yellow: ThermalBand(75.0, 96.0),
ThermalStatus.red: ThermalBand(80.0, 107.),
ThermalStatus.danger: ThermalBand(94.0, None),
})
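# Hysteresis example: at 90C a device in the yellow band stays yellow (90 < 96);
# only above 96 does it step up to red, and it steps back down to yellow only
# once the temperature drops below red's 80C min_temp.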
# Override to highest thermal band when offroad and above this temp
OFFROAD_DANGER_TEMP = 79.5 if TICI else 70.0
prev_offroad_states: Dict[str, Tuple[bool, Optional[str]]] = {}
params = Params()
from common.dp_time import LAST_MODIFIED_THERMALD
from common.dp_common import get_last_modified, param_get_if_updated
LEON = False
def read_tz(x):
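  """Return the raw reading of thermal_zone ``x`` (0 if ``x`` is None or the
  zone file is missing); scaling to degrees C is left to the caller."""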
if x is None:
return 0
try:
with open(f"/sys/devices/virtual/thermal/thermal_zone{x}/temp") as f:
return int(f.read())
except FileNotFoundError:
return 0
def read_thermal(thermal_config):
dat = messaging.new_message('deviceState')
dat.deviceState.cpuTempC = [read_tz(z) / thermal_config.cpu[1] for z in thermal_config.cpu[0]]
dat.deviceState.gpuTempC = [read_tz(z) / thermal_config.gpu[1] for z in thermal_config.gpu[0]]
dat.deviceState.memoryTempC = read_tz(thermal_config.mem[0]) / thermal_config.mem[1]
dat.deviceState.ambientTempC = read_tz(thermal_config.ambient[0]) / thermal_config.ambient[1]
return dat
def setup_eon_fan():
global LEON
os.system("echo 2 > /sys/module/dwc3_msm/parameters/otg_switch")
bus = SMBus(7, force=True)
try:
bus.write_byte_data(0x21, 0x10, 0xf) # mask all interrupts
bus.write_byte_data(0x21, 0x03, 0x1) # set drive current and global interrupt disable
bus.write_byte_data(0x21, 0x02, 0x2) # needed?
bus.write_byte_data(0x21, 0x04, 0x4) # manual override source
except IOError:
print("LEON detected")
LEON = True
bus.close()
last_eon_fan_val = None
def set_eon_fan(val):
global LEON, last_eon_fan_val
if last_eon_fan_val is None or last_eon_fan_val != val:
bus = SMBus(7, force=True)
if LEON:
try:
i = [0x1, 0x3 | 0, 0x3 | 0x08, 0x3 | 0x10][val]
bus.write_i2c_block_data(0x3d, 0, [i])
except IOError:
# tusb320
if val == 0:
bus.write_i2c_block_data(0x67, 0xa, [0])
#bus.write_i2c_block_data(0x67, 0x45, [1<<2])
else:
#bus.write_i2c_block_data(0x67, 0x45, [0])
bus.write_i2c_block_data(0x67, 0xa, [0x20])
bus.write_i2c_block_data(0x67, 0x8, [(val - 1) << 6])
else:
bus.write_byte_data(0x21, 0x04, 0x2)
bus.write_byte_data(0x21, 0x03, (val*2)+1)
bus.write_byte_data(0x21, 0x04, 0x4)
bus.close()
last_eon_fan_val = val
# temp thresholds to control fan speed - high hysteresis
_TEMP_THRS_H = [50., 65., 80., 10000]
# temp thresholds to control fan speed - low hysteresis
_TEMP_THRS_L = [42.5, 57.5, 72.5, 10000]
# fan speed options
_FAN_SPEEDS = [0, 16384, 32768, 65535]
def handle_fan_eon(dp_fan_mode, controller, max_cpu_temp, fan_speed, ignition):
_fan_speed = _FAN_SPEEDS
if dp_fan_mode == 2:
_fan_speed = [0, 65535, 65535, 65535]
_bat_temp_threshold = 15.
elif dp_fan_mode == 1:
_fan_speed = [0, 16384, 16384, 32768]
new_speed_h = next(speed for speed, temp_h in zip(_fan_speed, _TEMP_THRS_H) if temp_h > max_cpu_temp)
new_speed_l = next(speed for speed, temp_l in zip(_fan_speed, _TEMP_THRS_L) if temp_l > max_cpu_temp)
if new_speed_h > fan_speed:
# update speed if using the high thresholds results in fan speed increment
fan_speed = new_speed_h
elif new_speed_l < fan_speed:
# update speed if using the low thresholds results in fan speed decrement
fan_speed = new_speed_l
set_eon_fan(fan_speed // 16384)
return fan_speed
def handle_fan_uno(dp_fan_mode, controller, max_cpu_temp, fan_speed, ignition):
if dp_fan_mode == 2:
new_speed = 80
elif dp_fan_mode == 1:
new_speed = int(interp(max_cpu_temp, [65.0, 80.0, 90.0], [0, 20, 60]))
else:
new_speed = int(interp(max_cpu_temp, [40.0, 80.0], [0, 80]))
if not ignition:
new_speed = min(10 if dp_fan_mode == 2 else 30, new_speed)
return new_speed
def handle_fan_tici(dp_fan_mode, controller, max_cpu_temp, fan_speed, ignition):
controller.neg_limit = -(80 if ignition else 30)
controller.pos_limit = -(30 if ignition else 0)
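  # The PI controller works on a negated fan command (more cooling means more
  # negative output), so the limits are negated above and the result is flipped
  # back to a positive duty-cycle percentage below.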
fan_pwr_out = -int(controller.update(
setpoint=(75 if ignition else (OFFROAD_DANGER_TEMP - 2)),
measurement=max_cpu_temp,
feedforward=interp(max_cpu_temp, [60.0, 100.0], [0, -80])
))
return fan_pwr_out
# dp_fan_mode is accepted here (even though the Jetson handler ignores it) to keep the fan-handler signatures consistent.
def handle_fan_jetson(dp_fan_mode, controller, max_cpu_temp, fan_speed, ignition):
new_speed = int(interp(max_cpu_temp, [40.0, 80.0], [100, 255]))
if not ignition:
new_speed = min(100, new_speed)
if fan_speed != new_speed:
os.system("echo %s > /sys/devices/pwm-fan/target_pwm" % new_speed)
return new_speed
def set_offroad_alert_if_changed(offroad_alert: str, show_alert: bool, extra_text: Optional[str]=None):
if prev_offroad_states.get(offroad_alert, None) == (show_alert, extra_text):
return
prev_offroad_states[offroad_alert] = (show_alert, extra_text)
set_offroad_alert(offroad_alert, show_alert, extra_text)
def thermald_thread():
pm = messaging.PubMaster(['deviceState'])
pandaState_timeout = int(1000 * 2.5 * DT_TRML) # 2.5x the expected pandaState frequency
pandaState_sock = messaging.sub_sock('pandaStates', timeout=pandaState_timeout)
sm = messaging.SubMaster(["peripheralState", "gpsLocationExternal", "managerState"])
fan_speed = 0
count = 0
startup_conditions = {
"ignition": False,
}
startup_conditions_prev = startup_conditions.copy()
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
usb_power = True
network_type = NetworkType.none
network_strength = NetworkStrength.unknown
network_info = None
modem_version = None
registered_count = 0
nvme_temps = None
modem_temps = None
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
temp_filter = FirstOrderFilter(0., TEMP_TAU, DT_TRML)
pandaState_prev = None
should_start_prev = False
in_car = False
handle_fan = None
is_uno = False
ui_running_prev = False
power_monitor = PowerMonitoring()
no_panda_cnt = 0
HARDWARE.initialize_hardware()
thermal_config = HARDWARE.get_thermal_config()
# TODO: use PI controller for UNO
controller = PIController(k_p=0, k_i=2e-3, neg_limit=-80, pos_limit=0, rate=(1 / DT_TRML))
# Leave flag for loggerd to indicate device was left onroad
if params.get_bool("IsOnroad"):
params.put_bool("BootedOnroad", True)
# dp
dp_no_batt = params.get_bool("dp_no_batt")
dp_temp_monitor = True
dp_last_modified_temp_monitor = None
dp_auto_shutdown = False
dp_last_modified_auto_shutdown = None
dp_auto_shutdown_last = False
dp_auto_shutdown_in = 90
dp_last_modified_auto_shutdown_in = None
dp_auto_shutdown_in_last = 90
dp_fan_mode = 0
dp_fan_mode_last = None
modified = None
last_modified = None
last_modified_check = None
if JETSON:
handle_fan = handle_fan_jetson
while True:
# dp - load temp monitor conf
last_modified_check, modified = get_last_modified(LAST_MODIFIED_THERMALD, last_modified_check, modified)
if last_modified != modified:
dp_temp_monitor, dp_last_modified_temp_monitor = param_get_if_updated("dp_temp_monitor", "bool", dp_temp_monitor, dp_last_modified_temp_monitor)
dp_auto_shutdown, dp_last_modified_auto_shutdown = param_get_if_updated("dp_auto_shutdown", "bool", dp_auto_shutdown, dp_last_modified_auto_shutdown)
dp_auto_shutdown_in, dp_last_modified_auto_shutdown_in = param_get_if_updated("dp_auto_shutdown_in", "int", dp_auto_shutdown_in, dp_last_modified_auto_shutdown_in)
dp_fan_mode, dp_fan_mode_last = param_get_if_updated("dp_fan_mode", "int", dp_fan_mode, dp_fan_mode_last)
last_modified = modified
pandaStates = messaging.recv_sock(pandaState_sock, wait=True)
sm.update(0)
peripheralState = sm['peripheralState']
msg = read_thermal(thermal_config)
if pandaStates is not None and len(pandaStates.pandaStates) > 0:
pandaState = pandaStates.pandaStates[0]
# If we lose connection to the panda, wait 5 seconds before going offroad
if pandaState.pandaType == log.PandaState.PandaType.unknown:
no_panda_cnt += 1
if no_panda_cnt > DISCONNECT_TIMEOUT / DT_TRML:
if startup_conditions["ignition"]:
cloudlog.error("Lost panda connection while onroad")
startup_conditions["ignition"] = False
else:
no_panda_cnt = 0
startup_conditions["ignition"] = pandaState.ignitionLine or pandaState.ignitionCan
in_car = pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected
usb_power = peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client
# Setup fan handler on first connect to panda
if not JETSON and handle_fan is None and peripheralState.pandaType != log.PandaState.PandaType.unknown:
is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno
if TICI:
cloudlog.info("Setting up TICI fan handler")
handle_fan = handle_fan_tici
elif is_uno or PC:
cloudlog.info("Setting up UNO fan handler")
handle_fan = handle_fan_uno
else:
cloudlog.info("Setting up EON fan handler")
setup_eon_fan()
handle_fan = handle_fan_eon
# Handle disconnect
if pandaState_prev is not None:
if pandaState.pandaType == log.PandaState.PandaType.unknown and \
pandaState_prev.pandaType != log.PandaState.PandaType.unknown:
params.clear_all(ParamKeyType.CLEAR_ON_PANDA_DISCONNECT)
pandaState_prev = pandaState
# these are expensive calls. update every 10s
if (count % int(10. / DT_TRML)) == 0:
try:
network_type = HARDWARE.get_network_type()
network_strength = HARDWARE.get_network_strength(network_type)
network_info = HARDWARE.get_network_info() # pylint: disable=assignment-from-none
nvme_temps = HARDWARE.get_nvme_temperatures()
modem_temps = HARDWARE.get_modem_temperatures()
# Log modem version once
if modem_version is None:
modem_version = HARDWARE.get_modem_version() # pylint: disable=assignment-from-none
if modem_version is not None:
cloudlog.warning(f"Modem version: {modem_version}")
if TICI and (network_info.get('state', None) == "REGISTERED"):
registered_count += 1
else:
registered_count = 0
if registered_count > 10:
cloudlog.warning(f"Modem stuck in registered state {network_info}. nmcli conn up lte")
os.system("nmcli conn up lte")
registered_count = 0
except Exception:
cloudlog.exception("Error getting network status")
msg.deviceState.freeSpacePercent = get_available_percent(default=100.0)
msg.deviceState.memoryUsagePercent = int(round(psutil.virtual_memory().percent))
msg.deviceState.cpuUsagePercent = [int(round(n)) for n in psutil.cpu_percent(percpu=True)]
msg.deviceState.gpuUsagePercent = int(round(HARDWARE.get_gpu_usage_percent()))
msg.deviceState.networkType = network_type
msg.deviceState.networkStrength = network_strength
if network_info is not None:
msg.deviceState.networkInfo = network_info
if nvme_temps is not None:
msg.deviceState.nvmeTempC = nvme_temps
if modem_temps is not None:
msg.deviceState.modemTempC = modem_temps
msg.deviceState.screenBrightnessPercent = HARDWARE.get_screen_brightness()
msg.deviceState.batteryPercent = HARDWARE.get_battery_capacity()
msg.deviceState.batteryCurrent = HARDWARE.get_battery_current()
msg.deviceState.usbOnline = HARDWARE.get_usb_present()
    # Fake the battery level when the device has no battery (dp_no_batt)
if dp_no_batt:
msg.deviceState.batteryPercent = 100
current_filter.update(msg.deviceState.batteryCurrent / 1e6)
max_comp_temp = temp_filter.update(
max(max(msg.deviceState.cpuTempC), msg.deviceState.memoryTempC, max(msg.deviceState.gpuTempC))
)
if handle_fan is not None:
fan_speed = handle_fan(dp_fan_mode, controller, max_comp_temp, fan_speed, startup_conditions["ignition"])
msg.deviceState.fanSpeedPercentDesired = fan_speed
is_offroad_for_5_min = (started_ts is None) and ((not started_seen) or (off_ts is None) or (sec_since_boot() - off_ts > 60 * 5))
if is_offroad_for_5_min and max_comp_temp > OFFROAD_DANGER_TEMP:
# If device is offroad we want to cool down before going onroad
# since going onroad increases load and can make temps go over 107
thermal_status = ThermalStatus.danger
else:
current_band = THERMAL_BANDS[thermal_status]
band_idx = list(THERMAL_BANDS.keys()).index(thermal_status)
if current_band.min_temp is not None and max_comp_temp < current_band.min_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx - 1]
elif current_band.max_temp is not None and max_comp_temp > current_band.max_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx + 1]
if not dp_temp_monitor and thermal_status in [ThermalStatus.red, ThermalStatus.danger]:
thermal_status = ThermalStatus.yellow
# **** starting logic ****
# Check for last update time and display alerts if needed
# now = datetime.datetime.utcnow()
#
# # show invalid date/time alert
# startup_conditions["time_valid"] = (now.year > 2020) or (now.year == 2020 and now.month >= 10)
# set_offroad_alert_if_changed("Offroad_InvalidTime", (not startup_conditions["time_valid"]))
#
# # Show update prompt
# try:
# last_update = datetime.datetime.fromisoformat(params.get("LastUpdateTime", encoding='utf8'))
# except (TypeError, ValueError):
# last_update = now
# dt = now - last_update
#
# update_failed_count = params.get("UpdateFailedCount")
# update_failed_count = 0 if update_failed_count is None else int(update_failed_count)
# last_update_exception = params.get("LastUpdateException", encoding='utf8')
#
# if update_failed_count > 15 and last_update_exception is not None:
# if tested_branch:
# extra_text = "Ensure the software is correctly installed"
# else:
# extra_text = last_update_exception
#
# set_offroad_alert_if_changed("Offroad_ConnectivityNeeded", False)
# set_offroad_alert_if_changed("Offroad_ConnectivityNeededPrompt", False)
# set_offroad_alert_if_changed("Offroad_UpdateFailed", True, extra_text=extra_text)
# elif dt.days > DAYS_NO_CONNECTIVITY_MAX and update_failed_count > 1:
# set_offroad_alert_if_changed("Offroad_UpdateFailed", False)
# set_offroad_alert_if_changed("Offroad_ConnectivityNeededPrompt", False)
# set_offroad_alert_if_changed("Offroad_ConnectivityNeeded", True)
# elif dt.days > DAYS_NO_CONNECTIVITY_PROMPT:
# remaining_time = str(max(DAYS_NO_CONNECTIVITY_MAX - dt.days, 0))
# set_offroad_alert_if_changed("Offroad_UpdateFailed", False)
# set_offroad_alert_if_changed("Offroad_ConnectivityNeeded", False)
# set_offroad_alert_if_changed("Offroad_ConnectivityNeededPrompt", True, extra_text=f"{remaining_time} days.")
# else:
# set_offroad_alert_if_changed("Offroad_UpdateFailed", False)
# set_offroad_alert_if_changed("Offroad_ConnectivityNeeded", False)
# set_offroad_alert_if_changed("Offroad_ConnectivityNeededPrompt", False)
startup_conditions["up_to_date"] = params.get("Offroad_ConnectivityNeeded") is None or params.get_bool("DisableUpdates")
startup_conditions["not_uninstalling"] = not params.get_bool("DoUninstall")
startup_conditions["accepted_terms"] = params.get("HasAcceptedTerms") == terms_version
panda_signature = params.get("PandaFirmware")
startup_conditions["fw_version_match"] = (panda_signature is None) or (panda_signature == FW_SIGNATURE) # don't show alert is no panda is connected (None)
set_offroad_alert_if_changed("Offroad_PandaFirmwareMismatch", (not startup_conditions["fw_version_match"]))
# with 2% left, we killall, otherwise the phone will take a long time to boot
startup_conditions["free_space"] = msg.deviceState.freeSpacePercent > 2
startup_conditions["completed_training"] = params.get("CompletedTrainingVersion") == training_version or \
params.get_bool("Passive")
startup_conditions["not_driver_view"] = not params.get_bool("IsDriverViewEnabled")
startup_conditions["not_taking_snapshot"] = not params.get_bool("IsTakingSnapshot")
# if any CPU gets above 107 or the battery gets above 63, kill all processes
# controls will warn with CPU above 95 or battery above 60
startup_conditions["device_temp_good"] = thermal_status < ThermalStatus.danger
set_offroad_alert_if_changed("Offroad_TemperatureTooHigh", (not startup_conditions["device_temp_good"]))
if TICI:
set_offroad_alert_if_changed("Offroad_NvmeMissing", (not Path("/data/media").is_mount()))
# Handle offroad/onroad transition
should_start = all(startup_conditions.values())
if should_start != should_start_prev or (count == 0):
params.put_bool("IsOnroad", should_start)
params.put_bool("IsOffroad", not should_start)
HARDWARE.set_power_save(not should_start)
if should_start:
off_ts = None
if started_ts is None:
started_ts = sec_since_boot()
started_seen = True
else:
if startup_conditions["ignition"] and (startup_conditions != startup_conditions_prev):
cloudlog.event("Startup blocked", startup_conditions=startup_conditions)
started_ts = None
if off_ts is None:
off_ts = sec_since_boot()
# Offroad power monitoring
if not dp_no_batt:
power_monitor.calculate(peripheralState, startup_conditions["ignition"])
msg.deviceState.offroadPowerUsageUwh = power_monitor.get_power_used()
msg.deviceState.carBatteryCapacityUwh = max(0, power_monitor.get_car_battery_capacity())
# Check if we need to disable charging (handled by boardd)
msg.deviceState.chargingDisabled = power_monitor.should_disable_charging(startup_conditions["ignition"], in_car, off_ts)
# Check if we need to shut down
if power_monitor.should_shutdown(peripheralState, startup_conditions["ignition"], in_car, off_ts, started_seen, LEON):
cloudlog.info(f"shutting device down, offroad since {off_ts}")
# TODO: add function for blocking cloudlog instead of sleep
time.sleep(10)
HARDWARE.shutdown()
# dp - auto shutdown
# reset off_ts if we change auto shutdown related params
if off_ts is not None:
if dp_auto_shutdown:
shutdown_sec = dp_auto_shutdown_in * 60
sec_now = sec_since_boot() - off_ts
if (shutdown_sec - 5) < sec_now:
msg.deviceState.chargingDisabled = True
if shutdown_sec < sec_now:
time.sleep(10)
HARDWARE.shutdown()
if dp_auto_shutdown_in_last != dp_auto_shutdown_in or dp_auto_shutdown_last != dp_auto_shutdown:
off_ts = sec_since_boot()
dp_auto_shutdown_last = dp_auto_shutdown
dp_auto_shutdown_in_last = dp_auto_shutdown_in
    # If the UI has crashed, set the brightness to a reasonable non-zero value
ui_running = "ui" in (p.name for p in sm["managerState"].processes if p.running)
if ui_running_prev and not ui_running:
HARDWARE.set_screen_brightness(20)
ui_running_prev = ui_running
msg.deviceState.chargingError = current_filter.x > 0. and msg.deviceState.batteryPercent < 90 # if current is positive, then battery is being discharged
msg.deviceState.started = started_ts is not None
msg.deviceState.startedMonoTime = int(1e9*(started_ts or 0))
last_ping = params.get("LastAthenaPingTime")
if last_ping is not None:
msg.deviceState.lastAthenaPingTime = int(last_ping)
msg.deviceState.thermalStatus = thermal_status
pm.send("deviceState", msg)
if EON and not is_uno:
set_offroad_alert_if_changed("Offroad_ChargeDisabled", (not usb_power))
should_start_prev = should_start
startup_conditions_prev = startup_conditions.copy()
# report to server once every 10 minutes
# if (count % int(600. / DT_TRML)) == 0:
# if EON and started_ts is None and msg.deviceState.memoryUsagePercent > 40:
# cloudlog.event("High offroad memory usage", mem=msg.deviceState.memoryUsagePercent)
#
# cloudlog.event("STATUS_PACKET",
# count=count,
# pandaStates=(strip_deprecated_keys(pandaStates.to_dict()) if pandaStates else None),
# peripheralState=strip_deprecated_keys(peripheralState.to_dict()),
# location=(strip_deprecated_keys(sm["gpsLocationExternal"].to_dict()) if sm.alive["gpsLocationExternal"] else None),
# deviceState=strip_deprecated_keys(msg.to_dict()))
count += 1
def main():
thermald_thread()
if __name__ == "__main__":
main()
|
py | b400a37c337ca6377c5061a713cb96f596a8e0e4 | # Generated by Django 3.1.2 on 2020-11-16 20:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vote', '0023_election_disable_abstention'),
]
operations = [
migrations.AlterField(
model_name='election',
name='result_published',
field=models.CharField(choices=[('0', 'unpublished'), ('1', 'published')], default='0', max_length=1),
),
]
|
py | b400a4592261cb47b38c8e1de4414c10903e2327 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright 2015-2016 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
VGG_A Benchmark
https://github.com/soumith/convnet-benchmarks
./vgg_a.py
./vgg_a.py -d f16
"""
from neon import NervanaObject
from neon.util.argparser import NeonArgparser
from neon.initializers import Gaussian
from neon.layers import Conv, Pooling, GeneralizedCost, Affine
from neon.optimizers import GradientDescentMomentum, MultiOptimizer, Schedule
from neon.transforms import Rectlin, Softmax, CrossEntropyMulti
from neon.models import Model
from neon.data import ArrayIterator
import numpy as np
parser = NeonArgparser(__doc__)
args = parser.parse_args()
NervanaObject.be.bsz = 64
NervanaObject.be.enable_winograd = 4
# setup data provider
X_train = np.random.uniform(-1, 1, (64, 3 * 224 * 224))
y_train = np.random.randint(0, 999, (64, 1000))
train = ArrayIterator(X_train, y_train, nclass=1000, lshape=(3, 224, 224))
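# The random arrays above are synthetic placeholders: this script only
# benchmarks layer timings, so the data content is irrelevant.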
layers = [Conv((3, 3, 64), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),
Pooling(2, strides=2),
Conv((3, 3, 128), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),
Pooling(2, strides=2),
Conv((3, 3, 256), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),
Conv((3, 3, 256), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),
Pooling(2, strides=2),
Conv((3, 3, 512), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),
Conv((3, 3, 512), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),
Pooling(2, strides=2),
Conv((3, 3, 512), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),
Conv((3, 3, 512), init=Gaussian(scale=0.01), activation=Rectlin(), padding=1),
Pooling(2, strides=2),
Affine(nout=4096, init=Gaussian(scale=0.01), activation=Rectlin()),
Affine(nout=4096, init=Gaussian(scale=0.01), activation=Rectlin()),
Affine(nout=1000, init=Gaussian(scale=0.01), activation=Softmax())]
model = Model(layers=layers)
weight_sched = Schedule([22, 44, 65], (1 / 250.)**(1 / 3.))
opt_gdm = GradientDescentMomentum(0.01, 0.0, wdecay=0.0005, schedule=weight_sched)
opt = MultiOptimizer({'default': opt_gdm})
cost = GeneralizedCost(costfunc=CrossEntropyMulti())
model.benchmark(train, cost=cost, optimizer=opt, niterations=10, nskip=1)
|
py | b400a46d1c80ce890c99a61ade5fb257e4a57af3 | """Run Alleyoop snpeval tool on Slamdunk results."""
import os
from plumbum import TEE
from resolwe.process import Cmd, DataField, FileField, IntegerField, Process, StringField
class AlleyoopSnpEval(Process):
"""Run Alleyoop snpeval."""
slug = 'alleyoop-snpeval'
process_type = 'data:alleyoop:snpeval'
name = 'Alleyoop snpeval'
requirements = {
'expression-engine': 'jinja',
'executor': {
'docker': {
'image': 'resolwebio/slamdunk:1.0.0'
},
},
'resources': {
'cores': 1,
'memory': 16384,
},
}
entity = {
'type': 'sample',
}
category = 'Slamdunk'
data_name = '{{ slamdunk|sample_name|default("?") }}'
version = '1.1.0'
class Input:
"""Input fields for AlleyoopSnpEval."""
ref_seq = DataField('seq:nucleotide', label='FASTA file containig sequences for aligning')
regions = DataField('bed', label='BED file with coordinates of regions of interest')
slamdunk = DataField('alignment:bam:slamdunk', label='Slamdunk results')
read_length = IntegerField(
label='Maximum read length',
description='Maximum length of reads in the input FASTQ file',
default=150
)
class Output:
"""Output fields to process AlleyoopSnpEval."""
report = FileField(label='Tab-separated file with read counts, T>C read counts and SNP indication')
plot = FileField(label='SNP evaluation plot')
species = StringField(label='Species')
build = StringField(label='Build')
def run(self, inputs, outputs):
"""Run analysis."""
basename = os.path.basename(inputs.slamdunk.bam.path)
assert basename.endswith('.bam')
name = basename[:-4]
args = [
'-o', 'snpeval',
'-r', inputs.ref_seq.fasta.path,
'-b', inputs.regions.bed.path,
'-s', '.',
'-l', inputs.read_length,
]
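        # Link the Slamdunk variants file into the working directory as
        # <name>_snp.vcf, presumably the naming snpeval expects when '-s .'
        # points it at the current directory.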
(Cmd['ln']['-s', inputs.slamdunk.variants.path, f'{name}_snp.vcf'])()
return_code, _, _ = Cmd['alleyoop']['snpeval'][args][inputs.slamdunk.bam.path] & TEE(retcode=None)
if return_code:
self.error('Alleyoop snpeval analysis failed.')
snp_file = os.path.join('snpeval', f'{name}_SNPeval.csv')
snp_file_renamed = os.path.join('snpeval', f'{name}_SNPeval.txt')
os.rename(snp_file, snp_file_renamed)
outputs.report = snp_file_renamed
outputs.plot = os.path.join('snpeval', f'{name}_SNPeval.pdf')
outputs.species = inputs.slamdunk.species
outputs.build = inputs.slamdunk.build
|
py | b400a5528780df2deda2ac4c7900aae0f60d0288 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
import ctypes
import json
import logging
import os
import pprint
import re
import signal
import subprocess
import sys
import tempfile
import time
import warnings
from dataclasses import asdict, dataclass
from datetime import datetime
from typing import TextIO, Dict, Iterable, List, Optional, Tuple, Pattern, Any
from uuid import uuid4
from torchx.schedulers.api import AppDryRunInfo, DescribeAppResponse, Scheduler
from torchx.specs.api import (
NONE,
Application,
AppState,
InvalidRunConfigException,
RunConfig,
SchedulerBackend,
is_terminal,
macros,
runopts,
)
log: logging.Logger = logging.getLogger(__name__)
def make_unique(app_name: str) -> str:
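    """Append a short uuid4-derived suffix so repeated runs of the same app name do not collide."""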
return f"{app_name}_{str(uuid4()).split('-')[0]}"
NA: str = "<N/A>"
class ImageFetcher(abc.ABC):
"""
Downloads and sets up an image onto the localhost. This is only needed for
``LocalhostScheduler`` since typically real schedulers will do this
on-behalf of the user.
"""
@abc.abstractmethod
def fetch(self, image: str) -> str:
"""
Pulls the given image and returns a path to the pulled image on
the local host.
"""
raise NotImplementedError()
class LocalDirectoryImageFetcher(ImageFetcher):
"""
Interprets the image name as the path to a directory on
local host. Does not "fetch" (e.g. download) anything. Used in conjunction
with ``LocalScheduler`` to run local binaries.
The image name must be an absolute path and must exist.
Example:
1. ``fetch(Image(name="/tmp/foobar"))`` returns ``/tmp/foobar``
2. ``fetch(Image(name="foobar"))`` raises ``ValueError``
2. ``fetch(Image(name="/tmp/dir/that/does/not_exist"))`` raises ``ValueError``
"""
def __init__(self, cfg: RunConfig) -> None:
pass
def fetch(self, image: str) -> str:
"""
Raises:
ValueError - if the image name is not an absolute dir
and if it does not exist or is not a directory
"""
if not os.path.isabs(image):
raise ValueError(
f"Invalid image name: {image}, image name must be an absolute path"
)
if not os.path.isdir(image):
raise ValueError(
f"Invalid image name: {image}, does not exist or is not a directory"
)
return image
# aliases to make clear what the mappings are
AppId = str
AppName = str
RoleName = str
@dataclass
class _LocalReplica:
"""
Contains information about a locally running role replica.
"""
role_name: RoleName
replica_id: int
# pyre-fixme[24]: Generic type `subprocess.Popen` expects 1 type parameter.
proc: subprocess.Popen
stdout: Optional[TextIO] # None means no log_dir (out to console)
stderr: Optional[TextIO] # None means no log_dir (out to console)
error_file: str
def terminate(self) -> None:
"""
terminates the underlying process for this replica
closes stdout and stderr file handles
safe to call multiple times
"""
# safe to call terminate on a process that already died
self.proc.terminate()
self.proc.wait()
# close stdout and stderr log file handles
if self.stdout:
# pyre-ignore [16] already null checked
self.stdout.close()
if self.stderr:
self.stderr.close()
def is_alive(self) -> bool:
return self.proc.poll() is None
def failed(self) -> bool:
if self.is_alive(): # if still running, then has not failed
return False
else:
return self.proc.returncode != 0
class _LocalApplication:
"""
Container object used by ``LocalhostScheduler`` to group the pids that
form an application. Each replica of a role in the application is a
process and has a pid.
"""
def __init__(self, id: str, log_dir: str) -> None:
self.id = id
# cfg.get("log_dir")/<session_name>/<app_id> or /tmp/tsm/<session_name>/<app_id>
self.log_dir = log_dir
# role name -> [replicas, ...]
self.role_replicas: Dict[RoleName, List[_LocalReplica]] = {}
self.state: AppState = AppState.PENDING
# time (in seconds since epoch) when the last set_state method() was called
self.last_updated: float = -1
def add_replica(self, role_name: str, replica: _LocalReplica) -> None:
procs = self.role_replicas.setdefault(role_name, [])
procs.append(replica)
def set_state(self, state: AppState) -> None:
self.last_updated = time.time()
self.state = state
def terminate(self) -> None:
"""
terminates all procs associated with this app,
and closes any resources (e.g. log file handles)
safe to call multiple times
"""
# terminate all replica processes
for replicas in self.role_replicas.values():
for r in replicas:
r.terminate()
def _get_error_file(self) -> Optional[str]:
error_file = None
min_timestamp = sys.maxsize
for replicas in self.role_replicas.values():
for replica in replicas:
if not os.path.exists(replica.error_file):
continue
mtime = os.path.getmtime(replica.error_file)
if mtime < min_timestamp:
min_timestamp = mtime
error_file = replica.error_file
return error_file
def get_structured_error_msg(self) -> str:
error_file = self._get_error_file()
if not error_file:
return NONE
with open(error_file, "r") as f:
return json.dumps(json.load(f))
def close(self) -> None:
"""
terminates all procs associated with this app,
and closes any resources (e.g. log file handles)
and if log_dir has been specified,
writes a SUCCESS file indicating that the log files
have been flushed and closed and ready to read.
NOT safe to call multiple times!
"""
self.terminate()
def _fmt_io_filename(std_io: Optional[TextIO]) -> str:
if std_io:
return std_io.name
else:
return "<CONSOLE>"
# drop a SUCCESS file in the log dir to signal that
# the log file handles have all been closed properly
# and that they can reliably be read
roles_info = {}
for role_name, replicas in self.role_replicas.items():
replicas_info = []
for replica in replicas:
replica_info = {
"replica_id": replica.replica_id,
"pid": replica.proc.pid,
"exitcode": replica.proc.returncode,
"stdout": _fmt_io_filename(replica.stdout),
"stderr": _fmt_io_filename(replica.stderr),
"error_file": replica.error_file,
}
replicas_info.append(replica_info)
roles_info[role_name] = replicas_info
app_info = {
"app_id": self.id,
"log_dir": self.log_dir,
"final_state": self.state.name,
"last_updated": self.last_updated,
"roles": roles_info,
}
info_str = json.dumps(app_info, indent=2)
with open(os.path.join(self.log_dir, "SUCCESS"), "w") as fp:
fp.write(info_str)
log.info(f"Successfully closed app_id: {self.id}.\n{info_str}")
def __repr__(self) -> str:
role_to_pid = {}
for (role_name, replicas) in self.role_replicas.items():
pids = role_to_pid.setdefault(role_name, [])
for r in replicas:
pids.append(r.proc.pid)
return f"{{app_id:{self.id}, state:{self.state}, pid_map:{role_to_pid}}}"
def _pr_set_pdeathsig() -> None:
"""
Sets PR_SET_PDEATHSIG to ensure a child process is
terminated appropriately.
See http://stackoverflow.com/questions/1884941/ for more information.
For libc.so.6 read http://www.linux-m68k.org/faq/glibcinfo.html
"""
libc = ctypes.CDLL("libc.so.6")
PR_SET_PDEATHSIG = 1
libc.prctl(PR_SET_PDEATHSIG, signal.SIGTERM)
@dataclass
class ReplicaParam:
"""
Holds ``LocalScheduler._popen()``parameters for each replica of the role.
"""
args: List[str]
env: Dict[str, str]
stdout: Optional[str]
stderr: Optional[str]
@dataclass
class PopenRequest:
"""
Holds parameters to create a subprocess for each replica of each role
of an application.
"""
app_id: AppId
log_dir: str
# maps role_name -> List[ReplicaSpec]
# role_params["trainer"][0] -> holds trainer's 0^th replica (NOT rank!) parameters
role_params: Dict[RoleName, List[ReplicaParam]]
# maps role_name -> List[replica_log_dir]
# role_log_dirs["trainer"][0] -> holds trainer's 0^th replica's log directory path
role_log_dirs: Dict[RoleName, List[str]]
class LocalScheduler(Scheduler):
"""
Schedules on localhost. Containers are modeled as processes and
certain properties of the container that are either not relevant
or that cannot be enforced for localhost
runs are ignored. Properties that are ignored:
1. Resource requirements
2. Container limit enforcements
3. Retry policies
4. Retry counts (no retries supported)
5. Deployment preferences
    .. note:: Use this scheduler sparingly since an application
that runs successfully on a session backed by this
scheduler may not work on an actual production cluster
using a different scheduler.
"""
def __init__(self, session_name: str, cache_size: int = 100) -> None:
super().__init__("local", session_name)
# TODO T72035686 replace dict with a proper LRUCache data structure
self._apps: Dict[AppId, _LocalApplication] = {}
if cache_size <= 0:
raise ValueError("cache size must be greater than zero")
self._cache_size = cache_size
def run_opts(self) -> runopts:
opts = runopts()
opts.add("image_fetcher", type_=str, help="image fetcher type", default="dir")
opts.add(
"log_dir",
type_=str,
default=None,
help="dir to write stdout/stderr log files of replicas",
)
return opts
def _validate(self, app: Application, scheduler: SchedulerBackend) -> None:
# Skip validation step for local application
pass
def _img_fetchers(self, cfg: RunConfig) -> Dict[str, ImageFetcher]:
return {"dir": LocalDirectoryImageFetcher(cfg)}
def _get_img_fetcher(self, cfg: RunConfig) -> ImageFetcher:
img_fetcher_type = cfg.get("image_fetcher")
fetchers = self._img_fetchers(cfg)
# pyre-ignore [6]: type check already done by runopt.resolve
img_fetcher = fetchers.get(img_fetcher_type, None)
if not img_fetcher:
raise InvalidRunConfigException(
f"Unsupported image fetcher type: {img_fetcher_type}. Must be one of: {fetchers.keys()}",
cfg,
self.run_opts(),
)
return img_fetcher
def _evict_lru(self) -> bool:
"""
Evicts one least recently used element from the apps cache. LRU is defined as
the oldest app in a terminal state (e.g. oldest finished app).
Returns:
``True`` if an entry was evicted, ``False`` if no entries could be evicted
(e.g. all apps are running)
"""
lru_time = sys.maxsize
lru_app_id = None
        for (app_id, app) in self._apps.items():
            if is_terminal(app.state):
                if app.last_updated <= lru_time:
                    lru_time = app.last_updated
                    lru_app_id = app_id
if lru_app_id:
# evict LRU finished app from the apps cache
del self._apps[lru_app_id]
log.debug(f"evicting app: {lru_app_id}, from local scheduler cache")
return True
else:
log.debug(f"no apps evicted, all {len(self._apps)} apps are running")
return False
def _get_file_io(self, file: Optional[str]) -> Optional[TextIO]:
"""
Given a file name, opens the file for write and returns the IO.
If no file name is given, then returns ``None``
Raises a ``FileExistsError`` if the file is already present.
"""
if not file:
return None
if os.path.isfile(file):
raise FileExistsError(
f"log file: {file} already exists,"
f" specify a different log_dir, app_name, or remove the file and retry"
)
os.makedirs(os.path.dirname(file), exist_ok=True)
return open(file, mode="w")
def _popen(
self, role_name: RoleName, replica_id: int, replica_params: ReplicaParam
) -> _LocalReplica:
"""
Same as ``subprocess.Popen(**popen_kwargs)`` but is able to take ``stdout`` and ``stderr``
as file name ``str`` rather than a file-like obj.
"""
stdout_ = self._get_file_io(replica_params.stdout)
stderr_ = self._get_file_io(replica_params.stderr)
        # Inherit the parent's env vars (this is what we want in nearly all
        # cases), but let the user-defined vars override the parent's.
env = os.environ.copy()
env.update(replica_params.env)
error_file = env["TORCHELASTIC_ERROR_FILE"]
args_pfmt = pprint.pformat(asdict(replica_params), indent=2, width=80)
log.info(f"Running {role_name} (replica {replica_id}):\n {args_pfmt}")
proc = subprocess.Popen(
args=replica_params.args,
env=env,
stdout=stdout_,
stderr=stderr_,
preexec_fn=_pr_set_pdeathsig,
)
return _LocalReplica(
role_name,
replica_id,
proc,
stdout=stdout_,
stderr=stderr_,
error_file=error_file,
)
def _get_app_log_dir(self, app_id: str, cfg: RunConfig) -> Tuple[str, bool]:
"""
Returns the log dir and a bool (should_redirect_std). We redirect stdout/err
to a log file ONLY if the log_dir is user-provided in the cfg
1. if cfg.get("log_dir") -> (user-specified log dir, True)
2. if not cfg.get("log_dir") -> (autogen tmp log dir, False)
"""
base_log_dir = cfg.get("log_dir")
redirect_std = True
if not base_log_dir:
base_log_dir = tempfile.mkdtemp(prefix="tsm_")
redirect_std = False
return os.path.join(str(base_log_dir), self.session_name, app_id), redirect_std
def schedule(self, dryrun_info: AppDryRunInfo[PopenRequest]) -> str:
if len(self._apps) == self._cache_size:
if not self._evict_lru():
raise IndexError(
f"App cache size ({self._cache_size}) exceeded. Increase the cache size"
)
request: PopenRequest = dryrun_info.request
app_id = request.app_id
app_log_dir = request.log_dir
assert (
app_id not in self._apps
), "no app_id collisons expected since uuid4 suffix is used"
os.makedirs(app_log_dir)
local_app = _LocalApplication(app_id, app_log_dir)
for role_name in request.role_params.keys():
role_params = request.role_params[role_name]
role_log_dirs = request.role_log_dirs[role_name]
for replica_id in range(len(role_params)):
replica_params = role_params[replica_id]
replica_log_dir = role_log_dirs[replica_id]
os.makedirs(replica_log_dir)
replica = self._popen(role_name, replica_id, replica_params)
local_app.add_replica(role_name, replica)
self._apps[app_id] = local_app
return app_id
def _submit_dryrun(
self, app: Application, cfg: RunConfig
) -> AppDryRunInfo[PopenRequest]:
request = self._to_popen_request(app, cfg)
return AppDryRunInfo(request, lambda p: pprint.pformat(p, indent=2, width=80))
def _to_popen_request(
self,
app: Application,
cfg: RunConfig,
) -> PopenRequest:
"""
Converts the application and cfg into a ``PopenRequest``.
"""
app_id = make_unique(app.name)
image_fetcher = self._get_img_fetcher(cfg)
app_log_dir, redirect_std = self._get_app_log_dir(app_id, cfg)
role_params: Dict[str, List[ReplicaParam]] = {}
role_log_dirs: Dict[str, List[str]] = {}
for role in app.roles:
replica_params = role_params.setdefault(role.name, [])
replica_log_dirs = role_log_dirs.setdefault(role.name, [])
container = role.container
img_root = image_fetcher.fetch(container.image)
cmd = os.path.join(img_root, role.entrypoint)
for replica_id in range(role.num_replicas):
values = macros.Values(
img_root=img_root,
app_id=app_id,
replica_id=str(replica_id),
)
replica_role = values.apply(role)
args = [cmd] + replica_role.args
replica_log_dir = os.path.join(app_log_dir, role.name, str(replica_id))
env_vars = {
# this is the top level (agent if using elastic role) error file
# a.k.a scheduler reply file
"TORCHELASTIC_ERROR_FILE": os.path.join(
replica_log_dir, "error.json"
),
**replica_role.env,
}
stdout = None
stderr = None
if redirect_std:
stdout = os.path.join(replica_log_dir, "stdout.log")
stderr = os.path.join(replica_log_dir, "stderr.log")
replica_params.append(ReplicaParam(args, env_vars, stdout, stderr))
replica_log_dirs.append(replica_log_dir)
return PopenRequest(app_id, app_log_dir, role_params, role_log_dirs)
def describe(self, app_id: str) -> Optional[DescribeAppResponse]:
if app_id not in self._apps:
return None
local_app = self._apps[app_id]
structured_error_msg = local_app.get_structured_error_msg()
# check if the app is known to have finished
if is_terminal(local_app.state):
state = local_app.state
else:
running = False
failed = False
for replicas in local_app.role_replicas.values():
for r in replicas:
running |= r.is_alive()
failed |= r.failed()
if running:
state = AppState.RUNNING
elif failed:
state = AppState.FAILED
else:
state = AppState.SUCCEEDED
local_app.set_state(state)
if is_terminal(local_app.state):
local_app.close()
resp = DescribeAppResponse()
resp.app_id = app_id
resp.structured_error_msg = structured_error_msg
resp.state = state
resp.num_restarts = 0
resp.ui_url = f"file://{local_app.log_dir}"
return resp
def log_iter(
self,
app_id: str,
role_name: str,
k: int = 0,
regex: Optional[str] = None,
since: Optional[datetime] = None,
until: Optional[datetime] = None,
should_tail: bool = False,
) -> Iterable[str]:
if since or until:
warnings.warn(
"Since and/or until times specified for LocalScheduler.log_iter."
" These will be ignored and all log lines will be returned"
)
app = self._apps[app_id]
log_file = os.path.join(app.log_dir, role_name, str(k), "stderr.log")
if not os.path.isfile(log_file):
raise RuntimeError(
f"app: {app_id} was not configured to log into a file."
f" Did you run it with log_dir set in RunConfig?"
)
return LogIterator(app_id, regex or ".*", log_file, self)
def _cancel_existing(self, app_id: str) -> None:
# can assume app_id exists
local_app = self._apps[app_id]
local_app.close()
local_app.state = AppState.CANCELLED
def __del__(self) -> None:
# terminate all apps
for (app_id, app) in self._apps.items():
log.info(f"Terminating app: {app_id}")
app.terminate()
class LogIterator:
def __init__(
self, app_id: str, regex: str, log_file: str, scheduler: LocalScheduler
) -> None:
self._app_id: str = app_id
self._regex: Pattern[str] = re.compile(regex)
self._log_file: str = log_file
self._log_fp: Optional[TextIO] = None
self._scheduler: LocalScheduler = scheduler
self._app_finished: bool = False
def _check_finished(self) -> None:
# either the app (already finished) was evicted from the LRU cache
# -- or -- the app reached a terminal state (and still in the cache)
desc = self._scheduler.describe(self._app_id)
if not desc or is_terminal(desc.state):
self._app_finished = True
else:
self._app_finished = False
def __iter__(self) -> "LogIterator":
# wait for the log file to appear or app to finish (whichever happens first)
while True:
self._check_finished() # check to see if app has finished running
if os.path.isfile(self._log_file):
self._log_fp = open(self._log_file, "r") # noqa: P201
break
if self._app_finished:
# app finished without ever writing a log file
raise RuntimeError(
f"app: {self._app_id} finished without writing: {self._log_file}"
)
time.sleep(1)
return self
def __next__(self) -> str:
log_fp = self._log_fp
assert log_fp is not None
while True:
line = log_fp.readline()
if not line:
# we have reached EOF and app finished
if self._app_finished:
log_fp.close()
raise StopIteration()
# if app is still running we need to wait for more possible log lines
# sleep for 1 sec to avoid thrashing the follow
time.sleep(1)
self._check_finished()
else:
line = line.rstrip("\n") # strip the trailing newline
if re.match(self._regex, line):
return line
def create_scheduler(session_name: str, **kwargs: Any) -> LocalScheduler:
return LocalScheduler(
session_name=session_name,
cache_size=kwargs.get("cache_size", 100),
)
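# Minimal usage sketch (not part of the library). Assumptions: `app` is an
# Application built elsewhere with the torchx.specs API and `cfg` is a RunConfig
# populated with the options reported by run_opts(); only then can the
# commented-out calls be executed (submit_dryrun comes from the Scheduler base class).
if __name__ == "__main__":
    scheduler = create_scheduler(session_name="demo")
    print(scheduler.run_opts())  # documents the supported cfg keys: image_fetcher, log_dir
    # dryrun_info = scheduler.submit_dryrun(app, cfg)
    # app_id = scheduler.schedule(dryrun_info)
    # print(scheduler.describe(app_id))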
|
py | b400a5f4390b075406935b238bb9905d2e01dd97 | import json
from nose.tools import assert_equals, ok_
from .testapp import App
class TestMeasures():
def __init__(self):
self.app = App()
def test_get_all_measures(self):
res = self.app.get('/measures', status=200)
results = json.loads(res.body.decode('utf-8'))
assert_equals(results["status"], "ok")
ok_(results["data"])
def test_not_allowed_post(self):
res = self.app.post('/measures', status=405)
results = json.loads(res.body.decode('utf-8'))
assert_equals(results["status"], "error")
assert_equals(results["code"], 405)
def test_not_allowed_put(self):
res = self.app.put('/measures', status=405)
results = json.loads(res.body.decode('utf-8'))
assert_equals(results["status"], "error")
assert_equals(results["code"], 405)
def test_not_allowed_delete(self):
res = self.app.delete('/measures', status=405)
results = json.loads(res.body.decode('utf-8'))
assert_equals(results["status"], "error")
assert_equals(results["code"], 405)
|
py | b400a604c93302ceaa28a0dfb9ea435ed2209492 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kubernetes.client
from kubernetes.client.models.v1beta1_validating_webhook_configuration_list import V1beta1ValidatingWebhookConfigurationList # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1beta1ValidatingWebhookConfigurationList(unittest.TestCase):
"""V1beta1ValidatingWebhookConfigurationList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test V1beta1ValidatingWebhookConfigurationList
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = kubernetes.client.models.v1beta1_validating_webhook_configuration_list.V1beta1ValidatingWebhookConfigurationList() # noqa: E501
if include_optional :
return V1beta1ValidatingWebhookConfigurationList(
api_version = '0',
items = [
kubernetes.client.models.v1beta1/validating_webhook_configuration.v1beta1.ValidatingWebhookConfiguration(
api_version = '0',
kind = '0',
metadata = kubernetes.client.models.v1/object_meta.v1.ObjectMeta(
annotations = {
'key' : '0'
},
cluster_name = '0',
creation_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
deletion_grace_period_seconds = 56,
deletion_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
finalizers = [
'0'
],
generate_name = '0',
generation = 56,
labels = {
'key' : '0'
},
managed_fields = [
kubernetes.client.models.v1/managed_fields_entry.v1.ManagedFieldsEntry(
api_version = '0',
fields_type = '0',
fields_v1 = kubernetes.client.models.fields_v1.fieldsV1(),
manager = '0',
operation = '0',
time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), )
],
name = '0',
namespace = '0',
owner_references = [
kubernetes.client.models.v1/owner_reference.v1.OwnerReference(
api_version = '0',
block_owner_deletion = True,
controller = True,
kind = '0',
name = '0',
uid = '0', )
],
resource_version = '0',
self_link = '0',
uid = '0', ),
webhooks = [
kubernetes.client.models.v1beta1/validating_webhook.v1beta1.ValidatingWebhook(
admission_review_versions = [
'0'
],
kubernetes.client_config = kubernetes.client.models.admissionregistration/v1beta1/webhook_client_config.admissionregistration.v1beta1.WebhookClientConfig(
ca_bundle = 'YQ==',
service = kubernetes.client.models.admissionregistration/v1beta1/service_reference.admissionregistration.v1beta1.ServiceReference(
name = '0',
namespace = '0',
path = '0',
port = 56, ),
url = '0', ),
failure_policy = '0',
match_policy = '0',
name = '0',
namespace_selector = kubernetes.client.models.v1/label_selector.v1.LabelSelector(
match_expressions = [
kubernetes.client.models.v1/label_selector_requirement.v1.LabelSelectorRequirement(
key = '0',
operator = '0',
values = [
'0'
], )
],
match_labels = {
'key' : '0'
}, ),
object_selector = kubernetes.client.models.v1/label_selector.v1.LabelSelector(),
rules = [
kubernetes.client.models.v1beta1/rule_with_operations.v1beta1.RuleWithOperations(
api_groups = [
'0'
],
api_versions = [
'0'
],
operations = [
'0'
],
resources = [
'0'
],
scope = '0', )
],
side_effects = '0',
timeout_seconds = 56, )
], )
],
kind = '0',
metadata = kubernetes.client.models.v1/list_meta.v1.ListMeta(
continue = '0',
remaining_item_count = 56,
resource_version = '0',
self_link = '0', )
)
else :
return V1beta1ValidatingWebhookConfigurationList(
items = [
kubernetes.client.models.v1beta1/validating_webhook_configuration.v1beta1.ValidatingWebhookConfiguration(
api_version = '0',
kind = '0',
metadata = kubernetes.client.models.v1/object_meta.v1.ObjectMeta(
annotations = {
'key' : '0'
},
cluster_name = '0',
creation_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
deletion_grace_period_seconds = 56,
deletion_timestamp = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
finalizers = [
'0'
],
generate_name = '0',
generation = 56,
labels = {
'key' : '0'
},
managed_fields = [
kubernetes.client.models.v1/managed_fields_entry.v1.ManagedFieldsEntry(
api_version = '0',
fields_type = '0',
fields_v1 = kubernetes.client.models.fields_v1.fieldsV1(),
manager = '0',
operation = '0',
time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'), )
],
name = '0',
namespace = '0',
owner_references = [
kubernetes.client.models.v1/owner_reference.v1.OwnerReference(
api_version = '0',
block_owner_deletion = True,
controller = True,
kind = '0',
name = '0',
uid = '0', )
],
resource_version = '0',
self_link = '0',
uid = '0', ),
webhooks = [
kubernetes.client.models.v1beta1/validating_webhook.v1beta1.ValidatingWebhook(
admission_review_versions = [
'0'
],
kubernetes.client_config = kubernetes.client.models.admissionregistration/v1beta1/webhook_client_config.admissionregistration.v1beta1.WebhookClientConfig(
ca_bundle = 'YQ==',
service = kubernetes.client.models.admissionregistration/v1beta1/service_reference.admissionregistration.v1beta1.ServiceReference(
name = '0',
namespace = '0',
path = '0',
port = 56, ),
url = '0', ),
failure_policy = '0',
match_policy = '0',
name = '0',
namespace_selector = kubernetes.client.models.v1/label_selector.v1.LabelSelector(
match_expressions = [
kubernetes.client.models.v1/label_selector_requirement.v1.LabelSelectorRequirement(
key = '0',
operator = '0',
values = [
'0'
], )
],
match_labels = {
'key' : '0'
}, ),
object_selector = kubernetes.client.models.v1/label_selector.v1.LabelSelector(),
rules = [
kubernetes.client.models.v1beta1/rule_with_operations.v1beta1.RuleWithOperations(
api_groups = [
'0'
],
api_versions = [
'0'
],
operations = [
'0'
],
resources = [
'0'
],
scope = '0', )
],
side_effects = '0',
timeout_seconds = 56, )
], )
],
)
def testV1beta1ValidatingWebhookConfigurationList(self):
"""Test V1beta1ValidatingWebhookConfigurationList"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
py | b400a60d58af052956109f464a3729a29ec8ab28 | import argparse
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='./dataset/', help='location of the data corpus')
parser.add_argument('--dataset', type=str, default='cifar10', help='dataset')
parser.add_argument('--batch_size', type=int, default=64, help='batch size')
parser.add_argument('--lamda0', type=float, default=1.0, help='coefficient for loss_arch')
parser.add_argument('--lamda1', type=float, default=1.0, help='coefficient for loss_aux')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--learning_rate_min', type=float, default=0.001, help='min learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu_id', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=50, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=16, help='num of init channels')
parser.add_argument('--layers', type=int, default=8, help='total number of layers')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path probability')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--train_portion', type=float, default=0.5, help='portion of training data')
parser.add_argument('--debug', action='store_true', default=False, help='debug')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--del_none', action='store_false', default=True, help='delete none operation')
parser.add_argument('--aux_loss', action='store_false', default=True, help='use info_entropy')
parser.add_argument('--beta_loss', action='store_false', default=True, help='use beta loss')
parser.add_argument('--info_linear_grow', action='store_true', default=False, help='info_linear_grow')
parser.add_argument('--one_level', action='store_true', default=False, help='use one level')
parser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')
parser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')
parser.add_argument('--arch_name', type=str, default=None, required=True, help='save model name')
parser.add_argument('--loss_type', type=str, default=None, required=True, help='[entropy, mae, mse, rmse, kl]')
parser.add_argument('--warmup', action='store_true', default=False, help='warmup')
parser.add_argument('--warmup_epoch', type=int, default=25, help='total number of warmup epochs')
args = parser.parse_args()
|
py | b400a722c717d6322475d075e5e6ca07343e213f | import config
import misc
def heartbeat():
"""
Processes data from Bittrex into a simpler dictionary,
    calls the save function on it, deletes the oldest saved
    dictionary (if it falls outside the lookback range), and finally
    creates a list of the best coins to be used in tkinter listboxes.
:return: A list containing triples of (coin name, increase rate, volume)
"""
data = misc.retrieve_data()
# Processing for saving latest data from Bittrex API
latest_data = {}
for d in data.get('result', {}):
name = d.get('Market', {}).get('MarketCurrencyLong', '')
last_price = d.get('Summary', {}).get('Last', 0.0)
last_vol = d.get('Summary', {}).get('BaseVolume', 0.0)
base_currency = d.get('Market', {}).get('BaseCurrency', '')
if base_currency == 'BTC' and last_price >= \
config.FASTTICK_MIN_PRICE and last_vol >= config.FASTTICK_MIN_VOL:
latest_data[name] = {'Summary': d['Summary']}
# Processing all data within 9 ticks + latest and returning
# rate for output in GUI
prev_data = list(misc.open_pickles('fasttick_history', config.FASTTICK_LOOKBACK))
prev_data.append(latest_data)
ticker_data = []
if prev_data:
for name in latest_data:
prev_changes = []
for i in range(len(prev_data)-1):
old_price = float(prev_data[i].get(name, {}).get('Summary', {}).get('Last', 0.0))
new_price = float(prev_data[i+1].get(name, {}).get('Summary', {}).get('Last', 0.0))
if old_price != 0:
change = (((new_price - old_price) / old_price) * 100)
prev_changes.append(change)
if prev_changes:
volume = float(latest_data.get(name, {}).get('Summary', {}).get('BaseVolume', 0.0))
average_rate = float((sum(prev_changes) / len(prev_changes)))
if average_rate >= config.FASTTICK_MIN_RATE:
ticker_data.append((name, average_rate, volume))
misc.save_pickle(latest_data, 'fasttick_history')
misc.delete_ancient_pickles('fasttick_history', config.FASTTICK_LOOKBACK)
return ticker_data |
py | b400a8200313bdc20b2d62b8e14a3df4959fc39d | from unittest import TestCase
from .. import chords
from ..notegroups import InvalidDegreeError, InvalidQualityError
class TestChords(TestCase):
def setUp(self):
self.c_thirteen = chords.Chord('C DOMINANT 13')
self.c_major = chords.Chord('c major')
self.d_minor = chords.Chord('d minor')
def test_invalid_bass(self):
with self.assertRaises(chords.InvalidBassError):
z = chords.Chord('Z MAJOR')
def test_invalid_quality(self):
with self.assertRaises(InvalidQualityError):
a = chords.Chord('A Foo')
def test_getitem_bass(self):
c = self.c_thirteen['bass']
self.assertEqual(c, 'C')
def test_getitem_third(self):
e = self.c_thirteen['THIRD']
self.assertEqual(e, 'E')
def test_getitem_fifth(self):
g = self.c_thirteen['FIFTH']
self.assertEqual(g, 'G')
def test_getitem_seventh(self):
b_flat = self.c_thirteen['SEVENTH']
self.assertEqual(b_flat, 'Bb')
def test_getitem_ninth(self):
d = self.c_thirteen['NINTH']
self.assertEqual(d, 'D')
def test_getitem_eleventh(self):
f = self.c_thirteen['ELEVENTH']
self.assertEqual(f, 'F')
def test_getitem_thirteenth(self):
a = self.c_thirteen['THIRTEENTH']
self.assertEqual(a, 'A')
def test_getitem_nonexistant_degree(self):
b_flat = self.c_major['SEVENTH']
self.assertTrue(b_flat is None)
def test_getitem_invalid_degree(self):
with self.assertRaises(InvalidDegreeError):
bad_degree = self.c_major['baddeg']
|
py | b400a8428b1d1dfc79df0020841def14201b5054 | #!/usr/bin/env python3
#pylint: skip-file
#import os
#import time
#import math
#import atexit
#import numpy as np
#import threading
#import random
import cereal.messaging as messaging
#import argparse
#from common.params import Params
from common.realtime import Ratekeeper
#import queue
#import requests
#import cereal.messaging.messaging_pyx as messaging_pyx
#import datetime
#import json
from common.op_params import opParams
def main():
op_params = opParams()
rk = Ratekeeper(1.0, print_delay_threshold=None)
sm = messaging.SubMaster(['liveMapData'])
while 1:
sm.update()
if sm.updated['liveMapData']:
print (sm['liveMapData'])
camera_offset = op_params.get('camera_offset')
print (camera_offset)
#sm.update()
rk.keep_time()
if __name__ == "__main__":
main()
|
py | b400a8c51771f3ad045199d94bfec2e236c47dbb | # '''Trains a memory network on each of the 2 bAbI datasets.
# References:
# - Jason Weston, Antoine Bordes, Sumit Chopra, Tomas Mikolov, Alexander M. Rush,
# "Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks",
# http://arxiv.org/abs/1502.05698
# - Sainbayar Sukhbaatar, Arthur Szlam, Jason Weston, Rob Fergus,
# "End-To-End Memory Networks",
# http://arxiv.org/abs/1503.08895
# '''
#run pip install -r requirements.txt or pip3 install -r requirements.txt
#to install dependencies
from __future__ import print_function
from tensorflow.keras import callbacks
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras import layers
from tensorflow.keras.layers import Embedding, Input, Activation, Dense, Permute, Dropout, add, dot, concatenate, LSTM
from tensorflow.keras.utils import get_file
from tensorflow.keras.preprocessing.sequence import pad_sequences
from functools import reduce
import tarfile
import numpy as np
import re
import os
from random import shuffle
# configure challenge type here
challenge_type = 'single_supporting_fact_10k'
# challenge_type = 'two_supporting_facts_10k'
# configure epoch here
# in 'single_supporting_fact_10k', if epochs != 1, epochs = 120
# in 'two_supporting_facts_10k', if epochs != 1, epochs = 40
epochs = 40
# when using Jupyter Notebook, use os.getcwd(); when running as a script,
# the assignment below takes effect (keep only the one you need)
# dir = os.getcwd()
dir = os.path.dirname(__file__)
batch_size = None
dropout = 0.3
def tokenize(sent):
    # split on runs of non-word characters and keep them as separate tokens;
    # the original r'(\W+)?' pattern splits on empty matches under Python 3.7+
    return [x.strip() for x in re.split(r'(\W+)', sent) if x.strip()]
# can also use nltk
# from nltk.tokenize import word_tokenize
# import nltk
# nltk.download('punkt')
# def tokenize(sent):
# return word_tokenize(sent)
def parse_stories(lines, only_supporting=False):
'''Parse stories provided in the bAbi tasks format'''
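    # Illustrative bAbI line format (example values, not taken from the dataset):
    #   1 Mary moved to the bathroom.
    #   2 Where is Mary?\tbathroom\t1
    # Statement lines are numbered; question lines additionally carry a
    # tab-separated answer and the id(s) of the supporting statement(s).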
data = []
story = []
for line in lines:
line = line.decode('utf-8').strip()
nid, line = line.split(' ', 1)
nid = int(nid)
if nid == 1:
story = []
if '\t' in line:
q, a, supporting = line.split('\t')
q = tokenize(q)
substory = None
#If only_supporting is true, only the sentences
#that support the answer are kept.
if only_supporting:
# Only select the related substory
supporting = map(int, supporting.split())
substory = [story[i - 1] for i in supporting]
else:
# Provide all the substories
substory = [x for x in story if x]
data.append((substory, q, a))
story.append('')
else:
sent = tokenize(line)
story.append(sent)
return data
def get_stories(f, only_supporting=False, max_length=None):
'''Given a file name, read the file, retrieve the stories'''
data = parse_stories(f.readlines(), only_supporting=only_supporting)
def flatten(data): return reduce(lambda x, y: x + y, data)
# convert the sentences into a single story
# If max_length is supplied, any stories longer than max_length tokens will be discarded.
data = [(flatten(story), q, answer) for story, q,
answer in data if not max_length or len(flatten(story)) < max_length]
return data
def vectorize_stories(data, word_idx, story_maxlen, query_maxlen):
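    # Map every word to its vocabulary index, pad stories/queries to fixed
    # lengths, and one-hot encode the answer (index 0 is reserved for padding).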
X = []
Xq = []
Y = []
for story, query, answer in data:
x = [word_idx[w] for w in story]
xq = [word_idx[w] for w in query]
# index 0 is reserved
y = np.zeros(len(word_idx) + 1)
y[word_idx[answer]] = 1
X.append(x)
Xq.append(xq)
Y.append(y)
return (pad_sequences(X, maxlen=story_maxlen),
pad_sequences(Xq, maxlen=query_maxlen), np.array(Y))
try:
path = get_file('babi-tasks-v1-2.tar.gz',
origin='https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz')
except:
print('Error downloading dataset, please download it manually:\n'
'$ wget http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2.tar.gz\n'
'$ mv tasks_1-20_v1-2.tar.gz ~/.keras/datasets/babi-tasks-v1-2.tar.gz')
raise
tar = tarfile.open(path).extractfile
challenges = {
# QA1 with 10,000 samples
'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt',
# QA2 with 10,000 samples
'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt',
}
challenge = challenges[challenge_type]
print('Extracting stories for the challenge:', challenge_type)
train_stories = get_stories(tar(challenge.format('train')))
test_stories = get_stories(tar(challenge.format('test')))
shuffle(train_stories)
shuffle(test_stories)
vocab = set()
for story, q, answer in train_stories + test_stories:
vocab |= set(story + q + [answer])
vocab = sorted(vocab)
# Reserve 0 for masking via pad_sequences
vocab_size = len(vocab) + 1
story_maxlen = max(map(len, (x for x, _, _ in train_stories + test_stories)))
query_maxlen = max(map(len, (x for _, x, _ in train_stories + test_stories)))
print('-')
print('Vocab size:', vocab_size, 'unique words')
print('Story max length:', story_maxlen, 'words')
print('Query max length:', query_maxlen, 'words')
print('Number of training stories:', len(train_stories))
print('Number of test stories:', len(test_stories))
print('-')
print('Here\'s what a "story" tuple looks like (input, , answer):')
print(train_stories[0])
print('-')
print('Vectorizing the word sequences...')
word_idx = dict((c, i + 1) for i, c in enumerate(vocab))
inputs_train, queries_train, answers_train = vectorize_stories(train_stories,
word_idx,
story_maxlen,
query_maxlen)
inputs_test, queries_test, answers_test = vectorize_stories(test_stories,
word_idx,
story_maxlen,
query_maxlen)
print('-')
print('inputs: integer tensor of shape (samples, max_length)')
print('inputs_train shape:', inputs_train.shape)
print('inputs_test shape:', inputs_test.shape)
print('-')
print('queries: integer tensor of shape (samples, max_length)')
print('queries_train shape:', queries_train.shape)
print('queries_test shape:', queries_test.shape)
print('-')
print('answers: binary (1 or 0) tensor of shape (samples, vocab_size)')
print('answers_train shape:', answers_train.shape)
print('answers_test shape:', answers_test.shape)
print('-')
print('Building model...')
def one_supporting_facts(epochs=epochs, batch_size=batch_size,
dropout=dropout, output_dim=64,
LSTM_unit=32):
input_sequence = Input((story_maxlen,))
question = Input((query_maxlen,))
input_encoder_m = Sequential()
input_encoder_m.add(Embedding(input_dim=vocab_size,
output_dim=output_dim))
input_encoder_m.add(Dropout(dropout))
input_encoder_c = Sequential()
input_encoder_c.add(Embedding(input_dim=vocab_size,
output_dim=query_maxlen))
input_encoder_c.add(Dropout(dropout))
question_encoder = Sequential()
question_encoder.add(Embedding(input_dim=vocab_size,
output_dim=output_dim,
input_length=query_maxlen))
question_encoder.add(Dropout(dropout))
input_encoded_m = input_encoder_m(input_sequence)
input_encoded_c = input_encoder_c(input_sequence)
question_encoded = question_encoder(question)
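    # Attention step of the end-to-end memory network: dot-product similarity
    # between each story position and the question, softmaxed over positions.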
match = dot([input_encoded_m, question_encoded], axes=(2, 2))
match = Activation('softmax')(match)
response = add([match, input_encoded_c])
response = Permute((2, 1))(response)
answer = concatenate([response, question_encoded])
answer = LSTM(LSTM_unit)(answer)
answer = Dropout(dropout)(answer)
answer = Dense(vocab_size)(answer)
answer = Activation('softmax')(answer)
model = Model([input_sequence, question], answer)
return model
def two_supporting_facts(epochs=epochs, batch_size=batch_size,
dropout=dropout, embed_hidden_size=50,
sent_hidden_size=100,
query_hidden_size=100):
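    # Simpler LSTM baseline: embed story and question, encode the question with
    # an LSTM, broadcast it over the story positions, and read out the answer
    # with a second LSTM plus a softmax layer.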
sentence = layers.Input(shape=(story_maxlen,), dtype='int32')
encoded_sentence = layers.Embedding(
vocab_size, embed_hidden_size)(sentence)
encoded_sentence = layers.Dropout(dropout)(encoded_sentence)
question = layers.Input(shape=(query_maxlen,), dtype='int32')
encoded_question = layers.Embedding(
vocab_size, embed_hidden_size)(question)
encoded_question = layers.Dropout(dropout)(encoded_question)
encoded_question = LSTM(embed_hidden_size)(encoded_question)
encoded_question = layers.RepeatVector(story_maxlen)(encoded_question)
merged = layers.add([encoded_sentence, encoded_question])
merged = LSTM(embed_hidden_size)(merged)
merged = layers.Dropout(dropout)(merged)
preds = layers.Dense(vocab_size, activation='softmax')(merged)
model = Model([sentence, question], preds)
return model
if challenge_type == 'single_supporting_fact_10k':
if epochs != 1:
epochs = 120
# filepath = os.path.join(dir, 'one_fact_chatbot_model.h5')
filepath = os.path.join(dir, 'one_fact_chatbot_model_weight.h5')
log_dir = os.path.join(dir, 'logs/1')
output_dim = 64
LSTM_unit = 32
model = one_supporting_facts(epochs=epochs, batch_size=batch_size,
dropout=dropout, output_dim=output_dim,
LSTM_unit=LSTM_unit)
else:
if epochs != 1:
epochs = 40
filepath = os.path.join(dir, 'two_facts_chatbot_model_weight.h5')
log_dir = os.path.join(dir, 'logs/2')
embed_hidden_size = 20
sent_hidden_size = 100
query_hidden_size = 100
model = two_supporting_facts(dropout=dropout, batch_size=batch_size,
epochs=epochs,
embed_hidden_size=embed_hidden_size,
sent_hidden_size=sent_hidden_size,
query_hidden_size=query_hidden_size)
model.compile(optimizer='adam', loss='categorical_crossentropy',
metrics=['accuracy'])
# make the file directories needed
new_dir = os.path.join(dir, 'logs')
if not os.path.exists(new_dir):
os.makedirs(new_dir)
# train
print('Training...')
callback_list = [
    callbacks.ModelCheckpoint(filepath=filepath, verbose=1,
                              monitor='val_loss', save_best_only=True),
    # Check out the train history later with Tensorboard
    callbacks.TensorBoard(log_dir=log_dir),
    callbacks.EarlyStopping(patience=20)]
model.fit([inputs_train, queries_train], answers_train,
          batch_size=batch_size,
          epochs=epochs,
          validation_split=0.1,
          callbacks=callback_list)
# del model
# model = load_model(filepath=filepath)
model.load_weights(filepath=filepath)
# evaluate
test_result = model.evaluate([inputs_test, queries_test], answers_test)
print(
f'Test result:\nTest loss = {test_result[0]}, Test accuracy = {test_result[1]}')
# predict
predictions = model.predict([inputs_test, queries_test])
re_word_idx = {v: k for k, v in word_idx.items()}
for i in range(9):
for j, k in enumerate(test_stories[i]):
if j < 2:
print('\n' + ' '.join(k))
if j == 2:
print('\nGround truth: ' + ''.join(k))
predicted = re_word_idx[np.argmax(predictions[i])]
print(f'Prediction : {predicted}\n')
|
py | b400a927b5e475f9fccab4283d05899ba92f0cef | """
Interfaces with SimpliSafe alarm control panel.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/alarm_control_panel.simplisafe/
"""
import logging
import re
import voluptuous as vol
from homeassistant.components.alarm_control_panel import (
PLATFORM_SCHEMA, AlarmControlPanel)
from homeassistant.const import (
CONF_CODE, CONF_NAME, CONF_PASSWORD, CONF_USERNAME,
STATE_ALARM_ARMED_AWAY, STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED, STATE_UNKNOWN)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['simplisafe-python==2.0.2']
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'SimpliSafe'
ATTR_ALARM_ACTIVE = "alarm_active"
ATTR_TEMPERATURE = "temperature"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_CODE): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the SimpliSafe platform."""
from simplipy.api import SimpliSafeApiInterface, SimpliSafeAPIException
name = config.get(CONF_NAME)
code = config.get(CONF_CODE)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
try:
simplisafe = SimpliSafeApiInterface(username, password)
except SimpliSafeAPIException:
_LOGGER.error("Failed to setup SimpliSafe")
return
systems = []
for system in simplisafe.get_systems():
systems.append(SimpliSafeAlarm(system, name, code))
add_devices(systems)
class SimpliSafeAlarm(AlarmControlPanel):
"""Representation of a SimpliSafe alarm."""
def __init__(self, simplisafe, name, code):
"""Initialize the SimpliSafe alarm."""
self.simplisafe = simplisafe
self._name = name
self._code = str(code) if code else None
@property
def unique_id(self):
"""Return the unique ID."""
return self.simplisafe.location_id
@property
def name(self):
"""Return the name of the device."""
if self._name is not None:
return self._name
return 'Alarm {}'.format(self.simplisafe.location_id)
@property
def code_format(self):
"""Return one or more digits/characters."""
if self._code is None:
return None
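        # An all-digit code returns 'Number' so the frontend can show a numeric
        # keypad; anything else falls back to free-form input ('Any').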
if isinstance(self._code, str) and re.search('^\\d+$', self._code):
return 'Number'
return 'Any'
@property
def state(self):
"""Return the state of the device."""
status = self.simplisafe.state
if status.lower() == 'off':
state = STATE_ALARM_DISARMED
elif status.lower() == 'home' or status.lower() == 'home_count':
state = STATE_ALARM_ARMED_HOME
        elif (status.lower() == 'away' or status.lower() == 'exitdelay' or
                status.lower() == 'away_count'):
state = STATE_ALARM_ARMED_AWAY
else:
state = STATE_UNKNOWN
return state
@property
def device_state_attributes(self):
"""Return the state attributes."""
attributes = {}
attributes[ATTR_ALARM_ACTIVE] = self.simplisafe.alarm_active
if self.simplisafe.temperature is not None:
attributes[ATTR_TEMPERATURE] = self.simplisafe.temperature
return attributes
def update(self):
"""Update alarm status."""
self.simplisafe.update()
def alarm_disarm(self, code=None):
"""Send disarm command."""
if not self._validate_code(code, 'disarming'):
return
self.simplisafe.set_state('off')
_LOGGER.info("SimpliSafe alarm disarming")
def alarm_arm_home(self, code=None):
"""Send arm home command."""
if not self._validate_code(code, 'arming home'):
return
self.simplisafe.set_state('home')
_LOGGER.info("SimpliSafe alarm arming home")
def alarm_arm_away(self, code=None):
"""Send arm away command."""
if not self._validate_code(code, 'arming away'):
return
self.simplisafe.set_state('away')
_LOGGER.info("SimpliSafe alarm arming away")
def _validate_code(self, code, state):
"""Validate given code."""
check = self._code is None or code == self._code
if not check:
_LOGGER.warning("Wrong code entered for %s", state)
return check
|
py | b400ab1ba5d7d09bd723ee151ceb16f0baa60c58 | import os
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'mptt',
'easy_thumbnails',
'filer',
'cmsplugin_image',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
ROOT_URLCONF = 'cmsplugin_image.tests.urls'
HERE = os.path.dirname(os.path.realpath(__file__))
MEDIA_ROOT = os.path.abspath(os.path.join(HERE, 'media'))
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
SECRET_KEY = 'secret'
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'context_processors': (
'django.template.context_processors.request',
),
},
},
]
|
py | b400ab720765f382c16efc0886f52963d5b851f7 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="badtooth", # Replace with your own username
version="0.0.1",
author="Samson PianoFingers",
author_email="[email protected]",
description="A memory hacking package for windows games.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/samsonpianofingers/BadTooth",
packages=setuptools.find_packages(),
install_requires=['capstone', 'keystone-engine', 'pefile'],
classifiers=[
'Development Status :: 3 - Alpha',
"Programming Language :: Python :: 3.6",
"Intended Audience :: Other Audience",
"Topic :: Games/Entertainment",
"License :: OSI Approved :: MIT License",
"Operating System :: Microsoft :: Windows :: Windows 10"
],
python_requires='>=3.6',
) |
py | b400abafac6622443604a4b6bdc41dde8f7f171a | from flask import Flask, flash, request, redirect, url_for, render_template
from werkzeug.utils import secure_filename
from tensorflow.keras.models import load_model
import numpy as np
import os
import cv2
# Creating a Flask Instance
app = Flask(__name__)
IMAGE_SIZE = (150, 150)
UPLOAD_FOLDER = 'static\\uploads'
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
app.config.from_object(__name__)
app.config['SECRET_KEY'] = '7d441f27d441f27567d441f2b6176a'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
print("Loading Pre-trained Model ...")
model = load_model('model.h5')
def image_preprocessor(path):
'''
    Pre-process the image before feeding it to the model.
'''
print('Processing Image ...')
currImg_BGR = cv2.imread(path)
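    # OpenCV loads images in BGR channel order; split and re-merge the channels
    # to obtain RGB before resizing and scaling.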
b, g, r = cv2.split(currImg_BGR)
currImg_RGB = cv2.merge([r, g, b])
currImg = cv2.resize(currImg_RGB, IMAGE_SIZE)
currImg = currImg/255.0
currImg = np.reshape(currImg, (1, 150, 150, 3))
return currImg
def model_pred(image):
'''
    Performs predictions based on the input image.
'''
print("Image_shape", image.shape)
print("Image_dimension", image.ndim)
# Returns Probability:
# prediction = model.predict(image)[0]
# Returns class:
prediction = model.predict_classes(image)[0]
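    # Note: predict_classes() was removed from Keras Sequential models in newer
    # TensorFlow releases; an equivalent there would be
    # np.argmax(model.predict(image), axis=-1)[0].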
    # if prediction == 1:
    #     return "Pneumonia"
    # else:
    #     return "Normal"
return (prediction)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def home():
return render_template('index.html')
@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
# Checks if post request was submitted
if request.method == 'POST':
'''
- request.url - http://127.0.0.1:5000/
        - request.files - Dictionary of HTML element "name" attributes and corresponding file details, e.g.
"imageFile" : <FileStorage: 'Profile_Pic.jpg' ('image/jpeg')>
'''
# check if the post request has the file part
if 'imageFile' not in request.files:
flash('No file part')
return redirect(request.url)
# check if filename is an empty string
file = request.files['imageFile']
if file.filename == '':
flash('No selected file')
return redirect(request.url)
# if file is uploaded
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
imgPath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
file.save(imgPath)
print(f"Image saved at {imgPath}")
# Preprocessing Image
image = image_preprocessor(imgPath)
# Perfroming Prediction
pred = model_pred(image)
return render_template('upload.html', name=filename, result=pred)
return redirect(url_for('home'))
if __name__ == '__main__':
app.run(debug=True) |
py | b400ac37bad3a89ae5b78d9506d3e2938ce7abe7 | # Generated by Django 3.1.2 on 2020-11-23 21:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("django_peeringdb", "0013_add_phone_help_text"),
]
operations = [
migrations.AddField(
model_name="facility",
name="floor",
field=models.CharField(blank=True, max_length=255, verbose_name="Floor"),
),
migrations.AddField(
model_name="facility",
name="suite",
field=models.CharField(blank=True, max_length=255, verbose_name="Suite"),
),
migrations.AddField(
model_name="organization",
name="floor",
field=models.CharField(blank=True, max_length=255, verbose_name="Floor"),
),
migrations.AddField(
model_name="organization",
name="suite",
field=models.CharField(blank=True, max_length=255, verbose_name="Suite"),
),
]
|
py | b400ae3cfaca3acee8b95a7e0eda80467e0fde4c | from godot import exposed, Vector2, Area2D
DEFAULT_SPEED = 220
@exposed
class Ball(Area2D):
def _reset_ball(self, for_left):
self.position = self.screen_size / 2
if for_left:
self.direction = Vector2(-1, 0)
else:
self.direction = Vector2(1, 0)
self.ball_speed = DEFAULT_SPEED
def stop(self):
self.stopped = True
def _process(self, delta):
# ball will move normally for both players
		# even if it's slightly out of sync between them
# so each player sees the motion as smooth and not jerky
if not self.stopped:
self.translate(self.direction * self.ball_speed * delta)
# check screen bounds to make ball bounce
if (self.position.y < 0 and self.direction.y < 0) or (
self.position.y > self.screen_size.y and self.direction.y > 0
):
self.direction.y = -self.direction.y
if self.position.x < 0 or self.position.x > self.screen_size.x:
for_left = self.position.x > 0
self.get_parent().update_score(for_left)
self._reset_ball(for_left)
def bounce(self, left, random):
		# both players can make the ball bounce
if left:
self.direction.x = abs(self.direction.x)
else:
self.direction.x = -abs(self.direction.x)
self.ball_speed *= 1.1
self.direction.y = random * 2.0 - 1
self.direction = self.direction.normalized()
def _ready(self):
self.direction = Vector2(1, 0)
self.ball_speed = DEFAULT_SPEED
self.stopped = False
self.screen_size = self.get_viewport_rect().size
self.set_process(True) # REMOVE ME
|
py | b400ae92e6c2912a9ae5f8fcf2d08b7c1ba2d078 | # pylint: disable=no-name-in-module
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import itertools
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from contextlib import redirect_stderr, redirect_stdout, suppress
from datetime import timedelta
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import DefaultDict, Dict, Iterable, List, Optional, Set, Tuple
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_, tuple_
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import load_only, selectinload
from sqlalchemy.orm.session import Session, make_transient
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException, SerializedDagNotFound, TaskNotFound
from airflow.executors.executor_loader import UNPICKLEABLE_EXECUTORS
from airflow.jobs.base_job import BaseJob
from airflow.models import DAG, DagModel, SlaMiss, errors
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance, TaskInstanceKey
from airflow.stats import Stats
from airflow.ti_deps.dependencies_states import EXECUTION_STATES
from airflow.utils import timezone
from airflow.utils.callback_requests import (
CallbackRequest,
DagCallbackRequest,
SlaCallbackRequest,
TaskCallbackRequest,
)
from airflow.utils.dag_processing import AbstractDagFileProcessorProcess, DagFileProcessorAgent
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.event_scheduler import EventScheduler
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.retries import MAX_DB_RETRIES, retry_db_transaction, run_with_db_retries
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import is_lock_not_available_error, prohibit_commit, skip_locked, with_row_locks
from airflow.utils.state import State
from airflow.utils.types import DagRunType
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
class DagFileProcessorProcess(AbstractDagFileProcessorProcess, LoggingMixin, MultiprocessingStartMethodMixin):
"""Runs DAG processing in a separate process using DagFileProcessor
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: str
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(
self,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
callback_requests: List[CallbackRequest],
):
super().__init__()
self._file_path = file_path
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._callback_requests = callback_requests
        # The process that was launched to process the given file.
self._process: Optional[multiprocessing.process.BaseProcess] = None
# The result of DagFileProcessor.process_file(file_path).
self._result: Optional[Tuple[int, int]] = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time: Optional[datetime.datetime] = None
# This ID is use to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessorProcess.class_creation_counter
self._parent_channel: Optional[MultiprocessingConnection] = None
DagFileProcessorProcess.class_creation_counter += 1
@property
def file_path(self) -> str:
return self._file_path
@staticmethod
def _run_file_processor(
result_channel: MultiprocessingConnection,
parent_channel: MultiprocessingConnection,
file_path: str,
pickle_dags: bool,
dag_ids: Optional[List[str]],
thread_name: str,
callback_requests: List[CallbackRequest],
) -> None:
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param parent_channel: the parent end of the channel to close in the child
:type parent_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: str
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_ids: if specified, only examine DAG ID's that are
in this list
:type dag_ids: list[str]
:param thread_name: the name to use for the process that is launched
:type thread_name: str
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:return: the process that was launched
:rtype: multiprocessing.Process
"""
# This helper runs in the newly created process
log: logging.Logger = logging.getLogger("airflow.processor")
# Since we share all open FDs from the parent, we need to close the parent side of the pipe here in
# the child, else it won't get closed properly until we exit.
log.info("Closing parent pipe")
parent_channel.close()
del parent_channel
set_context(log, file_path)
setproctitle(f"airflow scheduler - DagFileProcessor {file_path}")
try:
# redirect stdout/stderr to log
with redirect_stdout(StreamLogWriter(log, logging.INFO)), redirect_stderr(
StreamLogWriter(log, logging.WARN)
), Stats.timer() as timer:
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
dag_file_processor = DagFileProcessor(dag_ids=dag_ids, log=log)
result: Tuple[int, int] = dag_file_processor.process_file(
file_path=file_path,
pickle_dags=pickle_dags,
callback_requests=callback_requests,
)
result_channel.send(result)
log.info("Processing %s took %.3f seconds", file_path, timer.duration)
except Exception: # pylint: disable=broad-except
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
result_channel.close()
def start(self) -> None:
"""Launch the process and start processing the DAG."""
start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(start_method)
_parent_channel, _child_channel = context.Pipe(duplex=False)
process = context.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
_parent_channel,
self.file_path,
self._pickle_dags,
self._dag_ids,
f"DagFileProcessor{self._instance_id}",
self._callback_requests,
),
name=f"DagFileProcessor{self._instance_id}-Process",
)
self._process = process
self._start_time = timezone.utcnow()
process.start()
# Close the child side of the pipe now the subprocess has started -- otherwise this would prevent it
# from closing in some cases
_child_channel.close()
del _child_channel
# Don't store it on self until after we've started the child process - we don't want to keep it from
# getting GCd/closed
self._parent_channel = _parent_channel
def kill(self) -> None:
"""Kill the process launched to process the file, and ensure consistent state."""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
self._kill_process()
def terminate(self, sigkill: bool = False) -> None:
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
with suppress(TimeoutError):
self._process._popen.wait(5) # type: ignore # pylint: disable=protected-access
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self) -> None:
if self._process is None:
raise AirflowException("Tried to kill process before starting!")
if self._process.is_alive() and self._process.pid:
self.log.warning("Killing DAGFileProcessorProcess (PID=%d)", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
if self._parent_channel:
self._parent_channel.close()
@property
def pid(self) -> int:
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None or self._process.pid is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self) -> Optional[int]:
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get exit code before starting!")
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self) -> bool:
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None or self._parent_channel is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
# If we get an EOFError, it means the child end of the pipe has been closed. This only happens
# in the finally block. But due to a possible race condition, the process may have not yet
# terminated (it could be doing cleanup/python shutdown still). So we kill it here after a
# "suitable" timeout.
self._done = True
# Arbitrary timeout -- error/race condition only, so this doesn't need to be tunable.
self._process.join(timeout=5)
if self._process.is_alive():
# Didn't shut down cleanly - kill it
self._kill_process()
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self) -> Optional[Tuple[int, int]]:
"""
:return: result of running DagFileProcessor.process_file()
:rtype: tuple[int, int] or None
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self) -> datetime.datetime:
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
@property
def waitable_handle(self):
return self._process.sentinel
class DagFileProcessor(LoggingMixin):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Execute any Callbacks if passed to DagFileProcessor.process_file
3. Serialize the DAGs and save it to DB (or update existing record in the DB).
4. Pickle the DAG and save it to the DB (if necessary).
5. Record any errors importing the file into ORM
Returns a tuple of 'number of dags found' and 'the count of import errors'
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param log: Logger to save the processing process
:type log: logging.Logger
"""
UNIT_TEST_MODE: bool = conf.getboolean('core', 'UNIT_TEST_MODE')
def __init__(self, dag_ids: Optional[List[str]], log: logging.Logger):
super().__init__()
self.dag_ids = dag_ids
self._log = log
@provide_session
def manage_slas(self, dag: DAG, session: Session = None) -> None:
"""
Finding all tasks that have SLAs defined, and sending alert emails
where needed. New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
self.log.info("Running SLA Checks for %s", dag.dag_id)
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
qry = (
session.query(TI.task_id, func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(or_(TI.state == State.SUCCESS, TI.state == State.SKIPPED))
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id)
.subquery('sq')
)
max_tis: List[TI] = (
session.query(TI)
.filter(
TI.dag_id == dag.dag_id,
TI.task_id == qry.c.task_id,
TI.execution_date == qry.c.max_ti,
)
.all()
)
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
if task.sla and not isinstance(task.sla, timedelta):
raise TypeError(
f"SLA is expected to be timedelta object, got "
f"{type(task.sla)} in {task.dag_id}:{task.task_id}"
)
dttm = dag.following_schedule(ti.execution_date)
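            # Walk forward through the schedule from the last successful run; every
            # past run whose SLA window has already elapsed is recorded as a miss.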
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(
SlaMiss(task_id=ti.task_id, dag_id=ti.dag_id, execution_date=dttm, timestamp=ts)
)
dttm = dag.following_schedule(dttm)
session.commit()
# pylint: disable=singleton-comparison
slas: List[SlaMiss] = (
session.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa
.all()
)
# pylint: enable=singleton-comparison
if slas: # pylint: disable=too-many-nested-blocks
sla_dates: List[datetime.datetime] = [sla.execution_date for sla in slas]
fetched_tis: List[TI] = (
session.query(TI)
.filter(TI.state != State.SUCCESS, TI.execution_date.in_(sla_dates), TI.dag_id == dag.dag_id)
.all()
)
blocking_tis: List[TI] = []
for ti in fetched_tis:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join(sla.task_id + ' on ' + sla.execution_date.isoformat() for sla in slas)
blocking_task_list = "\n".join(
ti.task_id + ' on ' + ti.execution_date.isoformat() for ti in blocking_tis
)
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info('Calling SLA miss callback')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas, blocking_tis)
notification_sent = True
except Exception: # pylint: disable=broad-except
self.log.exception("Could not call sla_miss_callback for DAG %s", dag.dag_id)
email_content = f"""\
Here's a list of tasks that missed their SLAs:
            <pre><code>{task_list}\n</code></pre>
            Blocking tasks:
            <pre><code>{blocking_task_list}</code></pre>
Airflow Webserver URL: {conf.get(section='webserver', key='base_url')}
"""
tasks_missed_sla = []
for sla in slas:
try:
task = dag.get_task(sla.task_id)
except TaskNotFound:
# task already deleted from DAG, skip it
self.log.warning(
"Task %s doesn't exist in DAG anymore, skipping SLA miss notification.", sla.task_id
)
continue
tasks_missed_sla.append(task)
emails: Set[str] = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, str):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(emails, f"[airflow] SLA miss on DAG={dag.dag_id}", email_content)
email_sent = True
notification_sent = True
except Exception: # pylint: disable=broad-except
Stats.incr('sla_email_notification_failure')
self.log.exception("Could not send SLA Miss email notification for DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
sla.email_sent = email_sent
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session: Session, dagbag: DagBag) -> None:
"""
        For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(errors.ImportError.filename == dagbag_file).delete()
# Add the errors of the processed files
for filename, stacktrace in dagbag.import_errors.items():
session.add(
errors.ImportError(filename=filename, timestamp=timezone.utcnow(), stacktrace=stacktrace)
)
session.commit()
@provide_session
def execute_callbacks(
self, dagbag: DagBag, callback_requests: List[CallbackRequest], session: Session = None
) -> None:
"""
Execute on failure callbacks. These objects can come from SchedulerJob or from
DagFileProcessorManager.
:param dagbag: Dag Bag of dags
:param callback_requests: failure callbacks to execute
:type callback_requests: List[airflow.utils.callback_requests.CallbackRequest]
:param session: DB session.
"""
for request in callback_requests:
self.log.debug("Processing Callback Request: %s", request)
try:
if isinstance(request, TaskCallbackRequest):
self._execute_task_callbacks(dagbag, request)
elif isinstance(request, SlaCallbackRequest):
self.manage_slas(dagbag.dags.get(request.dag_id))
elif isinstance(request, DagCallbackRequest):
self._execute_dag_callbacks(dagbag, request, session)
except Exception: # pylint: disable=broad-except
self.log.exception(
"Error executing %s callback for file: %s",
request.__class__.__name__,
request.full_filepath,
)
session.commit()
@provide_session
def _execute_dag_callbacks(self, dagbag: DagBag, request: DagCallbackRequest, session: Session):
dag = dagbag.dags[request.dag_id]
dag_run = dag.get_dagrun(execution_date=request.execution_date, session=session)
dag.handle_callback(
dagrun=dag_run, success=not request.is_failure_callback, reason=request.msg, session=session
)
def _execute_task_callbacks(self, dagbag: DagBag, request: TaskCallbackRequest):
simple_ti = request.simple_task_instance
if simple_ti.dag_id in dagbag.dags:
dag = dagbag.dags[simple_ti.dag_id]
if simple_ti.task_id in dag.task_ids:
task = dag.get_task(simple_ti.task_id)
ti = TI(task, simple_ti.execution_date)
# Get properties needed for failure handling from SimpleTaskInstance.
ti.start_date = simple_ti.start_date
ti.end_date = simple_ti.end_date
ti.try_number = simple_ti.try_number
ti.state = simple_ti.state
ti.test_mode = self.UNIT_TEST_MODE
if request.is_failure_callback:
ti.handle_failure_with_callback(error=request.msg, test_mode=ti.test_mode)
self.log.info('Executed failure callback for %s in state %s', ti, ti.state)
@provide_session
def process_file(
self,
file_path: str,
callback_requests: List[CallbackRequest],
pickle_dags: bool = False,
session: Session = None,
) -> Tuple[int, int]:
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Execute any Callbacks if passed to this method.
3. Serialize the DAGs and save it to DB (or update existing record in the DB).
4. Pickle the DAG and save it to the DB (if necessary).
5. Record any errors importing the file into ORM
:param file_path: the path to the Python file that should be executed
:type file_path: str
:param callback_requests: failure callback to execute
:type callback_requests: List[airflow.utils.dag_processing.CallbackRequest]
:param pickle_dags: whether serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:param session: Sqlalchemy ORM Session
:type session: Session
:return: number of dags found, count of import errors
:rtype: Tuple[int, int]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
try:
dagbag = DagBag(file_path, include_examples=False, include_smart_sensor=False)
except Exception: # pylint: disable=broad-except
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return 0, 0
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return 0, len(dagbag.import_errors)
self.execute_callbacks(dagbag, callback_requests)
# Save individual DAGs in the ORM
dagbag.sync_to_db()
if pickle_dags:
paused_dag_ids = DagModel.get_paused_dag_ids(dag_ids=dagbag.dag_ids)
unpaused_dags: List[DAG] = [
dag for dag_id, dag in dagbag.dags.items() if dag_id not in paused_dag_ids
]
for dag in unpaused_dags:
dag.pickle(session)
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception: # pylint: disable=broad-except
self.log.exception("Error logging import errors!")
return len(dagbag.dags), len(dagbag.import_errors)
class SchedulerJob(BaseJob): # pylint: disable=too-many-instance-attributes
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: str
:param num_runs: The number of times to run the scheduling loop. If you
have a large number of DAG files this could complete before each file
has been parsed. -1 for unlimited times.
:type num_runs: int
:param num_times_parse_dags: The number of times to try to parse each DAG file.
-1 for unlimited times.
:type num_times_parse_dags: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
:param log: override the default Logger
:type log: logging.Logger
"""
__mapper_args__ = {'polymorphic_identity': 'SchedulerJob'}
heartrate: int = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
self,
subdir: str = settings.DAGS_FOLDER,
num_runs: int = conf.getint('scheduler', 'num_runs'),
num_times_parse_dags: int = -1,
processor_poll_interval: float = conf.getfloat('scheduler', 'processor_poll_interval'),
do_pickle: bool = False,
log: logging.Logger = None,
*args,
**kwargs,
):
self.subdir = subdir
self.num_runs = num_runs
# In specific tests, we want to stop the parse loop after the _files_ have been parsed a certain
# number of times. This is only to support testing, and isn't something a user is likely to want to
# configure -- they'll want num_runs
self.num_times_parse_dags = num_times_parse_dags
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super().__init__(*args, **kwargs)
if log:
self._log = log
# Check what SQL backend we use
sql_conn: str = conf.get('core', 'sql_alchemy_conn').lower()
self.using_sqlite = sql_conn.startswith('sqlite')
self.using_mysql = sql_conn.startswith('mysql')
self.max_tis_per_query: int = conf.getint('scheduler', 'max_tis_per_query')
self.processor_agent: Optional[DagFileProcessorAgent] = None
self.dagbag = DagBag(dag_folder=self.subdir, read_dags_from_db=True, load_op_links=False)
def register_signals(self) -> None:
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
signal.signal(signal.SIGUSR2, self._debug_dump)
def _exit_gracefully(self, signum, frame) -> None: # pylint: disable=unused-argument
"""Helper method to clean up processor_agent to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def _debug_dump(self, signum, frame): # pylint: disable=unused-argument
try:
sig_name = signal.Signals(signum).name # pylint: disable=no-member
except Exception: # pylint: disable=broad-except
sig_name = str(signum)
self.log.info("%s\n%s received, printing debug\n%s", "-" * 80, sig_name, "-" * 80)
self.executor.debug_dump()
self.log.info("-" * 80)
def is_alive(self, grace_multiplier: Optional[float] = None) -> bool:
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold: int = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING
and (timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
@provide_session
def _change_state_for_tis_without_dagrun(
self, old_states: List[str], new_state: str, session: Session = None
) -> None:
"""
For all DAG IDs in the DagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can if the state of DagRuns are
changed manually.
:param old_states: examine TaskInstances in this state
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
"""
tis_changed = 0
query = (
session.query(models.TaskInstance)
.outerjoin(models.TaskInstance.dag_run)
.filter(models.TaskInstance.dag_id.in_(list(self.dagbag.dag_ids)))
.filter(models.TaskInstance.state.in_(old_states))
.filter(
or_(
# pylint: disable=comparison-with-callable
models.DagRun.state != State.RUNNING,
# pylint: disable=no-member
models.DagRun.state.is_(None),
)
)
)
# We need to do this for mysql as well because it can cause deadlocks
# as discussed in https://issues.apache.org/jira/browse/AIRFLOW-2516
if self.using_sqlite or self.using_mysql:
tis_to_change: List[TI] = with_row_locks(
query, of=TI, session=session, **skip_locked(session=session)
).all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
current_time = timezone.utcnow()
ti_prop_update = {
models.TaskInstance.state: new_state,
models.TaskInstance.start_date: current_time,
}
# Only add end_date and duration if the new_state is 'success', 'failed' or 'skipped'
if new_state in State.finished:
ti_prop_update.update(
{
models.TaskInstance.end_date: current_time,
models.TaskInstance.duration: 0,
}
)
tis_changed = (
session.query(models.TaskInstance)
.filter(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date == subq.c.execution_date,
)
.update(ti_prop_update, synchronize_session=False)
)
if tis_changed > 0:
session.flush()
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed,
new_state,
)
Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(
self, states: List[str], session: Session = None
) -> Tuple[DefaultDict[str, int], DefaultDict[Tuple[str, str], int]]:
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: A map from (dag_id, task_id) to # of task instances and
a map from (dag_id, task_id) to # of task instances in the given state list
:rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
"""
ti_concurrency_query: List[Tuple[str, str, int]] = (
session.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map: DefaultDict[str, int] = defaultdict(int)
task_map: DefaultDict[Tuple[str, str], int] = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
# pylint: disable=too-many-locals,too-many-statements
@provide_session
def _executable_task_instances_to_queued(self, max_tis: int, session: Session = None) -> List[TI]:
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param max_tis: Maximum number of TIs to queue in this loop.
:type max_tis: int
:return: list[airflow.models.TaskInstance]
"""
executable_tis: List[TI] = []
# Get the pool settings. We get a lock on the pool rows, treating this as a "critical section"
# Throws an exception if lock cannot be obtained, rather than blocking
pools = models.Pool.slots_stats(lock_rows=True, session=session)
# If the pools are full, there is no point doing anything!
# If _somehow_ the pool is overfull, don't let the limit go negative - it breaks SQL
pool_slots_free = max(0, sum(pool['open'] for pool in pools.values()))
if pool_slots_free == 0:
self.log.debug("All pools are full!")
return executable_tis
max_tis = min(max_tis, pool_slots_free)
        # Get all task instances associated with scheduled
        # DagRuns which are not backfilled, in the given states,
        # and whose dag is not paused
query = (
session.query(TI)
.outerjoin(TI.dag_run)
.filter(or_(DR.run_id.is_(None), DR.run_type != DagRunType.BACKFILL_JOB))
.join(TI.dag_model)
.filter(not_(DM.is_paused))
.filter(TI.state == State.SCHEDULED)
.options(selectinload('dag_model'))
)
starved_pools = [pool_name for pool_name, stats in pools.items() if stats['open'] <= 0]
if starved_pools:
query = query.filter(not_(TI.pool.in_(starved_pools)))
query = query.limit(max_tis)
task_instances_to_examine: List[TI] = with_row_locks(
query,
of=TI,
session=session,
**skip_locked(session=session),
).all()
# TODO[HA]: This was wrong before anyway, as it only looked at a sub-set of dags, not everything.
# Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(repr(x) for x in task_instances_to_examine)
self.log.info("%s tasks up for execution:\n\t%s", len(task_instances_to_examine), task_instance_str)
pool_to_task_instances: DefaultDict[str, List[models.Pool]] = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map: DefaultDict[str, int]
task_concurrency_map: DefaultDict[Tuple[str, str], int]
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=list(EXECUTION_STATES), session=session
)
num_tasks_in_executor = 0
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks_total = 0
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
# pylint: disable=too-many-nested-blocks
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning("Tasks using non-existent pool '%s' will not be scheduled", pool)
continue
open_slots = pools[pool]["open"]
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool,
open_slots,
num_ready,
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date)
)
num_starving_tasks = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info("Not scheduling since there are %s open slots in pool %s", open_slots, pool)
# Can't schedule any more since there are no more open slots.
num_unhandled = len(priority_sorted_task_instances) - current_index
num_starving_tasks += num_unhandled
num_starving_tasks_total += num_unhandled
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = task_instance.dag_model.concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id,
current_dag_concurrency,
dag_concurrency_limit,
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance,
dag_id,
dag_concurrency_limit,
)
continue
task_concurrency_limit: Optional[int] = None
if task_instance.dag_model.has_task_concurrency_limits:
                    # Many dags don't have a task_concurrency, so wherever we can avoid loading the full
                    # serialized DAG, the better.
serialized_dag = self.dagbag.get_dag(dag_id, session=session)
if serialized_dag.has_task(task_instance.task_id):
task_concurrency_limit = serialized_dag.get_task(
task_instance.task_id
).task_concurrency
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info(
"Not executing %s since the task concurrency for"
" this task has been reached.",
task_instance,
)
continue
if task_instance.pool_slots > open_slots:
self.log.info(
"Not executing %s since it requires %s slots "
"but there are %s open slots in the pool %s.",
task_instance,
task_instance.pool_slots,
open_slots,
pool,
)
num_starving_tasks += 1
num_starving_tasks_total += 1
# Though we can execute tasks with lower priority if there's enough room
continue
executable_tis.append(task_instance)
open_slots -= task_instance.pool_slots
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge(f'pool.starving_tasks.{pool_name}', num_starving_tasks)
Stats.gauge('scheduler.tasks.starving', num_starving_tasks_total)
Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
Stats.gauge('scheduler.tasks.executable', len(executable_tis))
task_instance_str = "\n\t".join(repr(x) for x in executable_tis)
self.log.info("Setting the following tasks to queued state:\n\t%s", task_instance_str)
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(executable_tis)
session.query(TI).filter(filter_for_tis).update(
# TODO[ha]: should we use func.now()? How does that work with DB timezone on mysql when it's not
# UTC?
{TI.state: State.QUEUED, TI.queued_dttm: timezone.utcnow(), TI.queued_by_job_id: self.id},
synchronize_session=False,
)
for ti in executable_tis:
make_transient(ti)
return executable_tis
def _enqueue_task_instances_with_queued_state(self, task_instances: List[TI]) -> None:
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param task_instances: TaskInstances to enqueue
:type task_instances: list[TaskInstance]
"""
# actually enqueue them
for ti in task_instances:
command = TI.generate_command(
ti.dag_id,
ti.task_id,
ti.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=ti.pool,
file_path=ti.dag_model.fileloc,
pickle_id=ti.dag_model.pickle_id,
)
priority = ti.priority_weight
queue = ti.queue
self.log.info("Sending %s to executor with priority %s and queue %s", ti.key, priority, queue)
self.executor.queue_command(
ti,
command,
priority=priority,
queue=queue,
)
def _critical_section_execute_task_instances(self, session: Session) -> int:
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
        and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
HA note: This function is a "critical section" meaning that only a single executor process can execute
this function at the same time. This is achieved by doing ``SELECT ... from pool FOR UPDATE``. For DBs
that support NOWAIT, a "blocked" scheduler will skip this and continue on with other tasks (creating
        new DAG runs, progressing TIs from None to SCHEDULED etc.); on DBs that don't support this (such as
        MariaDB or MySQL 5.x), the other schedulers will wait for the lock before continuing.
:param session:
:type session: sqlalchemy.orm.Session
:return: Number of task instance with state changed.
"""
if self.max_tis_per_query == 0:
max_tis = self.executor.slots_available
else:
max_tis = min(self.max_tis_per_query, self.executor.slots_available)
queued_tis = self._executable_task_instances_to_queued(max_tis, session=session)
self._enqueue_task_instances_with_queued_state(queued_tis)
return len(queued_tis)
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session: Session = None):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if not self.executor.queued_tasks:
return
filter_for_ti_state_change = [
and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
                # The TI.try_number property returns the raw try_number + 1 since the
                # ti is not running, so we subtract 1 to match the DB record.
TI._try_number == try_number - 1, # pylint: disable=protected-access
TI.state == State.QUEUED,
)
for dag_id, task_id, execution_date, try_number in self.executor.queued_tasks.keys()
]
ti_query = session.query(TI).filter(or_(*filter_for_ti_state_change))
tis_to_set_to_scheduled: List[TI] = with_row_locks(ti_query, session=session).all()
if not tis_to_set_to_scheduled:
return
# set TIs to queued state
filter_for_tis = TI.filter_for_tis(tis_to_set_to_scheduled)
session.query(TI).filter(filter_for_tis).update(
{TI.state: State.SCHEDULED, TI.queued_dttm: None}, synchronize_session=False
)
for task_instance in tis_to_set_to_scheduled:
self.executor.queued_tasks.pop(task_instance.key)
task_instance_str = "\n\t".join(repr(x) for x in tis_to_set_to_scheduled)
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
@provide_session
def _process_executor_events(self, session: Session = None) -> int:
"""Respond to executor events."""
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
ti_primary_key_to_try_number_map: Dict[Tuple[str, str, datetime.datetime], int] = {}
event_buffer = self.executor.get_event_buffer()
tis_with_right_state: List[TaskInstanceKey] = []
# Report execution
for ti_key, value in event_buffer.items():
state: str
state, _ = value
# We create map (dag_id, task_id, execution_date) -> in-memory try_number
ti_primary_key_to_try_number_map[ti_key.primary] = ti_key.try_number
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
ti_key.dag_id,
ti_key.task_id,
ti_key.execution_date,
state,
ti_key.try_number,
)
if state in (State.FAILED, State.SUCCESS, State.QUEUED):
tis_with_right_state.append(ti_key)
# Return if no finished tasks
if not tis_with_right_state:
return len(event_buffer)
# Check state of finished tasks
filter_for_tis = TI.filter_for_tis(tis_with_right_state)
tis: List[TI] = session.query(TI).filter(filter_for_tis).options(selectinload('dag_model')).all()
for ti in tis:
try_number = ti_primary_key_to_try_number_map[ti.key.primary]
buffer_key = ti.key.with_try_number(try_number)
state, info = event_buffer.pop(buffer_key)
# TODO: should we fail RUNNING as well, as we do in Backfills?
if state == State.QUEUED:
ti.external_executor_id = info
self.log.info("Setting external_id for %s to %s", ti, info)
continue
if ti.try_number == buffer_key.try_number and ti.state == State.QUEUED:
Stats.incr('scheduler.tasks.killed_externally')
msg = (
"Executor reports task instance %s finished (%s) although the "
"task says its %s. (Info: %s) Was the task killed externally?"
)
self.log.error(msg, ti, state, ti.state, info)
request = TaskCallbackRequest(
full_filepath=ti.dag_model.fileloc,
simple_task_instance=SimpleTaskInstance(ti),
msg=msg % (ti, state, ti.state, info),
)
self.log.info('Setting task instance %s state to %s as reported by executor', ti, state)
ti.set_state(state)
self.processor_agent.send_callback_to_execute(request)
return len(event_buffer)
def _execute(self) -> None:
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = self.do_pickle and self.executor_class not in UNPICKLEABLE_EXECUTORS
self.log.info("Processing each file at most %s times", self.num_times_parse_dags)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
processor_timeout_seconds: int = conf.getint('core', 'dag_file_processor_timeout')
processor_timeout = timedelta(seconds=processor_timeout_seconds)
self.processor_agent = DagFileProcessorAgent(
dag_directory=self.subdir,
max_runs=self.num_times_parse_dags,
processor_factory=type(self)._create_dag_file_processor,
processor_timeout=processor_timeout,
dag_ids=[],
pickle_dags=pickle_dags,
async_mode=async_mode,
)
try:
self.executor.job_id = self.id
self.executor.start()
self.register_signals()
self.processor_agent.start()
execute_start_time = timezone.utcnow()
self._run_scheduler_loop()
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s", execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
settings.Session.remove() # type: ignore
except Exception: # pylint: disable=broad-except
self.log.exception("Exception when executing SchedulerJob._run_scheduler_loop")
raise
finally:
try:
self.executor.end()
except Exception: # pylint: disable=broad-except
self.log.exception("Exception when executing Executor.end")
try:
self.processor_agent.end()
except Exception: # pylint: disable=broad-except
self.log.exception("Exception when executing DagFileProcessorAgent.end")
self.log.info("Exited execute loop")
@staticmethod
def _create_dag_file_processor(
file_path: str,
callback_requests: List[CallbackRequest],
dag_ids: Optional[List[str]],
pickle_dags: bool,
) -> DagFileProcessorProcess:
"""Creates DagFileProcessorProcess instance."""
return DagFileProcessorProcess(
file_path=file_path, pickle_dags=pickle_dags, dag_ids=dag_ids, callback_requests=callback_requests
)
def _run_scheduler_loop(self) -> None:
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/apache-airflow/img/scheduler_loop.jpg
:rtype: None
"""
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
is_unit_test: bool = conf.getboolean('core', 'unit_test_mode')
timers = EventScheduler()
# Check on start up, then every configured interval
self.adopt_or_reset_orphaned_tasks()
timers.call_regular_interval(
conf.getfloat('scheduler', 'orphaned_tasks_check_interval', fallback=300.0),
self.adopt_or_reset_orphaned_tasks,
)
timers.call_regular_interval(
conf.getfloat('scheduler', 'pool_metrics_interval', fallback=5.0),
self._emit_pool_metrics,
)
timers.call_regular_interval(
conf.getfloat('scheduler', 'clean_tis_without_dagrun_interval', fallback=15.0),
self._clean_tis_without_dagrun,
)
for loop_count in itertools.count(start=1):
with Stats.timer() as timer:
if self.using_sqlite:
self.processor_agent.run_single_parsing_loop()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug("Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
with create_session() as session:
num_queued_tis = self._do_scheduling(session)
self.executor.heartbeat()
session.expunge_all()
num_finished_events = self._process_executor_events(session=session)
self.processor_agent.heartbeat()
# Heartbeat the scheduler periodically
self.heartbeat(only_if_necessary=True)
# Run any pending timed events
next_event = timers.run(blocking=False)
self.log.debug("Next timed event is in %f", next_event)
self.log.debug("Ran scheduling loop in %.2f seconds", timer.duration)
if not is_unit_test and not num_queued_tis and not num_finished_events:
# If the scheduler is doing things, don't sleep. This means when there is work to do, the
# scheduler will run "as quick as possible", but when it's stopped, it can sleep, dropping CPU
# usage when "idle"
time.sleep(min(self._processor_poll_interval, next_event))
if loop_count >= self.num_runs > 0:
self.log.info(
"Exiting scheduler loop as requested number of runs (%d - got to %d) has been reached",
self.num_runs,
loop_count,
)
break
if self.processor_agent.done:
self.log.info(
"Exiting scheduler loop as requested DAG parse count (%d) has been reached after %d"
" scheduler loops",
self.num_times_parse_dags,
loop_count,
)
break
@provide_session
def _clean_tis_without_dagrun(self, session):
with prohibit_commit(session) as guard:
try:
self._change_state_for_tis_without_dagrun(
old_states=[State.UP_FOR_RETRY], new_state=State.FAILED, session=session
)
self._change_state_for_tis_without_dagrun(
old_states=[State.QUEUED, State.SCHEDULED, State.UP_FOR_RESCHEDULE, State.SENSING],
new_state=State.NONE,
session=session,
)
guard.commit()
except OperationalError as e:
if is_lock_not_available_error(error=e):
self.log.debug("Lock held by another Scheduler")
session.rollback()
else:
raise
guard.commit()
def _do_scheduling(self, session) -> int:
"""
This function is where the main scheduling decisions take places. It:
- Creates any necessary DAG runs by examining the next_dagrun_create_after column of DagModel
Since creating Dag Runs is a relatively time consuming process, we select only 10 dags by default
(configurable via ``scheduler.max_dagruns_to_create_per_loop`` setting) - putting this higher will
mean one scheduler could spend a chunk of time creating dag runs, and not ever get around to
scheduling tasks.
- Finds the "next n oldest" running DAG Runs to examine for scheduling (n=20 by default, configurable
via ``scheduler.max_dagruns_per_loop_to_schedule`` config setting) and tries to progress state (TIs
to SCHEDULED, or DagRuns to SUCCESS/FAILURE etc)
          By "next oldest", we mean the run that hasn't been examined/scheduled for the longest time.
          The reason we don't select all dag runs at once is that the rows are selected with row locks,
          meaning that only one scheduler can "process" them, even if it is waiting behind other dags.
          Increasing this limit will allow more throughput for smaller DAGs but will likely slow down
          throughput for larger (>500 tasks) DAGs.
- Then, via a Critical Section (locking the rows of the Pool model) we queue tasks, and then send them
to the executor.
See docs of _critical_section_execute_task_instances for more.
:return: Number of TIs enqueued in this iteration
:rtype: int
"""
# Put a check in place to make sure we don't commit unexpectedly
with prohibit_commit(session) as guard:
if settings.USE_JOB_SCHEDULE:
self._create_dagruns_for_dags(guard, session)
dag_runs = self._get_next_dagruns_to_examine(session)
# Bulk fetch the currently active dag runs for the dags we are
# examining, rather than making one query per DagRun
# TODO: This query is probably horribly inefficient (though there is an
# index on (dag_id,state)). It is to deal with the case when a user
# clears more than max_active_runs older tasks -- we don't want the
# scheduler to suddenly go and start running tasks from all of the
# runs. (AIRFLOW-137/GH #1442)
#
# The longer term fix would be to have `clear` do this, and put DagRuns
            # into the queued state, then take DRs out of queued before creating
# any new ones
# Build up a set of execution_dates that are "active" for a given
# dag_id -- only tasks from those runs will be scheduled.
active_runs_by_dag_id = defaultdict(set)
query = (
session.query(
TI.dag_id,
TI.execution_date,
)
.filter(
TI.dag_id.in_(list({dag_run.dag_id for dag_run in dag_runs})),
TI.state.notin_(list(State.finished) + [State.REMOVED]),
)
.group_by(TI.dag_id, TI.execution_date)
)
for dag_id, execution_date in query:
active_runs_by_dag_id[dag_id].add(execution_date)
for dag_run in dag_runs:
# Use try_except to not stop the Scheduler when a Serialized DAG is not found
# This takes care of Dynamic DAGs especially
# SerializedDagNotFound should not happen here in the same loop because the DagRun would
# not be created in self._create_dag_runs if Serialized DAG does not exist
# But this would take care of the scenario when the Scheduler is restarted after DagRun is
# created and the DAG is deleted / renamed
try:
self._schedule_dag_run(dag_run, active_runs_by_dag_id.get(dag_run.dag_id, set()), session)
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_run.dag_id)
continue
guard.commit()
# Without this, the session has an invalid view of the DB
session.expunge_all()
# END: schedule TIs
try:
if self.executor.slots_available <= 0:
# We know we can't do anything here, so don't even try!
self.log.debug("Executor full, skipping critical section")
return 0
timer = Stats.timer('scheduler.critical_section_duration')
timer.start()
            # Find any TIs in state SCHEDULED and try to QUEUE them (send them to the executor)
num_queued_tis = self._critical_section_execute_task_instances(session=session)
            # Make sure we only send this metric if we obtained the lock, otherwise we'll skew the
            # metric way down
timer.stop(send=True)
except OperationalError as e:
timer.stop(send=False)
if is_lock_not_available_error(error=e):
self.log.debug("Critical section lock held by another Scheduler")
Stats.incr('scheduler.critical_section_busy')
session.rollback()
return 0
raise
guard.commit()
return num_queued_tis
@retry_db_transaction
def _get_next_dagruns_to_examine(self, session):
"""Get Next DagRuns to Examine with retries"""
return DagRun.next_dagruns_to_examine(session)
@retry_db_transaction
def _create_dagruns_for_dags(self, guard, session):
"""Find Dag Models needing DagRuns and Create Dag Runs with retries in case of OperationalError"""
query = DagModel.dags_needing_dagruns(session)
self._create_dag_runs(query.all(), session)
# commit the session - Release the write lock on DagModel table.
guard.commit()
# END: create dagruns
def _create_dag_runs(self, dag_models: Iterable[DagModel], session: Session) -> None:
"""
Unconditionally create a DAG run for the given DAG, and update the dag_model's fields to control
if/when the next DAGRun should be created
"""
# Bulk Fetch DagRuns with dag_id and execution_date same
# as DagModel.dag_id and DagModel.next_dagrun
        # This list is used to verify if the DagRun already exists so that we don't attempt to create
        # duplicate dag runs
active_dagruns = (
session.query(DagRun.dag_id, DagRun.execution_date)
.filter(
tuple_(DagRun.dag_id, DagRun.execution_date).in_(
[(dm.dag_id, dm.next_dagrun) for dm in dag_models]
)
)
.all()
)
for dag_model in dag_models:
try:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_model.dag_id)
continue
dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
# Explicitly check if the DagRun already exists. This is an edge case
# where a Dag Run is created but `DagModel.next_dagrun` and `DagModel.next_dagrun_create_after`
# are not updated.
            # We opted to check DagRun existence instead
            # of catching an IntegrityError and rolling back the session, i.e.
            # we need to run self._update_dag_next_dagruns whether the Dag Run already exists or we
            # create a new one. This is so that in the next scheduling loop we try to create new runs
            # instead of falling into a loop of IntegrityError.
if (dag.dag_id, dag_model.next_dagrun) not in active_dagruns:
run = dag.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=dag_model.next_dagrun,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
dag_hash=dag_hash,
creating_job_id=self.id,
)
expected_start_date = dag.following_schedule(run.execution_date)
if expected_start_date:
schedule_delay = run.start_date - expected_start_date
Stats.timing(
f'dagrun.schedule_delay.{dag.dag_id}',
schedule_delay,
)
self._update_dag_next_dagruns(dag_models, session)
# TODO[HA]: Should we do a session.flush() so we don't have to keep lots of state/object in
# memory for larger dags? or expunge_all()
def _update_dag_next_dagruns(self, dag_models: Iterable[DagModel], session: Session) -> None:
"""
Bulk update the next_dagrun and next_dagrun_create_after for all the dags.
We batch the select queries to get info about all the dags at once
"""
        # Check max_active_runs, to see if we are _now_ at the limit for any of
        # these dags (we've just created a DagRun for them, after all)
active_runs_of_dags = dict(
session.query(DagRun.dag_id, func.count('*'))
.filter(
DagRun.dag_id.in_([o.dag_id for o in dag_models]),
DagRun.state == State.RUNNING, # pylint: disable=comparison-with-callable
DagRun.external_trigger.is_(False),
)
.group_by(DagRun.dag_id)
.all()
)
for dag_model in dag_models:
# Get the DAG in a try_except to not stop the Scheduler when a Serialized DAG is not found
# This takes care of Dynamic DAGs especially
try:
dag = self.dagbag.get_dag(dag_model.dag_id, session=session)
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_model.dag_id)
continue
active_runs_of_dag = active_runs_of_dags.get(dag.dag_id, 0)
if dag.max_active_runs and active_runs_of_dag >= dag.max_active_runs:
self.log.info(
"DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
dag.dag_id,
active_runs_of_dag,
dag.max_active_runs,
)
dag_model.next_dagrun_create_after = None
else:
dag_model.next_dagrun, dag_model.next_dagrun_create_after = dag.next_dagrun_info(
dag_model.next_dagrun
)
def _schedule_dag_run(
self,
dag_run: DagRun,
currently_active_runs: Set[datetime.datetime],
session: Session,
) -> int:
"""
Make scheduling decisions about an individual dag run
``currently_active_runs`` is passed in so that a batch query can be
used to ask this for all dag runs in the batch, to avoid an n+1 query.
:param dag_run: The DagRun to schedule
        :param currently_active_runs: Execution dates of currently active runs of this DAG
:return: Number of tasks scheduled
"""
dag = dag_run.dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
if not dag:
self.log.error("Couldn't find dag %s in DagBag/DB!", dag_run.dag_id)
return 0
if (
dag_run.start_date
and dag.dagrun_timeout
and dag_run.start_date < timezone.utcnow() - dag.dagrun_timeout
):
dag_run.set_state(State.FAILED)
unfinished_task_instances = (
session.query(TI)
.filter(TI.dag_id == dag_run.dag_id)
.filter(TI.execution_date == dag_run.execution_date)
.filter(TI.state.in_(State.unfinished))
)
for task_instance in unfinished_task_instances:
task_instance.state = State.SKIPPED
session.merge(task_instance)
session.flush()
self.log.info("Run %s of %s has timed-out", dag_run.run_id, dag_run.dag_id)
# Work out if we should allow creating a new DagRun now?
self._update_dag_next_dagruns([session.query(DagModel).get(dag_run.dag_id)], session)
callback_to_execute = DagCallbackRequest(
full_filepath=dag.fileloc,
dag_id=dag.dag_id,
execution_date=dag_run.execution_date,
is_failure_callback=True,
msg='timed_out',
)
# Send SLA & DAG Success/Failure Callbacks to be executed
self._send_dag_callbacks_to_processor(dag_run, callback_to_execute)
return 0
if dag_run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.error("Execution date is in future: %s", dag_run.execution_date)
return 0
if dag.max_active_runs:
if (
len(currently_active_runs) >= dag.max_active_runs
and dag_run.execution_date not in currently_active_runs
):
self.log.info(
"DAG %s already has %d active runs, not queuing any tasks for run %s",
dag.dag_id,
len(currently_active_runs),
dag_run.execution_date,
)
return 0
self._verify_integrity_if_dag_changed(dag_run=dag_run, session=session)
# TODO[HA]: Rename update_state -> schedule_dag_run, ?? something else?
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
self._send_dag_callbacks_to_processor(dag_run, callback_to_run)
# This will do one query per dag run. We "could" build up a complex
# query to update all the TIs across all the execution dates and dag
# IDs in a single query, but it turns out that can be _very very slow_
# see #11147/commit ee90807ac for more details
return dag_run.schedule_tis(schedulable_tis, session)
@provide_session
def _verify_integrity_if_dag_changed(self, dag_run: DagRun, session=None):
"""Only run DagRun.verify integrity if Serialized DAG has changed since it is slow"""
latest_version = SerializedDagModel.get_latest_version_hash(dag_run.dag_id, session=session)
if dag_run.dag_hash == latest_version:
self.log.debug("DAG %s not changed structure, skipping dagrun.verify_integrity", dag_run.dag_id)
return
dag_run.dag_hash = latest_version
# Refresh the DAG
dag_run.dag = self.dagbag.get_dag(dag_id=dag_run.dag_id, session=session)
# Verify integrity also takes care of session.flush
dag_run.verify_integrity(session=session)
def _send_dag_callbacks_to_processor(
self, dag_run: DagRun, callback: Optional[DagCallbackRequest] = None
):
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
dag = dag_run.get_dag()
self._send_sla_callbacks_to_processor(dag)
if callback:
self.processor_agent.send_callback_to_execute(callback)
def _send_sla_callbacks_to_processor(self, dag: DAG):
"""Sends SLA Callbacks to DagFileProcessor if tasks have SLAs set and check_slas=True"""
if not settings.CHECK_SLAS:
return
if not any(isinstance(ti.sla, timedelta) for ti in dag.tasks):
self.log.debug("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
if not self.processor_agent:
raise ValueError("Processor agent is not started.")
self.processor_agent.send_sla_callback_request_to_execute(
full_filepath=dag.fileloc, dag_id=dag.dag_id
)
@provide_session
def _emit_pool_metrics(self, session: Session = None) -> None:
pools = models.Pool.slots_stats(session=session)
for pool_name, slot_stats in pools.items():
Stats.gauge(f'pool.open_slots.{pool_name}', slot_stats["open"])
Stats.gauge(f'pool.queued_slots.{pool_name}', slot_stats[State.QUEUED]) # type: ignore
Stats.gauge(f'pool.running_slots.{pool_name}', slot_stats[State.RUNNING]) # type: ignore
@provide_session
def heartbeat_callback(self, session: Session = None) -> None:
Stats.incr('scheduler_heartbeat', 1, 1)
@provide_session
def adopt_or_reset_orphaned_tasks(self, session: Session = None):
"""
Reset any TaskInstance still in QUEUED or SCHEDULED states that were
enqueued by a SchedulerJob that is no longer running.
:return: the number of TIs reset
:rtype: int
"""
self.log.info("Resetting orphaned tasks for active dag runs")
timeout = conf.getint('scheduler', 'scheduler_health_check_threshold')
for attempt in run_with_db_retries(logger=self.log):
with attempt:
self.log.debug(
"Running SchedulerJob.adopt_or_reset_orphaned_tasks with retries. Try %d of %d",
attempt.retry_state.attempt_number,
MAX_DB_RETRIES,
)
self.log.debug("Calling SchedulerJob.adopt_or_reset_orphaned_tasks method")
try:
num_failed = (
session.query(SchedulerJob)
.filter(
SchedulerJob.state == State.RUNNING,
SchedulerJob.latest_heartbeat < (timezone.utcnow() - timedelta(seconds=timeout)),
)
.update({"state": State.FAILED})
)
if num_failed:
self.log.info("Marked %d SchedulerJob instances as failed", num_failed)
Stats.incr(self.__class__.__name__.lower() + '_end', num_failed)
resettable_states = [State.SCHEDULED, State.QUEUED, State.RUNNING]
query = (
session.query(TI)
.filter(TI.state.in_(resettable_states))
# outerjoin is because we didn't use to have queued_by_job
# set, so we need to pick up anything pre upgrade. This (and the
# "or queued_by_job_id IS NONE") can go as soon as scheduler HA is
# released.
.outerjoin(TI.queued_by_job)
.filter(or_(TI.queued_by_job_id.is_(None), SchedulerJob.state != State.RUNNING))
.join(TI.dag_run)
.filter(
DagRun.run_type != DagRunType.BACKFILL_JOB,
# pylint: disable=comparison-with-callable
DagRun.state == State.RUNNING,
)
.options(load_only(TI.dag_id, TI.task_id, TI.execution_date))
)
# Lock these rows, so that another scheduler can't try and adopt these too
tis_to_reset_or_adopt = with_row_locks(
query, of=TI, session=session, **skip_locked(session=session)
).all()
to_reset = self.executor.try_adopt_task_instances(tis_to_reset_or_adopt)
reset_tis_message = []
for ti in to_reset:
reset_tis_message.append(repr(ti))
ti.state = State.NONE
ti.queued_by_job_id = None
for ti in set(tis_to_reset_or_adopt) - set(to_reset):
ti.queued_by_job_id = self.id
Stats.incr('scheduler.orphaned_tasks.cleared', len(to_reset))
Stats.incr('scheduler.orphaned_tasks.adopted', len(tis_to_reset_or_adopt) - len(to_reset))
if to_reset:
task_instance_str = '\n\t'.join(reset_tis_message)
self.log.info(
"Reset the following %s orphaned TaskInstances:\n\t%s",
len(to_reset),
task_instance_str,
)
                    # Issue SQL/finish "Unit of Work", but let @provide_session
                    # commit (or, if passed a session, let the caller decide when to commit)
session.flush()
except OperationalError:
session.rollback()
raise
return len(to_reset)
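# ---------------------------------------------------------------------------
# Illustrative sketch (not part of Airflow itself): the "critical section" and the
# orphaned-task adoption above both rely on database row locks taken via
# SELECT ... FOR UPDATE, optionally with SKIP LOCKED or NOWAIT. The hypothetical
# helper below shows the bare SQLAlchemy pattern for a generic ORM model; it is a
# sketch of the locking idea, not the scheduler's actual API.
def _row_locking_sketch(session, model, limit=10):
    """Fetch up to ``limit`` rows of ``model`` while holding row locks.
    ``skip_locked=True`` maps to ``FOR UPDATE SKIP LOCKED`` on backends that support
    it (e.g. PostgreSQL, MySQL 8+), so concurrent schedulers skip each other's rows
    instead of blocking; ``nowait=True`` would fail fast instead of skipping.
    """
    query = session.query(model).with_for_update(skip_locked=True).limit(limit)
    return query.all()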
|
py | b400af234137abe66d150c2ddc907fc60e736c42 | from pykiwoom.kiwoom import *
kiwoom = Kiwoom()
kiwoom.CommConnect(block=True)
kospi = kiwoom.GetCodeListByMarket('0')
kosdaq = kiwoom.GetCodeListByMarket('10')
etf = kiwoom.GetCodeListByMarket('8')
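# The arguments to GetCodeListByMarket are Kiwoom market codes; as the variable names
# above suggest, '0' selects the KOSPI market, '10' KOSDAQ and '8' the ETF market.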
print(len(kospi), kospi)
print(len(kosdaq), kosdaq)
print(len(etf), etf)
|
py | b400b036cc046b0c989a24842f76c34f2ddacacc | import logging
from .base import BaseSnitcher
from cloud_snitch.models import AptPackageEntity
from cloud_snitch.models import EnvironmentEntity
from cloud_snitch.models import HostEntity
logger = logging.getLogger(__name__)
class AptSnitcher(BaseSnitcher):
    """Models the host -> apt package path in the graph."""
file_pattern = '^dpkg_list_(?P<hostname>.*).json$'
def _update_apt_package(self, session, pkgdict):
"""Updates apt package in graph.
Will only update the apt package if status = installed
:param session: neo4j driver session
:type session: neo4j.v1.session.BoltSession
:param pkgdict: apt package dict.
should contain name and version and status.
:type pkg: dict
:returns: AptPackage object or None for no action
:rtype: AptPackageEntity|None
"""
if pkgdict.get('status') != 'installed':
return None
aptpkg = AptPackageEntity(
name=pkgdict.get('name'),
version=pkgdict.get('version')
)
aptpkg.update(session, self.time_in_ms)
return aptpkg
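    # Illustrative (hypothetical) pkgdict for the method above; only the three keys it
    # reads -- "name", "version" and "status" -- are assumed:
    #   {"name": "openssl", "version": "1.1.1f-1ubuntu2.1", "status": "installed"}
    # Entries whose status is anything other than "installed" are skipped (None is returned).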
def _snitch(self, session):
"""Update the apt part of the graph..
:param session: neo4j driver session
:type session: neo4j.v1.session.BoltSession
"""
env = EnvironmentEntity(uuid=self.run.environment_uuid)
for hostname, filename in self._find_host_tuples(self.file_pattern):
aptpkgs = []
# Find host in graph, continue if host not found.
host = HostEntity(hostname=hostname, environment=env.identity)
host = HostEntity.find(session, host.identity)
if host is None:
logger.warning(
'Unable to locate host entity {}'.format(hostname)
)
continue
# Read data from file
aptdata = self.run.get_object(filename)
aptlist = aptdata.get('data', [])
# Iterate over package maps
for aptdict in aptlist:
aptpkg = self._update_apt_package(session, aptdict)
if aptpkg is not None:
aptpkgs.append(aptpkg)
host.aptpackages.update(session, aptpkgs, self.time_in_ms)
|
py | b400b0a7344c09b4a73a6486989e13a16c0693de | import numpy as np
import pandas as pd
import os
import datetime as dt
from flask import Flask,render_template,url_for,request,g, flash, redirect
from werkzeug.utils import secure_filename
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import current_user, LoginManager, login_user, logout_user, UserMixin
from flask_admin import Admin, AdminIndexView
from flask_admin.contrib.sqla import ModelView
from flask_admin.model import BaseModelView
from sklearn.metrics import mean_absolute_error
from forms import LoginForm, RegisterForm
from config import Config
from scorer import Scorer
# PARAMETER
## Leaderboard parameter
limit_lb = 100 # Number of users shown in the leaderboard table
greater_better = False # True if the greatest score is the best; False if the lowest score is the best
metric = mean_absolute_error #change the metric using sklearn function
scorer = Scorer(public_path = './master_key/public_key.csv',
private_path = './master_key/private_key.csv',
metric = metric) #change the metric using sklearn function
## Upload parameter
UPLOAD_FOLDER = 'submissions'
ALLOWED_EXTENSIONS = {'csv'} # only accept csv files
## FLASK configuration
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 2 * 1024 * 1024 # 2 Megabytes
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['SECRET_KEY'] = 'my'
app.config.from_object(Config)
## Database configuration
db = SQLAlchemy(app)
db.app = app
migrate = Migrate(app, db)
login = LoginManager(app)
# Database Model
@login.user_loader
def load_user(id):
return User.query.get(int(id))
class User(UserMixin, db.Model):
__tablename__ = "user"
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
password = db.Column(db.String(128)) ## Too lazy to make it hash
def __repr__(self):
return self.username
def check_password(self, password): ## Too lazy to make it hash
return self.password == password
class Submission(db.Model):
__tablename__ = "submission"
id = db.Column(db.Integer, primary_key=True)
timestamp = db.Column(db.DateTime, index=True, default=dt.datetime.now)
submission_type = db.Column(db.String(64))
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User')
score = db.Column(db.Float)
def __repr__(self):
return f'<User ID {self.user_id} score {self.score}>'
db.create_all()
# Admin
class MyAdminIndexView(AdminIndexView):
def is_accessible(self):
if current_user.is_authenticated:
return current_user.username == '[email protected]'
        else:
            return False
def inaccessible_callback(self, name, **kwargs):
return redirect(url_for('home_page'))
class UserView(ModelView):
column_list = (User.id, 'username','password')
def is_accessible(self):
if current_user.is_authenticated:
return current_user.username == '[email protected]'
        else:
            return False
def inaccessible_callback(self, name, **kwargs):
return redirect(url_for('home_page'))
class SubmissionView(ModelView):
column_list = (Submission.id, 'submission_type', 'user_id', 'user', 'timestamp', 'score')
def is_accessible(self):
if current_user.is_authenticated:
return current_user.username == '[email protected]'
        else:
            return False
def inaccessible_callback(self, name, **kwargs):
return redirect(url_for('home_page'))
admin = Admin(app, index_view=MyAdminIndexView())
admin.add_view(UserView(User, db.session))
admin.add_view(SubmissionView(Submission, db.session))
# Leader Board
def get_leaderboard(greater_better, limit, submission_type = 'public'):
if greater_better:
score_agg = "MAX"
score_sorting = "DESC"
else:
score_agg = "MIN"
score_sorting = "ASC"
query = f"""
SELECT
user.username,
{score_agg}(submission.score) as score,
count(submission.id) as total_submission,
max(timestamp) as last_sub
FROM submission
LEFT JOIN user
ON user.id = submission.user_id
WHERE submission_type = '{submission_type}'
GROUP BY 1
ORDER BY 2 {score_sorting}, 4
LIMIT {limit}
"""
df = pd.read_sql(query,
db.session.bind)
return df
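# Illustrative usage sketch (not part of the original app): the aggregation query above
# returns one row per user with the columns username, score, total_submission and
# last_sub, already ordered so the best score comes first. The limit below is an
# arbitrary example value.
def _leaderboard_preview(n=5):
    df = get_leaderboard(greater_better=greater_better, limit=n, submission_type='public')
    # Column names follow the SELECT aliases in the query string built above.
    return df[['username', 'score', 'total_submission', 'last_sub']]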
# Route
@app.route('/register', methods=['GET', 'POST'])
def register_page():
registration_status = request.args.get("registration_status", "")
reg_form = RegisterForm()
if request.method == 'POST':
### REGISTRATION
if reg_form.validate_on_submit():
user = User.query.filter_by(username=reg_form.username.data).first()
print(user)
            if user is None: # only proceed when the user is not already registered
if reg_form.username.data.endswith('@bshg.com'): # only proceed if it is a bshg mail
print("HALOOO")
u = User(username=reg_form.username.data, password = reg_form.password.data)
db.session.add(u)
db.session.commit()
# flash('Congratulations, you are now a registered user!')
registration_status = f"Welcome {reg_form.username.data}, Please Login at HOME page"
return redirect(url_for('register_page', registration_status = registration_status))
else:
registration_status = "PLEASE USE A @BSHG.COM MAIL TO REGISTER"
return redirect(url_for('register_page', registration_status = registration_status))
else:
registration_status = "USER NAME ALREADY USED"
return redirect(url_for('register_page', registration_status = registration_status))
else:
registration_status = "ERROR VALIDATION"
            print("strange: form validation failed")
return redirect(url_for('register_page', registration_status = registration_status))
if request.method == 'GET':
return render_template('register.html', reg_form = reg_form, registration_status = registration_status)
@app.route('/logout')
def logout():
logout_user()
print("log out success")
return redirect(url_for('home_page'))
def allowed_file(filename):
# checks if extension in filename is allowed
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def home_page():
login_form = LoginForm()
login_status = request.args.get("login_status", "")
submission_status = request.args.get("submission_status", "")
leaderboard = get_leaderboard(greater_better = greater_better, limit = limit_lb, submission_type='public')
leaderboard_private = get_leaderboard(greater_better = greater_better, limit = limit_lb, submission_type='private')
if request.method == 'POST': # If upload file / Login
### LOGIN
if login_form.validate_on_submit():
print(f'Login requested for user {login_form.username.data}, remember_me={login_form.remember_me.data}')
user = User.query.filter_by(username=login_form.username.data).first()
if user is None: # USER is not registered
login_status = "User is not registered / Password does not match"
return redirect(url_for('home_page', login_status = login_status))
elif user.check_password(login_form.password.data): # Password True
print('True pass')
login_status = ""
login_user(user, remember=login_form.remember_me.data)
return redirect(url_for('home_page', login_status = login_status))
else: #WRONG PASSWORD
print('WRONG PASS')
login_status = "User is not registered / Password does not match"
return redirect(url_for('home_page', login_status = login_status))
### UPLOAD FILE
if 'uploadfile' in request.files.keys() and current_user.is_authenticated:
submission_file = request.files['uploadfile']
#throw error if extension is not allowed
if not allowed_file(submission_file.filename):
raise Exception('Invalid file extension')
if submission_file and allowed_file(submission_file.filename):
filename = secure_filename(submission_file.filename)
target_dir = os.path.join(app.config['UPLOAD_FOLDER'], str(current_user.id))
if not os.path.exists(target_dir):
os.makedirs(target_dir)
fullPath = os.path.join(app.config['UPLOAD_FOLDER'], str(current_user.id) , filename)
submission_file.save(fullPath)
submission_type = request.form.get('submission_type', "public")
result = scorer.calculate_score(submission_path = fullPath, submission_type = submission_type)
submission_status = result[0]
if submission_status == "SUBMISSION SUCCESS":
score = result[1]
score = round(score, 3)
s = Submission(user_id=current_user.id , score=score, submission_type = submission_type)
db.session.add(s)
db.session.commit()
print(f"submitted {score}")
submission_status = f"SUBMISSION SUCCESS | Score: {round(score,3)}"
return redirect(url_for('home_page', submission_status = submission_status))
return render_template('index.html',
leaderboard = leaderboard,
leaderboard_private = leaderboard_private,
login_form=login_form,
login_status=login_status,
submission_status=submission_status
)
if __name__ == '__main__':
app.debug = True
app.run(host = '0.0.0.0',port=5005) |
py | b400b2bb5d5d15d351a4b88062359a49bb230093 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-05 18:55
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("accounts", "0015_auto_20170805_1827")]
operations = [
migrations.AlterField(
model_name="demographicdata",
name="child_birthdays",
field=django.contrib.postgres.fields.ArrayField(
base_field=models.DateField(),
blank=True,
size=None,
verbose_name="children's birthdays",
),
)
]
|
py | b400b2cabfbf4edc410a096fe8aa39f72f8f6b5f | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import numpy as np # type: ignore
from onnx import TensorProto
from onnx import mapping
from six import text_type, binary_type
from typing import Sequence, Any, Optional, Text, List
if sys.byteorder != 'little':
raise RuntimeError(
'Numpy helper for tensor/ndarray is not available on big endian '
'systems yet.')
def combine_pairs_to_complex(fa): # type: (Sequence[int]) -> Sequence[np.complex64]
return [complex(fa[i * 2], fa[i * 2 + 1]) for i in range(len(fa) // 2)]
def to_array(tensor): # type: (TensorProto) -> np.ndarray[Any]
"""Converts a tensor def object to a numpy array.
Inputs:
tensor: a TensorProto object.
Returns:
arr: the converted array.
"""
if tensor.HasField("segment"):
raise ValueError(
"Currently not supporting loading segments.")
if tensor.data_type == TensorProto.UNDEFINED:
raise ValueError("The data type is not defined.")
tensor_dtype = tensor.data_type
np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[tensor_dtype]
storage_type = mapping.TENSOR_TYPE_TO_STORAGE_TENSOR_TYPE[tensor_dtype]
storage_np_dtype = mapping.TENSOR_TYPE_TO_NP_TYPE[storage_type]
storage_field = mapping.STORAGE_TENSOR_TYPE_TO_FIELD[storage_type]
dims = tensor.dims
if tensor.data_type == TensorProto.STRING:
utf8_strings = getattr(tensor, storage_field)
ss = list(s.decode('utf-8') for s in utf8_strings)
return np.asarray(ss).astype(np_dtype).reshape(dims)
if tensor.HasField("raw_data"):
# Raw_bytes support: using frombuffer.
return np.frombuffer(
tensor.raw_data,
dtype=np_dtype).reshape(dims)
else:
data = getattr(tensor, storage_field), # type: Sequence[np.complex64]
if (tensor_dtype == TensorProto.COMPLEX64
or tensor_dtype == TensorProto.COMPLEX128):
data = combine_pairs_to_complex(data)
return (
np.asarray(
data,
dtype=storage_np_dtype)
.astype(np_dtype)
.reshape(dims)
)
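# Illustrative round-trip sketch (not part of the original module); ``from_array`` is
# defined just below in this file, and the array shape/values are arbitrary examples.
def _numpy_roundtrip_sketch():  # pragma: no cover
    arr = np.arange(6, dtype=np.float32).reshape(2, 3)
    tensor = from_array(arr, name='example')  # numpy -> TensorProto
    restored = to_array(tensor)  # TensorProto -> numpy
    assert np.array_equal(arr, restored)
    return restored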
def from_array(arr, name=None): # type: (np.ndarray[Any], Optional[Text]) -> TensorProto
"""Converts a numpy array to a tensor def.
Inputs:
arr: a numpy array.
name: (optional) the name of the tensor.
Returns:
tensor_def: the converted tensor def.
"""
tensor = TensorProto()
tensor.dims.extend(arr.shape)
if name:
tensor.name = name
if arr.dtype == np.object:
# Special care for strings.
tensor.data_type = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype]
for e in arr:
if isinstance(e, text_type):
tensor.string_data.append(e.encode('utf-8'))
elif isinstance(e, np.ndarray):
tensor.string_data.append(e.tobytes())
else:
raise NotImplementedError("Unrecognized object in the object array, expect a string, or array of bytes")
return tensor
# For numerical types, directly use numpy raw bytes.
try:
dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[arr.dtype]
except KeyError:
raise RuntimeError(
"Numpy data type not understood yet: {}".format(str(arr.dtype)))
tensor.data_type = dtype
    tensor.raw_data = arr.tobytes() # note: ndarray.tobytes() is only available in numpy >= 1.9.
return tensor
|
py | b400b2efbc3d8d1f61aa0e6f7581086d07808b1a | # -*- coding: utf-8 -*-
import logging
from apscheduler.schedulers.background import BackgroundScheduler
from examples.report_utils import report_top_stats
from zvt import init_log
logger = logging.getLogger(__name__)
sched = BackgroundScheduler()
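# The cron trigger below runs report_stats every weekday (mon-fri) at 19:30
# (scheduler-local time by default), once the scheduler is started at the bottom of this script.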
@sched.scheduled_job("cron", hour=19, minute=30, day_of_week="mon-fri")
def report_stats():
report_top_stats(
entity_type="stock",
entity_provider="em",
data_provider="em",
periods=[7, 30, 180, 365],
ignore_new_stock=True,
adjust_type=None,
top_count=30,
turnover_threshold=100000000,
turnover_rate_threshold=0.02,
em_group_over_write=True,
)
report_top_stats(
entity_type="stockhk",
entity_provider="em",
data_provider="em",
top_count=30,
periods=[7, 30, 180, 365],
ignore_new_stock=True,
adjust_type=None,
turnover_threshold=100000000,
turnover_rate_threshold=0.005,
em_group_over_write=False,
)
if __name__ == "__main__":
init_log("report_stats.log")
report_stats()
sched.start()
sched._thread.join()
|
py | b400b33de5ca135d2901817996685f7c646de537 | import os
# The default DATAPATH for installing data and tools
DATAPATH=os.path.expanduser("~/.tees")
DEFAULT_LOCAL_SETTINGS=os.path.expanduser("~/.tees_local_settings.py")
# Default locations for evaluators and their devel set gold data (relative to DATAPATH)
EVALUATOR = {}
EVALUATOR["GE11"] = "BioNLP-ST_2011_genia_tools_rev1"
EVALUATOR["EPI11"] = "BioNLP-ST_2011_EPI-eval-tools"
EVALUATOR["ID11"] = "BioNLP-ST_2011_ID-eval-tools"
EVALUATOR["BB11"] = "BioNLP-ST_2011_Bacteria_Biotopes_evaluation_software"
EVALUATOR["BI11"] = "BioNLP-ST_2011_bacteria_interactions_evaluation_software"
EVALUATOR["REN11"] = "BioNLP-ST_2011_bacteria_rename_evaluation_sofware"
EVALUATOR["CO11"] = "CREvalPackage1.4"
EVALUATOR["GE09"] = "bionlp09_shared_task_evaluation_tools_v1"
EVALUATOR["GRN13"] = "BioNLP-ST-2013_Gene_Regulation_Network_eval"
# Gold data for evaluators
EVALUATOR["GE11-gold"] = "BioNLP-ST_2011_genia_devel_data_rev1.tar.gz"
EVALUATOR["EPI11-gold"] = "BioNLP-ST_2011_Epi_and_PTM_development_data_rev1.tar.gz"
EVALUATOR["ID11-gold"] = "BioNLP-ST_2011_Infectious_Diseases_development_data_rev1.tar.gz"
EVALUATOR["BB11-gold"] = "BioNLP-ST_2011_Bacteria_Biotopes_dev_data_rev1.tar.gz"
EVALUATOR["BI11-gold"] = "BioNLP-ST_2011_bacteria_interactions_dev_data_rev1-remixed.tar.gz"
EVALUATOR["REN11-gold"] = "BioNLP-ST_2011_bacteria_rename_dev_data.tar.gz"
EVALUATOR["CO11-gold"] = "BioNLP-ST_2011_coreference_development_data.tar.gz"
EVALUATOR["GE09-gold"] = "bionlp09_shared_task_development_data_rev1_for_evaluator.tar.gz"
EVALUATOR["GRN13-gold"] = "BioNLP-ST-2013_Gene_Regulation_Network_dev.tar.gz"
# A dictionary for installation URLs. If there is a problem with a
# download location, it is possible to override a URL in the "TEES_SETTINGS"
# local settings file.
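# For example, a local settings file (see DEFAULT_LOCAL_SETTINGS above) could contain a
# hypothetical mirror override such as:
#   URL["MODELS"] = "http://my.local.mirror/TEES-models-130413.tar.bz2"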
URL = {}
# Models
URL["MODELS"] = "http://sourceforge.net/projects/tees/files/data/TEES-models-130413.tar.bz2"
# External Tools ##############################################################
URL["SVM_MULTICLASS_SOURCE"] = "http://download.joachims.org/svm_multiclass/current/svm_multiclass.tar.gz"
URL["SVM_MULTICLASS_LINUX"] = "http://download.joachims.org/svm_multiclass/current/svm_multiclass_linux32.tar.gz"
URL["GENIA_SENTENCE_SPLITTER"] = "http://www.nactem.ac.uk/y-matsu/geniass/geniass-1.00.tar.gz"
URL["BANNER_SOURCE"] = "http://banner.svn.sourceforge.net/viewvc/banner/trunk/?view=tar"
URL["BANNER_COMPILED"] = "http://sourceforge.net/projects/tees/files/data/BANNER-svn-snapshot-120630.tar.gz"
URL["BLLIP_SOURCE"] = "https://github.com/dmcc/bllip-parser/zipball/master"
URL["STANFORD_PARSER"] = "http://nlp.stanford.edu/software/stanford-parser-2012-03-09.tgz"
RUBY_PATH = "ruby" # for GENIA Sentence Splitter
JAVA = "java" # for programs using java
# Corpora #####################################################################
# Preconverted
URL["BIONLP_13_CORPORA"] = "http://sourceforge.net/projects/tees/files/data/BioNLP13-corpora-XML-140216.zip"
URL["BIONLP_11_CORPORA"] = "http://sourceforge.net/projects/tees/files/data/BioNLP11-corpora-XML-140216.zip"
URL["BIONLP_09_CORPUS"] = "http://sourceforge.net/projects/tees/files/data/BioNLP09-corpus-XML-140216.zip"
URL["DDI_11_CORPUS"] = "http://sourceforge.net/projects/tees/files/data/DDI11-corpus-XML-130224.zip"
# BioNLP'13
urlBase = "http://2013.bionlp-st.org/tasks/"
URL["GE13_DEVEL"] = urlBase + "BioNLP-ST-2013_GE_devel_data_rev3.tar.gz"
URL["GE13_TRAIN"] = urlBase + "BioNLP-ST-2013_GE_train_data_rev3.tar.gz"
URL["GE13_TEST"] = urlBase + "BioNLP-ST-2013_GE_test_data_rev1.tar.gz"
URL["CG13_DEVEL"] = urlBase + "BioNLP-ST_2013_CG_development_data.tar.gz"
URL["CG13_TRAIN"] = urlBase + "BioNLP-ST_2013_CG_training_data.tar.gz"
URL["CG13_TEST"] = urlBase + "BioNLP-ST_2013_CG_test_data.tar.gz"
URL["PC13_DEVEL"] = urlBase + "BioNLP-ST_2013_PC_development_data.tar.gz"
URL["PC13_TRAIN"] = urlBase + "BioNLP-ST_2013_PC_training_data.tar.gz"
URL["PC13_TEST"] = urlBase + "BioNLP-ST_2013_PC_test_data.tar.gz"
URL["GRO13_DEVEL"] = urlBase + "BioNLP-ST_2013_GRO_development_data.tar.gz"
URL["GRO13_TRAIN"] = urlBase + "BioNLP-ST_2013_GRO_training_data.tar.gz"
URL["GRO13_TEST"] = urlBase + "BioNLP-ST_2013_GRO_test-1.0-a1.tgz"
URL["GRN13_DEVEL"] = urlBase + "BioNLP-ST-2013_Gene_Regulation_Network_dev.tar.gz"
URL["GRN13_TRAIN"] = urlBase + "BioNLP-ST-2013_Gene_Regulation_Network_train.tar.gz"
URL["GRN13_TEST"] = urlBase + "BioNLP-ST-2013_Gene_Regulation_Network_test.tar.gz"
URL["BB13_DEVEL"] = urlBase + "BioNLP-ST-2013_Bacteria_Biotopes_dev.tar.gz"
URL["BB13_TRAIN"] = urlBase + "BioNLP-ST-2013_Bacteria_Biotopes_train.tar.gz"
URL["BB13_TEST"] = urlBase + "BioNLP-ST-2013_Bacteria_Biotopes_test.tar.gz"
#URL["BioNLP13_TOKENS"] = "http://2013.bionlp-st.org/supporting-resources/bionlp-st-2013_all_tasks_tokenised.tar.gz"
#URL["BioNLP13_STANFORD_PARSES"] = "http://2013.bionlp-st.org/supporting-resources/bionlp-st-2013_all_tasks_stanford_parser.tar.gz"
# Tokenizations
urlBase = "http://weaver.nlplab.org/~ninjin/bionlp_st_2013_supporting/"
URL["BB13_DEVEL_TOK"] = urlBase + "BioNLP-ST-2013_Bacteria_Biotopes_dev_tok_v1.tar.gz"
URL["BB13_TRAIN_TOK"] = urlBase + "BioNLP-ST-2013_Bacteria_Biotopes_train_tok_v1.tar.gz"
URL["BB13_TEST_TOK"] = urlBase + "BioNLP-ST-2013_Bacteria_Biotopes_test_tok_v1.tar.gz"
URL["CG13_DEVEL_TOK"] = urlBase + "BioNLP-ST_2013_CG_development_data_tok_v1.tar.gz"
URL["CG13_TRAIN_TOK"] = urlBase + "BioNLP-ST_2013_CG_training_data_tok_v1.tar.gz"
URL["CG13_TEST_TOK"] = urlBase + "BioNLP-ST_2013_CG_test_data_tok_v1.tar.gz"
URL["GE13_DEVEL_TOK"] = urlBase + "BioNLP-ST-2013_GE_devel_data_rev2_tok_v1.tar.gz"
URL["GE13_TRAIN_TOK"] = urlBase + "BioNLP-ST-2013_GE_train_data_rev2_tok_v1.tar.gz"
URL["GE13_TEST_TOK"] = urlBase + "BioNLP-ST_2013_GE_test_data_tok_v1.tar.gz"
URL["GRN13_DEVEL_TOK"] = urlBase + "BioNLP-ST-2013_Gene_Regulation_Network_dev_tok_v1.tar.gz"
URL["GRN13_TRAIN_TOK"] = urlBase + "BioNLP-ST-2013_Gene_Regulation_Network_train_tok_v1.tar.gz"
URL["GRN13_TEST_TOK"] = urlBase + "BioNLP-ST-2013_Gene_Regulation_Network_test_tok_v1.tar.gz"
URL["GRO13_DEVEL_TOK"] = urlBase + "BioNLP-ST_2013_GRO_development_data_tok_v1.tar.gz"
URL["GRO13_TRAIN_TOK"] = urlBase + "BioNLP-ST_2013_GRO_training_data_tok_v1.tar.gz"
URL["GRO13_TEST_TOK"] = urlBase + "BioNLP-ST_2013_GRO_test-1.0-a1_tok_v1.tar.gz"
URL["PC13_DEVEL_TOK"] = urlBase + "BioNLP-ST_2013_PC_development_data_tok_v1.tar.gz"
URL["PC13_TRAIN_TOK"] = urlBase + "BioNLP-ST_2013_PC_training_data_tok_v1.tar.gz"
URL["PC13_TEST_TOK"] = urlBase + "BioNLP-ST_2013_PC_test_data_tok_v1.tar.gz"
# Parses
URL["BB13_DEVEL_McCCJ"] = urlBase + "BioNLP-ST-2013_Bacteria_Biotopes_dev_mcccj_v1.tar.gz"
URL["BB13_TRAIN_McCCJ"] = urlBase + "BioNLP-ST-2013_Bacteria_Biotopes_train_mcccj_v1.tar.gz"
URL["BB13_TEST_McCCJ"] = urlBase + "BioNLP-ST-2013_Bacteria_Biotopes_test_mcccj_v1.tar.gz"
URL["CG13_DEVEL_McCCJ"] = urlBase + "BioNLP-ST_2013_CG_development_data_mcccj_v1.tar.gz"
URL["CG13_TRAIN_McCCJ"] = urlBase + "BioNLP-ST_2013_CG_training_data_mcccj_v1.tar.gz"
URL["CG13_TEST_McCCJ"] = urlBase + "BioNLP-ST_2013_CG_test_data_mcccj_v1.tar.gz"
URL["GE13_DEVEL_McCCJ"] = urlBase + "BioNLP-ST-2013_GE_devel_data_rev2_mcccj_v1.tar.gz"
URL["GE13_TRAIN_McCCJ"] = urlBase + "BioNLP-ST-2013_GE_train_data_rev2_mcccj_v1.tar.gz"
URL["GE13_TEST_McCCJ"] = urlBase + "BioNLP-ST_2013_GE_test_data_mcccj_v1.tar.gz"
URL["GRN13_DEVEL_McCCJ"] = urlBase + "BioNLP-ST-2013_Gene_Regulation_Network_dev_mcccj_v1.tar.gz"
URL["GRN13_TRAIN_McCCJ"] = urlBase + "BioNLP-ST-2013_Gene_Regulation_Network_train_mcccj_v1.tar.gz"
URL["GRN13_TEST_McCCJ"] = urlBase + "BioNLP-ST-2013_Gene_Regulation_Network_test_mcccj_v1.tar.gz"
URL["GRO13_DEVEL_McCCJ"] = urlBase + "BioNLP-ST_2013_GRO_development_data_mcccj_v1.tar.gz"
URL["GRO13_TRAIN_McCCJ"] = urlBase + "BioNLP-ST_2013_GRO_training_data_mcccj_v1.tar.gz"
URL["GRO13_TEST_McCCJ"] = urlBase + "BioNLP-ST_2013_GRO_test-1.0-a1_mcccj_v1.tar.gz"
URL["PC13_DEVEL_McCCJ"] = urlBase + "BioNLP-ST_2013_PC_development_data_mcccj_v1.tar.gz"
URL["PC13_TRAIN_McCCJ"] = urlBase + "BioNLP-ST_2013_PC_training_data_mcccj_v1.tar.gz"
URL["PC13_TEST_McCCJ"] = urlBase + "BioNLP-ST_2013_PC_test_data_mcccj_v1.tar.gz"
# BioNLP'11
urlBase = "http://weaver.nlplab.org/~bionlp-st/BioNLP-ST/downloads/files/"
# Main tasks
URL["GE11_DEVEL"] = urlBase + "BioNLP-ST_2011_genia_devel_data_rev1.tar.gz"
URL["GE11_TRAIN"] = urlBase + "BioNLP-ST_2011_genia_train_data_rev1.tar.gz"
URL["GE11_TEST"] = urlBase + "BioNLP-ST_2011_genia_test_data.tar.gz"
URL["EPI11_DEVEL"] = urlBase + "BioNLP-ST_2011_Epi_and_PTM_development_data_rev1.tar.gz"
URL["EPI11_TRAIN"] = urlBase + "BioNLP-ST_2011_Epi_and_PTM_training_data_rev1.tar.gz"
URL["EPI11_TEST"] = urlBase + "BioNLP-ST_2011_Epi_and_PTM_test_data.tar.gz"
URL["ID11_DEVEL"] = urlBase + "BioNLP-ST_2011_Infectious_Diseases_development_data_rev1.tar.gz"
URL["ID11_TRAIN"] = urlBase + "BioNLP-ST_2011_Infectious_Diseases_training_data_rev1.tar.gz"
URL["ID11_TEST"] = urlBase + "BioNLP-ST_2011_Infectious_Diseases_test_data.tar.gz"
URL["BB11_DEVEL"] = urlBase + "BioNLP-ST_2011_Bacteria_Biotopes_dev_data_rev1.tar.gz"
URL["BB11_TRAIN"] = urlBase + "BioNLP-ST_2011_Bacteria_Biotopes_train_data_rev1.tar.gz"
URL["BB11_TEST"] = urlBase + "BioNLP-ST_2011_Bacteria_Biotopes_test_data.tar.gz"
URL["BI11_DEVEL"] = urlBase + "BioNLP-ST_2011_bacteria_interactions_dev_data_rev1.tar.gz"
URL["BI11_TRAIN"] = urlBase + "BioNLP-ST_2011_bacteria_interactions_train_data_rev1.tar.gz"
URL["BI11_TEST"] = urlBase + "BioNLP-ST_2011_bacteria_interactions_test_data.tar.gz"
# Supporting tasks
URL["REL11_DEVEL"] = urlBase + "BioNLP-ST_2011_Entity_Relations_development_data.tar.gz"
URL["REL11_TRAIN"] = urlBase + "BioNLP-ST_2011_Entity_Relations_training_data.tar.gz"
URL["REL11_TEST"] = urlBase + "BioNLP-ST_2011_Entity_Relations_test_data.tar.gz"
URL["REN11_DEVEL"] = urlBase + "BioNLP-ST_2011_bacteria_rename_dev_data.tar.gz"
URL["REN11_TRAIN"] = urlBase + "BioNLP-ST_2011_bacteria_rename_train_data.tar.gz"
URL["REN11_TEST"] = urlBase + "BioNLP-ST_2011_bacteria_rename_test_data.tar.gz"
URL["CO11_DEVEL"] = urlBase + "BioNLP-ST_2011_coreference_development_data.tar.gz"
URL["CO11_TRAIN"] = urlBase + "BioNLP-ST_2011_coreference_training_data_rev1.tar.gz"
URL["CO11_TEST"] = urlBase + "BioNLP-ST_2011_coreference_test_data.tar.gz"
# BioNLP'11 Evaluators
URL["BIONLP11_EVALUATORS"] = "http://sourceforge.net/projects/tees/files/data/BioNLP-evaluators-130224.tar.gz"
URL["GE11_EVALUATOR"] = urlBase + "BioNLP-ST_2011_genia_tools_rev1.tar.gz"
#URL["EPI_EVALUATOR"] = urlBase +
#URL["ID_EVALUATOR"] = urlBase +
URL["BB11_EVALUATOR"] = urlBase + "BioNLP-ST_2011_Bacteria_Biotopes_evaluation_software_rev2.tar.gz"
URL["BI11_EVALUATOR"] = urlBase + "BioNLP-ST_2011_bacteria_interactions_evaluation_software.tar.gz"
#URL["REN_EVALUATOR"] = "http://sites.google.com/site/bionlpst/home/bacteria-gene-renaming-rename/BioNLP-ST_2011_bacteria_rename_evaluation_sofware.tar.gz"
URL["CO11_EVALUATOR"] = urlBase + "CREvalPackage1.6.tar.gz"
# BioNLP'11 Supporting resources
urlBase = "http://weaver.nlplab.org/~bionlp-st/BioNLP-ST/downloads/support-files/"
#GE
URL["GE11_TRAIN_TOKENS"] = urlBase + "Tokenised-BioNLP-ST_2011_genia_train_data_rev1.tar.gz"
URL["GE11_DEVEL_TOKENS"] = urlBase + "Tokenised-BioNLP-ST_2011_genia_devel_data_rev1.tar.gz"
URL["GE11_TEST_TOKENS"] = urlBase + "Tokenised-BioNLP-ST_2011_genia_test_data.tar.gz"
URL["GE11_TRAIN_McCC"] = urlBase + "McCC-parses-BioNLP-ST_2011_genia_train_data_rev1.tar.gz"
URL["GE11_DEVEL_McCC"] = urlBase + "McCC-parses-BioNLP-ST_2011_genia_devel_data_rev1.tar.gz"
URL["GE11_TEST_McCC"] = urlBase + "McCC-parses-BioNLP-ST_2011_genia_test_data.tar.gz"
#EPI
URL["EPI11_DEVEL_TOKENS"] = urlBase + "Tokenised-BioNLP-ST_2011_Epi_and_PTM_development_data.tar.gz"
URL["EPI11_TRAIN_TOKENS"] = urlBase + "Tokenised-BioNLP-ST_2011_Epi_and_PTM_training_data.tar.gz"
URL["EPI11_TEST_TOKENS"] = urlBase + "Tokenised-BioNLP-ST_2011_Epi_and_PTM_test_data.tar.gz"
URL["EPI11_DEVEL_McCC"] = urlBase + "McCC-parses-BioNLP-ST_2011_Epi_and_PTM_development_data.tar.gz"
URL["EPI11_TRAIN_McCC"] = urlBase + "McCC-parses-BioNLP-ST_2011_Epi_and_PTM_training_data.tar.gz"
URL["EPI11_TEST_McCC"] = urlBase + "McCC-parses-BioNLP-ST_2011_Epi_and_PTM_test_data.tar.gz"
#ID
URL["ID11_DEVEL_TOKENS"] = urlBase + "Tokenised-BioNLP-ST_2011_Infectious_Diseases_development_data.tar.gz"
URL["ID11_TRAIN_TOKENS"] = urlBase + "Tokenised-BioNLP-ST_2011_Infectious_Diseases_training_data.tar.gz"
URL["ID11_TEST_TOKENS"] = urlBase + "Tokenised-BioNLP-ST_2011_Infectious_Diseases_test_data.tar.gz"
URL["ID11_DEVEL_McCC"] = urlBase + "McCC-parses-BioNLP-ST_2011_Infectious_Diseases_development_data.tar.gz"
URL["ID11_TRAIN_McCC"] = urlBase + "McCC-parses-BioNLP-ST_2011_Infectious_Diseases_training_data.tar.gz"
URL["ID11_TEST_McCC"] = urlBase + "McCC-parses-BioNLP-ST_2011_Infectious_Diseases_test_data.tar.gz"
#BB
URL["BB11_DEVEL_TOKENS"] = urlBase + "Tokenised-BioNLP-ST_2011_Bacteria_Biotopes_dev_data_rev1.tar.gz"
URL["BB11_TRAIN_TOKENS"] = urlBase + "Tokenised-BioNLP-ST_2011_Bacteria_Biotopes_train_data_rev1.tar.gz"
URL["BB11_TEST_TOKENS"] = urlBase + "Tokenised-BioNLP-ST_2011_Bacteria_Biotopes_test_data.tar.gz"
URL["BB11_DEVEL_McCC"] = urlBase + "McCC-parses-BioNLP-ST_2011_Bacteria_Biotopes_dev_data_rev1.tar.gz"
URL["BB11_TRAIN_McCC"] = urlBase + "McCC-parses-BioNLP-ST_2011_Bacteria_Biotopes_train_data_rev1.tar.gz"
URL["BB11_TEST_McCC"] = urlBase + "McCC-parses-BioNLP-ST_2011_Bacteria_Biotopes_test_data.tar.gz"
#BI
URL["BI11_DEVEL_TOKENS"] = urlBase + "Tokenised-BioNLP-ST_2011_bacteria_interactions_dev_data_rev1.tar.gz"
URL["BI11_TRAIN_TOKENS"] = urlBase + "Tokenised-BioNLP-ST_2011_bacteria_interactions_train_data_rev1.tar.gz"
URL["BI11_TEST_TOKENS"] = urlBase + "Tokenised-BioNLP-ST_2011_bacteria_interactions_test_data.tar.gz"
URL["BI11_DEVEL_McCC"] = urlBase + "McCC-parses-BioNLP-ST_2011_bacteria_interactions_dev_data_rev1.tar.gz"
URL["BI11_TRAIN_McCC"] = urlBase + "McCC-parses-BioNLP-ST_2011_bacteria_interactions_train_data_rev1.tar.gz"
URL["BI11_TEST_McCC"] = urlBase + "McCC-parses-BioNLP-ST_2011_bacteria_interactions_test_data.tar.gz"
# Parses not provided in the shared tasks
#URL["TEES_PARSES"] = "/home/jari/biotext/TEES-parses-2.0/TEES-parses-120706.tar.gz"
URL["TEES_PARSES"] = "http://sourceforge.net/projects/tees/files/data/TEES-parses-130224.tar.gz"
# BioNLP'09 Shared Task
urlBase = "http://www.nactem.ac.uk/tsujii/GENIA/SharedTask/dat/"
URL["GE09_DEVEL"] = urlBase + "bionlp09_shared_task_development_data_rev1.tar.gz"
URL["GE09_TRAIN"] = urlBase + "bionlp09_shared_task_training_data_rev2.tar.gz"
URL["GE09_TEST"] = urlBase + "bionlp09_shared_task_test_data_without_gold_annotation.tar.gz"
# BioNLP'09 Evaluator
URL["GE09_EVALUATOR"] = urlBase + "bionlp09_shared_task_evaluation_tools_v1.tar.gz"
# BioNLP'09 Shared Task parses
urlBase = "http://www-tsujii.is.s.u-tokyo.ac.jp/GENIA/SharedTask/dat/"
URL["GE09_DEVEL_ANALYSES"] = urlBase + "bionlp09_shared_task_development_analyses_rev2.tar.gz"
URL["GE09_TRAIN_ANALYSES"] = urlBase + "bionlp09_shared_task_training_analyses_rev2.tar.gz"
URL["GE09_TEST_ANALYSES"] = urlBase + "bionlp09_shared_task_test_analyses_rev2.tar.gz"
# DDI'11 Shared Task
urlBase = "http://labda.inf.uc3m.es/DDIExtraction2011/"
URL["DDI11_TRAIN_UNIFIED"] = urlBase + "DrugDDI_Unified.zip"
URL["DDI11_TRAIN_MTMX"] = urlBase + "DrugDDI_MTMX.zip"
# If you have registered for the DDI'11 Shared Task, insert the paths of your downloaded
# test files in the following variables (in your local settings file) to have them
# converted for use with TEES
URL["DDI11_TEST_UNIFIED"] = None
URL["DDI11_TEST_MTMX"] = None
# DDI'13 Shared Task
URL["DDI13_TRAIN"] = "http://www.cs.york.ac.uk/semeval-2013/task9/data/uploads/datasets/train/semeval_task9_train.zip"
# If you have registered for the DDI'11 Shared Task, insert the paths of your downloaded
# test files in the following variables (in your local settings file) to have them
# converted for use with TEES when using Utils/Convert/convertDDI13.py
URL["DDI13_TEST_TASK_9.1"] = None
URL["DDI13_TEST_TASK_9.2"] = None
URL["DDI13_TEST_TASK_9.1_TEES_PARSES"] = None
URL["DDI13_TEST_TASK_9.2_TEES_PARSES"] = None
URL["DDI13_TRAIN_TEES_PARSES"] = "http://sourceforge.net/projects/tees/files/data/DDI13-TEES-parses-130224.tar.gz"
# Miscellaneous files
URL["TEES_RESOURCES"] = "http://sourceforge.net/projects/tees/files/data/TEES-resources-120705.tar.gz"
URL["DRUG_BANK_XML"] = "http://www.drugbank.ca/system/downloads/current/drugbank.xml.zip"
|
py | b400b4c0dd6b4ca1e85146856adb22a0bd961632 | import scipy.io as sio
import graph_tool
import graph_tool.draw
mutag = sio.loadmat('NCI109.mat')
data = mutag['NCI109']
label = mutag['lnci109']
f_text=open("NCI109/text.tx","w")
for i in range (len(label)):
g = graph_tool.Graph()
g.set_directed(False)
#read data from .mat file
node = list(data['n109'][0,i].item(0))[0]
edge = list(data['e109'][0,i])[0].item(0)[0]
#print(len(edge))
#print(type(edge))
#construct the graph
g.add_vertex(len(node))
vprop_name = g.new_vertex_property("string")
g.vp.name = vprop_name
    vprop_value = g.new_vertex_property("int")  # "init" is not a valid graph_tool value type
g.vp.label = vprop_value
    for j in range(len(node)):  # use j so the outer per-graph index i is not shadowed
        g.vp.name[g.vertex(j)] = "n" + str(j)
        g.vp.label[g.vertex(j)] = node[j].item(0)  # node is a list, so index it rather than call it
    for j in range(len(edge)):  # fixed undefined name 'edeg'
if len(edge[j][0]) != 0:
node_edge = list(edge[j][0][0])
for k in range(len(node_edge)):
g.add_edge(g.vertex(j),g.vertex(node_edge[k]-1))
#eprop = g.new_edge_property("init")
#g.edge.properties['weight'] = eprop
# for j in range(len(edge)):
# g.edge_index(j).weight = edge[j][2]
#print(g)
file_name = "nci109_"+str(i)
g.save("NCI109/"+file_name+".xml.gz")
f_text.write(file_name + ".xml.gz" + " " +str(label[i].item(0)) + '\n')
print(g)
#f_text.close()
|
py | b400b55702c7b9e68b6882224f3374a242f2f427 | # Copyright 2020, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Nuitka distutils integration.
"""
import distutils.command.build # pylint: disable=I0021,import-error,no-name-in-module
import distutils.command.install # pylint: disable=I0021,import-error,no-name-in-module
import os
import subprocess
import sys
import wheel.bdist_wheel # pylint: disable=I0021,import-error,no-name-in-module
from nuitka.tools.testing.Common import my_print
def setuptools_build_hook(dist, keyword, value):
# If the user project setup.py includes the key "build_with_nuitka=True" all
# build operations (build, bdist_wheel, install etc) will run via Nuitka.
# pylint: disable=unused-argument
if not value:
return
dist.cmdclass = dist.cmdclass or {} # Ensure is a dict
dist.cmdclass["build"] = build
dist.cmdclass["install"] = install
dist.cmdclass["bdist_wheel"] = bdist_nuitka
class PyPackage(object):
"""
Called by _find_to_build, represents a py_package to be built by _build
"""
def __init__(self, module_name, related_packages=()):
self.module_name = module_name # string
self.related_packages = related_packages # tuple/list
def __str__(self):
return self.__class__.__name__ + "(module_name=%s, related_packages=%s)" % (
self.module_name,
self.related_packages,
)
class PyModule(object):
"""
Called by _find_to_build, represents a py_module to be built by _build
"""
def __init__(self, module_name, related_modules=()):
self.module_name = module_name # string
self.related_modules = related_modules # tuple/list
def __str__(self):
return self.__class__.__name__ + "(module_name=%s, related_modules=%s)" % (
self.module_name,
self.related_modules,
)
# Class name enforced by distutils, must match the command name.
# pylint: disable=C0103
class build(distutils.command.build.build):
# pylint: disable=attribute-defined-outside-init
def run(self):
self.compile_packages = self.distribution.packages or ()
self.py_modules = self.distribution.py_modules or ()
if not self.compile_packages and not self.py_modules:
sys.exit("Missing both compile_packages and py_modules, aborting...")
# Python2 does not allow super on this old style class.
distutils.command.build.build.run(self)
self._build(os.path.abspath(self.build_lib))
def _find_to_build(self):
"""
Helper for _build
Returns list containing PyPackage or PyModule instances.
Algorithm for finding distinct packages:
1) Take minimum package
2) Find related packages that start with this name
3) Add this to the list to return, then repeat steps 1 & 2
until no more packages exist
"""
builds = []
py_packages = self.compile_packages[:]
py_modules = self.py_modules[:]
while py_packages:
current_package = min(py_packages)
related = [
p
for p in py_packages
if p == current_package or p.startswith(current_package + ".")
]
builds.append(PyPackage(current_package, related_packages=related))
for p in related:
py_packages.remove(p)
while py_modules:
current_module = min(py_modules)
related = [
m
for m in py_modules
if m == current_module or m.startswith(current_module + ".")
]
builds.append(PyModule(current_module, related_modules=related))
for m in related:
py_modules.remove(m)
return builds
def _build(self, build_lib):
# High complexity, pylint: disable=too-many-branches,too-many-locals
# Nuitka wants the main package by filename, probably we should stop
# needing that.
from nuitka.importing.Importing import findModule, setMainScriptDirectory
from nuitka.utils.ModuleNames import ModuleName
from nuitka.__past__ import ( # pylint: disable=I0021,redefined-builtin
Iterable,
unicode,
)
old_dir = os.getcwd()
os.chdir(build_lib)
# Search in the build directory preferably.
setMainScriptDirectory(".")
to_builds = self._find_to_build()
for to_build in to_builds:
package, main_filename, finding = findModule(
importing=None,
module_name=ModuleName(to_build.module_name),
parent_package=None,
level=0,
warn=False,
)
# Check expectations, e.g. do not compile built-in modules.
assert finding == "absolute", finding
if package is not None:
output_dir = os.path.join(build_lib, package)
else:
output_dir = build_lib
command = [
sys.executable,
"-m",
"nuitka",
"--module",
"--plugin-enable=pylint-warnings",
"--output-dir=%s" % output_dir,
"--nofollow-import-to=*.tests",
"--show-modules",
"--remove-output",
]
if type(to_build) is PyPackage:
command += [
"--include-package=%s" % package_name.replace("/", ".")
for package_name in to_build.related_packages
]
else: # type(to_build) is PyModule
command += [
"--include-module=%s" % module_name
for module_name in to_build.related_modules
]
# Process any extra options from setuptools
if "nuitka" in self.distribution.command_options:
for option, value in self.distribution.command_options[
"nuitka"
].items():
option = "--" + option.lstrip("-")
if value is None:
command.append(option)
elif isinstance(value, bool):
option = "--" + ("no" if not value else "") + option.lstrip("-")
command.append(option)
elif isinstance(value, Iterable) and not isinstance(
value, (unicode, bytes, str)
):
for val in value:
command.append("%s=%s" % (option, val))
else:
command.append("%s=%s" % (option, value))
command.append(main_filename)
# added for clarity
my_print("Building: %s" % to_build, style="yellow")
subprocess.check_call(command, cwd=build_lib)
for root, _, filenames in os.walk(build_lib):
for filename in filenames:
fullpath = os.path.join(root, filename)
if fullpath.lower().endswith((".py", ".pyw", ".pyc", ".pyo")):
os.unlink(fullpath)
os.chdir(old_dir)
self.build_lib = build_lib
# pylint: disable=C0103
class install(distutils.command.install.install):
# pylint: disable=attribute-defined-outside-init
def finalize_options(self):
distutils.command.install.install.finalize_options(self)
# Ensure the purelib folder is not used
self.install_lib = self.install_platlib
# pylint: disable=C0103
class bdist_nuitka(wheel.bdist_wheel.bdist_wheel):
def initialize_options(self):
# Register the command class overrides above
dist = self.distribution
dist.cmdclass = dist.cmdclass or {} # Ensure is a dict
dist.cmdclass["build"] = build
dist.cmdclass["install"] = install
wheel.bdist_wheel.bdist_wheel.initialize_options(self)
# pylint: disable=attribute-defined-outside-init
def finalize_options(self):
wheel.bdist_wheel.bdist_wheel.finalize_options(self)
# Force module to use correct platform in name
self.root_is_pure = False
self.plat_name_supplied = self.plat_name is not None
def write_wheelfile(self, wheelfile_base, generator=None):
if generator is None:
from nuitka.Version import getNuitkaVersion
generator = "Nuitka (%s)" % getNuitkaVersion()
wheel.bdist_wheel.bdist_wheel.write_wheelfile(
self, wheelfile_base=wheelfile_base, generator=generator
)
|
py | b400b6145266bcf5fb3d284444248f56cabd1a06 | import pytest
import gzip
from pathlib import Path
from pypipegraph import FileGeneratingJob, MultiFileGeneratingJob
import requests_mock
import pypipegraph as ppg
from mbf_align import (
FASTQsFromFile,
FASTQsFromFiles,
FASTQsFromFolder,
FASTQsFromJob,
FASTQsFromURLs,
FASTQsFromAccession,
FASTQsFromPrefix,
build_fastq_strategy,
FASTQsFromMRNAs,
FASTQsJoin
)
from mbf_align import Sample
from mbf_align import PairingError
from mbf_align import fastq2
from mbf_align._common import read_fastq_iterator
from mbf_sampledata import get_sample_data
import attr
def test_FASTQsFromFile():
fn = Path(
get_sample_data(Path("mbf_align/sample_a") / ".." / "sample_a" / "a.fastq")
)
o = FASTQsFromFile(fn)
assert o() == [(fn.resolve(),)]
def test_FASTQsFromFileRaisesOnMissing():
fn = get_sample_data(Path("mbf_align/sample_a") / "a.fastq.nosuchfile")
with pytest.raises(IOError):
FASTQsFromFile(fn)
def test_FASTQsFromFilePaired():
fn = Path(get_sample_data(Path("mbf_align/sample_b") / "a_R1_.fastq.gz"))
fn2 = Path(
get_sample_data(
Path("mbf_align/sample_b") / ".." / "sample_b" / "a_R2_.fastq.gz"
)
)
o = FASTQsFromFile(fn, fn2)
assert o() == [(fn.resolve(), fn2.resolve())]
def test_FASTQsFromFilePairedMissingR2():
fn = get_sample_data(Path("mbf_align/sample_b") / "a_R1_.fastq.gz")
fn2 = get_sample_data(Path("mbf_align/sample_b") / "a_R2_.fastq.gz.nosuchfile")
with pytest.raises(IOError):
FASTQsFromFile(fn, fn2)
def test_FASTQsFromFilesPaired():
fn = Path(get_sample_data(Path("mbf_align/sample_b") / "a_R1_.fastq.gz"))
fn2 = Path(
get_sample_data(
Path("mbf_align/sample_b") / ".." / "sample_b" / "a_R2_.fastq.gz"
)
)
o = FASTQsFromFiles([fn, fn2])
assert o() == [(fn.resolve(), fn2.resolve())]
def test_FASTQsFromFilesPaired_build_strategy():
fn = Path(get_sample_data(Path("mbf_align/sample_b") / "a_R1_.fastq.gz"))
fn2 = Path(
get_sample_data(
Path("mbf_align/sample_b") / ".." / "sample_b" / "a_R2_.fastq.gz"
)
)
o = build_fastq_strategy([fn, fn2])
assert o() == [(fn.resolve(), fn2.resolve())]
def test_FASTQsFromFolder():
folder = Path(get_sample_data(Path("mbf_align/sample_a")))
o = FASTQsFromFolder(folder)
import pprint
pprint.pprint(o())
assert o() == [
((folder / "a.fastq").resolve(),),
((folder / "b.fastq.gz").resolve(),),
]
def test_fastqs_join():
fn = Path(get_sample_data(Path("mbf_align/sample_b") / "a_R1_.fastq.gz"))
fn2 = Path(
get_sample_data(
Path("mbf_align/sample_b") / ".." / "sample_b" / "a_R2_.fastq.gz"
)
)
a = FASTQsFromFiles([fn, fn2])
b = FASTQsFromFile(fn)
c = FASTQsFromFile(fn2)
d = FASTQsJoin([a, b, c])
o = d()
assert o == [(fn.resolve(), fn2.resolve()), (fn.resolve(),), (fn2.resolve(),)]
def test_FASTQsFromFolder_raises_on_non_existing():
with pytest.raises(IOError):
FASTQsFromFolder("shu")
def test_FASTQsFromFolder_raises_on_no_fastqs():
with pytest.raises(ValueError):
FASTQsFromFolder(get_sample_data(Path("mbf_align/sample_f")))
def test_FASTQsFromFolder_raises_on_not_a_folder():
with pytest.raises(ValueError):
FASTQsFromFolder(get_sample_data(Path("mbf_align/sample_a") / "a.fastq"))
def test_FASTQsFromFolderPaired():
folder = Path(get_sample_data(Path("mbf_align/sample_b")))
o = FASTQsFromFolder(folder)
assert o() == [
((folder / "a_R1_.fastq.gz").resolve(), (folder / "a_R2_.fastq.gz").resolve())
]
def test_FASTQsFromFolderR2_but_missing_any_r1():
folder = get_sample_data(Path("mbf_align/sample_c"))
o = FASTQsFromFolder(folder)
with pytest.raises(ValueError):
o()
def test_FASTQsFromFolder_pairing_files_fails():
folder = get_sample_data(Path("mbf_align/sample_d"))
o = FASTQsFromFolder(folder)
with pytest.raises(ValueError):
o()
def test_FASTQsFromFolder_pairing_files_fails2():
folder = get_sample_data(Path("mbf_align/sample_e"))
o = FASTQsFromFolder(folder)
with pytest.raises(ValueError):
o()
def test_FASTQsFromPrefix():
fn1 = Path(get_sample_data(Path("mbf_align/sample_d") / "a_R1_.fastq.gz"))
fn2 = Path(get_sample_data(Path("mbf_align/sample_d") / "a_R2_.fastq.gz"))
fn_prefix = Path(get_sample_data(Path("mbf_align/sample_d") / "a"))
o = FASTQsFromPrefix(fn_prefix)
str(o)
assert o() == [(fn1.resolve(), fn2.resolve())]
def test_FASTQsFromPrefix_raises_on_non_existant():
fn_prefix = Path("/shu/sha")
with pytest.raises(IOError):
FASTQsFromPrefix(fn_prefix)
def test_FASTQsFromPrefix_raises_on_non_found():
fn_prefix = Path(get_sample_data(Path("mbf_align/sample_d") / "x"))
with pytest.raises(ValueError):
FASTQsFromPrefix(fn_prefix)
@pytest.mark.usefixtures("new_pipegraph_no_qc")
class TestSamples:
def test_FASTQsFromJob(self):
job = FileGeneratingJob("test.fastq.gz", lambda of: None)
o = FASTQsFromJob(job)
assert o() == [(Path("test.fastq.gz").resolve(),)]
def test_FASTQsFromJob_R1_ok(self):
job = FileGeneratingJob("test_R1_.fastq.gz", lambda of: None)
o = FASTQsFromJob(job)
assert o() == [(Path("test_R1_.fastq.gz").resolve(),)]
def test_FASTQsFromJob_Multiple_Unpaired(self):
job = MultiFileGeneratingJob(
["test.fastq.gz", "test2.fastq.gz"], lambda of: None
)
o = FASTQsFromJob(job)
assert o() == [
(Path("test.fastq.gz").resolve(),),
(Path("test2.fastq.gz").resolve(),),
]
def test_FASTQsFromJob_Multiple_Unpaired_R1(self):
job = MultiFileGeneratingJob(
["test_R1_.fastq.gz", "test2_R1_.fastq.gz"], lambda of: None
)
o = FASTQsFromJob(job)
assert o() == [
# 2 sorts before _
(Path("test2_R1_.fastq.gz").resolve(),),
(Path("test_R1_.fastq.gz").resolve(),),
]
def test_FASTQsFromJob_Multiple_Paired(self):
job = MultiFileGeneratingJob(
[
"test_R1_.fastq.gz",
"test2_R1_.fastq.gz",
"test_R2_.fastq.gz",
"test2_R2_.fastq.gz",
],
lambda of: None,
)
o = FASTQsFromJob(job)
assert set(o()) == set(
[
# 2 sorts before _
(
Path("test2_R1_.fastq.gz").resolve(),
Path("test2_R2_.fastq.gz").resolve(),
),
(
Path("test_R1_.fastq.gz").resolve(),
Path("test_R2_.fastq.gz").resolve(),
),
]
)
def test_build_fastq_strategy(self):
# single filename
assert build_fastq_strategy(
get_sample_data(
Path("mbf_align/sample_b") / ".." / "sample_b" / "a_R1_.fastq.gz"
)
)() == [
(
(
Path(get_sample_data(Path("mbf_align/sample_b") / "a_R1_.fastq.gz"))
).resolve(),
)
]
assert build_fastq_strategy(
str(
get_sample_data(
Path("mbf_align/sample_b") / ".." / "sample_b" / "a_R1_.fastq.gz"
)
)
)() == [
(
(
Path(get_sample_data(Path("mbf_align/sample_b") / "a_R1_.fastq.gz"))
).resolve(),
)
]
# multiple files - end up being paired!
assert build_fastq_strategy(
[
get_sample_data(
Path("mbf_align/sample_b") / ".." / "sample_b" / "a_R1_.fastq.gz"
),
get_sample_data(
Path("mbf_align/sample_b") / ".." / "sample_b" / "a_R2_.fastq.gz"
),
]
)() == [
(
Path(
get_sample_data(Path("mbf_align/sample_b") / "a_R1_.fastq.gz")
).resolve(),
Path(
get_sample_data(Path("mbf_align/sample_b") / "a_R2_.fastq.gz")
).resolve(),
)
]
# folder
assert build_fastq_strategy(
get_sample_data(Path("mbf_align/sample_b") / ".." / "sample_b")
)() == [
(
Path(
get_sample_data(Path("mbf_align/sample_b") / "a_R1_.fastq.gz")
).resolve(),
Path(
get_sample_data(Path("mbf_align/sample_b") / "a_R2_.fastq.gz")
).resolve(),
)
]
# job
assert isinstance(
build_fastq_strategy(FileGeneratingJob("test.fastq", lambda of: None)),
FASTQsFromJob,
)
# pass through
fn = get_sample_data(Path("mbf_align/sample_a") / ".." / "sample_a" / "a.fastq")
o = FASTQsFromFile(fn)
assert build_fastq_strategy(o) is o
with pytest.raises(ValueError):
build_fastq_strategy(55)
def test_lane(self):
lane = Sample(
"Sample_a", get_sample_data(Path("mbf_align/sample_a")), False, vid="VA000"
)
assert lane.vid == "VA000"
temp_job = lane.prepare_input()
real_job = lane.save_input()
ppg.run_pipegraph()
assert not Path(temp_job.filenames[0]).exists()
assert Path(real_job.filenames[0]).exists()
with gzip.GzipFile(real_job.filenames[0], "r") as op:
lines = op.readlines()
assert len(lines) == 20 + 20
def test_paired_modes(self):
with pytest.raises(PairingError):
lane = Sample(
"Sample_a",
get_sample_data(Path("mbf_align/sample_b")),
False,
vid="VA000",
)
lane.prepare_input()
def test_lane_paired_straight(self):
lane = Sample(
"Sample_a",
get_sample_data(Path("mbf_align/sample_b")),
False,
vid="VA000",
pairing="paired",
)
assert lane.vid == "VA000"
temp_job = lane.prepare_input()
real_job = lane.save_input()
ppg.run_pipegraph()
assert not Path(temp_job.filenames[0]).exists()
assert not Path(temp_job.filenames[1]).exists()
assert Path(real_job.filenames[0]).exists()
assert Path(real_job.filenames[1]).exists()
assert "_R1_" in real_job.filenames[0]
assert "_R2_" in real_job.filenames[1]
assert ".fastq.gz" in real_job.filenames[0]
assert ".fastq.gz" in real_job.filenames[1]
for input_fn, output_fn in zip(
[
(get_sample_data(Path("mbf_align/sample_b") / "a_R1_.fastq.gz")),
(get_sample_data(Path("mbf_align/sample_b") / "a_R2_.fastq.gz")),
],
real_job.filenames,
):
with gzip.GzipFile(output_fn, "r") as op:
actual = op.read()
with gzip.GzipFile(input_fn, "r") as op:
should = op.read()
assert actual == should
def test_lane_paired_filtered(self):
lane = Sample(
"Sample_a",
get_sample_data(Path("mbf_align/sample_b")),
False,
vid="VA000",
pairing="paired",
fastq_processor=fastq2.Paired_Filtered(lambda *args: True),
)
assert lane.vid == "VA000"
temp_job = lane.prepare_input()
real_job = lane.save_input()
ppg.run_pipegraph()
assert not Path(temp_job.filenames[0]).exists()
assert not Path(temp_job.filenames[1]).exists()
assert Path(real_job.filenames[0]).exists()
assert Path(real_job.filenames[1]).exists()
assert "_R1_" in real_job.filenames[0]
assert "_R2_" in real_job.filenames[1]
assert ".fastq.gz" in real_job.filenames[0]
assert ".fastq.gz" in real_job.filenames[1]
for input_fn, output_fn in zip(
[
(get_sample_data(Path("mbf_align/sample_b") / "a_R1_.fastq.gz")),
(get_sample_data(Path("mbf_align/sample_b") / "a_R2_.fastq.gz")),
],
real_job.filenames,
):
with gzip.GzipFile(output_fn, "r") as op:
actual = op.read()
with gzip.GzipFile(input_fn, "r") as op:
should = op.read()
assert actual == should
def test_lane_paired_as_single(self):
lane = Sample(
"Sample_a",
get_sample_data(Path("mbf_align/sample_b")),
False,
vid="VA000",
pairing="paired_as_single",
)
assert lane.vid == "VA000"
temp_job = lane.prepare_input()
real_job = lane.save_input()
ppg.run_pipegraph()
assert not Path(temp_job.filenames[0]).exists()
assert len(temp_job.filenames) == 1
assert Path(real_job.filenames[0]).exists()
assert len(real_job.filenames) == 1
assert not "_R1_" in real_job.filenames[0]
assert ".fastq.gz" in real_job.filenames[0]
should = b""
for input_fn in [
(get_sample_data(Path("mbf_align/sample_b") / "a_R1_.fastq.gz")),
(get_sample_data(Path("mbf_align/sample_b") / "a_R2_.fastq.gz")),
]:
with gzip.GzipFile(input_fn, "r") as op:
should += op.read()
with gzip.GzipFile(real_job.filenames[0], "r") as op:
actual = op.read()
assert actual == should
def test_lane_paired_missing_R2(self):
lane = Sample(
"Sample_a",
get_sample_data(Path("mbf_align/sample_a")),
False,
vid="VA000",
pairing="paired",
)
with pytest.raises(PairingError):
lane.prepare_input()
def test_lane_paired_only_first(self):
lane = Sample(
"Sample_a",
get_sample_data(Path("mbf_align/sample_b")),
False,
vid="VA000",
pairing="only_first",
)
assert lane.vid == "VA000"
temp_job = lane.prepare_input()
real_job = lane.save_input()
ppg.run_pipegraph()
assert not Path(temp_job.filenames[0]).exists()
assert len(temp_job.filenames) == 1
assert Path(real_job.filenames[0]).exists()
assert len(real_job.filenames) == 1
assert not "_R1_" in real_job.filenames[0]
assert ".fastq.gz" in real_job.filenames[0]
should = b""
for input_fn in [
(get_sample_data(Path("mbf_align/sample_b") / "a_R1_.fastq.gz"))
]:
with gzip.GzipFile(input_fn, "r") as op:
should += op.read()
with gzip.GzipFile(real_job.filenames[0], "r") as op:
actual = op.read()
assert actual == should
def test_lane_paired_only_second(self):
lane = Sample(
"Sample_a",
get_sample_data(Path("mbf_align/sample_b")),
False,
vid="VA000",
pairing="only_second",
)
assert lane.vid == "VA000"
temp_job = lane.prepare_input()
real_job = lane.save_input()
ppg.run_pipegraph()
assert not Path(temp_job.filenames[0]).exists()
assert len(temp_job.filenames) == 1
assert Path(real_job.filenames[0]).exists()
assert len(real_job.filenames) == 1
assert not "_R1_" in real_job.filenames[0]
assert ".fastq.gz" in real_job.filenames[0]
should = b""
for input_fn in [
(get_sample_data(Path("mbf_align/sample_b") / "a_R2_.fastq.gz"))
]:
with gzip.GzipFile(input_fn, "r") as op:
should += op.read()
with gzip.GzipFile(real_job.filenames[0], "r") as op:
actual = op.read()
assert actual == should
def test_pairing_invalid_value(self):
with pytest.raises(ValueError):
Sample(
"Sample_a",
get_sample_data(Path("mbf_align/sample_a")),
False,
pairing="do_what_you_want",
)
with pytest.raises(ValueError):
Sample(
"Sample_a",
get_sample_data(Path("mbf_align/sample_a")),
False,
pairing=False,
)
with pytest.raises(ValueError):
Sample(
"Sample_a",
get_sample_data(Path("mbf_align/sample_a")),
False,
pairing=None,
)
with pytest.raises(ValueError):
Sample(
"Sample_a",
get_sample_data(Path("mbf_align/sample_a")),
False,
pairing=[5],
)
def test_lane_raises_on_pe_as_se(self):
lane = Sample("Sample_a", get_sample_data(Path("mbf_align/sample_b")), False)
with pytest.raises(PairingError):
lane.prepare_input()
def test_lane_with_job_generating_fastq(self):
def gen_fastq(fn):
with open(fn, "wb") as op:
op.write(b"@shu\nAGTC\n+\nasdf")
job = FileGeneratingJob("input.fastq", gen_fastq)
lane = Sample("Sample_a", job, False, vid="VA000")
assert lane.vid == "VA000"
temp_job = lane.prepare_input()
assert job in temp_job.prerequisites
real_job = lane.save_input()
ppg.run_pipegraph()
assert not Path(temp_job.filenames[0]).exists()
assert Path(real_job.filenames[0]).exists()
with gzip.GzipFile(real_job.filenames[0], "r") as op:
lines = op.readlines()
assert len(lines) == 4
def test_align(self, local_store):
import json
import gzip
class FakeGenome:
name = "FakeGenome"
def download_genome(self):
return []
def job_genes(self):
return []
def job_transcripts(self):
return []
def build_index(self, aligner, fasta_to_use=None, gtf_to_use=None):
job = ppg.FileGeneratingJob(
"fake_index", lambda: Path("fake_index").write_text("hello")
)
job.output_path = "fake_index"
return job
class FakeAligner:
name = "FakeAligner"
version = "0.1"
def align_job(
self,
input_fastq,
paired_end_filename,
index_basename,
output_bam_filename,
parameters,
):
def align():
with open(output_bam_filename, "w") as op:
json.dump(
[
open(input_fastq).read(200),
open(paired_end_filename).read(200)
if paired_end_filename
else "",
index_basename,
str(parameters),
],
op,
)
with open(str(output_bam_filename) + ".bai", "w") as op:
op.write("Done")
job = ppg.MultiFileGeneratingJob(
[output_bam_filename, str(output_bam_filename) + ".bai"], align
)
job.depends_on_params("")
return job
aligner = FakeAligner()
lane = Sample(
"Sample_a",
get_sample_data(Path("mbf_align/sample_b")),
False,
vid="VA000",
pairing="paired",
)
genome = FakeGenome()
params = {"shu": 123}
aligned_lane = lane.align(aligner, genome, params)
ppg.run_pipegraph()
assert Path("fake_index").exists()
assert Path("fake_index").read_text() == "hello"
assert aligned_lane.load()[0].filenames[0].endswith(lane.name + ".bam")
assert aligned_lane.load()[0].filenames[1].endswith(lane.name + ".bam.bai")
assert Path(aligned_lane.load()[0].filenames[0]).exists()
with open(aligned_lane.load()[0].filenames[0]) as op:
actual = json.load(op)
with gzip.GzipFile(
get_sample_data(Path("mbf_align/sample_b") / "a_R1_.fastq.gz")
) as op:
should_0 = op.read(200).decode("utf-8")
with gzip.GzipFile(
get_sample_data(Path("mbf_align/sample_b") / "a_R2_.fastq.gz")
) as op:
should_1 = op.read(200).decode("utf-8")
assert actual[0] == should_0
assert actual[1] == should_1
assert actual[2] == "fake_index"
assert actual[3] == str(params)
def test_align_parameterDependencyChecking(self, local_store):
class FakeGenome:
name = "FakeGenome"
def build_index(self, aligner, fasta_to_use=None, gtf_to_use=None):
job = ppg.FileGeneratingJob(
"fake_index", lambda: Path("fake_index").write_text("hello")
)
job.output_path = "fake_index"
return job
class FakeAligner:
name = "FakeAligner"
version = "0.1"
def align_job(
self,
input_fastq,
paired_end_filename,
index_basename,
output_bam_filename,
parameters,
):
job = ppg.MultiFileGeneratingJob(
[output_bam_filename, str(output_bam_filename) + ".bai"], lambda: 5
)
# job.depends_on_params("") # that's the line we check
return job
aligner = FakeAligner()
lane = Sample(
"Sample_a",
get_sample_data(Path("mbf_align/sample_b")),
False,
vid="VA000",
pairing="paired",
)
genome = FakeGenome()
params = {"shu": 123}
with pytest.raises(ppg.JobContractError):
lane.align(aligner, genome, params)
def test_from_url(self):
import requests_mock
with requests_mock.mock() as m:
url = "https://www.imt.uni-marburg.de/sample.fastq.gz"
m.get(url, text="hello_world")
o = FASTQsFromURLs(url)
ppg.run_pipegraph()
assert len(o.target_files) == 1
assert len(o.dependencies) == 2
assert Path(o.dependencies[0].filenames[0]).read_text() == "hello_world"
assert Path(o.dependencies[0].filenames[1]).read_text() == url
assert o() == [(Path(o.dependencies[0].filenames[0]).absolute(),)]
def test_from_url_paired(self):
import requests_mock
with requests_mock.mock() as m:
url1 = "https://www.imt.uni-marburg.de/sample_R1_.fastq.gz"
url2 = "https://www.imt.uni-marburg.de/sample_R2_.fastq.gz"
m.get(url1, text="hello_world1")
m.get(url2, text="hello_world2")
o = FASTQsFromURLs([url1, url2])
ppg.run_pipegraph()
assert len(o.target_files) == 2
assert len(o.dependencies) == 3
assert Path(o.dependencies[0].filenames[0]).read_text() == "hello_world1"
assert "_R1_.fastq.gz" in o.dependencies[0].filenames[0]
assert Path(o.dependencies[0].filenames[1]).read_text() == url1
assert Path(o.dependencies[1].filenames[0]).read_text() == "hello_world2"
assert Path(o.dependencies[1].filenames[1]).read_text() == url2
assert o() == [
(
Path(o.dependencies[0].filenames[0]).absolute(),
Path(o.dependencies[1].filenames[0]).absolute(),
)
]
def test_from_url_detects_404(self):
with requests_mock.mock() as m:
url = "https://www.imt.uni-marburg.de/sample.fastq.gz"
m.get(url, text="hello_world", status_code=404)
o = FASTQsFromURLs(url)
o.download_files()
with pytest.raises(ppg.RuntimeError):
ppg.run_pipegraph()
assert "Error return" in str(o.dependencies[0].exception)
assert url in str(o.dependencies[0].exception)
def test_fastqs_from_err(self):
with requests_mock.mock() as m:
m.get(
"http://www.ebi.ac.uk/ena/data/warehouse/filereport?accession=ERR2223563&result=read_run&fields=run_accession,fastq_ftp,fastq_md5,fastq_bytes",
text="""run_accession fastq_ftp fastq_md5 fastq_bytes
ERR2223563 ftp.sra.ebi.ac.uk/vol1/fastq/ERR222/003/ERR2223563/ERR2223563_1.fastq.gz;ftp.sra.ebi.ac.uk/vol1/fastq/ERR222/003/ERR2223563/ERR2223563_2.fastq.gz 0e29c053bcd31072c8bed9eddece1cec;5d848b65379c195fe158a5d7324b4a18 1170312089;1246298835""",
)
o = FASTQsFromAccession("ERR2223563")
print(o.urls)
assert (
o.urls[0]
== "http://ftp.sra.ebi.ac.uk/vol1/fastq/ERR222/003/ERR2223563/ERR2223563_1.fastq.gz"
)
assert (
o.urls[1]
== "http://ftp.sra.ebi.ac.uk/vol1/fastq/ERR222/003/ERR2223563/ERR2223563_2.fastq.gz"
)
def test_fastqs_from_mrnas(self):
@attr.s
class Transcript:
transcript_stable_id = attr.ib()
mrna = attr.ib()
class FakeGenome:
name = "FakeGenome"
def download_genome(self):
return []
def job_genes(self):
return []
def job_transcripts(self):
return []
transcripts = {
"tr1": Transcript("gene1", "AGTC"),
"tr2": Transcript("gene1", "ACCA"),
}
o = FASTQsFromMRNAs(["tr1", "tr2"], FakeGenome(), 2)
target = o()[0][0]
ppg.run_pipegraph()
assert Path(target).exists()
with open(target) as r:
seqs, names, quals = zip(*read_fastq_iterator(r))
assert list(seqs) == ["AG", "GT", "TC", "AC", "CC", "CA"]
@pytest.mark.usefixtures("new_pipegraph")
class TestSamplesQC:
def test_fastqc(self):
from mbf_qualitycontrol import get_qc_jobs
lane = Sample(
"Sample_a", get_sample_data(Path("mbf_align/sample_a")), False, vid="VA000"
)
qc_jobs = list(get_qc_jobs())
assert len(qc_jobs) == 1
assert "results/lanes/Sample_a/FASTQC/sentinel.txt" in qc_jobs[0].filenames
assert lane.prepare_input() in qc_jobs[0].prerequisites
|
py | b400b632d4b86fa9094c566677950d4b0599b86b | '''A library to allow navigating rest apis easy.'''
from __future__ import print_function
from __future__ import unicode_literals
__version__ = '1.0'
from weakref import WeakValueDictionary
try:
from http import client as http_client
except ImportError:
import httplib as http_client
import json
try:
from urllib import parse as urlparse
except ImportError:
import urlparse
import webbrowser
import requests
import six
import uritemplate
from restnavigator import exc, utils
DEFAULT_HEADERS = {
'Accept': 'application/hal+json,application/json',
'User-Agent': 'HALNavigator/{0}'.format(__version__)
}
# Constants used with requests library
GET = 'GET'
POST = 'POST'
DELETE = 'DELETE'
PATCH = 'PATCH'
PUT = 'PUT'
class APICore(object):
'''Shared data between navigators from a single api.
This should contain all state that is generally maintained from
one navigator to the next.
'''
def __init__(self,
root,
nav_class,
apiname=None,
default_curie=None,
session=None,
id_map=None,
):
self.root = root
self.nav_class = nav_class
self.apiname = utils.namify(root) if apiname is None else apiname
self.default_curie = default_curie
self.session = session or requests.Session()
self.id_map = id_map if id_map is not None else WeakValueDictionary()
def cache(self, link, nav):
'''Stores a navigator in the identity map for the current
api. Can take a link or a bare uri'''
if link is None:
return # We don't cache navigators without a Link
elif hasattr(link, 'uri'):
self.id_map[link.uri] = nav
else:
self.id_map[link] = nav
def get_cached(self, link, default=None):
'''Retrieves a cached navigator from the id_map.
Either a Link object or a bare uri string may be passed in.'''
if hasattr(link, 'uri'):
return self.id_map.get(link.uri, default)
else:
return self.id_map.get(link, default)
def is_cached(self, link):
'''Returns whether the current navigator is cached. Intended
to be overwritten and customized by subclasses.
'''
if link is None:
return False
elif hasattr(link, 'uri'):
return link.uri in self.id_map
else:
return link in self.id_map
def authenticate(self, auth):
'''Sets the authentication for future requests to the api'''
self.session.auth = auth
class Link(object):
'''Represents a HAL link. Does not store the link relation'''
def __init__(self, uri, properties=None):
self.uri = uri
self.props = properties or {}
def relative_uri(self, root):
'''Returns the link of the current uri compared against an api root'''
return self.uri.replace(root, '/')
class PartialNavigator(object):
'''A lazy representation of a navigator. Expands to a full
navigator when template arguments are given by calling it.
'''
def __init__(self, link, core=None):
self.link = link
self._core = core
def __repr__(self): # pragma: nocover
relative_uri = self.link.relative_uri(self._core.root)
objectified_uri = utils.objectify_uri(relative_uri)
return "{cls}({name}{path})".format(
cls=type(self).__name__,
name=self._core.apiname,
path=objectified_uri
)
@property
def variables(self):
'''Returns a set of the template variables in this templated
link'''
return uritemplate.variables(self.link.uri)
def expand_uri(self, **kwargs):
'''Returns the template uri expanded with the current arguments'''
kwargs = dict([(k, v if v != 0 else '0') for k, v in kwargs.items()])
return uritemplate.expand(self.link.uri, kwargs)
def expand_link(self, **kwargs):
'''Expands with the given arguments and returns a new
untemplated Link object
'''
props = self.link.props.copy()
del props['templated']
return Link(
uri=self.expand_uri(**kwargs),
properties=props,
)
@property
def template_uri(self):
return self.link.uri
def __call__(self, **kwargs):
'''Expands the current PartialNavigator into a new
        navigator. Keyword arguments are supplied to the uri template.
'''
return HALNavigator(
core=self._core,
link=self.expand_link(**kwargs),
)
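        # Illustrative expansion (the link relation and template variable below
        # are hypothetical, not defined by this library):
        #
        #   partial = some_nav.links()['ex:user']   # a templated link
        #   user_nav = partial(user_id=42)          # expands {user_id} in the URI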
class Navigator(object):
'''A factory for other navigators. Makes creating them more
convenient
'''
@staticmethod
def hal(root,
apiname=None,
default_curie=None,
auth=None,
headers=None,
session=None,
):
'''Create a HALNavigator'''
root = utils.fix_scheme(root)
halnav = HALNavigator(
link=Link(uri=root),
core=APICore(
root=root,
nav_class=HALNavigator,
apiname=apiname,
default_curie=default_curie,
session=session,
)
)
if auth:
halnav.authenticate(auth)
halnav.headers.update(DEFAULT_HEADERS)
if headers is not None:
halnav.headers.update(headers)
return halnav
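        # Rough usage sketch (the API root and link relation are hypothetical):
        #
        #   nav = Navigator.hal('https://api.example.com/', apiname='example')
        #   users = nav['users']   # follow the 'users' link relation
        #   users.fetch()          # GET the linked resource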
class HALNavigatorBase(object):
'''Base class for navigation objects'''
DEFAULT_CONTENT_TYPE = 'application/hal+json'
def __new__(cls, link, core, *args, **kwargs):
'''New decides whether we need a new instance or whether it's
already in the id_map of the core'''
if core.is_cached(link):
return core.get_cached(link.uri)
else:
return super(HALNavigatorBase, cls).__new__(cls)
def __init__(self, link, core,
response=None,
state=None,
curies=None,
_links=None,
_embedded=None,
):
'''Internal constructor. If you want to create a new
HALNavigator, use the factory `Navigator.hal`
'''
if core.is_cached(link):
# Don't want to overwrite a cached navigator
return
else:
self.self = link
self.response = response
self.state = state
self.fetched = response is not None
self.curies = curies
self._core = core
self._links = _links or utils.CurieDict(core.default_curie, {})
self._embedded = _embedded or utils.CurieDict(
core.default_curie, {})
core.cache(link, self)
@property
def uri(self):
if self.self is not None:
return self.self.uri
@property
def apiname(self):
return self._core.apiname
@property
def title(self):
if self.self is not None:
return self.self.props.get('title')
@property
def profile(self):
if self.self is not None:
return self.self.props.get('profile')
@property
def type(self):
if self.self is not None:
return self.self.props.get('type')
@property
def headers(self):
return self._core.session.headers
@property
def resolved(self):
return self.fetched or self.state is not None
def __repr__(self): # pragma: nocover
relative_uri = self.self.relative_uri(self._core.root)
objectified_uri = utils.objectify_uri(relative_uri)
return "{cls}({name}{path})".format(
cls=type(self).__name__, name=self.apiname, path=objectified_uri)
def authenticate(self, auth):
'''Authenticate with the api'''
self._core.authenticate(auth)
def links(self):
'''Returns a dictionary of navigators from the current
resource. Fetches the resource if necessary.
'''
if not self.resolved:
self.fetch()
return self._links
def embedded(self):
'''Returns a dictionary of navigators representing embedded
documents in the current resource. If the navigators have self
links they can be fetched as well.
'''
if not self.resolved:
self.fetch()
return self._embedded
@property
def status(self):
if self.response is not None:
return self.response.status_code, self.response.reason
def __eq__(self, other):
'''Equality'''
try:
return self.uri == other.uri and self.apiname == other.apiname
except Exception:
return False
def __ne__(self, other):
'''Inequality'''
return not self == other
def __iter__(self):
'''Part of iteration protocol'''
yield self
last = self
while True:
try:
current = last.next()
current() # fetch if necessary
yield current
except StopIteration:
return
last = current
def __nonzero__(self):
'''Whether this navigator was successful.'''
if not self.resolved:
raise exc.NoResponseError(
'this navigator has not been fetched '
'yet, so we cannot determine if it succeeded')
return bool(self.response)
def __contains__(self, value):
if not self.resolved:
raise exc.NoResponseError(
'this navigator has not been fetched '
'yet, so we cannot determine if it contains a link '
'relation')
return value in self._links or value in self._embedded
def next(self):
try:
return self['next']
except exc.OffTheRailsException as otre:
if isinstance(otre.exception, KeyError):
raise StopIteration()
else:
raise
def __getitem__(self, getitem_args):
r'''Rel selector and traversor for navigators'''
traversal = utils.normalize_getitem_args(getitem_args)
intermediates = [self]
val = self
for i, arg in enumerate(traversal):
try:
if isinstance(arg, six.string_types):
val() # fetch the resource if necessary
if val._embedded and arg in val._embedded:
val = val._embedded[arg]
else:
# We're hoping it's in links, otherwise we're
# off the tracks
val = val.links()[arg]
elif isinstance(arg, tuple):
val = val.get_by(*arg, raise_exc=True)
elif isinstance(arg, int) and isinstance(val, list):
val = val[arg]
else:
raise TypeError("{0!r} doesn't accept a traversor of {1!r}"
.format(val, arg))
except Exception as e:
raise exc.OffTheRailsException(
traversal, i, intermediates, e)
intermediates.append(val)
return val
def docsfor(self, rel): # pragma: nocover
'''Obtains the documentation for a link relation. Opens in a webbrowser
window'''
prefix, _rel = rel.split(':')
if prefix in self.curies:
doc_url = uritemplate.expand(self.curies[prefix], {'rel': _rel})
else:
doc_url = rel
print('opening', doc_url)
webbrowser.open(doc_url)
def _make_links_from(self, body):
'''Creates linked navigators from a HAL response body'''
ld = utils.CurieDict(self._core.default_curie, {})
for rel, link in body.get('_links', {}).items():
if rel != 'curies':
if isinstance(link, list):
ld[rel] = utils.LinkList(
(self._navigator_or_thunk(lnk), lnk) for lnk in link)
else:
ld[rel] = self._navigator_or_thunk(link)
return ld
def _make_embedded_from(self, doc):
'''Creates embedded navigators from a HAL response doc'''
ld = utils.CurieDict(self._core.default_curie, {})
for rel, doc in doc.get('_embedded', {}).items():
if isinstance(doc, list):
ld[rel] = [self._recursively_embed(d) for d in doc]
else:
ld[rel] = self._recursively_embed(doc)
return ld
def _recursively_embed(self, doc, update_state=True):
'''Crafts a navigator from a hal-json embedded document'''
self_link = None
self_uri = utils.getpath(doc, '_links.self.href')
if self_uri is not None:
uri = urlparse.urljoin(self.uri, self_uri)
self_link = Link(
uri=uri,
properties=utils.getpath(doc, '_links.self')
)
curies = utils.getpath(doc, '_links.curies')
state = utils.getstate(doc)
if self_link is None:
nav = OrphanHALNavigator(
link=None,
response=None,
parent=self,
core=self._core,
curies=curies,
state=state,
)
else:
nav = HALNavigator(
link=self_link,
response=None,
core=self._core,
curies=curies,
state=state,
)
if update_state:
nav.state = state
links = self._make_links_from(doc)
if links is not None:
nav._links = links
embedded = self._make_embedded_from(doc)
if embedded is not None:
nav._embedded = embedded
return nav
def _navigator_or_thunk(self, link):
        '''Crafts a navigator or a PartialNavigator from a hal-json link dict.
If the link is relative, the returned navigator will have a
uri that relative to this navigator's uri.
If the link passed in is templated, a PartialNavigator will be
returned instead.
'''
# resolve relative uris against the current uri
uri = urlparse.urljoin(self.uri, link['href'])
link_obj = Link(uri=uri, properties=link)
if link.get('templated'):
# Can expand into a real HALNavigator
return PartialNavigator(link_obj, core=self._core)
else:
return HALNavigator(link_obj, core=self._core)
def _can_parse(self, content_type):
'''Whether this navigator can parse the given content-type.
Checks that the content_type matches one of the types specified
in the 'Accept' header of the request, if supplied.
If not supplied, matches against the default'''
content_type, content_subtype, content_param = utils.parse_media_type(content_type)
for accepted in self.headers.get('Accept', self.DEFAULT_CONTENT_TYPE).split(','):
type, subtype, param = utils.parse_media_type(accepted)
# if either accepted_type or content_type do not
# contain a parameter section, then it will be
# optimistically ignored
matched = (type == content_type) \
and (subtype == content_subtype) \
and (param == content_param or not (param and content_param))
if matched:
return True
return False
def _parse_content(self, text):
'''Parses the content of a response doc into the correct
format for .state.
'''
try:
return json.loads(text)
except ValueError:
raise exc.UnexpectedlyNotJSON(
"The resource at {.uri} wasn't valid JSON", self)
def _update_self_link(self, link, headers):
'''Update the self link of this navigator'''
self.self.props.update(link)
# Set the self.type to the content_type of the returned document
self.self.props['type'] = headers.get(
'Content-Type', self.DEFAULT_CONTENT_TYPE)
self.self.props
def _ingest_response(self, response):
'''Takes a response object and ingests state, links, embedded
documents and updates the self link of this navigator to
correspond. This will only work if the response is valid
JSON
'''
self.response = response
if self._can_parse(response.headers['Content-Type']):
hal_json = self._parse_content(response.text)
else:
raise exc.HALNavigatorError(
message="Unexpected content type! Wanted {0}, got {1}"
.format(self.headers.get('Accept', self.DEFAULT_CONTENT_TYPE),
self.response.headers['content-type']),
nav=self,
status=self.response.status_code,
response=self.response,
)
self._links = self._make_links_from(hal_json)
self._embedded = self._make_embedded_from(hal_json)
# Set properties from new document's self link
self._update_self_link(
hal_json.get('_links', {}).get('self', {}),
response.headers,
)
# Set curies if available
self.curies = dict(
(curie['name'], curie['href'])
for curie in
hal_json.get('_links', {}).get('curies', []))
# Set state by removing HAL attributes
self.state = utils.getstate(hal_json)
class HALNavigator(HALNavigatorBase):
'''The main navigation entity'''
def __call__(self, raise_exc=True):
if not self.resolved:
return self.fetch(raise_exc=raise_exc)
else:
return self.state.copy()
def _create_navigator(self, response, raise_exc=True):
'''Create the appropriate navigator from an api response'''
method = response.request.method
# TODO: refactor once hooks in place
if method in (POST, PUT, PATCH, DELETE) \
and response.status_code in (
http_client.CREATED,
http_client.FOUND,
http_client.SEE_OTHER,
http_client.NO_CONTENT) \
and 'Location' in response.headers:
uri = urlparse.urljoin(self._core.root, response.headers['Location'])
nav = HALNavigator(
link=Link(uri=uri),
core=self._core
)
# We don't ingest the response because we haven't fetched
# the newly created resource yet
elif method in (POST, PUT, PATCH, DELETE):
nav = OrphanHALNavigator(
link=None,
core=self._core,
response=response,
parent=self,
)
nav._ingest_response(response)
elif method == GET:
nav = self
nav._ingest_response(response)
else: # pragma: nocover
assert False, "This shouldn't happen"
return nav
def _request(self, method, body=None, raise_exc=True, headers=None, files=None):
'''Fetches HTTP response using the passed http method. Raises
HALNavigatorError if response is in the 400-500 range.'''
headers = headers or {}
if body and 'Content-Type' not in headers:
headers.update({'Content-Type': 'application/json'})
response = self._core.session.request(
method,
self.uri,
data=body if not isinstance(body, dict) else None,
json=body if isinstance(body, dict) else None,
files=files,
headers=headers,
allow_redirects=False,
)
nav = self._create_navigator(response, raise_exc=raise_exc)
if raise_exc and not response:
raise exc.HALNavigatorError(
message=response.text,
status=response.status_code,
nav=nav, # may be self
response=response,
)
else:
return nav
def fetch(self, raise_exc=True):
'''Performs a GET request to the uri of this navigator'''
self._request(GET, raise_exc=raise_exc) # ingests response
self.fetched = True
return self.state.copy()
def create(self, body=None, raise_exc=True, headers=None, **kwargs):
'''Performs an HTTP POST to the server, to create a
subordinate resource. Returns a new HALNavigator representing
that resource.
`body` may either be a string or a dictionary representing json
`headers` are additional headers to send in the request
'''
return self._request(POST, body, raise_exc, headers, **kwargs)
def delete(self, raise_exc=True, headers=None, files=None):
'''Performs an HTTP DELETE to the server, to delete resource(s).
`headers` are additional headers to send in the request'''
return self._request(DELETE, None, raise_exc, headers, files)
def upsert(self, body, raise_exc=True, headers=False, files=None):
'''Performs an HTTP PUT to the server. This is an idempotent
call that will create the resource this navigator is pointing
to, or will update it if it already exists.
`body` may either be a string or a dictionary representing json
`headers` are additional headers to send in the request
'''
return self._request(PUT, body, raise_exc, headers, files)
def patch(self, body, raise_exc=True, headers=False, files=None):
'''Performs an HTTP PATCH to the server. This is a
non-idempotent call that may update all or a portion of the
resource this navigator is pointing to. The format of the
patch body is up to implementations.
`body` may either be a string or a dictionary representing json
`headers` are additional headers to send in the request
'''
return self._request(PATCH, body, raise_exc, headers, files)
class OrphanHALNavigator(HALNavigatorBase):
'''A Special navigator that is the result of a non-GET
This navigator cannot be fetched or created, but has a special
property called `.parent` that refers to the navigator this one
was created from. If the result is a HAL document, it will be
populated properly
'''
def __init__(self, link, core,
response=None,
state=None,
curies=None,
_links=None,
parent=None,
):
super(OrphanHALNavigator, self).__init__(
link, core, response, state, curies, _links)
self.parent = parent
def __call__(self, *args, **kwargs):
return self.state.copy()
def __repr__(self): # pragma: nocover
relative_uri = self.parent.self.relative_uri(self._core.root)
objectified_uri = utils.objectify_uri(relative_uri)
return "{cls}({name}{path})".format(
cls=type(self).__name__, name=self.apiname, path=objectified_uri)
def _can_parse(self, content_type):
'''If something doesn't parse, we just return an empty doc'''
return True
def _parse_content(self, text):
'''Try to parse as HAL, but on failure use an empty dict'''
try:
return super(OrphanHALNavigator, self)._parse_content(text)
except exc.UnexpectedlyNotJSON:
return {}
def _update_self_link(self, link, headers):
'''OrphanHALNavigator has no link object'''
pass
def _navigator_or_thunk(self, link):
'''We need to resolve relative links against the parent uri'''
return HALNavigatorBase._navigator_or_thunk(self.parent, link)
|
py | b400b6e2bb70effa9b2d576866689f561029abed | #!"C:\Users\maisu\PycharmProjects\Python_ Script_Image_To_text\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
|
py | b400b7c87b2b5d70aa6dcc534aa80c03a71c6f82 | from flask import Flask, jsonify, request
from uuid import uuid4
from blockchain import Blockchain
# Instantiate our Node
app = Flask(__name__)
# Generate a globally unique address for this node
node_identifier = str(uuid4()).replace('-', '')
# Instantiate the Blockchain
blockchain = Blockchain()
@app.route('/mine', methods=['GET'])
def mine():
# We run the proof of work algorithm to get the next proof...
last_block = blockchain.last_block
last_proof = last_block['proof']
proof = blockchain.proof_of_work(last_proof)
# We must receive a reward for finding the proof.
# The sender is "0" to signify that this node has mined a new coin.
blockchain.new_transaction(
sender="0",
recipient=node_identifier,
amount=1,
)
# Forge the new Block by adding it to the chain
block = blockchain.new_block(proof)
response = {
'message': "New Block Forged",
'index': block['index'],
'transactions': block['transactions'],
'proof': block['proof'],
'previous_hash': block['previous_hash'],
}
return jsonify(response), 200
@app.route('/transactions/new', methods=['POST'])
def new_transaction():
values = request.get_json()
# Check that the required fields are in the POST'ed data
required = ['sender', 'recipient', 'amount']
if not all(k in values for k in required):
return 'Missing values', 400
# Create a new Transaction
index = blockchain.new_transaction(values['sender'], values['recipient'], values['amount'])
response = {'message': f'Transaction will be added to Block {index}'}
return jsonify(response), 201
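# Illustrative client call (host, port and field values are hypothetical):
#   import requests
#   requests.post('http://localhost:5000/transactions/new',
#                 json={'sender': 'abc', 'recipient': 'def', 'amount': 5})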
@app.route('/chain', methods=['GET'])
def full_chain():
response = {
'chain': blockchain.chain,
'length': len(blockchain.chain),
}
return jsonify(response), 200
@app.route('/nodes/register', methods=['POST'])
def register_nodes():
values = request.get_json()
nodes = values.get('nodes')
if nodes is None:
return "Error: Please supply a valid list of nodes", 400
for node in nodes:
blockchain.register_node(node)
response = {
'message': 'New nodes have been added',
'total_nodes': list(blockchain.nodes),
}
return jsonify(response), 201
@app.route('/nodes/resolve', methods=['GET'])
def consensus():
replaced = blockchain.resolve_conflicts()
if replaced:
response = {
'message': 'Our chain was replaced',
'new_chain': blockchain.chain
}
else:
response = {
'message': 'Our chain is authoritative',
'chain': blockchain.chain
}
return jsonify(response), 200
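# Illustrative only, not part of the original node: a minimal client sketch showing how
# the HTTP API defined above could be exercised with the third-party ``requests``
# library. The base URL assumes the default host/port from the __main__ block below.
def _demo_client(base_url='http://localhost:5000'):
    import requests  # assumed to be installed; not a dependency of the node itself
    # Mine a block; the node pays itself one coin from sender "0".
    print(requests.get(f'{base_url}/mine').json())
    # Queue a transaction for inclusion in the next mined block.
    tx = {'sender': 'abc', 'recipient': 'def', 'amount': 5}
    print(requests.post(f'{base_url}/transactions/new', json=tx).json())
    # Inspect the full chain held by this node.
    print(requests.get(f'{base_url}/chain').json())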
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000) |
py | b400b83232dc6e471483656954859b298e2b7fee | #!G:\PycharmProjects\GBPI_project\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
|
py | b400b99451be9d0bdd248db6040215db0387cab6 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AssetsOperations:
"""AssetsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.media.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
account_name: str,
filter: Optional[str] = None,
top: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs
) -> AsyncIterable["_models.AssetCollection"]:
"""List Assets.
List Assets in the Media Services account with optional filtering and ordering.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param filter: Restricts the set of items returned.
:type filter: str
:param top: Specifies a non-negative integer n that limits the number of items returned from a
collection. The service returns the number of available items up to but not greater than the
specified value n.
:type top: int
:param orderby: Specifies the key by which the result collection should be ordered.
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AssetCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.media.models.AssetCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AssetCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AssetCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ApiError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets'} # type: ignore
async def get(
self,
resource_group_name: str,
account_name: str,
asset_name: str,
**kwargs
) -> Optional["_models.Asset"]:
"""Get an Asset.
Get the details of an Asset in the Media Services account.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param asset_name: The Asset name.
:type asset_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Asset, or the result of cls(response)
:rtype: ~azure.mgmt.media.models.Asset or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.Asset"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'assetName': self._serialize.url("asset_name", asset_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Asset', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
account_name: str,
asset_name: str,
parameters: "_models.Asset",
**kwargs
) -> "_models.Asset":
"""Create or update an Asset.
Creates or updates an Asset in the Media Services account.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param asset_name: The Asset name.
:type asset_name: str
:param parameters: The request parameters.
:type parameters: ~azure.mgmt.media.models.Asset
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Asset, or the result of cls(response)
:rtype: ~azure.mgmt.media.models.Asset
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Asset"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'assetName': self._serialize.url("asset_name", asset_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'Asset')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Asset', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Asset', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}'} # type: ignore
async def delete(
self,
resource_group_name: str,
account_name: str,
asset_name: str,
**kwargs
) -> None:
"""Delete an Asset.
Deletes an Asset in the Media Services account.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param asset_name: The Asset name.
:type asset_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'assetName': self._serialize.url("asset_name", asset_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}'} # type: ignore
async def update(
self,
resource_group_name: str,
account_name: str,
asset_name: str,
parameters: "_models.Asset",
**kwargs
) -> "_models.Asset":
"""Update an Asset.
Updates an existing Asset in the Media Services account.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param asset_name: The Asset name.
:type asset_name: str
:param parameters: The request parameters.
:type parameters: ~azure.mgmt.media.models.Asset
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Asset, or the result of cls(response)
:rtype: ~azure.mgmt.media.models.Asset
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Asset"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'assetName': self._serialize.url("asset_name", asset_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'Asset')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Asset', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}'} # type: ignore
async def list_container_sas(
self,
resource_group_name: str,
account_name: str,
asset_name: str,
parameters: "_models.ListContainerSasInput",
**kwargs
) -> "_models.AssetContainerSas":
"""List the Asset URLs.
Lists storage container URLs with shared access signatures (SAS) for uploading and downloading
Asset content. The signatures are derived from the storage account keys.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param asset_name: The Asset name.
:type asset_name: str
:param parameters: The request parameters.
:type parameters: ~azure.mgmt.media.models.ListContainerSasInput
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AssetContainerSas, or the result of cls(response)
:rtype: ~azure.mgmt.media.models.AssetContainerSas
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AssetContainerSas"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.list_container_sas.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'assetName': self._serialize.url("asset_name", asset_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ListContainerSasInput')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AssetContainerSas', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_container_sas.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/listContainerSas'} # type: ignore
async def get_encryption_key(
self,
resource_group_name: str,
account_name: str,
asset_name: str,
**kwargs
) -> "_models.StorageEncryptedAssetDecryptionData":
"""Gets the Asset storage key.
Gets the Asset storage encryption keys used to decrypt content created by version 2 of the
Media Services API.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param asset_name: The Asset name.
:type asset_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageEncryptedAssetDecryptionData, or the result of cls(response)
:rtype: ~azure.mgmt.media.models.StorageEncryptedAssetDecryptionData
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageEncryptedAssetDecryptionData"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get_encryption_key.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'assetName': self._serialize.url("asset_name", asset_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageEncryptedAssetDecryptionData', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_encryption_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/getEncryptionKey'} # type: ignore
async def list_streaming_locators(
self,
resource_group_name: str,
account_name: str,
asset_name: str,
**kwargs
) -> "_models.ListStreamingLocatorsResponse":
"""List Streaming Locators.
Lists Streaming Locators which are associated with this asset.
:param resource_group_name: The name of the resource group within the Azure subscription.
:type resource_group_name: str
:param account_name: The Media Services account name.
:type account_name: str
:param asset_name: The Asset name.
:type asset_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ListStreamingLocatorsResponse, or the result of cls(response)
:rtype: ~azure.mgmt.media.models.ListStreamingLocatorsResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListStreamingLocatorsResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.list_streaming_locators.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'assetName': self._serialize.url("asset_name", asset_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ListStreamingLocatorsResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_streaming_locators.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/mediaServices/{accountName}/assets/{assetName}/listStreamingLocators'} # type: ignore
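# Illustrative usage sketch, not part of the generated code: these operations are
# normally reached through the service client rather than instantiated directly.
# The names below (azure.mgmt.media.aio.AzureMediaServices and
# azure.identity.aio.DefaultAzureCredential) are assumptions about the surrounding
# packages, as are the placeholder resource names.
async def _example_list_assets(subscription_id: str, resource_group: str, account: str) -> None:
    from azure.identity.aio import DefaultAzureCredential  # assumed available
    from azure.mgmt.media.aio import AzureMediaServices  # assumed aio client class name
    async with DefaultAzureCredential() as credential:
        async with AzureMediaServices(credential, subscription_id) as client:
            # client.assets is expected to be an instance of the AssetsOperations class above.
            async for asset in client.assets.list(resource_group, account, top=10):
                print(asset.name)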
|
py | b400bc73be63eddb4de7f7b78c17da404324d244 | import random
from eth_utils import to_dict
from eth_utils.toolz import (
first,
keyfilter,
merge,
merge_with,
partition,
second,
sliding_window,
)
import pytest
from eth2._utils import bitfield
from eth2.beacon.committee_helpers import (
get_committee_count_at_slot,
get_committee_count_per_slot_at_epoch,
iterate_committees_at_epoch,
iterate_committees_at_slot,
)
from eth2.beacon.epoch_processing_helpers import get_attesting_indices
from eth2.beacon.fork_choice.lmd_ghost import (
Store,
_balance_for_validator,
lmd_ghost_scoring,
score_block_by_root,
)
from eth2.beacon.helpers import compute_epoch_at_slot, compute_start_slot_at_epoch
from eth2.beacon.types.attestation_data import AttestationData
from eth2.beacon.types.attestations import Attestation
from eth2.beacon.types.blocks import BeaconBlock
from eth2.beacon.types.checkpoints import Checkpoint
# TODO(ralexstokes) merge this and next into tools/builder
@to_dict
def _mk_attestation_inputs_in_epoch(epoch, state, config):
for committee, committee_index, slot in iterate_committees_at_epoch(
state, epoch, config
):
if not committee:
# empty committee this slot
continue
attestation_data = AttestationData(
slot=slot, index=committee_index, target=Checkpoint(epoch=epoch)
)
committee_size = len(committee)
aggregation_bits = bitfield.get_empty_bitfield(committee_size)
for index in range(committee_size):
aggregation_bits = bitfield.set_voted(aggregation_bits, index)
for index in committee:
yield (
index,
(attestation_data.slot, (aggregation_bits, attestation_data)),
)
def _mk_attestations_for_epoch_by_count(
number_of_committee_samples, epoch, state, config
):
results = {}
for _ in range(number_of_committee_samples):
sample = _mk_attestation_inputs_in_epoch(epoch, state, config)
results = merge(results, sample)
return results
def _extract_attestations_from_index_keying(values):
results = ()
for value in values:
aggregation_bits, data = second(value)
attestation = Attestation(aggregation_bits=aggregation_bits, data=data)
if attestation not in results:
results += (attestation,)
return results
def _keep_by_latest_slot(values):
"""
    We get a sequence of (Slot, (Bitfield, AttestationData))
    and return the AttestationData with the highest slot.
"""
return max(values, key=first)[1][1]
def _find_collision(state, config, validator_index, epoch):
"""
Given a target epoch, make the attestation expected for the
validator w/ the given ``validator_index``.
"""
for committee, committee_index, slot in iterate_committees_at_epoch(
state, epoch, config
):
if validator_index in committee:
# TODO(ralexstokes) refactor w/ tools/builder
attestation_data = AttestationData(
slot=slot, index=committee_index, target=Checkpoint(epoch=epoch)
)
committee_count = len(committee)
aggregation_bits = bitfield.get_empty_bitfield(committee_count)
for i in range(committee_count):
aggregation_bits = bitfield.set_voted(aggregation_bits, i)
return {
index: (slot, (aggregation_bits, attestation_data))
for index in committee
}
else:
raise Exception("should have found a duplicate validator")
def _introduce_collisions(all_attestations_by_index, state, config):
"""
Find some attestations for later epochs for the validators
    that are currently attesting in each source of attestation.
"""
collisions = (all_attestations_by_index[0],)
for src, dst in sliding_window(2, all_attestations_by_index):
if not src:
# src can be empty at low validator count
collisions += (dst,)
continue
src_index = random.choice(list(src.keys()))
src_val = src[src_index]
src_slot, _ = src_val
src_epoch = compute_epoch_at_slot(src_slot, config.SLOTS_PER_EPOCH)
dst_epoch = src_epoch + 1
collision = _find_collision(
state, config, validator_index=src_index, epoch=dst_epoch
)
collisions += (merge(dst, collision),)
return collisions
def _get_committee_count(state, epoch, config):
return (
get_committee_count_per_slot_at_epoch(
state,
epoch,
config.MAX_COMMITTEES_PER_SLOT,
config.SLOTS_PER_EPOCH,
config.TARGET_COMMITTEE_SIZE,
)
* config.SLOTS_PER_EPOCH
)
@pytest.mark.parametrize(
("validator_count",),
[
(8,), # low number of validators
(128,), # medium number of validators
# NOTE: the test at 1024 count takes too long :(
(256,), # high number of validators
],
)
@pytest.mark.parametrize(("collisions_from_another_epoch",), [(True,), (False,)])
def test_store_get_latest_attestation(
genesis_state, empty_attestation_pool, config, collisions_from_another_epoch
):
"""
Given some attestations across the various sources, can we
find the latest ones for each validator?
"""
some_epoch = 3
state = genesis_state.copy(
slot=compute_start_slot_at_epoch(some_epoch, config.SLOTS_PER_EPOCH)
)
previous_epoch = state.previous_epoch(config.SLOTS_PER_EPOCH, config.GENESIS_EPOCH)
previous_epoch_committee_count = _get_committee_count(state, previous_epoch, config)
current_epoch = state.current_epoch(config.SLOTS_PER_EPOCH)
current_epoch_committee_count = _get_committee_count(state, current_epoch, config)
next_epoch = state.next_epoch(config.SLOTS_PER_EPOCH)
next_epoch_committee_count = _get_committee_count(state, next_epoch, config)
number_of_committee_samples = 4
assert number_of_committee_samples <= previous_epoch_committee_count
assert number_of_committee_samples <= current_epoch_committee_count
assert number_of_committee_samples <= next_epoch_committee_count
# prepare samples from previous epoch
previous_epoch_attestations_by_index = _mk_attestations_for_epoch_by_count(
number_of_committee_samples, previous_epoch, state, config
)
previous_epoch_attestations = _extract_attestations_from_index_keying(
previous_epoch_attestations_by_index.values()
)
# prepare samples from current epoch
current_epoch_attestations_by_index = _mk_attestations_for_epoch_by_count(
number_of_committee_samples, current_epoch, state, config
)
current_epoch_attestations_by_index = keyfilter(
lambda index: index not in previous_epoch_attestations_by_index,
current_epoch_attestations_by_index,
)
current_epoch_attestations = _extract_attestations_from_index_keying(
current_epoch_attestations_by_index.values()
)
# prepare samples for pool, taking half from the current epoch and half from the next epoch
pool_attestations_in_current_epoch_by_index = _mk_attestations_for_epoch_by_count(
number_of_committee_samples // 2, current_epoch, state, config
)
pool_attestations_in_next_epoch_by_index = _mk_attestations_for_epoch_by_count(
number_of_committee_samples // 2, next_epoch, state, config
)
pool_attestations_by_index = merge(
pool_attestations_in_current_epoch_by_index,
pool_attestations_in_next_epoch_by_index,
)
pool_attestations_by_index = keyfilter(
lambda index: (
index not in previous_epoch_attestations_by_index
or index not in current_epoch_attestations_by_index
),
pool_attestations_by_index,
)
pool_attestations = _extract_attestations_from_index_keying(
pool_attestations_by_index.values()
)
all_attestations_by_index = (
previous_epoch_attestations_by_index,
current_epoch_attestations_by_index,
pool_attestations_by_index,
)
if collisions_from_another_epoch:
(
previous_epoch_attestations_by_index,
current_epoch_attestations_by_index,
pool_attestations_by_index,
) = _introduce_collisions(all_attestations_by_index, state, config)
previous_epoch_attestations = _extract_attestations_from_index_keying(
previous_epoch_attestations_by_index.values()
)
current_epoch_attestations = _extract_attestations_from_index_keying(
current_epoch_attestations_by_index.values()
)
pool_attestations = _extract_attestations_from_index_keying(
pool_attestations_by_index.values()
)
# build expected results
expected_index = merge_with(
_keep_by_latest_slot,
previous_epoch_attestations_by_index,
current_epoch_attestations_by_index,
pool_attestations_by_index,
)
# ensure we get the expected results
state = state.copy(
previous_epoch_attestations=previous_epoch_attestations,
current_epoch_attestations=current_epoch_attestations,
)
pool = empty_attestation_pool
for attestation in pool_attestations:
pool.add(attestation)
chain_db = None # not relevant for this test
store = Store(chain_db, state, pool, BeaconBlock, config)
# sanity check
assert expected_index.keys() == store._attestation_index.keys()
for validator_index in range(len(state.validators)):
expected_attestation_data = expected_index.get(validator_index, None)
stored_attestation_data = store._get_latest_attestation(validator_index)
assert expected_attestation_data == stored_attestation_data
def _mk_block(block_params, slot, parent, block_offset):
return BeaconBlock(**block_params).copy(
slot=slot,
parent_root=parent.signing_root,
# mix in something unique
state_root=block_offset.to_bytes(32, byteorder="big"),
)
def _build_block_tree(
block_params, root_block, base_slot, forking_descriptor, forking_asymmetry, config
):
"""
    Build a block tree according to the data in ``forking_descriptor``, starting at
    the given ``root_block``.
"""
tree = [[root_block]]
for slot_offset, block_count in enumerate(forking_descriptor):
slot = base_slot + slot_offset
blocks = []
for parent in tree[-1]:
if forking_asymmetry:
if random.choice([True, False]):
continue
for block_offset in range(block_count):
block = _mk_block(block_params, slot, parent, block_offset)
blocks.append(block)
tree.append(blocks)
# other code written w/ expectation that root is not in the tree
tree.pop(0)
return tree
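# Illustrative note, not part of the original tests: with forking_descriptor=(2, 1) and
# forking_asymmetry=False, the root block gets two children at ``base_slot`` and each of
# those gets one child at ``base_slot + 1``, so after the root level is popped the tree
# is [[b0, b1], [b0_0, b1_0]]. Asymmetry randomly skips parents, thinning the tree.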
def _iter_block_tree_by_slot(tree):
for level in tree:
yield level
def _iter_block_level_by_block(level):
for block in level:
yield block
def _iter_block_tree_by_block(tree):
for level in _iter_block_tree_by_slot(tree):
for block in _iter_block_level_by_block(level):
yield block
def _get_committees(state, target_slot, config, sampling_fraction):
committees_per_slot = get_committee_count_at_slot(
state,
target_slot,
config.MAX_COMMITTEES_PER_SLOT,
config.SLOTS_PER_EPOCH,
config.TARGET_COMMITTEE_SIZE,
)
committees_at_slot = ()
for committee, _, _ in iterate_committees_at_slot(
state, target_slot, committees_per_slot, config
):
committees_at_slot += (committee,)
return tuple(
random.sample(
committees_at_slot, int((sampling_fraction * committees_per_slot))
)
)
def _attach_committee_to_block(block, committee_and_index):
block._committee_data = committee_and_index
def _get_committee_from_block(block):
return getattr(block, "_committee_data", None)
def _attach_attestation_to_block(block, attestation):
block._attestation = attestation
def _get_attestation_from_block(block):
return getattr(block, "_attestation", None)
def _attach_committees_to_block_tree(
state, block_tree, committees_by_slot, config, forking_asymmetry
):
for level, committees in zip(
_iter_block_tree_by_slot(block_tree), committees_by_slot
):
block_count = len(level)
partitions = partition(block_count, committees)
for committee_index, (block, committee) in enumerate(
zip(_iter_block_level_by_block(level), partitions)
):
if forking_asymmetry:
if random.choice([True, False]):
# random drop out
continue
_attach_committee_to_block(block, (first(committee), committee_index))
# TODO(ralexstokes) merge in w/ tools/builder
def _mk_attestation_for_block_with_committee(block, committee, committee_index, config):
committee_count = len(committee)
aggregation_bits = bitfield.get_empty_bitfield(committee_count)
for index in range(committee_count):
aggregation_bits = bitfield.set_voted(aggregation_bits, index)
attestation = Attestation(
aggregation_bits=aggregation_bits,
data=AttestationData(
slot=block.slot,
index=committee_index,
beacon_block_root=block.signing_root,
target=Checkpoint(
epoch=compute_epoch_at_slot(block.slot, config.SLOTS_PER_EPOCH)
),
),
)
return attestation
def _attach_attestations_to_block_tree_with_committees(block_tree, config):
for block in _iter_block_tree_by_block(block_tree):
committee_data = _get_committee_from_block(block)
if not committee_data:
# w/ asymmetry in forking we may need to skip this step
continue
committee, committee_index = committee_data
attestation = _mk_attestation_for_block_with_committee(
block, committee, committee_index, config
)
_attach_attestation_to_block(block, attestation)
def _score_block(block, store, state, config):
return sum(
_balance_for_validator(state, validator_index)
for validator_index, target in store._get_attestation_targets()
if store._get_ancestor(target, block.slot) == block
) + score_block_by_root(block)
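# Descriptive note, not part of the original tests: this mirrors the LMD GHOST rule --
# a block's score is the sum of the balances of every validator whose latest attestation
# target has this block as an ancestor (or is this block), plus the deterministic
# tie-breaker contributed by score_block_by_root.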
def _build_score_index_from_decorated_block_tree(block_tree, store, state, config):
return {
block.signing_root: _score_block(block, store, state, config)
for block in _iter_block_tree_by_block(block_tree)
}
def _iter_attestation_by_validator_index(state, attestation, config):
for index in get_attesting_indices(
state, attestation.data, attestation.aggregation_bits, config
):
yield index
class _store:
"""
Mock Store class.
"""
def __init__(self, state, root_block, block_tree, attestation_pool, config):
self._state = state
self._block_tree = block_tree
self._attestation_pool = attestation_pool
self._config = config
self._latest_attestations = self._find_attestation_targets()
self._block_index = {
block.signing_root: block for block in _iter_block_tree_by_block(block_tree)
}
self._block_index[root_block.signing_root] = root_block
self._blocks_by_parent_root = {
block.parent_root: self._block_index[block.parent_root]
for block in _iter_block_tree_by_block(block_tree)
}
def _find_attestation_targets(self):
result = {}
for _, attestation in self._attestation_pool:
target_slot = attestation.data.slot
for validator_index in _iter_attestation_by_validator_index(
self._state, attestation, self._config
):
if validator_index in result:
existing = result[validator_index]
existing_slot = existing.data.slot
if existing_slot > target_slot:
continue
result[validator_index] = attestation
return result
def _get_attestation_targets(self):
for index, target in self._latest_attestations.items():
yield (index, self._block_index[target.data.beacon_block_root])
def _get_parent_block(self, block):
return self._blocks_by_parent_root[block.parent_root]
def _get_ancestor(self, block, slot):
if block.slot == slot:
return block
elif block.slot < slot:
return None
else:
return self._get_ancestor(self._get_parent_block(block), slot)
@pytest.mark.parametrize(
("validator_count",),
[
(8,), # low number of validators
(128,), # medium number of validators
(1024,), # high number of validators
],
)
@pytest.mark.parametrize(
(
# controls how many children a parent has
"forking_descriptor",
),
[
((1,),),
((2,),),
((3,),),
((1, 1),),
((2, 1),),
((3, 2),),
((1, 4),),
((1, 2, 1),),
],
)
@pytest.mark.parametrize(
(
# controls how children should be allocated to a given parent
"forking_asymmetry",
),
[
        # Asymmetry means we may deviate from the description in ``forking_descriptor``:
        # some parents are randomly dropped, so their blocks receive no attestations.
        (True,),
        # No asymmetry means every parent has
        # the number of children prescribed in ``forking_descriptor``.
        (False,),
],
)
def test_lmd_ghost_fork_choice_scoring(
sample_beacon_block_params,
chaindb_at_genesis,
# see note below on how this is used
fork_choice_scoring,
forking_descriptor,
forking_asymmetry,
genesis_state,
empty_attestation_pool,
config,
):
"""
Given some blocks and some attestations, can we score them correctly?
"""
chain_db = chaindb_at_genesis
root_block = chain_db.get_canonical_head(BeaconBlock)
some_epoch = 3
some_slot_offset = 10
state = genesis_state.copy(
slot=compute_start_slot_at_epoch(some_epoch, config.SLOTS_PER_EPOCH)
+ some_slot_offset,
current_justified_checkpoint=Checkpoint(
epoch=some_epoch, root=root_block.signing_root
),
)
assert some_epoch >= state.current_justified_checkpoint.epoch
# NOTE: the attestations have to be aligned to the blocks which start from ``base_slot``.
base_slot = compute_start_slot_at_epoch(some_epoch, config.SLOTS_PER_EPOCH) + 1
block_tree = _build_block_tree(
sample_beacon_block_params,
root_block,
base_slot,
forking_descriptor,
forking_asymmetry,
config,
)
slot_count = len(forking_descriptor)
committee_sampling_fraction = 1
committees_by_slot = tuple(
_get_committees(
state, base_slot + slot_offset, config, committee_sampling_fraction
)
for slot_offset in range(slot_count)
)
_attach_committees_to_block_tree(
state, block_tree, committees_by_slot, config, forking_asymmetry
)
_attach_attestations_to_block_tree_with_committees(block_tree, config)
attestations = tuple(
_get_attestation_from_block(block)
for block in _iter_block_tree_by_block(block_tree)
if _get_attestation_from_block(block)
)
attestation_pool = empty_attestation_pool
for attestation in attestations:
attestation_pool.add(attestation)
store = _store(state, root_block, block_tree, attestation_pool, config)
score_index = _build_score_index_from_decorated_block_tree(
block_tree, store, state, config
)
for block in _iter_block_tree_by_block(block_tree):
# NOTE: we use the ``fork_choice_scoring`` fixture, it doesn't matter for this test
chain_db.persist_block(block, BeaconBlock, fork_choice_scoring)
scoring_fn = lmd_ghost_scoring(
chain_db, attestation_pool, state, config, BeaconBlock
)
for block in _iter_block_tree_by_block(block_tree):
score = scoring_fn(block)
expected_score = score_index[block.signing_root]
assert score == expected_score
|
py | b400bd71d60b5392f16ebd56efb4117cde7df51e | # -*- coding: utf-8 -*-
##
# \file errors_calc2_modes.py
# \title Calculation of the errors and norms for the case2:
# modes in 2D square box.
# \author Pierre Chobeau
# \version 0.1
# \license BSD 3-Clause License
# \inst UMRAE (Ifsttar Nantes), LAUM (Le Mans Université)
# \date 2017, 12 Feb.
##
import numpy as np
from scipy import special as sp
import os
import site
from functools import reduce  # reduce is a builtin only in Python 2; needed for base_path below
base_path = reduce(lambda l, r: l + os.path.sep + r,
os.path.dirname(os.path.realpath(__file__)).split(
os.path.sep))
data_plotting_path = os.path.join(base_path.rsplit(os.sep, 2)[0],
'data_plotting')
site.addsitedir(data_plotting_path)
from plot_time_signals import plot_ts, plot_ts_basic
from plot_errors_norms import plot_error_basic
def error_calc2(h_set, case):
"""
    :param h_set: spatial step sequence (m).
    :type h_set: list of floats
    :param case: integer that selects the saved folder in the results dir.
    :type case: int
    :return: nothing; saves the error norms and observed orders of accuracy
        for each numerical method to the results directory.
"""
for num_meth in ['fdtd', 'tlm']:
import os
res_path = os.path.join(base_path.rsplit(os.sep, 1)[0], 'results',
'case%i' % case, num_meth)
res_path_fd = os.path.join(base_path.rsplit(os.sep, 1)[0], 'results',
'case%i' % case, 'fd')
one_norm = np.zeros((len(h_set)))
two_norm = np.zeros((len(h_set)))
max_norm = np.zeros((len(h_set)))
ord_acc_one = np.zeros((len(h_set) - 1))
ord_acc_two = np.zeros((len(h_set) - 1))
ord_acc_max = np.zeros((len(h_set) - 1))
for l in range(len(h_set)):
p_num = np.load(os.path.join(res_path,'p_%s_%i.npy'
% (num_meth, l)))
p_an = np.load(os.path.join(res_path, 'p_an_%i.npy' % l))
error = np.abs(p_num[1:-1, 1:-1] - p_an[1:-1, 1:-1])
one_norm[l] = np.linalg.norm((error) *
h_set[l] ** 2, ord=1)
two_norm[l] = np.linalg.norm((error) *
h_set[l] ** 2, ord=2)
max_norm[l] = np.linalg.norm((error) *
h_set[l] ** 2, ord=np.inf)
for l in range(len(h_set) - 1):
ord_acc_one[l] = np.log(
one_norm[l + 1] / one_norm[l]) / np.log(
h_set[l + 1] / h_set[l])
ord_acc_two[l] = np.log(
two_norm[l + 1] / two_norm[l]) / np.log(
h_set[l + 1] / h_set[l])
ord_acc_max[l] = np.log(
max_norm[l + 1] / max_norm[l]) / np.log(
h_set[l + 1] / h_set[l])
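        # Worked example (illustrative, not from the original script): the observed
        # order of accuracy is log(e_{l+1}/e_l) / log(h_{l+1}/h_l). For a second-order
        # scheme, halving the step roughly quarters the error, e.g.
        # np.log(0.25e-3 / 1.0e-3) / np.log(0.5 / 1.0) == 2.0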
import os
res_path = os.path.join(base_path.rsplit(os.sep, 1)[0],
'results', 'case%i'
% case, '%s' % num_meth)
if not os.path.exists(res_path):
os.makedirs(res_path)
np.save(os.path.join(res_path, 'one_norm_%s.npy' % num_meth),
one_norm)
np.save(os.path.join(res_path, 'two_norm_%s.npy' % num_meth),
two_norm)
np.save(os.path.join(res_path, 'max_norm_%s.npy' % num_meth),
max_norm)
np.save(os.path.join(res_path, 'ord_acc_one_%s.npy' % num_meth),
ord_acc_one)
np.save(os.path.join(res_path, 'ord_acc_two_%s.npy' % num_meth),
ord_acc_two)
np.save(os.path.join(res_path, 'ord_acc_max_%s.npy' % num_meth),
ord_acc_max)
# plot_error_basic(h_set, one_norm, two_norm, max_norm,
# ord_acc_one, ord_acc_two, ord_acc_max,
# case, True)
    print('end')
|
py | b400bdabaa4960c27eefab2b1f064dd1714c7b99 | from dispatch.database.core import SessionLocal
from dispatch.plugin import service as plugin_service
def send_monitor_notification(
project_id: int, conversation_id: int, message_template: str, db_session: SessionLocal, **kwargs
):
"""Sends a monitor notification."""
notification_text = "Incident Notification"
notification_type = "incident-notification"
plugin = plugin_service.get_active_instance(
db_session=db_session, plugin_type="conversation", project_id=project_id
)
plugin.instance.send(
conversation_id, notification_text, message_template, notification_type, **kwargs
)
|
py | b400be38721871d309a6cc6ba56b6707da1e5b52 | # Meta / Inline Stylesheet
# Use inline CSS to style a Wave app in case of quirks. Prefer using native Wave components if possible.
# ---
from h2o_wave import site, ui
page = site['/demo']
style = '''
p {
color: red;
}
'''
# Add a placeholder.
page['example'] = ui.markdown_card(
box='1 1 2 2',
title='Try out the styling',
content='I should be red!',
)
# Add the style to the page.
page['meta'] = ui.meta_card(box='', stylesheet=ui.inline_stylesheet(style))
page.save()
|