metadata | text
---|---
{
"source": "Jeetah/HomematicIP-Processor",
"score": 3
}
|
#### File: Jeetah/HomematicIP-Processor/sensorWriter.py
```python
from homematicip.device import WeatherSensorPlus
from Adafruit_IO import Client
import os
# print(f'User: ${os.environ.get("ADAFRUIT_IO_USERNAME")}')
aio = Client(os.environ.get('ADAFRUIT_IO_USERNAME'), os.environ.get('ADAFRUIT_IO_KEY'))
def write_cli_weather_sensor(room, device):
weather = device
print(f'{room} - {device.label} : \
\n LastUpdate: {device.lastStatusUpdate}\
\n Temp: {weather.actualTemperature}°\
\n Hum: {weather.humidity}%\
\n Illumination: {weather.illumination}\
\n Wind: {weather.windSpeed} km/h\
\n Sun: {weather.todaySunshineDuration} min/d\
\n Raining: {weather.raining}\
\n Vapor: {weather.vaporAmount}\
\n Today rain: {weather.todayRainCounter}mm')
def write_aio_weather_sensor(device):
weather = device
print(f'\nSending data to AIO...')
write_aio_value_to_feed('weather.hmip-temp', weather.actualTemperature, 'Temp')
write_aio_value_to_feed('weather.hmip-humidity', weather.humidity, 'Humidity')
write_aio_value_to_feed('weather.hmip-wind', weather.windSpeed, 'Wind')
write_aio_value_to_feed('weather.hmip-rain', weather.todayRainCounter, 'Rain')
write_aio_value_to_feed('weather.hmip-light', weather.illumination, 'Light')
write_aio_value_to_feed('weather.hmip-raining', 'w:raindrop' if weather.raining else 'times-circle-o', 'Raining')
write_aio_value_to_feed('weather.hmip-vapor', weather.vaporAmount, 'Vapor')
print(f'Done: Sending data to AIO.')
def write_aio_value_to_feed(feed_name, value, sensor):
    feed = aio.feeds(feed_name)
    print(f'Sending value "{value}" of {sensor} to AIO feed "{feed.key}"')
    aio.send_data(feed.key, value)
```
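A caller could wire these writers up roughly as in the sketch below. This is only an illustration: the `Home` setup calls and environment-variable names are assumptions about the homematicip library, and the room name is a placeholder; only the two `write_*` functions come from sensorWriter.py above.
```python
# Hypothetical driver for the writers above; Home setup and env var names are assumptions.
import os
from homematicip.home import Home
from homematicip.device import WeatherSensorPlus
from sensorWriter import write_cli_weather_sensor, write_aio_weather_sensor

home = Home()
home.set_auth_token(os.environ.get('HMIP_AUTH_TOKEN'))
home.init(os.environ.get('HMIP_ACCESS_POINT'))
home.get_current_state()

for device in home.devices:
    if isinstance(device, WeatherSensorPlus):
        write_cli_weather_sensor('Garden', device)   # room name is a placeholder
        write_aio_weather_sensor(device)
```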
|
{
"source": "Jeetandra/cortx-s3server",
"score": 2
}
|
#### File: s3backgrounddelete/s3backgrounddelete/cortx_s3_cipher.py
```python
import argparse
import base64
import sys
from s3backgrounddelete.cortx_cluster_config import CORTXClusterConfig, CipherInvalidToken
from cortx.utils.security.cipher import Cipher
class CortxS3Cipher:
def __init__(self, config = None, use_base64 = False, key_len = 20, const_key = "default"):
"""Load and initialise s3cipher."""
self.use_base64 = use_base64
self.key_len = key_len
self.const_key = const_key
self.config = config
if (self.config is None):
self.config = CORTXClusterConfig()
try:
self.cluster_id = self.config.get_cluster_id()
        except KeyError as err:
            print("Failed to parse cluster_id from config file: {0}".format(err))
            sys.exit(1)
@staticmethod
def encrypt(key: str, data: str):
edata = Cipher.encrypt(bytes(key, 'utf-8'), bytes(data, 'utf-8'))
return edata.decode("utf-8")
@staticmethod
def decrypt(key: str, data: str):
ddata = Cipher.decrypt(bytes(key, 'utf-8'), bytes(data, 'utf-8'))
return ddata.decode("utf-8")
def generate_key(self):
key = Cipher.generate_key(self.cluster_id, self.const_key)
return key.decode("utf-8")
def get_key(self):
try:
key = Cipher.generate_key(self.cluster_id, self.const_key)
except Exception as err:
raise CipherInvalidToken("Cipher generate key failed with error : {0}".format(err))
if(self.use_base64):
key = base64.b64encode(key, str.encode("AZ"))
if(len(key) < self.key_len):
while(len(key) < self.key_len):
key = key * 2
key = key[:self.key_len]
elif(len(key) > self.key_len):
key = key[:self.key_len]
return key.decode("utf-8")
def run(self):
parser = argparse.ArgumentParser(description='S3Cipher tool used for obtaining encrypted keys',add_help=False)
parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
help='Show this help message and exit')
parser.add_argument("--use_base64", help="Used to obtain alphanumeric base64 keys", action="store_true")
parser.add_argument("--key_len", help="Key length to be obtained", type=int)
parser.add_argument("--const_key", help="Constant key name to be used during encryption", type=str)
parser.add_argument("--encrypt", help="encrypt provided bytes of data, with provided key", action="store_true")
parser.add_argument("--decrypt", help="decrypt provided bytes of data, with provided key", action="store_true")
parser.add_argument("--generate_key", help="generate key to encrypt or decrypt data with it, use '--const_key' option with this.", action="store_true")
parser.add_argument("--key", help="key (in bytes) to be used in encrypting or decrypting bytes of data")
parser.add_argument("--data", help="bytes which needs to be encrypted or decrypted using provided key", type=str)
args = parser.parse_args()
        use_base64_flag = bool(args.use_base64)
        key_len_flag = args.key_len if args.key_len else 0
        const_key_flag = args.const_key if args.const_key else "default_key"
        key = args.key if args.key else ""
        data = args.data if args.data else ""
s3_cipher = CortxS3Cipher(None, use_base64_flag, key_len_flag, const_key_flag)
try:
if args.encrypt:
print(s3_cipher.encrypt(key, data))
elif args.decrypt:
print(s3_cipher.decrypt(key, data))
elif args.generate_key:
print(s3_cipher.generate_key())
else:
print(s3_cipher.get_key())
except CipherInvalidToken as err:
print("Cipher generate key failed with error : {0}".format(err))
sys.exit(1)
```
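A quick round trip of the class above might look like the sketch below; it assumes `CORTXClusterConfig` can resolve a cluster id on the machine, and the constant-key name and plaintext are placeholders.
```python
# Hypothetical round trip using the CortxS3Cipher helpers defined above.
from s3backgrounddelete.cortx_s3_cipher import CortxS3Cipher

cipher = CortxS3Cipher(None, False, 20, "s3_auth")   # const_key "s3_auth" is a placeholder
key = cipher.generate_key()                          # derived from cluster_id + const_key
token = CortxS3Cipher.encrypt(key, "my-secret")      # encrypt arbitrary text
assert CortxS3Cipher.decrypt(key, token) == "my-secret"
```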
#### File: s3backgrounddelete/s3backgrounddelete/object_recovery_queue.py
```python
import traceback
import time
import json
import pika
from s3backgrounddelete.cortx_s3_kv_api import CORTXS3KVApi
from s3backgrounddelete.cortx_s3_object_api import CORTXS3ObjectApi
from s3backgrounddelete.cortx_s3_index_api import CORTXS3IndexApi
from s3backgrounddelete.object_recovery_validator import ObjectRecoveryValidator
from s3backgrounddelete.IEMutil import IEMutil
class ObjectRecoveryRabbitMq(object):
"""This class is implementation of RabbitMq for object recovery."""
_connection = None
_channel = None
def __init__(self, config, user, password, host,
exchange, queue, mode, durable, logger):
"""Initialise rabbitmq"""
self.config = config
self._user = user
self._password = password
self._host = host
self._exchange = exchange
self._mode = mode
self._queue = queue
self._durable = True if durable == "True" else False
self.logger = logger
self.connect()
def connect(self):
"""Connect to message queue"""
try:
credentials = pika.PlainCredentials(self._user, self._password)
self._connection = pika.BlockingConnection(
pika.ConnectionParameters(host=self._host, credentials=credentials))
self._channel = self._connection.channel()
self._channel.queue_declare(
queue=self._queue, durable=self._durable)
except Exception as exception:
err_msg = "error:%s, %s" % (exception, traceback.format_exc())
            self.logger.warning("msg_queue connect failed. " + str(err_msg))
time.sleep(5)
self.connect()
def purge_queue(self, queue_name):
"""Purge all entries from queue."""
try:
self.logger.info(("Purging queue: %s") % queue_name)
self._channel.queue_purge(queue=queue_name)
        except Exception as exception:
            msg = ("Purge queue exception: %s, %s") % (
                exception, traceback.format_exc())
            self.logger.error(msg)
            return False
return True
def send_data(self, data, mq_routing):
"""Send message data."""
try:
self._channel.basic_publish(exchange=self._exchange,
routing_key=mq_routing,
body=json.dumps(data),
properties=pika.BasicProperties(
delivery_mode=self._mode, # make message persistent
))
except Exception as exception:
msg = ("msg_queue send data except:%s, %s") % (
exception, traceback.format_exc())
return False, msg
return True, None
def worker(self, queue_msg_count=None):
"""Create worker which will process results."""
# Callback function to consume the queue messages and parameters are,
# channel : rabbitmq Channel used to send/receive/ack messages
# method : contain details to identify which consumer the message should
# go e.g delivery_tag
# properties : BasicProperties contains message properties (metadata)
# body : message body
# example:
# method: <Basic.GetOk(['delivery_tag=1', 'exchange=',
# 'message_count=0', 'redelivered=False',
# 'routing_key=s3_delete_obj_job_queue'])>
# properties: <BasicProperties(['delivery_mode=2'])>
# body: b'{"Key": "<KEY>=",
# "Value": "{\\"index_id\\":\\"egZPBQAAAHg=-YwIAAAAAJKc=\\",
# \\"object_layout_id\\":1,
# \\"object_metadata_path\\":\\"object1\\"}\\n"}'
def callback(channel, method, properties, body):
"""Process the result and send acknowledge."""
try:
self.logger.info(
"Received " +
body.decode("utf-8") +
"at consumer end")
probable_delete_records = json.loads(body.decode("utf-8"))
if (probable_delete_records is not None):
self.logger.info(
"Processing following records in consumer " +
str(probable_delete_records))
validator = ObjectRecoveryValidator(
self.config, probable_delete_records, self.logger)
validator.process_results()
channel.basic_ack(delivery_tag=method.delivery_tag)
except BaseException:
self.logger.error(
"msg_queue callback failed." + traceback.format_exc())
self._channel.basic_qos(prefetch_count=1)
# If service is running in non-daemon mode,
# then consume messages till the queue is empty and then stop
# else start consuming the message till service stops.
if (queue_msg_count is not None):
self.logger.info("Queue contains " + str(queue_msg_count) + " messages")
for msg in range(queue_msg_count, 0, -1):
method, properties, body = self._channel.basic_get(self._queue, no_ack=False)
callback(self._channel, method, properties, body)
self.logger.info("Consumed all messages and queue is empty")
return
else:
self._channel.basic_consume(callback, self._queue, no_ack=False)
self._channel.start_consuming()
def receive_data(self):
"""Receive data and create msg queue."""
try:
# Check if service is running in non-daemon mode
# then consumer should stop once queue is empty.
if not self.config.get_daemon_mode():
queue_state = self._channel.queue_declare(
queue=self._queue, durable=self._durable)
queue_msg_count = queue_state.method.message_count
self.worker(queue_msg_count)
return
else:
self._channel.queue_declare(
queue=self._queue, durable=self._durable)
self.worker()
except Exception as exception:
err_msg = "error:%s, %s" % (exception, traceback.format_exc())
IEMutil("ERROR", IEMutil.RABBIT_MQ_CONN_FAILURE, IEMutil.RABBIT_MQ_CONN_FAILURE_STR)
self.logger.error("msg_queue receive data failed." + str(err_msg))
self.connect()
self.receive_data()
def close(self):
"""Stop consumer and close rabbitmq connection."""
try:
self._channel.stop_consuming()
finally:
self._connection.close()
```
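For orientation, the class above could be driven roughly as sketched below. The credentials, queue name, routing key and the `config=None` shortcut are placeholders rather than values from the repository; only `ObjectRecoveryRabbitMq` itself is defined above.
```python
# Hypothetical wiring sketch; credentials, queue names and config are placeholders.
import logging
from s3backgrounddelete.object_recovery_queue import ObjectRecoveryRabbitMq

logger = logging.getLogger("s3backgrounddelete")
mq = ObjectRecoveryRabbitMq(config=None, user="admin", password="secret",
                            host="localhost", exchange="",
                            queue="s3_delete_obj_job_queue", mode=2,
                            durable="True", logger=logger)
ok, err = mq.send_data({"Key": "<KEY>", "Value": "{}"},
                       mq_routing="s3_delete_obj_job_queue")
if not ok:
    logger.error(err)
mq.close()
```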
|
{
"source": "Jeet-Chugh/brainstorm-app",
"score": 3
}
|
#### File: Jeet-Chugh/brainstorm-app/main.py
```python
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import Screen,ScreenManager
# Widget Imports
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.popup import Popup
from kivy.uix.textinput import TextInput
# Other Imports
import pickle
import os.path
from collections import defaultdict
class Create_Screen(Screen):
def __init__(self,**kwargs):
super(Create_Screen,self).__init__(**kwargs)
def save_idea(self):
# Import Data
try:
category_ideas_dict = pickle.load(open('data\\info\\categories_ideas.pkl','rb'))
except:
category_ideas_dict = defaultdict(list)
try:
all_ideas_list = pickle.load(open('data\\info\\all_ideas.pkl','rb'))
except:
all_ideas_list = ['_']
# Assign Input Variables
idea_name = self.ids['idea_name'].text
idea_category = self.ids['category_name'].text.lower()
idea_text = self.ids['idea_text'].text
path = 'data\\ideas\\' + idea_name.replace(' ','_') + '.txt'
# Check for Errors
if len(idea_name) == 0:
return app.error_popup('Error: No Name')
elif len(idea_category) == 0:
return app.error_popup('Error: No Category')
elif len(idea_text) == 0:
return app.error_popup('Error: No Text')
elif path in all_ideas_list:
return app.error_popup('Error: Idea Already Exists')
elif len(category_ideas_dict[idea_category]) >= 10:
return app.error_popup('Error: Category Full')
elif len(list(category_ideas_dict.keys())) >= 40:
return app.error_popup('Error: Too Many Categories')
# Create File
open(path,'w').write(idea_text)
category_ideas_dict[idea_category].append(path)
all_ideas_list.append(path)
# Save Data
pickle.dump(category_ideas_dict,open('data\\info\\categories_ideas.pkl','wb'))
pickle.dump(all_ideas_list,open('data\\info\\all_ideas.pkl','wb'))
# Update All Categories (Ideas/Categories)
Create_Category().update_all_cats()
Create_Category().update_all_ideas()
app.update_recents()
app.root.ids['create_category'].ids.category_list.text = Create_Category().category_string()
class Create_Category(Screen):
def __init__(self,**kwargs):
super(Create_Category,self).__init__(**kwargs)
def create_category(self):
# Import Data
try:
category_ideas_dict = pickle.load(open('data\\info\\categories_ideas.pkl','rb'))
except:
category_ideas_dict = defaultdict(list)
        # Create the category if it doesn't already exist
if self.ids.new_category_name.text.lower() not in list(category_ideas_dict.keys()):
category_ideas_dict[self.ids.new_category_name.text.lower()] = []
pickle.dump(category_ideas_dict,open('data\\info\\categories_ideas.pkl','wb'))
else:
app.error_popup('Category Already Exists')
# Update All Categories
self.ids.category_list.text = self.category_string()
app.update_cat_list()
self.update_all_cats()
def delete_category(self):
# Delete Category from List
try:
category_ideas_dict = pickle.load(open('data\\info\\categories_ideas.pkl','rb'))
except:
category_ideas_dict = defaultdict(list)
try:
del category_ideas_dict[self.ids.del_category_name.text]
pickle.dump(category_ideas_dict,open('data\\info\\categories_ideas.pkl','wb'))
except:
app.error_popup('Error: Invalid Category')
        # Remove the deleted category's entry from "Recent Categories"
        ideas_screen_ids = app.root.ids['ideas_screen'].ids
        for widget_id in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']:
            if ideas_screen_ids[widget_id].text == self.ids.del_category_name.text:
                ideas_screen_ids[widget_id].text = 'N/A'
# Category List Updating
self.ids.category_list.text = self.category_string()
self.update_all_cats()
    def update_all_cats(self):
        # Update the ALL CATEGORIES section (slots a1..a40)
        all_categories = All_Categories()
        for i in range(1, 41):
            app.root.ids['all_categories'].ids['a' + str(i)].text = all_categories.category_names(i)
    def update_all_ideas(self):
        # Update the ALL IDEAS section (slots b1..b40)
        all_ideas = All_Ideas()
        for i in range(1, 41):
            app.root.ids['all_ideas'].ids['b' + str(i)].text = all_ideas.ideas_names(i)
def category_string(self):
# Make the categories string
try:
category_ideas_dict = pickle.load(open('data\\info\\categories_ideas.pkl','rb'))
except:
category_ideas_dict = defaultdict(list)
catstring = ', '.join(list(category_ideas_dict.keys()))
return str(catstring)
class View_Category(Screen):
def __init__(self,**kwargs):
super(View_Category,self).__init__(**kwargs)
def ideas_in_x(self):
# Secondary Header for View Category Section
return('Ideas in {a}'.format(a=self.ids['category_name'].text))
class View_Idea(Screen):
def __init__(self,**kwargs):
super(View_Idea,self).__init__(**kwargs)
def edit_idea(self):
path = 'data\\ideas\\' + self.ids['idea_name_label'].text.replace(' ','_') + '.txt'
open(path,'w').write(self.ids['idea_text_edit'].text)
def delete_idea(self):
app.root.ids['screen_manager'].current = 'ideas_screen'
recent_ideas_list = pickle.load(open('data\\info\\all_ideas.pkl','rb'))
category_ideas_dict = pickle.load(open('data\\info\\categories_ideas.pkl','rb'))
        path = 'data\\ideas\\' + self.ids['idea_name_label'].text.replace(' ','_') + '.txt'
for value in list(category_ideas_dict.values()):
if path in value:
del_value = value
break
else:
del_value = None
try:
            if del_value is not None:
del_key = [key for (key,val) in category_ideas_dict.items() if val == del_value][0]
category_ideas_dict[del_key].remove(path)
except: pass
if path in recent_ideas_list:
recent_ideas_list.remove(path)
if os.path.exists(path):
os.remove(path)
else:
app.error_popup('Something Went Wrong!')
pickle.dump(category_ideas_dict,open('data\\info\\categories_ideas.pkl','wb'))
pickle.dump(recent_ideas_list,open('data\\info\\all_ideas.pkl','wb'))
Create_Category().update_all_cats()
Create_Category().update_all_ideas()
app.update_recents()
app.root.ids['create_category'].ids.category_list.text = Create_Category().category_string()
class All_Categories(Screen):
def __init__(self,**kwargs):
super(All_Categories,self).__init__(**kwargs)
def category_names(self,i):
# Return First Category In Section
try:
category_ideas_dict = pickle.load(open('data\\info\\categories_ideas.pkl','rb'))
except:
category_ideas_dict = defaultdict(list)
try:
a = list(category_ideas_dict.keys())[i-1]
return a
except:
return ''
class All_Ideas(Screen):
def __init__(self,**kwargs):
super(All_Ideas,self).__init__(**kwargs)
def ideas_names(self,i):
try:
all_ideas_list = pickle.load(open('data\\info\\all_ideas.pkl','rb'))
except:
all_ideas_list = ['_']
try:
a = all_ideas_list[1:]
return a[i-1].replace('data\\ideas\\','').replace('.txt','').replace('_',' ')
except:
return ''
class Ideas_Screen(Screen):
def __init__(self,**kwargs):
super(Ideas_Screen,self).__init__(**kwargs)
def recent_text(self,i):
try:
all_ideas_list = pickle.load(open('data\\info\\all_ideas.pkl','rb'))
except:
all_ideas_list = ['_']
finally:
try:
a = all_ideas_list[1:]
return a[-i].replace('data\\ideas\\','').replace('.txt','').replace('_',' ')
except:
return ''
def categories_list(self,i):
try:
category_ideas_dict = pickle.load(open('data\\info\\categories_ideas.pkl','rb'))
except:
category_ideas_dict = defaultdict(list)
try:
a = list(category_ideas_dict.keys())[-i]
return a
except:
return ''
def view_category(self,id):
try:
category_ideas_dict = pickle.load(open('data\\info\\categories_ideas.pkl','rb'))
except:
category_ideas_dict = defaultdict(list)
if id == '' or id not in list(category_ideas_dict.keys()):
app.error_popup('Invalid Category')
else:
app.view_category(id)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
gui = Builder.load_file('main.kv')
class MainApp(App):
def __init__(self,**kwargs):
super(MainApp,self).__init__(**kwargs)
def build(self):
return gui
def change_screen(self,screen_name):
screen_manager = (self.root.ids['screen_manager'])
screen_manager.current = screen_name
change_button = {
'create_screen' : self.root.ids.create_button,
'ideas_screen' : self.root.ids.ideas_button,
'settings_screen' : self.root.ids.settings_button}
default_button = {
'create_screen' : 'images\\buttons\\Create_Button_Normal.jpg',
'ideas_screen' : 'images\\buttons\\Ideas_Button_Normal.jpg',
'settings_screen' : 'images\\buttons\\Settings_Button_Normal.jpg'}
page_names = {
'create_screen' : 'Create',
'ideas_screen' : 'Ideas',
'settings_screen' : 'Settings',
'account_settings' : 'Account',
'notifications_settings' : 'Alerts'}
if str(screen_manager.current) in ['create_screen','ideas_screen','settings_screen']:
for key in change_button.keys():
change_button[key].background_normal = default_button[key]
change_button[str(screen_manager.current)].background_normal = str(
default_button[screen_manager.current].replace('Normal','Down'))
def view_idea(self,file_name):
if file_name == '':
return app.error_popup('Invalid Idea')
try:
category_ideas_dict = pickle.load(open('data\\info\\categories_ideas.pkl','rb'))
except:
category_ideas_dict = defaultdict(list)
path = 'data\\ideas\\' + file_name.replace(' ','_') + '.txt'
if os.path.exists(path) == False:
return app.error_popup('Idea Not Found')
for value in list(category_ideas_dict.values()):
if path in value:
del_value = value
break
else:
del_value = None
try:
if del_value != None:
del_key = [key for (key,val) in category_ideas_dict.items() if val == del_value][0]
except: pass
self.root.ids['view_idea'].ids.idea_name_label.text = file_name
self.root.ids['view_idea'].ids.idea_text_edit.text = open(path,'r').read()
try:
            if del_value is not None:
self.root.ids['view_idea'].ids.category_name_i.text = [key for (key,value) in category_ideas_dict.items() if value == del_value][0]
else:
self.root.ids['view_idea'].ids.category_name_i.text = '<No Category>'
except:
self.root.ids['view_idea'].ids.category_name_i.text = '<No Category>'
if os.path.exists(path):
self.change_screen('view_idea')
else:
self.error_popup('Idea Nonexistent')
    def update_cat_list(self):
        try:
            category_ideas_dict = pickle.load(open('data\\info\\categories_ideas.pkl','rb'))
        except:
            category_ideas_dict = defaultdict(list)
        # Fill the recent-category slots (a..j) with the most recently created categories
        category_keys = list(category_ideas_dict.keys())
        for offset, widget_id in enumerate(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], start=1):
            if len(category_keys) >= offset:
                self.root.ids['ideas_screen'].ids[widget_id].text = category_keys[-offset]
def view_category(self,name):
try:
category_ideas_dict = pickle.load(open('data\\info\\categories_ideas.pkl','rb'))
except:
category_ideas_dict = defaultdict(list)
if name == '':
return self.error_popup('Invalid Category')
self.root.ids['screen_manager'].current = 'view_category'
self.root.ids['view_category'].ids.category_name.text = name
try:
            # Fill the idea slots (aa..jj) with the most recent ideas of this category
            for offset, widget_id in enumerate(['aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg', 'hh', 'ii', 'jj'], start=1):
                if len(category_ideas_dict[name]) >= offset:
                    self.root.ids['view_category'].ids[widget_id].text = category_ideas_dict[name][-offset].replace('_', ' ').replace('data\\ideas\\', '').replace('.txt', '')
except:
self.error_popup('Invalid Idea Name')
finally:
self.root.ids['view_category'].ids.category_label.text = 'Ideas in {a}:'.format(
a=self.root.ids['view_category'].ids.category_name.text)
def categories_list(self,i):
try:
category_ideas_dict = pickle.load(open('data\\info\\categories_ideas.pkl','rb'))
except:
category_ideas_dict = defaultdict(list)
try:
return list(category_ideas_dict.keys())[-i]
except:
return ''
def error_popup(self,text):
popup_button = Button(text='Dismiss')
invalid_popup = Popup(title=text,content=popup_button,size_hint=(0.7,0.7))
popup_button.bind(on_press=lambda *args: invalid_popup.dismiss())
return invalid_popup.open()
    def update_recents(self):
        # Fill the ten recent-idea slots (aaa1..aaa10) and the ten category slots (a..j)
        ideas_screen_ids = app.root.ids['ideas_screen'].ids
        ideas_screen = Ideas_Screen()
        for i in range(1, 11):
            ideas_screen_ids['aaa' + str(i)].text = ideas_screen.recent_text(i)
        all_categories = All_Categories()
        for i, widget_id in enumerate(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], start=1):
            ideas_screen_ids[widget_id].text = all_categories.category_names(i)
def stats(self):
ideas_list = pickle.load(open('data\\info\\all_ideas.pkl','rb'))
cats_list = pickle.load(open('data\\info\\categories_ideas.pkl','rb'))
app.root.ids['stats_settings'].ids['ideas_number'].text = str(len(ideas_list)-1)
app.root.ids['stats_settings'].ids['cat_number'].text = str(len(list(cats_list.keys())))
app.root.ids['screen_manager'].current = 'stats_settings'
def reset(self):
popup_button = Button(text='Confirm Reset')
invalid_popup = Popup(title='Reset All Data?',content=popup_button,size_hint=(0.7,0.7))
popup_button.bind(on_press=lambda *args: app.reset_real())
return invalid_popup.open()
def reset_real(self):
pickle.dump(defaultdict(list),open('data\\info\\categories_ideas.pkl','wb'))
pickle.dump(['_'],open('data\\info\\all_ideas.pkl','wb'))
if __name__ == '__main__':
app = MainApp()
app.run()
```
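Almost every method above repeats the same `try: pickle.load(...) / except: default` pattern. As an illustrative sketch only (these helpers are hypothetical and not part of main.py as written), the pattern could be consolidated like this:
```python
# Sketch of helpers that would consolidate the repeated pickle load/dump pattern.
import pickle
from collections import defaultdict

def load_pickle(path, default):
    """Return the unpickled object at `path`, or `default` if it cannot be read."""
    try:
        with open(path, 'rb') as f:
            return pickle.load(f)
    except (OSError, pickle.UnpicklingError, EOFError):
        return default

def save_pickle(path, obj):
    with open(path, 'wb') as f:
        pickle.dump(obj, f)

# e.g. category_ideas_dict = load_pickle('data\\info\\categories_ideas.pkl', defaultdict(list))
```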
|
{
"source": "Jeetendra-Shakya/DVC-NLP-Simple-usecase",
"score": 2
}
|
#### File: DVC-NLP-Simple-usecase/src/stage_01_prepare.py
```python
import argparse
import os
import shutil
from tqdm import tqdm
import logging
from src.utils.common import read_yaml, create_directories
from src.utils.data_mgmt import process_posts
import random
STAGE = "one"
logging.basicConfig(
filename=os.path.join("logs", 'running_logs.log'),
level=logging.INFO,
format="[%(asctime)s: %(levelname)s: %(module)s]: %(message)s",
filemode="a"
)
def main(config_path, params_path):
## converting XML data to tsv
config = read_yaml(config_path)
params = read_yaml(params_path)
source_data = config["source_data"]
input_data = os.path.join(source_data["data_dir"], source_data["data_file"])
split = params["prepare"]["split"]
seed = params["prepare"]["seed"]
random.seed(seed)
artifacts = config["artifacts"]
prepared_data_dir_path = os.path.join(artifacts["ARTIFACTS_DIR"], artifacts["PREPARED_DATA"])
create_directories([prepared_data_dir_path])
train_data_path = os.path.join(prepared_data_dir_path, artifacts["TRAIN_DATA"])
test_data_path = os.path.join(prepared_data_dir_path, artifacts["TEST_DATA"])
encode = "utf8"
with open(input_data, encoding=encode) as fd_in:
with open(train_data_path, "w", encoding=encode) as fd_out_train:
with open(test_data_path, "w", encoding=encode) as fd_out_test:
process_posts(fd_in, fd_out_train, fd_out_test, "<python>", split)
if __name__ == '__main__':
args = argparse.ArgumentParser()
args.add_argument("--config", "-c", default="configs/config.yaml")
args.add_argument("--params", "-p", default="params.yaml")
parsed_args = args.parse_args()
try:
logging.info("\n********************")
logging.info(">>>>> stage {STAGE} started <<<<<")
main(config_path=parsed_args.config, params_path = parsed_args.params)
logging.info(">>>>> stage {STAGE} completed! <<<<<\n")
except Exception as e:
logging.exception(e)
raise e
```
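For reference, the YAML files read by this stage need roughly the following keys, shown here as the dictionaries `read_yaml()` would return; the key names are taken from the lookups above, while every value is a placeholder.
```python
# Hypothetical shapes of configs/config.yaml and params.yaml; all values are placeholders.
config = {
    "source_data": {"data_dir": "data", "data_file": "data.xml"},
    "artifacts": {
        "ARTIFACTS_DIR": "artifacts",
        "PREPARED_DATA": "prepared",
        "TRAIN_DATA": "train.tsv",
        "TEST_DATA": "test.tsv",
    },
}
params = {"prepare": {"split": 0.2, "seed": 2021}}
```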
|
{
"source": "jeeteshgour1000/Indian-Sign-Language-recognition-system.",
"score": 2
}
|
#### File: jeeteshgour1000/Indian-Sign-Language-recognition-system./imagePreprocessing.py
```python
import numpy as np
import cv2
import os
from sklearn.cluster import KMeans
from scipy.spatial import distance
from sklearn.cluster import MiniBatchKMeans
from sklearn.svm import SVC
import sklearn.metrics as skmetrics
import random
import pickle
import imagePreprocessingUtils as ipu
#import glob
train_labels = []
test_labels = []
def preprocess_all_images():
images_labels = []
train_disc_by_class = {}
test_disc_by_class = {}
all_train_dis = []
train_img_disc = []
test_img_disc = []
label_value = 0
for (dirpath,dirnames,filenames) in os.walk(ipu.PATH):
dirnames.sort()
for label in dirnames:
#print(label)
if not (label == '.DS_Store'):
for (subdirpath,subdirnames,images) in os.walk(ipu.PATH+'/'+label+'/'):
#print(len(images))
count = 0
train_features = []
test_features = []
for image in images:
#print(label)
imagePath = ipu.PATH+'/'+label+'/'+image
#print(imagePath)
img = cv2.imread(imagePath)
if img is not None:
img = get_canny_edge(img)[0]
sift_disc = get_SIFT_descriptors(img)
print(sift_disc.shape)
if(count < (ipu.TOTAL_IMAGES * ipu.TRAIN_FACTOR * 0.01)):
print('Train:--------- Label is {} and Count is {}'.format(label, count) )
#train_features.append(sift_disc)
train_img_disc.append(sift_disc)
all_train_dis.extend(sift_disc)
train_labels.append(label_value)
elif((count>=(ipu.TOTAL_IMAGES * ipu.TRAIN_FACTOR * 0.01)) and count <ipu.TOTAL_IMAGES):
print('Test:--------- Label is {} and Count is {}'.format(label, count) )
#test_features.append(sift_disc)
test_img_disc.append(sift_disc)
test_labels.append(label_value)
count += 1
#images_labels.append((label,sift_disc))
#train_disc_by_class[label] = train_features
#test_disc_by_class[label] = test_features
label_value +=1
print('length of train features are %i' % len(train_img_disc))
print('length of test features are %i' % len(test_img_disc))
print('length of all train discriptors is {}'.format(len(all_train_dis)))
#print('length of all train discriptors by class is {}'.format(len(train_disc_by_class)))
#print('length of all test disc is {}'.format(len(test_disc_by_class)))
return all_train_dis, train_img_disc, train_disc_by_class, test_disc_by_class, test_img_disc
def get_canny_edge(image):
grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Convert from BGR to HSV
    HSVImage = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # Find pixels whose colour lies in the skin range
    lowerBoundary = np.array([0, 40, 30], dtype="uint8")
    upperBoundary = np.array([43, 255, 254], dtype="uint8")
    skinMask = cv2.inRange(HSVImage, lowerBoundary, upperBoundary)
    # Smooth the skin mask (median blur removes small speckles)
    skinMask = cv2.addWeighted(skinMask, 0.5, skinMask, 0.5, 0.0)
    skinMask = cv2.medianBlur(skinMask, 5)
skin = cv2.bitwise_and(grayImage, grayImage, mask = skinMask)
#cv2.imshow("masked2",skin)
#. canny edge detection
canny = cv2.Canny(skin,60,60)
#plt.imshow(img2, cmap = 'gray')
return canny,skin
def get_SIFT_descriptors(canny):
    # Initialising SURF (used here in place of SIFT, despite the function name)
    surf = cv2.xfeatures2d.SURF_create()
    # surf.extended = True
    canny = cv2.resize(canny, (256, 256))
    # Computing SURF keypoints and descriptors
    kp, des = surf.detectAndCompute(canny, None)
    # print(len(des))
    # sift_features_image = cv2.drawKeypoints(canny, kp, None, (0, 0, 255), 4)
    return des
### K-means is not used as data is large and requires a better computer with good specifications
def kmeans(k, descriptor_list):
print('K-Means started.')
print ('%i descriptors before clustering' % descriptor_list.shape[0])
kmeanss = KMeans(k)
kmeanss.fit(descriptor_list)
visual_words = kmeanss.cluster_centers_
return visual_words, kmeanss
def mini_kmeans(k, descriptor_list):
print('Mini batch K-Means started.')
print ('%i descriptors before clustering' % descriptor_list.shape[0])
kmeans_model = MiniBatchKMeans(k)
kmeans_model.fit(descriptor_list)
print('Mini batch K means trained to get visual words.')
filename = 'mini_kmeans_model.sav'
pickle.dump(kmeans_model, open(filename, 'wb'))
return kmeans_model
def get_histograms(discriptors_by_class,visual_words, cluster_model):
histograms_by_class = {}
total_histograms = []
for label,images_discriptors in discriptors_by_class.items():
print('Label: %s' % label)
histograms = []
# loop for all images
for each_image_discriptors in images_discriptors:
## manual method to calculate words occurence as histograms
'''histogram = np.zeros(len(visual_words))
# loop for all discriptors in a image discriptorss
for each_discriptor in each_image_discriptors:
#list_words = visual_words.tolist()
a = np.array([visual_words])
index = find_index(each_discriptor, visual_words)
#print(index)
#del list_words
histogram[index] += 1
print(histogram)'''
## using cluster model
raw_words = cluster_model.predict(each_image_discriptors)
hist = np.bincount(raw_words, minlength=len(visual_words))
print(hist)
histograms.append(hist)
histograms_by_class[label] = histograms
total_histograms.append(histograms)
    print('Histograms successfully created for %i classes.' % len(histograms_by_class))
return histograms_by_class, total_histograms
def dataSplit(dataDictionary):
X = []
Y = []
for key,values in dataDictionary.items():
for value in values:
X.append(value)
Y.append(key)
return X,Y
def predict_svm(X_train, X_test, y_train, y_test):
svc=SVC(kernel='linear')
print("Support Vector Machine started.")
svc.fit(X_train,y_train)
filename = 'svm_model.sav'
pickle.dump(svc, open(filename, 'wb'))
y_pred=svc.predict(X_test)
np.savetxt('submission_svm.csv', np.c_[range(1,len(y_test)+1),y_pred,y_test], delimiter=',', header = 'ImageId,PredictedLabel,TrueLabel', comments = '', fmt='%d')
calculate_metrics("SVM",y_test,y_pred)
def calculate_metrics(method,label_test,label_pred):
print("Accuracy score for ",method,skmetrics.accuracy_score(label_test,label_pred))
print("Precision_score for ",method,skmetrics.precision_score(label_test,label_pred,average='micro'))
print("f1 score for ",method,skmetrics.f1_score(label_test,label_pred,average='micro'))
print("Recall score for ",method,skmetrics.recall_score(label_test,label_pred,average='micro'))
### STEP 1: SURF descriptors for all train and test images, separated by class
all_train_dis,train_img_disc, train_disc_by_class, test_disc_by_class, test_img_disc = preprocess_all_images()
## deleting these variables as they are not used with mini batch k means
del train_disc_by_class, test_disc_by_class
### STEP:2 MINI K-MEANS
mini_kmeans_model = mini_kmeans(ipu.N_CLASSES * ipu.CLUSTER_FACTOR, np.array(all_train_dis))
del all_train_dis
### Collecting VISUAL WORDS for all images (train , test)
print('Collecting visual words for train .....')
train_images_visual_words = [mini_kmeans_model.predict(visual_words) for visual_words in train_img_disc]
print('Visual words for train data collected. length is %i' % len(train_images_visual_words))
print('Collecting visual words for test .....')
test_images_visual_words = [mini_kmeans_model.predict(visual_words) for visual_words in test_img_disc]
print('Visual words for test data collected. length is %i' % len(test_images_visual_words))
### STEP 3: HISTOGRAMS (finding how often each visual word occurs in every image)
## These histograms could also be computed via the get_histograms function above
print('Calculating Histograms for train...')
bovw_train_histograms = np.array([np.bincount(visual_words, minlength=ipu.N_CLASSES * ipu.CLUSTER_FACTOR) for visual_words in train_images_visual_words])
print('Train histograms are collected. Length : %i ' % len(bovw_train_histograms))
print('Calculating Histograms for test...')
bovw_test_histograms = np.array([np.bincount(visual_words, minlength=ipu.N_CLASSES * ipu.CLUSTER_FACTOR) for visual_words in test_images_visual_words])
print('Test histograms are collected. Length : %i ' % len(bovw_test_histograms))
print('Each histogram length is : %i' % len(bovw_train_histograms[0]))
#----------------------
print('============================================')
# preparing data for training the SVM
X_train = bovw_train_histograms
X_test = bovw_test_histograms
Y_train = train_labels
Y_test = test_labels
#print(Y_train)
### shuffling
buffer = list(zip(X_train, Y_train))
random.shuffle(buffer)
random.shuffle(buffer)
random.shuffle(buffer)
X_train, Y_train = zip(*buffer)
#print(Y_train)
buffer = list(zip(X_test, Y_test))
random.shuffle(buffer)
random.shuffle(buffer)
X_test, Y_test = zip(*buffer)
print('Length of X-train: %i ' % len(X_train))
print('Length of Y-train: %i ' % len(Y_train))
print('Length of X-test: %i ' % len(X_test))
print('Length of Y-test: %i ' % len(Y_test))
predict_svm(X_train, X_test,Y_train, Y_test)
#######################################################
'''
#STEP:2 K-MEANS clustering to get visual words
visual_words, cluster_model = kmeans(ipu.N_CLASSES * 8, np.array(all_train_dis))
print(' Length of Visual words using k-means= %i' % len(visual_words))
print(type(visual_words))
print(visual_words.shape)
print('Histograms creation started for training set.')
bovw_train_histograms_by_class = get_histograms(train_disc_by_class,visual_words, cluster_model)[0]
print('Histograms created with k-means.')
for key, values in bovw_train_histograms_by_class.items():
for value in values:
print(value)
print('Histograms creation started for testing set.')
bovw_test_histograms_by_class = get_histograms(test_disc_by_class,visual_words, cluster_model)[0]
print('Histograms created.')
X_train, Y_train = dataSplit(bovw_train_histograms_by_class)
print('Length of x_train are % i ' % len(X_train))
print('Length of y_train are % i ' % len(Y_train))
X_test, Y_test = dataSplit(bovw_test_histograms_by_class)
print('Length of x_test are % i ' % len(X_test))
print('Length of y_test are % i ' % len(Y_test))
X_train, Y_train = dataSplit(bovw_train_histograms_by_class)
predict_svm(X_train, X_test,Y_train, Y_test)
'''
```
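To see the bag-of-visual-words step above in isolation: each image's descriptors are mapped to visual-word indices by the clustering model and then counted into a fixed-length histogram. The following is a tiny self-contained sketch on synthetic data; the 64-dimensional descriptors and the 5-word vocabulary are placeholders, not values from this project.
```python
# Minimal illustration of the BoVW histogram step used above, on synthetic data.
import numpy as np
from sklearn.cluster import MiniBatchKMeans

rng = np.random.RandomState(0)
descriptors = rng.rand(200, 64)                    # stand-in for SURF descriptors
vocab = MiniBatchKMeans(n_clusters=5, random_state=0).fit(descriptors)

one_image_desc = rng.rand(30, 64)                  # descriptors of a single image
words = vocab.predict(one_image_desc)              # visual-word index per descriptor
histogram = np.bincount(words, minlength=5)        # fixed-length BoVW feature
print(histogram, histogram.sum())                  # sums to the number of descriptors
```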
#### File: jeeteshgour1000/Indian-Sign-Language-recognition-system./recogniseGesture.py
```python
import numpy as np
import cv2
import os
import pickle
import imagePreprocessingUtils as ipu
CAPTURE_FLAG = False
class_labels = ipu.get_labels()
def recognise(cluster_model, classify_model):
global CAPTURE_FLAG
gestures = ipu.get_all_gestures()
cv2.imwrite("all_gestures.jpg", gestures)
camera = cv2.VideoCapture(1)
    print('The camera window will now open.\n1) Place your hand gesture inside the ROI (green rectangle).\n2) Press the p key to toggle prediction and Esc to exit.')
count = 0
while(True):
(t,frame) = camera.read()
frame = cv2.flip(frame,1)
cv2.rectangle(frame,ipu.START, ipu.END,(0,255,0),2 )
cv2.imshow("All_gestures", gestures)
pressedKey = cv2.waitKey(1)
if pressedKey == 27:
break
elif pressedKey == ord('p'):
if(CAPTURE_FLAG):
CAPTURE_FLAG = False
else:
CAPTURE_FLAG = True
if(CAPTURE_FLAG):
# Region of Interest
roi = frame[ ipu.START[1]+5:ipu.END[1], ipu.START[0]+5:ipu.END[0]]
if roi is not None:
roi = cv2.resize(roi, (ipu.IMG_SIZE,ipu.IMG_SIZE))
img = ipu.get_canny_edge(roi)[0]
cv2.imshow("Edges ",img)
print(img)
sift_disc = ipu.get_SIFT_descriptors(img)
print(type(sift_disc))
if sift_disc is not None:
visual_words = cluster_model.predict(sift_disc)
print('visual words collected.')
bovw_histogram = np.array(np.bincount(visual_words, minlength=ipu.N_CLASSES * ipu.CLUSTER_FACTOR))
pred = classify_model.predict([bovw_histogram])
label = class_labels[pred[0]]
rectangle_bgr = (0, 0, 0)
(text_width, text_height) = cv2.getTextSize('Predicted text: ', 1, fontScale=1.5, thickness=2)[0]
# set the text start position
text_offset_x = 50
text_offset_y = 20
# make the coords of the box with a small padding of two pixels
box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 40, text_offset_y + text_height +50))
cv2.rectangle(frame, box_coords[0], box_coords[1], rectangle_bgr, cv2.FILLED)
frame = cv2.putText(frame, 'Predicted text: ', (50,70), cv2.FONT_HERSHEY_SIMPLEX,1, (255,255,255), 2, cv2.LINE_AA)
frame = cv2.putText(frame, label, (300,80), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 2, cv2.LINE_AA)
cv2.imshow("Video",frame)
camera.release()
cv2.destroyAllWindows()
clustering_model = pickle.load(open('mini_kmeans_model.sav', 'rb'))
classification_model = pickle.load(open('svm_model.sav', 'rb'))
recognise(clustering_model,classification_model)
```
|
{
"source": "jeetgor/xzceb-flask_eng_fr",
"score": 3
}
|
#### File: machinetranslation/tests/tests.py
```python
import unittest
from translator import translator_service,english_to_french,french_to_english
class test_translation(unittest.TestCase):
def test1(self):
self.assertEqual(english_to_french('Hello'), 'Bonjour')
self.assertEqual(french_to_english('Bonjour'), 'Hello')
self.assertNotEqual(french_to_english('Bonjour'),'Bonjour')
self.assertNotEqual(english_to_french('Hello'),'Hello')
class test_null(unittest.TestCase):
    def test1(self):
        # An empty string should not be translated
        self.assertEqual(english_to_french(''), None)
        self.assertEqual(french_to_english(''), None)

if __name__ == '__main__':
    unittest.main()
```
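The tests above assume a `translator` module exposing `english_to_french` and `french_to_english`. A hypothetical implementation against IBM Watson Language Translator (the usual backend for this exercise) might look like the sketch below; the environment-variable names and service setup are assumptions, not taken from the repository.
```python
# Hypothetical translator.py sketch; credential handling is an assumption.
import os
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

authenticator = IAMAuthenticator(os.environ.get('apikey'))
language_translator = LanguageTranslatorV3(version='2018-05-01', authenticator=authenticator)
language_translator.set_service_url(os.environ.get('url'))

def english_to_french(english_text):
    if not english_text:
        return None
    result = language_translator.translate(text=english_text, model_id='en-fr').get_result()
    return result['translations'][0]['translation']

def french_to_english(french_text):
    if not french_text:
        return None
    result = language_translator.translate(text=french_text, model_id='fr-en').get_result()
    return result['translations'][0]['translation']
```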
|
{
"source": "jeethesh-pai/caps",
"score": 3
}
|
#### File: caps/dataloader/megadepth.py
```python
import torch
from torch.utils.data import Dataset
import os
import numpy as np
import cv2
import skimage.io as io
import torchvision.transforms as transforms
import utils
import collections
from tqdm import tqdm
import dataloader.data_utils as data_utils
rand = np.random.RandomState(234)
class MegaDepthLoader():
def __init__(self, args):
self.args = args
self.dataset = MegaDepth(args)
self.data_loader = torch.utils.data.DataLoader(self.dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, collate_fn=self.my_collate)
def my_collate(self, batch):
''' Puts each data field into a tensor with outer dimension batch size '''
batch = list(filter(lambda b: b is not None, batch))
return torch.utils.data.dataloader.default_collate(batch)
def load_data(self):
return self.data_loader
def name(self):
return 'MegaDepthLoader'
def __len__(self):
return len(self.dataset)
class MegaDepth(Dataset):
def __init__(self, args):
self.args = args
if args.phase == 'train':
# augment during training
self.transform = transforms.Compose([transforms.ToPILImage(),
transforms.ColorJitter
(brightness=1, contrast=1, saturation=1, hue=0.4),
transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)),
])
else:
self.transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)),
])
self.phase = args.phase
self.root = os.path.join(args.datadir, self.phase)
self.images = self.read_img_cam()
self.imf1s, self.imf2s = self.read_pairs()
print('total number of image pairs loaded: {}'.format(len(self.imf1s)))
# shuffle data
index = np.arange(len(self.imf1s))
rand.shuffle(index)
self.imf1s = list(np.array(self.imf1s)[index])
self.imf2s = list(np.array(self.imf2s)[index])
def read_img_cam(self):
images = {}
Image = collections.namedtuple(
"Image", ["name", "w", "h", "fx", "fy", "cx", "cy", "rvec", "tvec"])
for scene_id in os.listdir(self.root):
densefs = [f for f in os.listdir(os.path.join(self.root, scene_id))
if 'dense' in f and os.path.isdir(os.path.join(self.root, scene_id, f))]
for densef in densefs:
folder = os.path.join(self.root, scene_id, densef, 'aligned')
img_cam_txt_path = os.path.join(folder, 'img_cam.txt')
with open(img_cam_txt_path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
image_name = elems[0]
img_path = os.path.join(folder, 'images', image_name)
w, h = int(elems[1]), int(elems[2])
fx, fy = float(elems[3]), float(elems[4])
cx, cy = float(elems[5]), float(elems[6])
R = np.array(elems[7:16])
T = np.array(elems[16:19])
images[img_path] = Image(
name=image_name, w=w, h=h, fx=fx, fy=fy, cx=cx, cy=cy, rvec=R, tvec=T
)
return images
def read_pairs(self):
imf1s, imf2s = [], []
print('reading image pairs from {}...'.format(self.root))
for scene_id in tqdm(os.listdir(self.root), desc='# loading data from scene folders'):
densefs = [f for f in os.listdir(os.path.join(self.root, scene_id))
if 'dense' in f and os.path.isdir(os.path.join(self.root, scene_id, f))]
for densef in densefs:
imf1s_ = []
imf2s_ = []
folder = os.path.join(self.root, scene_id, densef, 'aligned')
pairf = os.path.join(folder, 'pairs.txt')
if os.path.exists(pairf):
f = open(pairf, 'r')
for line in f:
imf1, imf2 = line.strip().split(' ')
imf1s_.append(os.path.join(folder, 'images', imf1))
imf2s_.append(os.path.join(folder, 'images', imf2))
# make # image pairs per scene more balanced
if len(imf1s_) > 5000:
index = np.arange(len(imf1s_))
rand.shuffle(index)
imf1s_ = list(np.array(imf1s_)[index[:5000]])
imf2s_ = list(np.array(imf2s_)[index[:5000]])
imf1s.extend(imf1s_)
imf2s.extend(imf2s_)
return imf1s, imf2s
@staticmethod
def get_intrinsics(im_meta):
return np.array([[im_meta.fx, 0, im_meta.cx],
[0, im_meta.fy, im_meta.cy],
[0, 0, 1]])
@staticmethod
def get_extrinsics(im_meta):
R = im_meta.rvec.reshape(3, 3)
t = im_meta.tvec
extrinsic = np.eye(4)
extrinsic[:3, :3] = R
extrinsic[:3, 3] = t
return extrinsic
def __getitem__(self, item):
imf1 = self.imf1s[item]
imf2 = self.imf2s[item]
im1_meta = self.images[imf1]
im2_meta = self.images[imf2]
im1 = io.imread(imf1)
im2 = io.imread(imf2)
h, w = im1.shape[:2]
intrinsic1 = self.get_intrinsics(im1_meta)
intrinsic2 = self.get_intrinsics(im2_meta)
extrinsic1 = self.get_extrinsics(im1_meta)
extrinsic2 = self.get_extrinsics(im2_meta)
relative = extrinsic2.dot(np.linalg.inv(extrinsic1))
R = relative[:3, :3]
# remove pairs that have a relative rotation angle larger than 80 degrees
theta = np.arccos(np.clip((np.trace(R) - 1) / 2, -1, 1)) * 180 / np.pi
if theta > 80 and self.phase == 'train':
return None
T = relative[:3, 3]
tx = data_utils.skew(T)
E_gt = np.dot(tx, R)
F_gt = np.linalg.inv(intrinsic2).T.dot(E_gt).dot(np.linalg.inv(intrinsic1))
# generate candidate query points
coord1 = data_utils.generate_query_kpts(im1, self.args.train_kp, 10*self.args.num_pts, h, w)
# if no keypoints are detected
if len(coord1) == 0:
return None
# prune query keypoints that are not likely to have correspondence in the other image
if self.args.prune_kp:
ind_intersect = data_utils.prune_kpts(coord1, F_gt, im2.shape[:2], intrinsic1, intrinsic2,
relative, d_min=4, d_max=400)
if np.sum(ind_intersect) == 0:
return None
coord1 = coord1[ind_intersect]
coord1 = utils.random_choice(coord1, self.args.num_pts)
coord1 = torch.from_numpy(coord1).float()
im1_ori, im2_ori = torch.from_numpy(im1), torch.from_numpy(im2)
F_gt = torch.from_numpy(F_gt).float() / (F_gt[-1, -1] + 1e-10)
intrinsic1 = torch.from_numpy(intrinsic1).float()
intrinsic2 = torch.from_numpy(intrinsic2).float()
pose = torch.from_numpy(relative[:3, :]).float()
im1_tensor = self.transform(im1)
im2_tensor = self.transform(im2)
out = {'im1': im1_tensor,
'im2': im2_tensor,
'im1_ori': im1_ori,
'im2_ori': im2_ori,
'pose': pose,
'F': F_gt,
'intrinsic1': intrinsic1,
'intrinsic2': intrinsic2,
'coord1': coord1}
return out
def __len__(self):
return len(self.imf1s)
```
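For orientation, a training script would consume the loader roughly as sketched below; the `args` object is a placeholder namespace that must provide the attributes read above (`datadir`, `phase`, `batch_size`, `workers`, `train_kp`, `num_pts`, `prune_kp`), and the path and parameter values are assumptions.
```python
# Hypothetical consumption of MegaDepthLoader; `args` is a placeholder namespace.
from types import SimpleNamespace
from dataloader.megadepth import MegaDepthLoader

args = SimpleNamespace(datadir='/path/to/megadepth', phase='train', batch_size=2,
                       workers=0, train_kp='sift', num_pts=500, prune_kp=True)
loader = MegaDepthLoader(args).load_data()
for batch in loader:
    im1, im2 = batch['im1'], batch['im2']          # normalised image tensors
    F_gt, coord1 = batch['F'], batch['coord1']     # ground-truth F and query keypoints
    break
```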
|
{
"source": "jeethesh-pai/Computer-Vison-and-ML-Assignments",
"score": 3
}
|
#### File: Computer-Vison-and-ML-Assignments/sheet3/Task_3.py
```python
import cv2
import numpy as np
from utils import from0_1to0_255asUint8, showImages, asUint8
from skimage.util import random_noise
import matplotlib.pyplot as plt
# TODO: Simulate a picture captured in low light without noise.
# Reduce the brightness of `img` about the provided darkening `factor`.
# The data type of the returned image shall be the same as that of the input image.
# Example (factor = 3): three times darker, i.e. a third of the original intensity.
def reduceBrightness(img, factor):
img_hsi = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
img_new = img_hsi
img_new[:, :, 2] = img_hsi[:, :, 2] / factor
img_new = cv2.cvtColor(img_new, cv2.COLOR_HSV2BGR)
return img_new
# TODO: "Restore" the brightness of a picture captured in low light, ignoring potential noise.
# Apply the inverse operation to `reduceBrightness(..)`.
def restoreBrightness(img, factor):
img_new = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
img_new[:, :, 2] = np.clip(img_new[:, :, 2] * factor, a_min=0, a_max=255)
img_new = cv2.cvtColor(img_new, cv2.COLOR_HSV2BGR)
return img_new
img2 = cv2.imread("img/couch.jpg")
imgs = [("Original", img2)]
# Reduce image brightness
darkening_factor = 3
img_dark = reduceBrightness(img2, darkening_factor)
# Restore image brightness
img_restored = restoreBrightness(img_dark, darkening_factor)
imgs = imgs + [("Low light", img_dark), ("Low light restored", img_restored)]
# showImages(imgs)
# Simulate multiple pictures captured in low light with noise.
num_dark_noise_imgs = 10
imgs_dark_noise = [from0_1to0_255asUint8(random_noise(img_dark, mode="poisson")) for _ in range(num_dark_noise_imgs)]
# TODO: Now try to "restore" a picture captured in low light with noise (`img_dark_noise`) using the same function as
# for the picture without noise.
img_dark_noise = imgs_dark_noise[0]
img_noise_restored_simple = restoreBrightness(img_dark_noise, darkening_factor)
imgs = imgs + [None, ("Low light with noise", img_dark_noise),
("Low light with noise restored", img_noise_restored_simple)]
# showImages(imgs)
# # TODO: Explain with your own words why the "restored" picture shows that much noise, i.e. why the intensity of the
# # noise in low light images is typically so high compared to the image signal.
'''
In a low-light image the signal (the photon count per pixel) is small, so the random
fluctuations around it (shot/Poisson noise) are large relative to the signal, i.e. the
signal-to-noise ratio is poor. "Restoring" the brightness simply multiplies every pixel
value by the same factor, which amplifies the noise exactly as much as the signal, so the
SNR does not improve and the amplified noise becomes clearly visible. The noise therefore
has to be reduced (e.g. by averaging several exposures) rather than just scaling up the
brightness.
________________________________________________________________________________
'''
# TODO: Restore a picture from all the low light pictures with noise (`imgs_dark_noise`) by computing the "average
# image" of them. Adjust the resulting brightness to the original image (using the `darkening_factor` and
# `num_dark_noise_imgs`).
# Average all noisy captures (accumulate in float to avoid uint8 saturation), then undo the darkening.
img_noise_stack_restored = np.mean(np.stack([img.astype(np.float32) for img in imgs_dark_noise]), axis=0)
img_noise_stack_restored = asUint8(img_noise_stack_restored)
img_noise_stack_restored = restoreBrightness(img_noise_stack_restored, darkening_factor)
imgs = imgs + [("Low light with noise 1 ...", imgs_dark_noise[0]),
("... Low light with noise " + str(num_dark_noise_imgs), imgs_dark_noise[-1]),
("Low light stack with noise restored", img_noise_stack_restored)]
plt.figure(figsize=(15, 8))
showImages(imgs, 3)
```
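A small numerical aside (an illustrative sketch, not part of the assignment code) backs up the answer above: for Poisson (shot) noise the standard deviation grows only with the square root of the signal, so a dark image has a low signal-to-noise ratio; scaling the brightness up multiplies signal and noise alike, while averaging several captures does improve the ratio.
```python
import numpy as np

rng = np.random.default_rng(0)

# Dark scene: true intensity ~20, shot noise std ~ sqrt(20) ~ 4.5
noisy = rng.poisson(20.0, size=100_000).astype(float)
print("SNR dark:             ", noisy.mean() / noisy.std())

# "Restoring" brightness by a factor of 3 scales signal and noise equally; the SNR is unchanged.
print("SNR after x3 scaling: ", (noisy * 3).mean() / (noisy * 3).std())

# Averaging N independent captures reduces the noise std by roughly sqrt(N).
stack_mean = rng.poisson(20.0, size=(10, 100_000)).mean(axis=0)
print("SNR of 10-image stack:", stack_mean.mean() / stack_mean.std())
```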
#### File: Computer-Vison-and-ML-Assignments/sheet3/Task_4.py
```python
import numpy as np
import cv2
from utils import asUint8, from0_1to0_255asUint8, PLACEHOLDER, showImage, showImages, convertColorImagesBGR2RGB
from filter_zoo import filter_gauss
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from skimage.util import random_noise
# TODO: Simulate a picture captured in low light without noise.
# Reduce the brightness of `img` about the provided darkening `factor`.
# The data type of the returned image shall be the same as that of the input image.
# Example (factor = 3): three times darker, i.e. a third of the original intensity.
def reduceBrightness(img, factor):
img_hsi = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
img_new = img_hsi
img_new[:, :, 2] = img_hsi[:, :, 2] / factor
img_new = cv2.cvtColor(img_new, cv2.COLOR_HSV2BGR)
return img_new
# TODO: "Restore" the brightness of a picture captured in low light, ignoring potential noise.
# Apply the inverse operation to `reduceBrightness(..)`.
def restoreBrightness(img, factor):
img_new = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
img_new[:, :, 2] = np.clip(img_new[:, :, 2].astype(np.float32) * factor, a_min=0, a_max=255)  # widen dtype first so uint8 values cannot wrap around
img_new = cv2.cvtColor(img_new, cv2.COLOR_HSV2BGR)
return img_new
def filter_sobel(img, ksize=3):
# TODO: Implement a sobel filter (x/horizontal + y/vertical) for the provided `img` with kernel size `ksize`.
# The values of the final (combined) image shall be normalized to the range [0, 1].
# Return the final result along with the two intermediate images.
central_difference_kernel = np.expand_dims([-1, 0, 1], axis=-1)
gaussian_kernel = cv2.getGaussianKernel(ksize, sigma=-1)
sobel_kernel_y = np.matmul(central_difference_kernel, np.transpose(gaussian_kernel))
sobel_kernel_x = np.matmul(gaussian_kernel, np.transpose(central_difference_kernel))
# Use a float output depth so negative gradient responses are preserved.
sobel_y = cv2.filter2D(img, ddepth=cv2.CV_32F, kernel=sobel_kernel_y, borderType=cv2.BORDER_DEFAULT)
sobel_x = cv2.filter2D(img, ddepth=cv2.CV_32F, kernel=sobel_kernel_x, borderType=cv2.BORDER_DEFAULT)
# Combine both responses and normalize to [0, 1] as required (the threshold applied later assumes this range).
sobel = np.sqrt(sobel_y ** 2 + sobel_x ** 2)
sobel = sobel / (sobel.max() + 1e-8)
return sobel, sobel_x, sobel_y
def applyThreshold(img, threshold):
# TODO: Return an image whose values are 1 where the `img` values are > `threshold` and 0 otherwise.
new_img = np.zeros_like(img)
new_img[img > threshold] = 1
return new_img
def applyMask(img, mask):
# TODO: Apply white color to the masked pixels, i.e. return an image whose values are 1 where `mask` values are 1
# and unchanged otherwise. (All mask values can be assumed to be either 0 or 1)
new_img = np.copy(img)  # work on a copy so the caller's image is not modified in place
new_img[mask == 1] = 1
return new_img
img2 = cv2.imread("img/couch.jpg")
imgs = [("Original", img2)]
img3 = img2
# Reduce image brightness
darkening_factor = 3
img_dark = reduceBrightness(img2, darkening_factor)
# Restore image brightness
img_restored = restoreBrightness(img_dark, darkening_factor)
imgs = imgs + [("Low light", img_dark), ("Low light restored", img_restored)]
# Simulate multiple pictures captured in low light with noise.
num_dark_noise_imgs = 10
imgs_dark_noise = [from0_1to0_255asUint8(random_noise(img_dark, mode="poisson")) for _ in range(num_dark_noise_imgs)]
# TODO: Now try to "restore" a picture captured in low light with noise (`img_dark_noise`) using the same function as
# for the picture without noise.
img_dark_noise = imgs_dark_noise[0]
img_noise_restored_simple = restoreBrightness(img_dark_noise, darkening_factor)
# Average all noisy captures (accumulate in float to avoid uint8 saturation), then undo the darkening.
img_noise_stack_restored = np.mean(np.stack([img.astype(np.float32) for img in imgs_dark_noise]), axis=0)
img_noise_stack_restored = asUint8(img_noise_stack_restored)
img_noise_stack_restored = restoreBrightness(img_noise_stack_restored, darkening_factor)
imgs3 = [('Noise', img_noise_restored_simple),
('Gauss filter', filter_gauss(img_noise_restored_simple, 3)),
('Image stack + Gauss filter', filter_gauss(img_noise_stack_restored, 3))]
initial_threshold = .25
imgs3_gray = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for _, img in imgs3]
imgs_sobel = [filter_sobel(img_gray) for img_gray in imgs3_gray]
imgs_thresh = [applyThreshold(img_sobel, initial_threshold) for img_sobel, _, _ in imgs_sobel]
imgs_masked = [applyMask(img3, img_thresh) for img_thresh in imgs_thresh]
def header(label, imgs, i, j=None):
if i == 0:
return label, (imgs[i] if j is None else imgs[i][j])
return imgs[i] if j is None else imgs[i][j]
imgs = [[imgs3[i], header('Sobel X', imgs_sobel, i, 0),
header('Sobel Y', imgs_sobel, i, 1),
header('Sobel', imgs_sobel, i, 2),
header('Edge mask', imgs_thresh, i),
header('Stylized image', imgs_masked, i)] for i in range(len(imgs3))]
imgs = [label_and_image for img_list in imgs for label_and_image in img_list]
plt.figure(figsize=(17, 7))
plt_imgs = showImages(imgs, 6, False, padding=(.05, .15, .05, .05))
def updateImg(threshold):
imgs_thresh = [applyThreshold(img_sobel, threshold) for img_sobel, _, _ in imgs_sobel]
imgs_masked = [applyMask(img3, img_thresh) for img_thresh in imgs_thresh]
imgs_masked = [convertColorImagesBGR2RGB(img_masked)[0] for img_masked in imgs_masked]
for i in range(len(imgs3)):
cols = len(imgs) // len(imgs3)
plt_imgs[i * cols + 4].set_data(imgs_thresh[i])
plt_imgs[i * cols + 5].set_data(imgs_masked[i])
ax_threshold = plt.axes([.67, .05, .27, .06])
slider_threshold = Slider(ax=ax_threshold, label='Threshold', valmin=0, valmax=1, valinit=initial_threshold,
valstep=.01)
slider_threshold.on_changed(updateImg)
plt.show()
```
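As a quick reference (a sketch that only re-runs the two kernel-building lines from `filter_sobel` above), the separable construction, an outer product of a central-difference vector with a 3-tap Gaussian, yields a smoothed derivative that is approximately proportional to the classic Sobel kernel:
```python
import numpy as np
import cv2

d = np.array([[-1.0], [0.0], [1.0]])       # central difference (column vector)
g = cv2.getGaussianKernel(3, sigma=-1)     # roughly [0.24, 0.52, 0.24] (column vector)

sobel_kernel_y = d @ g.T                   # derivative along y, smoothing along x
sobel_kernel_x = g @ d.T                   # derivative along x, smoothing along y

# Approximately 1/4 of the classic kernels [[-1, -2, -1], [0, 0, 0], [1, 2, 1]] and its transpose.
print(np.round(sobel_kernel_y, 2))
print(np.round(sobel_kernel_x, 2))
```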
#### File: Computer-Vison-and-ML-Assignments/sheet4/tf_object_recognition.py
```python
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from utils import *
#
# Task 3
#
# Create a network to recognize single handwritten digits (0-9)
# train data , test data
# (images, digits), (images, digits)
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
print(x_train.shape)
input_shape = x_train[0].shape
print(input_shape)
num_classes = len(set(y_test))  # digits 0, 1, .. 9
print(num_classes)
x_valid, x_train = x_train[:2000], x_train[2000:]
y_valid, y_train = y_train[:2000], y_train[2000:]
# TODO: Normalize all input data to [0, 1].
# Scale the 8-bit pixel values to [0, 1] (tf.keras.utils.normalize would L2-normalize rows instead).
x_train = x_train / 255.0
x_valid = x_valid / 255.0
x_test = x_test / 255.0
print("The shape of x_train is", x_train.shape)
# Now it's time to create the actual TensorFlow model.
# To get to know the syntax, you shall create the same network using the two available APIs.
# TODO: Use the sequential model API to create your model
# (https://www.tensorflow.org/guide/keras/sequential_model)
def build_model_sequential(input_shape, num_output_classes):
# Create an empty sequential model
model = tf.keras.models.Sequential()
# Add an input `flatten` layer with the provided `input_shape` to the model
model.add(tf.keras.layers.Flatten(input_shape=input_shape))
# Add a hidden `dense` layer of size 128 with "relu" activation to the model
model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))
# Add an output `dense` layer of size `num_output_classes` with "softmax" activation to the model
model.add(tf.keras.layers.Dense(num_output_classes, activation=tf.nn.softmax))
return model
# TODO: Use the functional model API to create the *same* model as above
# (https://www.tensorflow.org/guide/keras/functional)
def build_model_functional(input_shape, num_output_classes):
# Start by creating an `input` layer with the provided `input_shape`
inputlayer = tf.keras.layers.Input(shape=input_shape)
flattenlayer = tf.keras.layers.Flatten(input_shape=input_shape)(inputlayer)
# Then create the same layers as in `build_model_sequential`
hiddenlayer = tf.keras.layers.Dense(128, activation=tf.nn.relu)(flattenlayer)
outputlayer = tf.keras.layers.Dense(num_output_classes, activation="softmax")(hiddenlayer)
# Finally, build and return the actual model using the input layer and the last (output) layer
return tf.keras.Model(inputs=inputlayer, outputs=outputlayer)
model_seq = build_model_sequential(input_shape, num_classes)
model_fun = build_model_functional(input_shape, num_classes)
img_groups = []
log_dir = ["log_dir_seq", "log_dir_func"]
for index, model in enumerate([model_seq, model_fun]):
if hasattr(model, 'summary'):
model.summary() # Tipp: If this function fails, the above created model is not complete (e.g. input_shape
# information might be missing)
# TODO: Compile the model using
# "sgd" as `optimizer`
# "sparse_categorical_crossentropy" as `loss` function
# "accuracy" as additional `metrics`
model.compile(loss="sparse_categorical_crossentropy", optimizer="sgd",
metrics=["accuracy"])
# TODO: Train the model using the `x/y_train` data for 5 epocs.
# Attach a callback to enable evaluation of the training progress using `TensorBoard`.
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir[index])
history = model.fit(x_train, y_train, epochs=5, validation_data=(x_valid, y_valid),
callbacks=[tensorboard_callback])
score = model.evaluate(x_test, y_test)
print("The evaluation score on test images of", model, "is", score)
# Use the trained model to recognize the digit of some random images
num_samples = 9
sample_idx = np.random.randint(0, len(x_test), num_samples)
img_groups = img_groups + [
[("GT: " + str(y_test[i]) + " / Detected: " + str(np.argmax(model(x_test[[i], :, :]))), x_test[i]) for i in
sample_idx]]
for imgs in img_groups:
plt.figure(figsize=(6, 6))
showImages(imgs, 3, False)
plt.show()
```
#### File: Computer-Vison-and-ML-Assignments/sheet5/flow_utils.py
```python
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
def PLACEHOLDER_FLOW(frames):
return np.array(
[[[x, y] for x in np.linspace(-1, 1, frames[0].shape[1])] for y in np.linspace(-1, 1, frames[0].shape[0])])
PLACEHOLDER_FLOW_VISUALIZATION = cv2.imread('resources/example_flow_visualization.png')
#
# Task 1
#
# Implement utility functions for flow visualization.
def flowMapToBGR(flow_map):
# Flow vector (X, Y) to angle
h, w = flow_map.shape[:2]
X, Y = flow_map[:, :, 0], flow_map[:, :, 1]
angle = np.arctan2(Y, X) + np.pi
magnitude = np.sqrt(X * X + Y * Y)
# Angle and vector size to HSV color
hsv = np.zeros((h, w, 3), np.uint8)
# Sets image hue according to the optical flow direction
hsv[..., 0] = angle * (180 / np.pi / 2)
# Sets image saturation according to the optical flow magnitude (normalized)
hsv[..., 1] = cv2.normalize(magnitude, None, 0, 255, cv2.NORM_MINMAX)
# Sets image value to maximum
hsv[..., 2] = 255
bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return bgr
# TODO: Draw arrows depicting the provided `flow` on a 10x10 pixel grid.
# You may use `cv2.arrowedLine(..)`.
# def drawArrows(img, flow, arrow_color=(0, 255, 0)):
# outimg = img.copy()
#
# # Turn grayscale to rgb if needed
# if len(outimg.shape) == 2:
# outimg = np.stack((outimg,) * 3, axis=2)
#
# # Get start and end coordinates of the optical flow
# flow_start = np.stack(np.meshgrid(range(flow.shape[1]), range(flow.shape[0])), 2)
# flow_end = (flow[flow_start[:, :, 1], flow_start[:, :, 0], :1] * 3 + flow_start).astype(np.int32)
#
# # Threshold values
# norm = np.linalg.norm(flow_end - flow_start, axis=2)
# norm[norm < 2] = 0
#
# # Draw all the nonzero values
# nz = np.nonzero(norm)
# for i in range(0, len(nz[0]), 100):
# y, x = nz[0][i], nz[1][i]
# cv2.arrowedLine(outimg,
# pt1=tuple(flow_start[y, x]),
# pt2=tuple(flow_end[y, x]),
# color=arrow_color,
# thickness=1,
# tipLength=.2)
# return outimg
# works with 10 x 10 pixels map
# # TODO: Draw arrows depicting the provided `flow` on a 10x10 pixel grid.
# # You may use `cv2.arrowedLine(..)`.
def drawArrows(img, flow, arrow_color=(0, 255, 0)):
out_img = img.copy()
magnitude, ang = cv2.cartToPolar(flow[:, :, 0], flow[:, :, 1], angleInDegrees=False)
# magnitude = cv2.normalize(magnitude, None, 0, 10, cv2.NORM_MINMAX)
for i in range(0, flow.shape[0], 10):
for j in range(0, flow.shape[1], 10):
increment_x, increment_y = (10, 10)
if i + 10 > flow.shape[0]:
increment_y = flow.shape[0] - i
if j + 10 > flow.shape[1]:
increment_x = flow.shape[1] - j
avg_magnitude = np.mean(magnitude[i: i + increment_y, j: j + increment_x])
avg_angle = np.mean(ang[i: i + increment_y, j: j + increment_x])
flow_start = (j, i)
flow_end = (int(j + avg_magnitude * np.cos(avg_angle))
if int(j + avg_magnitude * np.cos(avg_angle)) > 0 else 0,
int(i + avg_magnitude * np.sin(avg_angle))
if int(i + avg_magnitude * np.sin(avg_angle)) > 0 else 0)
out_img = cv2.arrowedLine(out_img, flow_start, flow_end, color=arrow_color, tipLength=0.2)
return out_img
# Calculate the angular error of an estimated optical flow compared to ground truth
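# (i.e. the mean over all pixels of arccos of the normalized dot product between the
# homogeneous flow vectors (u_est, v_est, 1) and (u_gt, v_gt, 1))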
def calculateAngularError(estimated_flow, groundtruth_flow):
nom = groundtruth_flow[:, :, 0] * estimated_flow[:, :, 0] + groundtruth_flow[:, :, 1] * estimated_flow[:, :, 1] + \
1.0
denom = np.sqrt((groundtruth_flow[:, :, 0] ** 2 + groundtruth_flow[:, :, 1] ** 2 + 1.0) * (
estimated_flow[:, :, 0] ** 2 + estimated_flow[:, :, 1] ** 2 + 1.0))
return (1.0 / (estimated_flow.shape[0] * estimated_flow.shape[1])) * np.sum(np.arccos(np.clip(nom / denom, 0, 1)))
# Load a flow map from a file
def load_FLO_file(filename):
if os.path.isfile(filename) is False:
print("file does not exist %r" % str(filename))
flo_file = open(filename, 'rb')
magic = np.fromfile(flo_file, np.float32, count=1)
if magic != 202021.25:
print('Magic number incorrect. .flo file is invalid')
w = np.fromfile(flo_file, np.int32, count=1)
h = np.fromfile(flo_file, np.int32, count=1)
# The float values for u and v are interleaved in row order, i.e., u[row0,col0], v[row0,col0], u[row0,col1],
# v[row0,col1], ..., In total, there are 2*w*h flow values
data = np.fromfile(flo_file, np.float32, count=2 * w[0] * h[0])
# Reshape data into 3D array (columns, rows, bands)
flow = np.resize(data, (int(h), int(w), 2))
flo_file.close()
# Some cleanup (remove cv-destroying large numbers)
flow[np.sqrt(np.sum(flow ** 2, axis=2)) > 100] = 0
return flow
```
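To complement `load_FLO_file`, here is a minimal writer for the same layout, a sketch based only on the format the loader reads (a float32 magic number, int32 width and height, then 2*w*h interleaved float32 u/v values in row order); `save_FLO_file` is a hypothetical helper, not part of the repository:
```python
import numpy as np

def save_FLO_file(filename, flow):
    """Write an (H, W, 2) flow map in the layout expected by load_FLO_file."""
    h, w = flow.shape[:2]
    with open(filename, 'wb') as f:
        np.array([202021.25], dtype=np.float32).tofile(f)  # magic number checked by the loader
        np.array([w], dtype=np.int32).tofile(f)
        np.array([h], dtype=np.int32).tofile(f)
        flow.astype(np.float32).tofile(f)                  # row-major, u and v interleaved
```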
#### File: Computer-Vison-and-ML-Assignments/sheet6/depth_utils.py
```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
def plot3D(depth, gridsize, gridpos, gridspan):
xs = np.arange(0, depth.shape[1])
ys = np.arange(depth.shape[0], 0, -1)
xs, ys = np.meshgrid(xs, ys)
ax = plt.subplot2grid(gridsize, gridpos, projection='3d', rowspan=gridspan[0], colspan=gridspan[1])
ax.plot_surface(xs, ys, -depth, linewidth=0, antialiased=False, cmap=cm.coolwarm)
ax.axis("off")
ax.set_title("3D from depth")
ax.view_init(80, -90)
```
|
{
"source": "jeethsuresh/scrapd",
"score": 2
}
|
#### File: jeethsuresh/scrapd/noxfile.py
```python
from pathlib import Path
import nox
# Behavior's options.
nox.options.reuse_existing_virtualenvs = True
nox.options.sessions = ["venv"]
# Configuration values.
nox_file = Path()
project_name = 'scrapd'
dockerfile = 'Dockerfile'
docker_org = 'scrapd'
docker_repo = f'{docker_org}/{project_name}'
docker_img = f'{docker_repo}'
@nox.session()
def ci(session):
"""Run all the CI tasks."""
session.install('-rrequirements-dev.txt')
session.install('-e', '.')
run_sphinx(session)
run_yapf(session, True)
run_all_linters(session)
run_pytest_units(session)
run_pytest_integrations(session)
@nox.session()
def dist(session):
"""Package the application."""
session.install('-rrequirements-dev.txt')
session.install('-e', '.')
session.run('python', 'setup.py', 'bdist_wheel')
@nox.session()
def dist_upload(session):
"""Package the application."""
session.install('-rrequirements-dev.txt')
session.run('twine', 'upload', 'dist/*')
@nox.session()
def docs(session):
"""Ensure the documentation builds."""
session.install('-rrequirements-dev.txt')
session.install('-e', '.')
run_sphinx(session)
@nox.session()
def format(session):
"""Format the codebase using YAPF."""
session.install('-rrequirements-dev.txt')
run_yapf(session, diff=False)
@nox.session()
def lint(session):
"""Run all the linters."""
session.install('-rrequirements-dev.txt')
session.install('-e', '.')
run_all_linters(session)
@nox.session(name='lint-format')
def lint_format(session):
"""Check the formatting of the codebase using YAPF."""
session.install('-rrequirements-dev.txt')
run_yapf(session, diff=True)
@nox.session()
def profiling(session):
"""Setup the developper environment."""
# Install dependencies.
session.install('--upgrade', 'pip', 'setuptools')
session.install('-r', 'requirements-profilers.txt')
session.install('-e', '.')
env_dir = Path(session.bin)
scrapd = env_dir / 'scrapd'
session.run("pyinstrument", "--renderer", "html", f'{scrapd.resolve()}', "-v", "--format", "count", "--pages", "5")
@nox.session()
def pydocstyle(session):
"""Check the docstrings."""
session.install('-rrequirements-dev.txt')
session.install('-e', '.')
run_pydocstyle(session)
@nox.session()
def pylint(session):
"""Run the pylint linter."""
session.install('-rrequirements-dev.txt')
session.install('-e', '.')
run_pylint(session)
@nox.session(python='python3.7')
def test(session):
"""Run all the tests."""
session.install('-rrequirements-dev.txt')
session.install('-e', '.')
run_pytest(session)
@nox.session(python='python3.7', name='test-units')
def test_units(session):
"""Run the unit tests."""
session.install('-rrequirements-dev.txt')
session.install('-e', '.')
run_pytest_units(session)
@nox.session(python='python3.7', name='test-integrations')
def test_integrations(session):
"""Run the integration tests."""
session.install('-rrequirements-dev.txt')
session.install('-e', '.')
run_pytest_integrations(session)
@nox.session()
def venv(session):
"""Setup the developper environment."""
# Install dependencies.
session.install("--upgrade", "pip", "setuptools")
session.install("-r", "requirements-dev.txt")
session.install("-e", ".")
# Customize the venv.
env_dir = Path(session.bin)
activate = env_dir / 'activate'
with activate.open('a') as f:
f.write(f'\n[ -f {activate.resolve()}/postactivate ] && . {activate.resolve()}/postactivate\n')
scrapd_complete = nox_file / 'contrib/scrapd-complete.sh'
postactivate = env_dir / 'postactivate'
with postactivate.open('a') as f:
f.write('export PYTHONBREAKPOINT=bpdb.set_trace\n')
f.write(f'source {scrapd_complete.resolve()}\n')
predeactivate = env_dir / 'predeactivate'
with predeactivate.open('a') as f:
f.write('unset PYTHONBREAKPOINT\n')
def run_all_linters(session):
run_flake8(session)
run_pydocstyle(session)
run_pylint(session)
def run_flake8(session):
session.run('flake8', 'scrapd')
def run_pydocstyle(session):
session.run('pydocstyle', 'scrapd')
def run_pylint(session):
session.run('pylint', '--ignore=tests', 'scrapd')
def run_pytest(session, *posargs):
session.run('pytest', '-x', '--junitxml=/tmp/pytest/junit-py37.xml', '--cov-report', 'term-missing', '--cov-report',
'html', '--cov=scrapd', *posargs, f'{(nox_file / "tests").resolve()}')
def run_pytest_units(session):
run_pytest(session, '-m', 'not integrations')
def run_pytest_integrations(session):
run_pytest(session, '-m', 'integrations', '--reruns', '3', '--reruns-delay', '5', '-r', 'R')
def run_sphinx(session):
session.run('python', 'setup.py', 'build_sphinx')
def run_yapf(session, diff=True):
mode = '-d' if diff else '-i'
session.run('yapf', '-r', mode, '-e', '*.nox/*', '-e', '*.tox/*', '-e', '*venv/*', '-e', '*.eggs/*', '.')
```
#### File: jeethsuresh/scrapd/tasks.py
```python
from pathlib import Path
from invoke import task
from nox.virtualenv import VirtualEnv
# Configuration values.
project_name = 'scrapd'
docker_org = 'scrapd'
docker_repo = f'{docker_org}/{project_name}'
@task
def build_docker(c):
"""Build a docker image."""
tag = c.run('git describe', hide=True)
docker_img = f'{docker_repo}:{tag.stdout.strip()}'
c.run(f'docker build -t {docker_img} .')
@task
def clean(c):
"""Remove unwanted files and artifacts in this project (!DESTRUCTIVE!)."""
clean_docker(c)
clean_repo(c)
@task
def clean_docker(c):
"""Remove all docker images built for this project (!DESTRUCTIVE!)."""
c.run(f'docker image rm -f $(docker image ls --filter reference={docker_repo} -q) || true')
@task
def clean_repo(c):
"""Remove unwanted files in project (!DESTRUCTIVE!)."""
c.run('git clean -ffdx')
c.run('git reset --hard')
@task
def flame_graph(c):
c.run(f'nox --install-only -s profiling')
location = Path('.nox/profiling')
venv = VirtualEnv(location.resolve())
venv_bin = Path(venv.bin)
scrapd = venv_bin / 'scrapd'
c.run(f'sudo py-spy -d 20 --flame profile.svg -- {scrapd.resolve()} -v --pages 5')
@task
def nox(c, s=''):
"""Wrapper for the nox tasks (`inv nox list` for details)."""
if not s:
c.run('nox --list')
else:
c.run(f'nox -s {s}')
@task
def publish(c):
"""Publish the documentation."""
c.run('./.circleci/publish.sh')
@task(default=True)
def setup(c):
"""Setup the developper environment."""
c.run('nox --envdir .')
```
#### File: tests/core/test_apd.py
```python
from datetime import date
from unittest import mock
import aiohttp
from aioresponses import aioresponses
import asynctest
from faker import Faker
from loguru import logger
import pytest
from tenacity import RetryError
from tenacity import stop_after_attempt
from scrapd.core import apd
from scrapd.core.constant import Fields
from tests import mock_data
from tests.test_common import TEST_DATA_DIR
from tests.test_common import scenario_ids
from tests.test_common import scenario_inputs
# Disable logging for the tests.
logger.remove()
# Set faker object.
fake = Faker()
def load_test_page(page):
"""Load a test page."""
page_fd = TEST_DATA_DIR / page
return page_fd.read_text()
@pytest.fixture(scope='session')
def news_page():
"""Returns the test news page."""
page_fd = TEST_DATA_DIR / 'news_page.html'
return page_fd.read_text()
parse_twitter_fields_scenarios = {
'traffic-fatality-2-3': {
Fields.CASE: '19-0161105',
Fields.CRASHES: '2',
},
'traffic-fatality-73-2': {
Fields.AGE: 38,
Fields.CASE: '18-3640187',
Fields.CRASHES: '73',
Fields.DOB: date(1980, 2, 9),
Fields.DATE: date(2018, 12, 30),
Fields.ETHNICITY: 'White',
Fields.FIRST_NAME: 'Corbin',
Fields.GENDER: 'male',
Fields.LAST_NAME: 'Sabillon-Garcia',
Fields.LOCATION: '1400 E. Highway 71 eastbound',
Fields.NOTES: 'The preliminary investigation shows that a 2003 Ford F150 was '
'traveling northbound on the US Highway 183 northbound ramp to E. '
'Highway 71, eastbound. The truck went across the E. Highway 71 and '
'US Highway 183 ramp, rolled and came to a stop north of the '
'roadway.',
Fields.TIME: '2:24 a.m.',
},
'traffic-fatality-72-1': {
Fields.CASE: '18-3551763',
Fields.CRASHES: '72',
Fields.DATE: date(2018, 12, 21),
Fields.LOCATION: '9500 N Mopac SB',
Fields.TIME: '8:20 p.m.',
},
'traffic-fatality-71-2': {
Fields.CASE: '18-3381590',
Fields.CRASHES: '71',
Fields.DATE: date(2018, 12, 4),
Fields.LOCATION: '183 service road westbound and Payton Gin Rd.',
Fields.TIME: '8:39 p.m.',
},
}
parse_page_content_scenarios = {
'traffic-fatality-2-3': {
**parse_twitter_fields_scenarios['traffic-fatality-2-3'],
Fields.AGE: 58,
Fields.CRASHES: '2',
Fields.DOB: date(1960, 2, 15),
Fields.DATE: date(2019, 1, 16),
Fields.ETHNICITY: 'White',
Fields.FIRST_NAME: 'Ann',
Fields.GENDER: 'female',
Fields.LAST_NAME: 'Bottenfield-Seago',
Fields.LOCATION: 'West William Cannon Drive and Ridge Oak Road',
Fields.TIME: '3:42 p.m.',
},
'traffic-fatality-73-2': {
Fields.AGE: 38,
Fields.CASE: '18-3640187',
Fields.CRASHES: '73',
Fields.DOB: date(1980, 2, 9),
Fields.DATE: date(2018, 12, 30),
Fields.ETHNICITY: 'White',
Fields.FIRST_NAME: 'Corbin',
Fields.GENDER: 'male',
Fields.LAST_NAME: 'Sabillon-Garcia',
Fields.LOCATION: '1400 E. Highway 71 eastbound',
Fields.TIME: '2:24 a.m.',
},
'traffic-fatality-72-1': {
**parse_twitter_fields_scenarios['traffic-fatality-72-1'],
Fields.AGE: 22,
Fields.DOB: date(1996, 3, 29),
Fields.ETHNICITY: 'White',
Fields.FIRST_NAME: 'Elijah',
Fields.GENDER: 'male',
Fields.LAST_NAME: 'Perales',
},
'traffic-fatality-71-2': {
**parse_twitter_fields_scenarios['traffic-fatality-71-2'],
Fields.DOB: date(1964, 6, 1),
Fields.FIRST_NAME: 'Barkat',
Fields.LAST_NAME: 'Umatia',
Fields.ETHNICITY: 'Other',
Fields.GENDER: 'male',
Fields.AGE: 54,
}
}
parse_page_scenarios = {
'traffic-fatality-2-3': {
**parse_page_content_scenarios['traffic-fatality-2-3'],
**parse_twitter_fields_scenarios['traffic-fatality-2-3'],
},
'traffic-fatality-73-2': {
**parse_page_content_scenarios['traffic-fatality-73-2'],
**parse_twitter_fields_scenarios['traffic-fatality-73-2'],
},
'traffic-fatality-72-1': {
**parse_page_content_scenarios['traffic-fatality-72-1'],
**parse_twitter_fields_scenarios['traffic-fatality-72-1'],
},
}
@pytest.mark.parametrize('input_,expected', (
(
mock_data.twitter_title_00,
{
'Fatal crashes this year': '73'
},
),
(None, {}),
))
def test_parse_twitter_title_00(input_, expected):
"""Ensure the Twitter title gets parsed correct."""
actual = apd.parse_twitter_title(input_)
assert actual == expected
@pytest.mark.parametrize('input_,expected', (
(
mock_data.twitter_description_00,
{
'Case': '18-3640187',
'Date': date(2018, 12, 30),
'Time': '2:24 a.m.',
'Location': '1400 E. Highway 71 eastbound',
'DOB': date(1980, 2, 9),
'Notes': 'The preliminary investigation shows that a 2003 Ford F150 was '
'traveling northbound on the US Highway 183 northbound ramp to E. Highway 71, eastbound. '
'The truck went across the E. Highway 71 and US Highway 183 ramp, rolled '
'and came to a stop north of the roadway.',
'Gender': 'male',
'Ethnicity': 'White',
'Last Name': 'Sabillon-Garcia',
'First Name': 'Corbin',
'Age': 38,
},
),
(None, {}),
))
def test_parse_twitter_description_00(input_, expected):
"""Ensure the Twitter description gets parsed correctly."""
actual = apd.parse_twitter_description(input_)
assert actual == expected
def test_parse_twitter_description_01():
"""Ensure the Twitter description gets parsed correctly."""
actual = apd.parse_twitter_description(mock_data.twitter_description_01)
expected = {
Fields.CASE: '19-0161105',
}
assert actual == expected
def test_parse_twitter_description_02():
"""Ensure a DOB recognized as a field can be parsed."""
actual = apd.parse_twitter_description(mock_data.twitter_description_02)
expected = {
'Age': 57,
'Case': '18-160882',
'DOB': date(1961, 1, 22),
'Date': date(2018, 1, 16),
'Location': '1500 W. Slaughter Lane',
'Time': '5:14 p.m.',
}
assert actual == expected
def test_parse_twitter_description_03():
"""Ensure a DOB recognized as a field can be parsed."""
actual = apd.parse_twitter_description(mock_data.twitter_description_03)
expected = {}
assert actual == expected
parse_details_page_notes_scenarios = [
((mock_data.details_page_notes_01, ''), 'Ensure a malformed entry is not parsed'),
((mock_data.details_page_notes_02, mock_data.details_page_notes_02_expected),
'Ensure details page notes parsed correctly'),
]
@pytest.mark.parametrize('input_,expected',
scenario_inputs(parse_details_page_notes_scenarios),
ids=scenario_ids(parse_details_page_notes_scenarios))
def test_parse_details_page_notes_01(input_, expected):
"""Ensure details page notes parsed correctly."""
actual = apd.parse_details_page_notes(input_)
assert actual == expected
def test_extract_traffic_fatalities_page_details_link_00(news_page):
"""Ensure page detail links are extracted from news page."""
actual = apd.extract_traffic_fatalities_page_details_link(news_page)
expected = [
('/news/traffic-fatality-2-3', 'Traffic Fatality #2', '2'),
('/news/traffic-fatality-1-4', 'Traffic Fatality #1', '1'),
('/news/traffic-fatality-72-1', 'Traffic Fatality #72', '72'),
('/news/traffic-fatality-73-2', 'Traffic Fatality #73', '73'),
('/news/traffic-fatality-71-2', 'Traffic Fatality #71', '71'),
('/news/traffic-fatality-69-3', 'Traffic Fatality #69', '69'),
]
assert actual == expected
@pytest.mark.parametrize('deceased,expected', (
("Rosbel “Rudy” Tamez, Hispanic male (D.O.B. 10-10-54)", {
Fields.FIRST_NAME: "Rosbel",
Fields.LAST_NAME: "Tamez",
Fields.ETHNICITY: "Hispanic",
Fields.GENDER: "male",
Fields.DOB: date(1954, 10, 10),
}),
("<NAME>, W/F, DOB: 01-22-1961 (passenger)", {
Fields.FIRST_NAME: "Eva",
Fields.LAST_NAME: "Gonzales",
Fields.ETHNICITY: "White",
Fields.GENDER: 'female',
Fields.DOB: date(1961, 1, 22),
}),
(
'DOB: 01-01-99',
{
Fields.DOB: date(1999, 1, 1),
},
),
(
'<NAME> | Asian male | 08/01/1949',
{
Fields.FIRST_NAME: "Wing",
Fields.LAST_NAME: "Chou",
Fields.ETHNICITY: "Asian",
Fields.GENDER: "male",
Fields.DOB: date(1949, 8, 1),
},
),
(
'<NAME> W/M 10-8-1981',
{
Fields.FIRST_NAME: "Christopher",
Fields.LAST_NAME: "Peterson",
Fields.ETHNICITY: "White",
Fields.GENDER: "male",
Fields.DOB: date(1981, 10, 8),
},
),
(
'Luis <NAME>, Hispanic male (11-12-07',
{
Fields.FIRST_NAME: "Luis",
Fields.LAST_NAME: "Tinoco",
Fields.ETHNICITY: "Hispanic",
Fields.GENDER: "male",
Fields.DOB: date(2007, 11, 12)
},
),
(
'<NAME>, White male, 8-28-51',
{
Fields.FIRST_NAME: "Ronnie",
Fields.LAST_NAME: "Hall",
Fields.ETHNICITY: "White",
Fields.GENDER: "male",
Fields.DOB: date(1951, 8, 28)
},
),
(
'Hispanic male, 19 years of age',
{
Fields.ETHNICITY: "Hispanic",
Fields.GENDER: "male",
Fields.AGE: 19,
},
),
(
'<NAME>, Black male, D.O.B. August 30, 1966',
{
Fields.FIRST_NAME: "Patrick",
Fields.LAST_NAME: "Ervin",
Fields.ETHNICITY: "Black",
Fields.GENDER: "male",
Fields.DOB: date(1966, 8, 30)
},
),
(
'<NAME>, H/M, (DOB: 11/15/1977) ',
{
Fields.FIRST_NAME: "Ernesto",
Fields.LAST_NAME: "Garcia",
Fields.ETHNICITY: "Hispanic",
Fields.GENDER: "male",
Fields.DOB: date(1977, 11, 15)
},
),
(
'B/F, DOB: 01-01-99',
{
Fields.ETHNICITY: "Black",
Fields.GENDER: "female",
Fields.DOB: date(1999, 1, 1)
},
),
))
def test_process_deceased_field_00(deceased, expected):
"""Ensure a deceased field is parsed correctly."""
d = apd.process_deceased_field(deceased)
for key in expected:
assert d[key] == expected[key]
@pytest.mark.parametrize('name,expected', (
(['Jonathan,', 'Garcia-Pineda,'], {
'first': 'Jonathan',
'last': 'Garcia-Pineda'
}),
(['Rosbel', '“Rudy”', 'Tamez'], {
'first': 'Rosbel',
'last': 'Tamez'
}),
(['Christopher', 'M', 'Peterson'], {
'first': 'Christopher',
'last': 'Peterson'
}),
(['David', 'Adam', 'Castro,'], {
'first': 'David',
'last': 'Castro'
}),
(['Delta', 'Olin,'], {
'first': 'Delta',
'last': 'Olin'
}),
(None, {
'first': None,
'last': None,
}),
(['Carlos', 'Cardenas', 'Jr.'], {
'first': 'Carlos',
'last': 'Cardenas',
}),
))
def test_parse_name(name, expected):
"""Ensure parser finds the first and last name given the full name."""
parsed = apd.parse_name(name)
assert parsed.get("first") == expected["first"]
assert parsed.get("last") == expected["last"]
def test_extract_traffic_fatalities_page_details_link_01():
"""Ensure page detail links are extracted from news page."""
news_page = """
<div class="views-field views-field-title">
<span class="field-content">
<a href="/news/traffic-fatality-59-update">Traffic Fatality #59- Update</a>
</span>
</div>
"""
actual = apd.extract_traffic_fatalities_page_details_link(news_page)
expected = [('/news/traffic-fatality-59-update', 'Traffic Fatality #59', '59')]
assert actual == expected
def test_generate_detail_page_urls_00():
"""Ensure a full URL is generated from a partial one."""
actual = apd.generate_detail_page_urls([
('/news/traffic-fatality-1-4', 'Traffic Fatality #1', '1'),
('/news/traffic-fatality-2-3', 'Traffic Fatality #2', '2'),
])
expected = [
'http://austintexas.gov/news/traffic-fatality-1-4',
'http://austintexas.gov/news/traffic-fatality-2-3',
]
assert actual == expected
def test_has_next_00(news_page):
"""Ensure we detect whether there are more news pages."""
assert apd.has_next(news_page)
def test_has_next_01():
"""Ensure we detect whether there are no more news pages."""
assert apd.has_next(None) is False
@pytest.mark.parametrize(
'input_,expected',
(('<div class="item-list"><ul class="pager"><li class="pager-previous first"> </li>'
'<li class="pager-current">1 of 27</li>'
'<li class="pager-next last"><a title="Go to next page" href="/department/news/296-page=1">next ›</a></li>'
'</ul></div>', True), ))
def test_has_next_02(input_, expected):
"""Ensure we detect whether there are more news pages."""
assert apd.has_next(input_) == expected
@pytest.mark.parametrize('filename,expected', [(k, v) for k, v in parse_page_content_scenarios.items()])
def test_parse_page_content_00(filename, expected):
"""Ensure information are properly extracted from the content detail page.
Don't compare notes if parsed from details page."""
page_fd = TEST_DATA_DIR / filename
page = page_fd.read_text()
actual = apd.parse_page_content(page)
if 'Notes' in actual and 'Notes' not in expected:
del actual['Notes']
assert actual == expected
def test_parse_page_content_01(mocker):
"""Ensure a `process_deceased_field` exception is caught and does not propagate."""
page_fd = TEST_DATA_DIR / 'traffic-fatality-2-3'
page = page_fd.read_text()
mocker.patch('scrapd.core.apd.process_deceased_field', side_effect=ValueError)
result = apd.parse_page_content(page)
assert len(result) == 6
def test_parse_page_content_02(mocker):
"""Ensure a log entry is created if there is no deceased field."""
result = apd.parse_page_content('Case: 01-2345678')
assert result
def test_parse_page_content_03():
"""Ensure a missing case number raises an exception."""
with pytest.raises(ValueError):
apd.parse_page_content('The is no case number here.')
@pytest.mark.parametrize('filename,expected', [(k, v) for k, v in parse_twitter_fields_scenarios.items()])
def test_parse_twitter_fields_00(filename, expected):
"""Ensure information are properly extracted from the twitter fields on detail page."""
page_fd = TEST_DATA_DIR / filename
page = page_fd.read_text()
actual = apd.parse_twitter_fields(page)
assert actual == expected
@pytest.mark.parametrize('filename,expected', [(k, v) for k, v in parse_page_scenarios.items()])
def test_parse_page_00(filename, expected):
"""Ensure information are properly extracted from the page.
Don't compare notes if parsed from details page."""
page_fd = TEST_DATA_DIR / filename
page = page_fd.read_text()
actual = apd.parse_page(page)
if 'Notes' in actual and 'Notes' not in expected:
del actual['Notes']
assert actual == expected
@asynctest.patch("scrapd.core.apd.fetch_news_page",
side_effect=[load_test_page(page) for page in ['296', '296-page=1', '296-page=27']])
@asynctest.patch("scrapd.core.apd.fetch_detail_page", return_value=load_test_page('traffic-fatality-2-3'))
@pytest.mark.asyncio
async def test_date_filtering_00(fake_details, fake_news):
"""Ensure the date filtering do not fetch unnecessary data."""
expected = 2
data, actual = await apd.async_retrieve(pages=-1, from_="2050-01-02", to="2050-01-03")
assert actual == expected
assert isinstance(data, list)
@asynctest.patch("scrapd.core.apd.fetch_news_page",
side_effect=[load_test_page(page) for page in ['296', '296-page=1', '296-page=27']])
@asynctest.patch("scrapd.core.apd.fetch_detail_page", return_value=load_test_page('traffic-fatality-2-3'))
@pytest.mark.asyncio
async def test_date_filtering_01(fake_details, fake_news):
"""Ensure the date filtering do not fetch unnecessary data."""
data, _ = await apd.async_retrieve(pages=-5, from_="2019-01-02", to="2019-01-03")
assert isinstance(data, list)
@asynctest.patch("scrapd.core.apd.fetch_news_page",
side_effect=[load_test_page(page) for page in ['296', '296-page=1', '296-page=27']])
@asynctest.patch(
"scrapd.core.apd.fetch_detail_page",
side_effect=[load_test_page(page) for page in ['traffic-fatality-2-3'] + ['traffic-fatality-71-2'] * 14])
@pytest.mark.asyncio
async def test_date_filtering_02(fake_details, fake_news):
"""Ensure the date filtering do not fetch unnecessary data."""
data, page_count = await apd.async_retrieve(from_="2019-01-16", to="2019-01-16")
assert isinstance(data, list)
assert len(data) == 1
assert page_count == 2
@pytest.mark.asyncio
async def test_fetch_text_00():
"""Ensure `fetch_text` retries several times."""
text = None
apd.fetch_text.retry.sleep = mock.Mock()
async with aiohttp.ClientSession() as session:
try:
text = await apd.fetch_text(session, 'fake_url')
except Exception:
pass
assert not text
assert apd.fetch_text.retry.statistics['attempt_number'] > 1
@pytest.mark.asyncio
async def test_fetch_text_01():
"""Ensure fetch_text retrieves some text."""
url = fake.uri()
with aioresponses() as m:
m.get(url, payload=dict(foo='bar'))
async with aiohttp.ClientSession() as session:
text = await apd.fetch_text(session, url)
assert '{"foo": "bar"}' == text
@asynctest.patch("scrapd.core.apd.fetch_news_page", side_effect=ValueError)
@pytest.mark.asyncio
async def test_async_retrieve_00(fake_news):
"""Ensure `async_retrieve` raises `ValueError` when `fetch_news_page` fails to retrieve data."""
with pytest.raises(ValueError):
await apd.async_retrieve()
@pytest.mark.parametrize('input_,expected', (
('<p><strong>Case: </strong>19-0881844</p>', '19-0881844'),
('<p><strong>Case:</strong> 18-3640187</p>', '18-3640187'),
('<strong>Case:</strong></span><span style="color: rgb(32, 32, 32); '
'font-family: "Verdana",sans-serif; font-size: 10.5pt; '
'mso-fareast-font-family: "Times New Roman"; '
'mso-ansi-language: EN-US; mso-fareast-language: EN-US; mso-bidi-language: AR-SA; '
'mso-bidi-font-family: "Times New Roman";"> 19-0161105</span></p>', '19-0161105'),
('<p><strong>Case:</strong> 18-1591949 </p>', '18-1591949'),
('<p><strong>Case:</strong> 18-590287<br />', '18-590287'),
))
def test_parse_case_field_00(input_, expected):
"""Ensure a case field gets parsed correctly."""
actual = apd.parse_case_field(input_)
assert actual == expected
@pytest.mark.parametrize(
'input_, expected',
(('<span property="dc:title" content="Traffic Fatality #12" class="rdf-meta element-hidden"></span>', '12'), ))
def test_parse_crashes_field_00(input_, expected):
"""Ensure the crashes field gets parsed correctly."""
actual = apd.parse_crashes_field(input_)
assert actual == expected
@asynctest.patch("scrapd.core.apd.fetch_detail_page", return_value='')
@pytest.mark.asyncio
async def test_fetch_and_parse_00(empty_page):
"""Ensure an empty page raises an exception."""
with pytest.raises(RetryError):
apd.fetch_and_parse.retry.stop = stop_after_attempt(1)
await apd.fetch_and_parse(None, 'url')
@asynctest.patch("scrapd.core.apd.fetch_detail_page", return_value='Not empty page')
@pytest.mark.asyncio
async def test_fetch_and_parse_01(page, mocker):
"""Ensure a page that cannot be parsed returns an exception."""
mocker.patch("scrapd.core.apd.parse_page", return_value={})
with pytest.raises(RetryError):
apd.fetch_and_parse.retry.stop = stop_after_attempt(1)
await apd.fetch_and_parse(None, 'url')
@asynctest.patch("scrapd.core.apd.fetch_text", return_value='')
@pytest.mark.asyncio
async def test_fetch_news_page_00(fetch_text):
"""Ensure the fetch function is called with the right parameters."""
page = 2
params = {'page': page - 1}
async with aiohttp.ClientSession() as session:
try:
await apd.fetch_news_page(session, page)
except Exception:
pass
fetch_text.assert_called_once_with(session, apd.APD_URL, params)
@asynctest.patch("scrapd.core.apd.fetch_text", return_value='')
@pytest.mark.asyncio
async def test_fetch_detail_page_00(fetch_text):
"""Ensure the fetch function is called with the right parameters."""
url = fake.uri()
async with aiohttp.ClientSession() as session:
try:
await apd.fetch_detail_page(session, url)
except Exception:
pass
fetch_text.assert_called_once_with(session, url)
@pytest.mark.parametrize('input_,expected',
(('<meta name="twitter:title" content="Traffic Fatality #2" />', 'Traffic Fatality #2'), ))
def test_extract_twitter_tittle_meta_00(input_, expected):
"""Ensure we can extract the twitter tittle from the meta tag."""
actual = apd.extract_twitter_tittle_meta(input_)
assert actual == expected
@pytest.mark.parametrize('input_,expected', (
('<meta name="twitter:description" content="Case: 18-3551763 Date: December 21, 2018 '
'Time: 8:20 p.m. Location: 9500 N Mopac SB" />',
'Case: 18-3551763 Date: December 21, 2018 Time: 8:20 p.m. '
'Location: 9500 N Mopac SB'),
('<meta name="twitter:description" content="Case: 19-0161105" />', 'Case: 19-0161105'),
))
def test_extract_twitter_description_meta_00(input_, expected):
"""Ensure we can extract the twitter tittle from the meta tag."""
actual = apd.extract_twitter_description_meta(input_)
assert actual == expected
@pytest.mark.parametrize('input_,expected', (
('Time: </span> Approximately 01:14a.m.', '01:14a.m.'),
('<tag>Time: 08:35 pm<br />', '08:35 pm'),
('Time: 8:47 P.M.', '8:47 P.M.'),
('Time:12:47 p.M.', '12:47 p.M.'),
('Time: 5:16', '5:16'),
('Time: 05:16 ', '05:16'),
('Time: 18:26', '18:26'),
('Time: 22:56', '22:56'),
('Time: 54:34', ''),
('Time: 28:24', ''),
('Time: 4:66 pm', ''),
('Time: 18:46 pm', '18:46'),
('Time: 00:24 a.m.', '00:24'),
))
def test_parse_time_field_00(input_, expected):
"""Ensure a time field gets parsed correctly."""
actual = apd.parse_time_field(input_)
assert actual == expected
@pytest.mark.parametrize('input_,expected', (
('<strong>Date: </strong>April 18, 2019</p>', '04/18/2019'),
('>Date: </strong> Night of May 22 2019</p>', '05/22/2019'),
('>Date:</span></strong> Wednesday, Oct. 3, 2018</p>', '10/03/2018'),
('>Date: night Apr 1-2012</p>', '04/01/2012'),
('>Date: feb. 2 2018</p>', '02/02/2018'),
('>Date: 10-1-17</p>', '10/01/2017'),
('>Date: Morning of 2,2,19 </p>', '02/02/2019'),
('>Date: 3/3/19</p>', '03/03/2019'),
('', ''),
('>Date: Afternoon</p>', ''),
))
def test_parse_date_field_00(input_, expected):
"""Ensure a date field gets parsed correctly."""
actual = apd.parse_date_field(input_)
assert actual == expected
@pytest.mark.parametrize('input_,expected', (
('>Deceased: </strong> <NAME> | Hispanic male | 04/03/1994</p>', \
'<NAME> | Hispanic male | 04/03/1994'),
('>Deceased: </strong> Cecil Wade Walker, White male, D.O.B. 3-7-70<', \
'Cecil Wade Walker, White male, D.O.B. 3-7-70'),
('>Deceased: </span></strong> <NAME> - Black male - 9-24-78<', \
'<NAME> - Black male - 9-24-78'),
('', ''),
))
def test_parse_deceased_field_00(input_, expected):
"""Ensure the deceased field gets parsed correctly."""
actual = apd.parse_deceased_field(input_)
assert actual == expected
@pytest.mark.parametrize('input_,expected', (
(
{
'Time': 345
},
{
'Time': 345
},
),
(
{
'Time': ['123', '345']
},
{
'Time': '123 345'
},
),
(
{
'Time': ' '
},
{},
),
(
{
'Time': None
},
{},
),
))
def test_sanitize_fatality_entity(input_, expected):
"""Ensure field values are sanitized."""
actual = apd.sanitize_fatality_entity(input_)
assert actual == expected
@pytest.mark.parametrize('input_,expected', (
(
'>Location:</span></strong> West William Cannon Drive and Ridge Oak Road</p>',
'West William Cannon Drive and Ridge Oak Road',
),
(
'>Location:</strong> 183 service road westbound and Payton Gin Rd.</p>',
'183 service road westbound and Payton Gin Rd.',
),
))
def test_parse_location_field_00(input_, expected):
"""Ensure."""
actual = apd.parse_location_field(input_)
assert actual == expected
```
|
{
"source": "JeetKamdar/Big-Data-Assignments",
"score": 3
}
|
#### File: Big-Data-Assignments/Assignment 2/task1.py
```python
#!/usr/bin/env python
import sys
from pyspark import SparkContext
from csv import reader
def my_counter(x, y):
return (0, 0)
def format_output(x):
return ("%s\t%s, %s, %s, %s" % (x[0], x[1][3], x[1][4],x[1][2],x[1][1]))
sc = SparkContext()
parking_violations_lines = sc.textFile(sys.argv[1])
parking_violations_lines = parking_violations_lines.mapPartitions(lambda x: reader(x))
parking_violations = parking_violations_lines.map(lambda x: (x[0], (1, x[1], x[2], x[-8], x[-16])))
open_violations_lines = sc.textFile(sys.argv[2])
open_violations_lines = open_violations_lines.mapPartitions(lambda x: reader(x))
open_violations = open_violations_lines.map(lambda x: (x[0], (0, 'O')))
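# reduceByKey collapses every key that occurs more than once (in particular, parking violations that
# also have an open-violation record) to a zeroed marker, so the filter below keeps only violations
# with no matching open record, in effect an anti-join.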
all_violations = parking_violations.union(open_violations) \
.reduceByKey(my_counter) \
.filter(lambda x: x[1][0] == 1) \
.sortByKey() \
.map(format_output) \
.saveAsTextFile("task1.out")
```
|
{
"source": "JeetKaria06/Airline-management-system",
"score": 3
}
|
#### File: app/connection/main.py
```python
from flask import current_app as app
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import text
db = SQLAlchemy(app)
def trial_insert(city, state, country):
query = text(f"INSERT INTO Country VALUES('{city}', '{state}', '{country}'); ")
result = db.engine.execute(query)
return result
```
#### File: app/connection/utils.py
```python
from sqlalchemy import text
from sqlalchemy.exc import SQLAlchemyError
from .main import db
def error_handler(query):
result = {'data': None, 'success': True}
try:
result['data'] = db.engine.execute(query)
except SQLAlchemyError as e:
result['success'] = False
result['data'] = str(e.orig)
return result
def count_instances(table_name):
table_name = str(table_name).lower()
query = f'SELECT COUNT(*) FROM {table_name};'
result = error_handler(query)
if result['success']:
count = 0
for row in result['data']:
count = row[0]
return {'data': int(count), 'success': True}
return result
def get_table_info(table_name):
table_name = str(table_name).lower()
query = f"""SELECT column_name, data_type FROM information_schema.columns
WHERE table_name='{table_name}'
ORDER BY ordinal_position;"""
result = error_handler(query)
if result['success']:
column_data = []
for row in result['data']:
record = dict()
record['column_name'], record['data_type'] = row[0], row[1]
column_data.append(record)
return {'data': column_data, 'success': True}
return result
def get_table_info_records(table_name):
table_name = str(table_name).lower()
column_data = get_table_info(table_name)
if column_data['success']:
records = list()
query = f'SELECT * FROM {table_name};'
result = error_handler(query)
if result['success']:
for row in result['data']:
new_record = dict()
for (column, record) in zip(column_data['data'], row):
new_record[column['column_name']] = record
records.append(new_record)
return {
'data': {
'column_data': column_data['data'],
'records': records
},
'success': True
}
return result
return column_data
def delete_record(table_name, where_condition):
table_name = str(table_name).lower()
query = f"DELETE FROM {table_name} WHERE ({where_condition});"
return error_handler(query)
def add_record(table_name, values):
table_name = str(table_name).lower()
query = f"INSERT INTO {table_name} VALUES({values});"
return error_handler(query)
def update_record(table_name, set_values, where_condition):
table_name = str(table_name).lower()
query = f"UPDATE {table_name} SET {set_values} WHERE ({where_condition})"
return error_handler(query)
def add_account(email, password, type):
query_text = f"call add_account('{email}', '{password}', '{type}');"
query = text(query_text).execution_options(autocommit=True)
return error_handler(query)
def book_ticket(email, source, destination, t_class, date, seat, code):
query_text = f"call book_ticket('{email}', '{source}', '{destination}', '{t_class}', '{date}', '{seat}', '{code}');"
query = text(query_text).execution_options(autocommit=True)
return error_handler(query)
def cancel_ticket(ticket_number):
query_text = f"CALL cancel_ticket(CAST({int(ticket_number)} AS BIGINT));"
query = text(query_text).execution_options(autocommit=True)
return error_handler(query)
def select_particulat_record(table_name, where_condition):
table_name = str(table_name).lower()
query = f"SELECT * FROM {table_name} WHERE {where_condition};"
return error_handler(query)
```
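For orientation, a minimal sketch of how these helpers compose. The import path is inferred from the file layout, the `country` table with its (city, state, country) columns is only an assumption taken from `trial_insert` in `main.py`, and the calls presume an active Flask application context:
```python
from app.connection.utils import add_record, get_table_info_records, delete_record

# Hypothetical record; the column order mirrors trial_insert() above.
result = add_record('country', "'Mumbai', 'Maharashtra', 'India'")
if result['success']:
    rows = get_table_info_records('country')['data']['records']
    print(rows)
else:
    print('insert failed:', result['data'])

# Remove the hypothetical record again.
delete_record('country', "city = 'Mumbai'")
```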
#### File: app/routes/admin.py
```python
from flask import current_app as app
from flask import render_template, request, redirect, url_for, flash
from .utils import create_values, create_where_condition
from ..connection.utils import (count_instances, get_table_info, delete_record,
add_record, get_table_info_records, update_record)
@app.route('/admin')
def admin_home_route():
tables = [
{'table': 'Country', 'name': 'Countries'},
{'table': 'Airport', 'name': 'Airports'},
{'table': 'Airline', 'name': 'Airlines'},
{'table': 'Contains', 'name': 'Connections'},
{'table': 'Employee', 'name': 'Employees'},
{'table': 'Airport_authority', 'name': 'Airport Authorities'},
{'table': 'Engineer', 'name': 'Engineers'},
{'table': 'Traffic_monitor', 'name': 'Traffic Monitors'},
{'table': 'Administration', 'name': 'Administrators'},
{'table': 'Salary', 'name': 'Salary Types'},
]
for table in tables:
response = count_instances(table['table'])
if not response['success']:
return render_template('base/error.html')
table['instances'] = response['data']
context = {'tables': tables}
return render_template('admin/home.html', **(context))
```
|
{
"source": "jeetkunecoder/podio",
"score": 2
}
|
#### File: podio/migrations/0002_create_initial_subjects.py
```python
from django.db import migrations
def create_subjects(apps, schema_editor):
Subject = apps.get_model('podio', 'Subject')
Subject.objects.create(name='Arts', color='#343a40')
Subject.objects.create(name='Computing', color='#007bff')
Subject.objects.create(name='Math', color='#28a745')
Subject.objects.create(name='Biology', color='#17a2b8')
Subject.objects.create(name='History', color='#ffc107')
class Migration(migrations.Migration):
dependencies = [
('podio', '0001_initial'),
]
operations = [
migrations.RunPython(create_subjects),
]
```
#### File: podio/views/podio.py
```python
from django.shortcuts import redirect, render
from django.views.generic import TemplateView
class SignUpView(TemplateView):
template_name = 'registration/signup.html'
def home(request):
if request.user.is_authenticated:
if request.user.is_teacher:
return redirect('teachers:quiz_change_list')
else:
return redirect('students:quiz_list')
return render(request, 'podio/home.html')
```
|
{
"source": "JeetMo/Semantify-NN",
"score": 2
}
|
#### File: JeetMo/Semantify-NN/algo_Semantic.py
```python
import numpy as np
from algo_CROWN import CROWN
from threat_models.threat_lighten import get_first_layers as lighten_layers
from threat_models.threat_saturate import get_first_layers as saturate_layers
from threat_models.threat_hue import get_first_layers as hue_layers
from threat_models.threat_bandc import get_first_layers as bandc_layers
from utils.get_epsilon import get_eps_2 as get_eps
class Semantic(CROWN):
def __init__(self, model):
super().__init__(model)
@staticmethod
def get_layer_bound_implicit(W, b, UB_prev, LB_prev, is_first, x0, eps):
UB_new = np.empty_like(b)
LB_new = np.empty_like(b)
if is_first: # first layer
Ax0 = np.matmul(W, x0)
for j in range(W.shape[0]):
dualnorm_Aj = np.sum(np.abs(np.multiply(W[j], eps)), axis=1)
UB_new[j] = np.max(Ax0[j] + dualnorm_Aj) + b[j]
LB_new[j] = np.min(Ax0[j] - dualnorm_Aj) + b[j]
else: # 2nd layer or more
UB_hat = np.maximum(UB_prev, 0)  # ReLU; `self` is not available inside a @staticmethod
LB_hat = np.maximum(LB_prev, 0)
W_abs = np.abs(W)
# not sure why, but in numba, W_abs is float32 and 0.5*(UB_hat-LB_hat) is float64
# while in numpy, W_abs and UB_hat are both float32
B_sum = np.float32(0.5) * (UB_hat + LB_hat)
B_diff = np.float32(0.5) * (UB_hat - LB_hat)
term_1st = np.dot(W_abs, B_diff)
term_2nd = np.dot(W, B_sum) + b
# term_1st = np.dot(W_abs,np.float32(0.5)*(UB_hat-LB_hat))
# term_2nd = np.dot(W_Nk,np.float32(0.5)*(UB_hat+LB_hat))+b_Nk
UB_new = term_1st + term_2nd
LB_new = -term_1st + term_2nd
return UB_new, LB_new
@staticmethod
# @jit(nopython=True)
def get_semantic_layer_bound_implicit(Ws, bs, UBs, LBs, neuron_state, nlayer, bounds_ul, x0, eps):
constants_ub = np.copy(bs[-1])
constants_lb = np.copy(bs[-1])
UB_final = np.zeros_like(constants_ub)
LB_final = np.zeros_like(constants_lb)
A_UB = np.copy(Ws[nlayer - 1])
A_LB = np.copy(Ws[nlayer - 1])
for i in range(nlayer - 1, 0, -1):
# create intercepts array for this layer
l_ub = np.empty_like(LBs[i])
l_lb = np.empty_like(LBs[i])
diags_ub = np.empty_like(bounds_ul[i][0, :])
diags_lb = np.empty_like(bounds_ul[i][0, :])
upper_k = bounds_ul[i][0]
upper_b = bounds_ul[i][1]
lower_k = bounds_ul[i][2]
lower_b = bounds_ul[i][3]
for j in range(A_UB.shape[0]):
# index for positive entries in A for upper bound
idx_pos_ub = np.nonzero(A_UB[j] > 0)[0]
# index for negative entries in A for upper bound
idx_neg_ub = np.nonzero(A_UB[j] <= 0)[0]
# index for positive entries in A for lower bound
idx_pos_lb = np.nonzero(A_LB[j] > 0)[0]
# index for negative entries in A for lower bound
idx_neg_lb = np.nonzero(A_LB[j] <= 0)[0]
# for upper bound, set the neurons with positive entries in A to upper bound
diags_ub[idx_pos_ub] = upper_k[idx_pos_ub]
l_ub[idx_pos_ub] = upper_b[idx_pos_ub]
# for upper bound, set the neurons with negative entries in A to lower bound
diags_ub[idx_neg_ub] = lower_k[idx_neg_ub]
l_ub[idx_neg_ub] = lower_b[idx_neg_ub]
# for lower bound, set the neurons with negative entries in A to upper bound
diags_lb[idx_neg_lb] = upper_k[idx_neg_lb]
l_lb[idx_neg_lb] = upper_b[idx_neg_lb]
# for lower bound, set the neurons with positve entries in A to lower bound
diags_lb[idx_pos_lb] = lower_k[idx_pos_lb]
l_lb[idx_pos_lb] = lower_b[idx_pos_lb]
UB_final[j] += np.dot(A_UB[j], l_ub)
LB_final[j] += np.dot(A_LB[j], l_lb)
# update the j-th row of A with diagonal matrice
A_UB[j] = A_UB[j] * diags_ub
# update A with diagonal matrice
A_LB[j] = A_LB[j] * diags_lb
# constants of previous layers
constants_ub += np.dot(A_UB, bs[i - 1])
constants_lb += np.dot(A_LB, bs[i - 1])
"""
if not np.isfinite(constants_ub).all():
print("constants_ub nan detected", i, j)
return UB_final, LB_final
if not np.isfinite(constants_lb).all():
print("constants_lb nan detected", i, j)
return UB_final, LB_final
"""
# compute A for next loop
# diags matrices is multiplied above
A_UB = np.dot(A_UB, Ws[i - 1])
A_LB = np.dot(A_LB, Ws[i - 1])
# after the loop is done we get A0
UB_final += constants_ub
LB_final += constants_lb
# step 6: bounding A0 * x
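# With the elementwise box constraint |x - x0| <= eps, each row a of A satisfies
# dot(a, x0) - dot(|a|, eps) <= dot(a, x) <= dot(a, x0) + dot(|a|, eps);
# the dualnorm_Aj_* terms below compute exactly the dot(|a|, eps) correction.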
Ax0_UB = np.dot(A_UB, x0)
Ax0_LB = np.dot(A_LB, x0)
dualnorm_Aj_ub = np.dot(np.abs(A_UB),eps)
dualnorm_Aj_lb = np.dot(np.abs(A_LB),eps)
for j in range(A_UB.shape[0]):
UB_final[j] += np.max(Ax0_UB[j] + dualnorm_Aj_ub[j])
LB_final[j] += np.min(Ax0_LB[j] - dualnorm_Aj_lb[j])
# constant_gap: to be revised after deadline
constant_gap = 0
# probnd: to be revised after deadline
probnd = 0
# final constants A_final_UB, A_final_LB, B_final_UB, B_final_LB
A_final_UB = np.copy(A_UB)
A_final_LB = np.copy(A_LB)
B_final_UB = np.copy(UB_final)
B_final_LB = np.copy(LB_final)
# use tuples instead of list in order to use numba
As = (A_final_UB, A_final_LB)
Bs = (B_final_UB, B_final_LB)
return UB_final, LB_final, constant_gap, probnd, As, Bs
def certify_eps_explicit(self, predict_class, target_class, eps, x0, hsl, p="i", activation="relu", delta = None):
if hsl == "lighten":
w_new, b_new = lighten_layers(x0, self.weights[0], self.biases[0])
elif hsl == "saturate":
w_new, b_new = saturate_layers(x0, self.weights[0], self.biases[0])
elif hsl == "hue":
w_new, b_new = hue_layers(x0, self.weights[0], self.biases[0])
elif hsl == "bandc":
w_new, b_new = bandc_layers(x0, self.weights[0], self.biases[0])
else:
raise ValueError
eps_val = delta
div = int(np.floor((eps / eps_val) + 0.00001))
min_val = 100.0
max_cert = (-eps, eps)
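# Sweep the parameter range [-eps, eps] in sub-intervals of width eps_val, visited in the order
# 0, -eps_val, +eps_val, -2*eps_val, ...; each sub-interval is certified with its midpoint as x0 and
# radius eps_val / 2. min_val tracks the worst lower bound; on the first failing sub-interval,
# max_cert falls back to the range covered by the most recently certified sub-intervals and the loop stops.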
for j in range(1, 2*div+1):
offset = (2 * (j % 2) - 1) * np.floor(j / 2) * eps_val
x0 = (offset + (eps_val/2.0))*np.ones((1,)).astype(np.float32)
preReLU_UB = []
preReLU_LB = []
neuron_states = []
eps = (eps_val/2.0) * np.ones_like(x0).astype(np.float32)
if hsl == "hue":
weights = w_new + self.weights[1:]
biases = b_new + self.biases[1:]
elif hsl == "bandc":
if cont:
offset += 1
eps = np.array([(eps_val / 2.0), eps2]).astype(np.float32)
x0 = np.array([(offset + (eps_val / 2.0)), 0.0]).astype(np.float32).reshape((2,))
else:
eps = np.array([eps2, (eps_val / 2.0)]).astype(np.float32)
x0 = np.array([(0.1, offset + (eps_val / 2.0))]).astype(np.float32).reshape((2,))
weights = w_new + self.weights[1:]
biases = b_new + self.biases[1:]
else:
if offset >= 0:
weights = w_new['pos'] + self.weights[1:]
biases = b_new['pos'] + self.biases[1:]
else:
weights = w_new['neg'] + self.weights[1:]
biases = b_new['neg'] + self.biases[1:]
numlayer = self.numlayer + 1
bounds_ul = super().init_layer_bound_general(weights)
print("before bounds_ul[1][0] = {}".format(bounds_ul[1][0]))
if activation == "relu":
get_bounds = super().get_relu_bounds
elif activation == "tanh":
get_bounds = super().get_tanh_bounds
elif activation == "sigmoid":
get_bounds = super().get_sigmoid_bounds
elif activation == "arctan":
get_bounds = super().get_arctan_bounds
else:
raise ValueError('activation %s is not supported!' % activation)
for num in range(numlayer):
W = weights[num]
b = biases[num]
if num == 0:
UB, LB = super().get_layer_bound(W, b, UB_prev=None, LB_prev=None, is_first=True, x0=x0, eps=eps, p=p)
else:
if num == numlayer - 1:
W, b = super().g0_trick(W, b, predict_class, target_class)
Ws = weights[:num] + [W]
bs = biases[:num] + [b]
UB, LB, _, _, As, Bs = super().get_crown_layer_bound(tuple(Ws), tuple(bs),
tuple([x0 - x0] + preReLU_UB),
tuple([x0 - x0] + preReLU_LB),
tuple(neuron_states),
num + 1, tuple(bounds_ul[:num + 1]),
x0, eps, p)
if num < numlayer - 1:
preReLU_UB.append(UB)
preReLU_LB.append(LB)
neuron_states.append(super().assign_neuron_state(preReLU_UB[-1], preReLU_LB[-1]))
print("layer", num, sum(neuron_states[-1] == -1), "neurons never activated,",
sum(neuron_states[-1] == +1), "neurons always activated")
get_bounds(preReLU_UB[-1], preReLU_LB[-1], neuron_states[-1], bounds_ul[num + 1])
print("epsilon = {:.5f}".format(np.amax(eps)))
print(" {:.2f} < f_c - f_j < {:.2f}".format(LB[0], UB[0]))
gap_gx = LB[0]
min_val = min(min_val, gap_gx)
if min_val < 0:
prevs = [(2*((j-i)%2)-1)*np.floor((j-i)/2)*eps_val for i in range(1, 3) if (j-i) >= 0]
if len(prevs) > 0:
max_cert = (min(prevs), max(prevs) + eps_val)
else:
max_cert = (0.0, 0.0)
break
gap_gx = min_val
return gap_gx, max_cert
def certify_eps_implicit(self, predict_class, target_class, lower_lim, upper_lim, x0, hsl="rotate", divisions=1, activation="relu"):
if hsl == "rotate":
eps, offsets = get_eps(x0 + 0.5, lower_lim, upper_lim, div=divisions)
else:
raise ValueError
x0 = x0.flatten().astype(np.float32)
x = (x0.reshape((x0.shape[0], 1)) + offsets).astype(np.float32)
eps = eps.astype(np.float32)
# contains numlayer arrays, each corresponding to a pre-ReLU bound
preReLU_UB = []
preReLU_LB = []
# save neuron states
neuron_states = []
# initialize diags: in fast-lin, it's called diags; in crown, it's called bounds_ul
bounds_ul = super().init_layer_bound_general(self.weights)
if activation == "relu":
get_bounds = super().get_relu_bounds
elif activation == "tanh":
get_bounds = super().get_tanh_bounds
elif activation == "sigmoid":
get_bounds = super().get_sigmoid_bounds
elif activation == "arctan":
get_bounds = super().get_arctan_bounds
else:
raise ValueError('activation %s is not supported!' % activation)
eps = eps.astype(np.float32)
for num in range(self.numlayer):
W = self.weights[num]
b = self.biases[num]
if num == 0:
UB, LB = Semantic.get_layer_bound_implicit(W, b, UB_prev=None, LB_prev=None, is_first=True, x0=x, eps=eps.T)
else:
# need to modify last layer weights and bias with g0_trick
if num == self.numlayer - 1: # last layer
W, b = super().g0_trick(W, b, predict_class, target_class)
Ws = self.weights[:num] + [W]
bs = self.biases[:num] + [b]
# get pre-ReLU bounds
UB, LB, _, _, As, Bs = Semantic.get_semantic_layer_bound_implicit(tuple(Ws), tuple(bs),
tuple([x0 - x0] + preReLU_UB),
tuple([x0 - x0] + preReLU_LB),
tuple(neuron_states),
num + 1, tuple(bounds_ul[:num + 1]),
x, eps)
if num < self.numlayer - 1:
# save those pre-ReLU bounds
preReLU_UB.append(UB)
preReLU_LB.append(LB)
neuron_states.append(super().assign_neuron_state(preReLU_UB[-1], preReLU_LB[-1]))
print("layer", num, sum(neuron_states[-1] == -1), "neurons never activated,",
sum(neuron_states[-1] == +1), "neurons always activated")
get_bounds(preReLU_UB[-1], preReLU_LB[-1], neuron_states[-1], bounds_ul[num + 1])
print("epsilon = {:.5f}".format(np.amax(eps)))
print(" {:.2f} < f_c - f_j < {:.2f}".format(LB[0], UB[0]))
gap_gx = LB[0]
return gap_gx
```
#### File: JeetMo/Semantify-NN/main_semantic_hsl.py
```python
import faulthandler
faulthandler.enable()
import numpy as np
import os, sys, random, time, math, argparse
from utils.setup_mnist import MNIST
from utils.setup_cifar import CIFAR
from utils.setup_gtsrb import GTSRB
import utils.save_nlayer_weights as nl
from utils.utils import generate_data
from algo_Semantic import Semantic
def handle_parser(parser):
parser.add_argument('--model',
default="cifar",
choices=["cifar", "mnist", "gtsrb"],
help='model to be used')
parser.add_argument('--eps',
default=0.05,
type=float,
help="epsilon for verification")
parser.add_argument('--hidden',
default=2048,
type=int,
help="number of hidden neurons per layer")
parser.add_argument('--delta',
default=0.001,
type=float,
help='size of divisions')
parser.add_argument('--numlayer',
default=5,
type=int,
help='number of layers in the model')
parser.add_argument('--numimage',
default=2,
type=int,
help='number of images to run')
parser.add_argument('--startimage',
default=0,
type=int,
help='start image')
parser.add_argument('--hsl',
default="lighten",
choices=["lighten", "saturate", "hue", "bandc"],
help='model to be used')
parser.add_argument('--norm',
default="i",
type=str,
choices=["i", "1", "2"],
help='perturbation norm: "i": Linf, "1": L1, "2": L2')
parser.add_argument('--LP',
action="store_true",
help='use LP to get bounds for final output')
parser.add_argument('--LPFULL',
action="store_true",
help='use FULL LP to get bounds for output')
parser.add_argument('--quad',
action="store_true",
help='use quadratic bound to improve 2nd layer output')
parser.add_argument('--warmup',
action="store_true",
help='warm up before the first iteration')
parser.add_argument('--modeltype',
default="vanilla",
choices=["lighten", "saturate", "hue", "vanilla", "dropout", "distill", "adv_retrain"],
help="select model type")
parser.add_argument('--targettype',
default="top2",
choices=["untargeted", "least", "top2", "random"],
help='untargeted minimum distortion')
parser.add_argument('--steps',
default=15,
type=int,
help='how many steps to binary search')
parser.add_argument('--activation',
default="relu",
choices=["relu", "tanh", "sigmoid", "arctan", "elu", "hard_sigmoid", "softplus"])
parser.add_argument('--test_minUB',
action="store_true",
help='test the idea of minimize UB of g(x) in Fast-Lin')
parser.add_argument('--test_estLocalLips',
action="store_true",
help='test the idea of estimating local Lipschitz constant using Fast-Lin')
parser.add_argument('--test_probnd',
default="none",
choices=["gaussian_iid", "gaussian_corr", "uniform", "none"],
help="select input distribution")
parser.add_argument('--test_weightpert',
action="store_true",
help="perturb weight matrices")
return parser
if __name__ == "__main__":
#### parser ####
parser = argparse.ArgumentParser(description='compute activation bound for CIFAR and MNIST')
parser = handle_parser(parser)
args = parser.parse_args()
nhidden = args.hidden
# quadratic bound only works for ReLU
assert ((not args.quad) or args.activation == "relu")
# for all activations we can use general framework
targeted = True
if args.targettype == "least":
target_type = 0b0100
elif args.targettype == "top2":
target_type = 0b0001
elif args.targettype == "random":
target_type = 0b0010
elif args.targettype == "untargeted":
target_type = 0b10000
targeted = False
if args.modeltype == "vanilla":
suffix = ""
else:
suffix = "_" + args.modeltype
# try models/mnist_3layer_relu_1024
activation = args.activation
modelfile = "models/" + args.model + "_" + str(args.numlayer) + "layer_" + activation + "_" + str(nhidden) + suffix
if not os.path.isfile(modelfile):
# if not found, try models/mnist_3layer_relu_1024_1024
modelfile += ("_" + str(nhidden)) * (args.numlayer - 2) + suffix
# if still not found, try models/mnist_3layer_relu
if not os.path.isfile(modelfile):
modelfile = "models/" + args.model + "_" + str(args.numlayer) + "layer_" + activation + "_" + suffix
# if still not found, try models/mnist_3layer_relu_1024_best
if not os.path.isfile(modelfile):
modelfile = "models/" + args.model + "_" + str(args.numlayer) + "layer_" + activation + "_" + str(
nhidden) + suffix + "_best"
if not os.path.isfile(modelfile):
raise (RuntimeError("cannot find model file"))
if args.LP or args.LPFULL:
# use gurobi solver
import gurobipy as grb
# config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
# with tf.Session(config=config) as sess:
if args.model == "mnist":
data = MNIST()
model = nl.NLayerModel([nhidden] * (args.numlayer - 1), modelfile, activation=activation)
elif args.model == "cifar":
data = CIFAR()
model = nl.NLayerModel([nhidden] * (args.numlayer - 1), modelfile, image_size=32, image_channel=3,
activation=activation)
elif args.model == "gtsrb":
data = GTSRB()
model = nl.NLayerModel([nhidden] * (args.numlayer - 1), modelfile, image_size=28, image_channel=3,
activation=activation, num_labels = 43)
else:
raise (RuntimeError("unknown model: " + args.model))
print("Evaluating", modelfile)
sys.stdout.flush()
random.seed(1215)
np.random.seed(1215)
"""
Generate data
"""
inputs, targets, true_labels, true_ids, img_info = generate_data(data, samples=args.numimage, targeted=targeted,
random_and_least_likely=True,
target_type=target_type,
predictor=model.model.predict,
start=args.startimage)
# get the logit layer predictions
preds = model.model.predict(inputs)
Nsamp = 0
r_sum = 0.0
r_gx_sum = 0.0
"""
Start computing robustness bound
"""
print("starting robustness verification on {} images!".format(len(inputs)))
sys.stdout.flush()
sys.stderr.flush()
total_time_start = time.time()
# compute worst case bound: no need to pass in sess, model and data
# just need to pass in the weights, true label, norm, x0, prediction of x0, number of layer and eps
Semantic_BND = Semantic(model)
total_verifiable = 0
lower, upper = 0.0, 0.0
delta = args.delta
for i in range(len(inputs)):
Nsamp += 1
p = args.norm # p = "1", "2", or "i"
predict_label = np.argmax(true_labels[i])
target_label = np.argmax(targets[i])
start = time.time()
eps = args.eps
start_1 = time.time()
# run CROWN
robustness, max_cert = Semantic_BND.certify_eps_explicit(predict_label, target_label, eps, inputs[i],
args.hsl, p, delta = delta)
print("verified", time.time() - start_1, robustness)
if robustness >= 0:
total_verifiable += 1
verifiable = True
else:
verifiable = False
print("[L1] model = {}, seq = {}, id = {}, true_class = {}, target_class = {}, info = {}, "
"verifiable = {}, lower_bound = {}, upper_bound = {}, time = {:.4f}, total_time = {:.4f}"
.format(modelfile, i, true_ids[i], predict_label, target_label, img_info[i],
verifiable, max_cert[0], max_cert[1], time.time() - start, time.time() - total_time_start))
lower += max_cert[0]
upper += max_cert[1]
sys.stdout.flush()
sys.stderr.flush()
print("[L0] model = {}, info = {}, numimage = {}, lower_bound_avg = {}, uper_bound_avg = {}, total verifiable = {:.2f}%, time = {:.4f}, total_time = {:.4f}".format(modelfile, img_info[i], Nsamp, lower/Nsamp, upper/Nsamp, 100 * total_verifiable / Nsamp, time.time() - start, time.time() - total_time_start))
sys.stdout.flush()
sys.stderr.flush()
```
|
{
"source": "jeetpadhya/3D-Human-Body-Shape",
"score": 2
}
|
#### File: 3D-Human-Body-Shape/src/reshaper.py
```python
import os
import numpy as np
import scipy
import scipy.sparse
import scipy.sparse.linalg
import utils
from fancyimpute import MICE
# A Reshaper provide multiple methods for synthesizing body mesh
class Reshaper:
def __init__(self, label="female"):
self.label_ = label
self.facets = np.load(open(os.path.join(utils.MODEL_DIR, "facets.npy"), "rb"))
self.normals = np.load(open(os.path.join(utils.MODEL_DIR, "normals.npy"), "rb"))
self.mask = np.load(open(os.path.join(utils.MODEL_DIR, "mask.npy"), "rb"))
self.rfemask = np.load(open(os.path.join(utils.MODEL_DIR, "%s_rfemask.npy" % label), "rb"))
self.rfemat = np.load(open(os.path.join(utils.MODEL_DIR, "%s_rfemat.npy" % label), "rb"))
self.m2d = np.load(open(os.path.join(utils.MODEL_DIR, "%s_m2d.npy" % label), "rb"))
self.d_basis = np.load(open(os.path.join(utils.MODEL_DIR, "%s_d_basis.npy" % label), "rb"))
self.t_measure = np.load(open(os.path.join(utils.MODEL_DIR, "%s_t_measure.npy" % label), "rb"))
self.mean_measure = np.load(open(os.path.join(utils.MODEL_DIR, "%s_mean_measure.npy" % label), "rb"))
self.mean_deform = np.load(open(os.path.join(utils.MODEL_DIR, "%s_mean_deform.npy" % label), "rb"))
self.mean_vertex = np.load(open(os.path.join(utils.MODEL_DIR, "%s_mean_vertex.npy" % label), "rb"))
self.std_measure = np.load(open(os.path.join(utils.MODEL_DIR, "%s_std_measure.npy" % label), "rb"))
self.std_deform = np.load(open(os.path.join(utils.MODEL_DIR, "%s_std_deform.npy" % label), "rb"))
self.cp = np.load(open(os.path.join(utils.MODEL_DIR, "cp.npy"), "rb"), allow_pickle=True)
loader = np.load(os.path.join(utils.MODEL_DIR, "%s_d2v.npz" % label))
self.d2v = scipy.sparse.coo_matrix((loader['data'], (loader['row'], loader['col'])), shape=loader['shape'])
self.lu = scipy.sparse.linalg.splu(self.d2v.transpose().dot(self.d2v).tocsc())
self.local_mat = []
tmp = np.load(open(os.path.join(utils.MODEL_DIR, "%s_local.npy" % label), "rb"), allow_pickle=True)
for i in range(0, len(tmp)):
self.local_mat.append(np.array([c for c in tmp[i]]))
def mapping(self, weight, flag=0):
if flag == 0:
return self.mapping_global(weight)
elif flag == 1:
return self.mapping_mask(weight)
else:
return self.mapping_rfemat(weight)
# global mapping using t_measure
def mapping_global(self, weight):
weight = np.array(weight).reshape(utils.M_NUM, 1)
weight = self.m2d.dot(weight)
d = np.matmul(self.d_basis, weight)
d.shape = (utils.F_NUM, 9)
d *= self.std_deform
d += self.mean_deform
d.shape = (utils.F_NUM * 9, 1)
[v, n, f] = self.d_synthesize(d)
return [v, n, f]
# local mapping using measure + mask
def mapping_mask(self, weight):
weight = np.array(weight).reshape(utils.M_NUM, 1)
weight *= self.std_measure
weight += self.mean_measure
d = []
for i in range(0, utils.F_NUM):
mask = np.array(self.mask[:, i]).reshape(utils.M_NUM, 1)
alpha = np.array(weight[mask])
alpha.shape = (alpha.size, 1)
s = self.local_mat[i].dot(alpha)
d += [a for a in s.flat]
d = np.array(d).reshape(utils.F_NUM * 9, 1)
[v, n, f] = self.d_synthesize(d)
return [v, n, f]
# local mapping using measure + rfe_mat
def mapping_rfemat(self, weight):
weight = np.array(weight).reshape(utils.M_NUM, 1)
weight *= self.std_measure
weight += self.mean_measure
d = []
for i in range(0, utils.F_NUM):
mask = np.array(self.rfemask[:, i]).reshape(utils.M_NUM, 1)
alpha = np.array(weight[mask])
alpha.shape = (alpha.size, 1)
s = self.rfemat[i].dot(alpha)
d += [a for a in s.flat]
d = np.array(d).reshape(utils.F_NUM * 9, 1)
[v, n, f] = self.d_synthesize(d)
return [v, n, f]
# synthesize a body by deform-based, given deform, output vertex
def d_synthesize(self, deform):
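# recover vertices from a deformation vector by solving the normal equations
# (d2v^T d2v) x = d2v^T d with the sparse LU factorization precomputed in
# __init__ (self.lu), then re-center the mesh at the origin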
d = np.array(deform.flat).reshape(deform.size, 1)
Atd = self.d2v.transpose().dot(d)
x = self.lu.solve(Atd)
x = x[:utils.V_NUM * 3]
# move to center
x.shape = (utils.V_NUM, 3)
x_mean = np.mean(x, axis=0)
x -= x_mean
return [x, -self.normals, self.facets - 1]
# given flag, value, predict completed measures
def test(self, flag, data):
if (flag == 1).sum() == utils.M_NUM:
return data
else:
# impute the missing measures with the MICE-based helper below
return self.get_predict(flag, data)
# using imputation for missing data
def get_predict(self, flag, in_data):
output = in_data.copy()
output.shape = (utils.M_NUM, 1)
output[~flag] = np.nan
solver = MICE()
tmp = self.t_measure.copy()
tmp = np.column_stack((tmp, output)).transpose()
tmp = solver.complete(tmp)
output = np.array(tmp[-1, :]).reshape(utils.M_NUM, 1)
return output
if __name__ == "__main__":
label = "female"
body = Reshaper(label)
measure = np.load(open(os.path.join(utils.MODEL_DIR, "%s_measure.npy" % label), "rb"))
mean_measure = np.array(measure.mean(axis=1)).reshape(utils.M_NUM, 1)
std_measure = np.array(measure.std(axis=1)).reshape(utils.M_NUM, 1)
t_measure = measure - mean_measure
t_measure /= std_measure
for i in range(100):
[v, n, f] = body.mapping(t_measure[:, i], 2)
utils.save_obj(os.path.join(utils.MODEL_DIR, "test.obj"), v, f + 1)
```
|
{
"source": "jeetpatel9/django-restframework-2fa",
"score": 2
}
|
#### File: django-restframework-2fa/django_restframework_2fa/views.py
```python
from django.shortcuts import get_object_or_404
from django.contrib.auth import get_user_model
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from django_restframework_2fa.serializers import RequestLoginSerializer
from twilio.base.exceptions import TwilioRestException
User = get_user_model()
class Login2FARequestOTPView(APIView):
'''
This view is used to request an OTP on the user's mobile number for verification.
It uses basic authentication instead of JWTAuthentication.
'''
def post(self, request, format=None):
serialized_data = RequestLoginSerializer(data=request.data)
serialized_data.is_valid(raise_exception=True)
try:
user_instance = User.objects.get(email=serialized_data.validated_data['email'])
except User.DoesNotExist:
return Response({'message':'Account with the provided credentials does not exist.'}, status.HTTP_400_BAD_REQUEST)
if not user_instance.check_password(serialized_data.validated_data['password']):
return Response({'message':'Invalid credentials.'}, status.HTTP_401_UNAUTHORIZED)
try:
response = serialized_data.get_response(user_instance)
except ValueError as e:
return Response({"message": str(e)}, status.HTTP_400_BAD_REQUEST)
except Exception as e:
return Response({"message": str(e)}, status.HTTP_503_SERVICE_UNAVAILABLE)
return Response(response, status=status.HTTP_200_OK )
```
|
{
"source": "jeetsagar/turbojet",
"score": 3
}
|
#### File: src/TF-gui/tftrain.py
```python
import os
import pandas as pd
import tensorflow as tf
from tensorflow.keras import layers
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# gpu_devices = tf.config.experimental.list_physical_devices("GPU")
# for device in gpu_devices:
# tf.config.experimental.set_memory_growth(device, True)
def trainModel(data_in, params_in):
data_in = data_in.take(2048)
data_in = data_in.shuffle(24)
data_in = data_in.batch(1024)
arch = params_in["Architecture"]
dropout = params_in["Dropout"]
lr = params_in["LearningRate"]
attrs = params_in["Attrs"]
epochs = params_in["Epochs"]
if arch == "BaseCNN":
if params_in["BatchNorm"]:
model = tf.keras.Sequential([
layers.Conv1D(filters=10, kernel_size=5, padding="same", activation="relu", input_shape=(1, 50, attrs)),
layers.Dropout(dropout),
layers.Conv1D(filters=10, kernel_size=5, padding="same", activation="relu"),
layers.Dropout(dropout),
layers.Conv1D(filters=1, kernel_size=5, padding="same", activation="relu"),
layers.Dropout(dropout),
layers.BatchNormalization(),
layers.Flatten(),
layers.Dense(50, "relu"),
layers.Dense(1)
])
else:
model = tf.keras.Sequential([
layers.Conv1D(filters=10, kernel_size=5, padding="same", activation="relu", input_shape=(1, 50, attrs)),
layers.Dropout(dropout),
layers.Conv1D(filters=10, kernel_size=5, padding="same", activation="relu"),
layers.Dropout(dropout),
layers.Conv1D(filters=1, kernel_size=5, padding="same", activation="relu"),
layers.Dropout(dropout),
layers.Flatten(),
layers.Dense(50, "relu"),
layers.Dense(1)
])
elif arch == "CNN-LSTM":
if params_in["BatchNorm"]:
model = tf.keras.Sequential([
layers.Conv1D(filters=10, kernel_size=5, padding="same", activation="relu", input_shape=(1, 50, attrs)),
layers.Dropout(dropout),
layers.Conv1D(filters=10, kernel_size=5, padding="same", activation="relu"),
layers.Dropout(dropout),
layers.Conv1D(filters=1, kernel_size=5, padding="same", activation="relu"),
layers.Dropout(dropout),
layers.BatchNormalization(),
layers.Reshape((5, 10)),
layers.LSTM(30, return_sequences=False),
layers.Dense(50, "relu"),
layers.Dense(1)
])
else:
model = tf.keras.Sequential([
layers.Conv1D(filters=10, kernel_size=5, padding="same", activation="relu", input_shape=(1, 50, attrs)),
layers.Dropout(dropout),
layers.Conv1D(filters=10, kernel_size=5, padding="same", activation="relu"),
layers.Dropout(dropout),
layers.Conv1D(filters=1, kernel_size=5, padding="same", activation="relu"),
layers.Dropout(dropout),
layers.Reshape((5, 10)),
layers.LSTM(30, return_sequences=False),
layers.Dense(50, "relu"),
layers.Dense(1)
])
elif arch == "CNN-2LSTM":
if params_in["BatchNorm"]:
model = tf.keras.Sequential([
layers.Conv1D(filters=10, kernel_size=5, padding="same", activation="relu", input_shape=(1, 50, attrs)),
layers.Dropout(dropout),
layers.Conv1D(filters=10, kernel_size=5, padding="same", activation="relu"),
layers.Dropout(dropout),
layers.Conv1D(filters=1, kernel_size=5, padding="same", activation="relu"),
layers.Dropout(dropout),
layers.BatchNormalization(),
layers.Reshape((5, 10)),
layers.LSTM(30, return_sequences=True),
layers.LSTM(30, return_sequences=False),
layers.Dense(1)
])
else:
model = tf.keras.Sequential([
layers.Conv1D(filters=10, kernel_size=5, padding="same", activation="relu", input_shape=(1, 50, attrs)),
layers.Dropout(dropout),
layers.Conv1D(filters=10, kernel_size=5, padding="same", activation="relu"),
layers.Dropout(dropout),
layers.Conv1D(filters=1, kernel_size=5, padding="same", activation="relu"),
layers.Dropout(dropout),
layers.Reshape((5, 10)),
layers.LSTM(30, return_sequences=True),
layers.LSTM(30, return_sequences=False),
layers.Dense(1)
])
model.compile(loss=tf.losses.MeanSquaredError(), optimizer=tf.optimizers.Adam(learning_rate=lr, amsgrad=True))
filepath = "./checkpoints/Model_in-" + arch + str(attrs) + ".h5"
losses = []
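# custom callback that records the training loss at the end of every epoch;
# the collected values are written out below as ./losses/lossTrend.csv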
class CustomModelCheckPoint(tf.keras.callbacks.Callback):
def __init__(self, **kargs):
super(CustomModelCheckPoint, self).__init__(**kargs)
self.epoch_loss = {} # accuracy at given epoch
def on_epoch_begin(self, epoch, logs={}):
# Things done on beginning of epoch.
return
def on_epoch_end(self, epoch, logs={}):
# things done on end of the epoch
self.epoch_loss[epoch] = logs.get("loss")
losses.append(self.epoch_loss[epoch])
if params_in["ResumeTraining"]:
model.load_weights(filepath)
checkpoint2 = CustomModelCheckPoint()
checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath, monitor='loss', verbose=0, save_best_only=True,
save_freq='epoch')
model.fit(data_in, epochs=epochs, callbacks=[checkpoint, checkpoint2])
df_loss = pd.DataFrame()
df_loss["Epochs"] = list(range(1, epochs + 1))
df_loss["Loss"] = losses
df_loss.to_csv("./losses/lossTrend.csv", index=False)
```
#### File: src/TF-nokalman/tfdata.py
```python
import h5py
import bisect
import numpy as np
import tensorflow as tf
def normalize_data(x):
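# min-max scale every feature column to [-1, 1]; zero-range columns get a
# denominator of 1 to avoid division by zero (they map to the constant -1)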
x_max = np.max(x, axis=0)
x_min = np.min(x, axis=0)
x_denom = (x_max - x_min)
x_denom[x_denom == 0] = 1
x_norm = -1 + (2 * (x - x_min) / x_denom)
return x_norm
class DataProvider:
def __init__(self, filepath, units, mode):
self.window = 50
if mode == 1:
with h5py.File(filepath, 'r') as hdf:
W_dev = np.array(hdf.get('W_dev'))
X_s_dev = np.array(hdf.get('X_s_dev'))
X_v_dev = np.array(hdf.get('X_v_dev'))
T_dev = np.array(hdf.get('T_dev'))
A_dev = np.array(hdf.get('A_dev'))
Y_dev = np.array(hdf.get('Y_dev'))
else:
with h5py.File(filepath, 'r') as hdf:
W_dev = np.array(hdf.get('W_test'))
X_s_dev = np.array(hdf.get('X_s_test'))
X_v_dev = np.array(hdf.get('X_v_test'))
T_dev = np.array(hdf.get('T_test'))
A_dev = np.array(hdf.get('A_test'))
Y_dev = np.array(hdf.get('Y_test'))
unit_array = np.array(A_dev[:, 0], dtype=np.int32)
existing_units = list(np.unique(unit_array))
if units:
units = units[0]
self.units = list(set(units).intersection(set(existing_units)))
self.units.sort()
else:
self.units = existing_units
self.num_units = len(self.units)
dev_data = np.concatenate((W_dev, X_s_dev, X_v_dev, T_dev[:, [-1, -2, -4]]), axis=1)
# dev_data = np.concatenate((W_dev, X_s_dev), axis=1)
# print(np.shape(dev_data))
# exit()
dev_data = normalize_data(dev_data)
self.data_list = []
self.target_list = []
self.length_list = []
self.total_length = 0
for unit in self.units:
unit_ind = (unit_array == unit)
unit_data = dev_data[unit_ind]
unit_target = Y_dev[unit_ind]
unit_target = unit_target[self.window:]
# using a subset of the data for testing
# unit_data = unit_data[:1024+self.window]
# unit_target = unit_target[:1024]
# remove the transpose() call when using tensorflow
# tensorflow uses channels last, but pytorch uses channels first
data_tensor = unit_data
target_tensor = unit_target
self.data_list.append(data_tensor)
self.target_list.append(target_tensor)
target_length = target_tensor.shape[0]
self.total_length += target_length
self.length_list.append(target_length)
self.total_elem = list(np.cumsum(self.length_list))
def _get_index(self, n):
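# map a global sample index to (unit index i, within-unit offset j) by a
# binary search over the cumulative per-unit lengths in self.total_elem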
n = n + 1
n = max(1, min(self.total_length, n))
i = bisect.bisect_left(self.total_elem, n)
if i == 0:
j = n - 1
else:
m = self.total_elem[i - 1]
j = n - m - 1
return i, j
def __len__(self):
return self.total_length
def __getitem__(self, index):
i, j = self._get_index(index)
data = self.data_list[i][j:j + self.window, :]
target = self.target_list[i][j]
data = np.expand_dims(data, axis=0)
target = np.expand_dims(target, axis=0)
return data, target
def generate_data(filepath, units, mode):
ds = DataProvider(filepath, units, mode)
for i in range(ds.total_length):
data, value = ds[i]
yield data, value
def get_dataset(filepath, units, mode):
# return tf.data.Dataset.from_generator(generate_data, args=[filepath, units],output_signature=(tf.TensorSpec(shape=(1, 50, 18), dtype=tf.float32),tf.TensorSpec(shape=(1, 1), dtype=tf.float32)))
return tf.data.Dataset.from_generator(generate_data, args=[filepath, units, mode],
output_types=(tf.float32, tf.float32), output_shapes=([1, 50, 35], [1, 1]))
# training_dataset = tf.data.Dataset.from_generator(
# raw_data_gen,
# args=(1),
# output_types=(tf.float32, tf.uint8),
# output_shapes=([None, 1], [None]))
if __name__ == '__main__':
fname = '../../data_set/N-CMAPSS_DS02-006.h5'
a = DataProvider(fname, [], "dev")
b, c = a[0]
print(b.shape, c.shape)
tf_ds = get_dataset(fname, [], "dev")
```
|
{
"source": "jeetsajan/book-review",
"score": 3
}
|
#### File: jeetsajan/book-review/application.py
```python
import os
import requests
from flask import Flask, session, render_template, request, redirect, jsonify
from flask_session import Session
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
app = Flask(__name__)
# Check for environment variable
if not os.getenv("DATABASE_URL"):
raise RuntimeError("DATABASE_URL is not set")
# Configure session to use filesystem
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Set up database
engine = create_engine(os.getenv("DATABASE_URL"))
db = scoped_session(sessionmaker(bind=engine))
@app.route("/")
def index():
if session.get("username") is not None:
return render_template("home.html", name=session.get("username"))
return render_template("login.html")
@app.route("/home", methods=["POST"])
def home():
session.clear()
name = request.form.get("name")
dbpwd = db.execute("SELECT password FROM users WHERE username = :username", {"username": name}).fetchone()
if dbpwd is not None:
pwd = request.form.get("pwd")
if pwd == dbpwd[0]:
session["username"] = name
return render_template("home.html", name=name)
else:
return redirect("/signup")
return render_template("login.html")
@app.route("/login", methods=["POST"])
def login():
session.clear()
name = request.form.get("name")
pwd = request.form.get("pwd")
email = request.form.get("email")
db.execute("INSERT INTO users (username, password, email) VALUES (:username, :password, :email)",{"username": name, "password": <PASSWORD>, "email": email})
db.commit()
session["username"] = name
return render_template("home.html", name=name)
@app.route("/signup")
def signup():
if session.get("username") is not None:
return render_template("home.html", name=session.get("username"))
return render_template("signup.html")
@app.route("/find", methods=["POST"])
def find():
isbn = request.form.get("isbn")
name = request.form.get("name")
author = request.form.get("author")
if len(isbn)==0:
ans = queryOne("name",name) if len(author)==0 else queryOne("author", author) if len(name)==0 else queryTwo("name", name, "author", author)
elif len(name)==0:
ans = queryOne("isbn",isbn) if len(author)==0 else queryOne("author", author) if len(isbn)==0 else queryTwo("isbn", isbn, "author", author)
elif len(author)==0:
ans = queryOne("isbn",isbn) if len(name)==0 else queryOne("name", name) if len(isbn)==0 else queryTwo("isbn", isbn, "name", name)
else:
ans = db.execute(f"SELECT * FROM books WHERE isbn LIKE '%{isbn}%' AND name LIKE '%{name}%' AND author LIKE '%{author}%'").fetchall()
return render_template("search_result.html", ans=ans)
def queryOne(name, value):
ans = db.execute(f"SELECT * FROM books WHERE {name} LIKE '%{value}%'").fetchall()
return ans
def queryTwo(n1, v1, n2, v2):
ans = db.execute(f"SELECT * FROM books WHERE {n1} LIKE '%{v1}%' AND {n2} LIKE '%{v2}%'").fetchall()
return ans
@app.route("/details/<isbn>")
def details(isbn):
ans = db.execute("SELECT * FROM books WHERE isbn = :isbn", {"isbn": isbn}).fetchone()
response = requests.get("https://www.goodreads.com/book/review_counts.json", params={"key": '<KEY>', "isbns": isbn})
if response.status_code != 200:
raise Exception("ERROR: API request unsuccessful.")
sna = response.json()
reviews = db.execute("SELECT rating, content, username FROM reviews WHERE isbn = :isbn", {"isbn": isbn}).fetchall()
return render_template("details.html", res=ans, avg=sna['books'][0]['average_rating'], rating=sna['books'][0]['average_rating'], total=sna['books'][0]['ratings_count'], reviews=reviews)
@app.route("/logout")
def logout():
session.clear()
return redirect("/")
@app.route("/review/<isbn>", methods=["POST"])
def review(isbn):
username = session.get("username")
ans = db.execute("SELECT * FROM reviews WHERE isbn = :isbn AND username = :username", {"isbn": isbn, "username": username}).fetchall()
if ans == []:
rating = request.form.get("rating")
content = request.form.get("content")
db.execute("INSERT INTO reviews (rating, content, username, isbn) VALUES (:rating, :content, :username, :isbn)",{"rating": rating, "content": content, "username": username, "isbn": isbn})
db.commit()
return redirect("/details/"+isbn)
@app.route("/api/<isbn>", methods=['GET'])
def api(isbn):
row = db.execute("SELECT name, author, year, books.isbn, COUNT(reviews.id) as review_count, AVG(CAST(reviews.rating AS INTEGER)) as average_score FROM books INNER JOIN reviews ON books.isbn = reviews.isbn WHERE books.isbn = :isbn GROUP BY name, author, year, books.isbn", {"isbn": isbn})
if row.rowcount != 1:
return jsonify({"Error": "No Data for this isbn"}), 422
tmp = row.fetchone()
result = dict(tmp.items())
result['average_score'] = float('%.1f'%(result['average_score']))
return jsonify(result)
```
|
{
"source": "jeetsukumaran/archipelago",
"score": 2
}
|
#### File: archipelago/bin/archipelago-generate-data-files-from-tip-labels.py
```python
import sys
import os
import argparse
import collections
import re
import dendropy
from archipelago import model
from archipelago.profile import ArchipelagoProfiler
def create_traits_data_file(traits_data_file_path, tree, num_trait_types, output_format):
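# writes one row per lineage with one state symbol per trait type; for the
# "phylip" format a tab-separated "<num_lineages> <num_traits>" header line
# is prepended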
lineage_trait_states = collections.OrderedDict()
for trait_idx in range(num_trait_types):
state_symbols = {}
for taxon in tree.taxon_namespace:
lineage_label = taxon.label
state = taxon.traits_vector[trait_idx]
try:
symbol = state_symbols[state]
except KeyError:
symbol = ArchipelagoProfiler.GEIGER_STATE_SYMBOLS[len(state_symbols)]
state_symbols[state] = symbol
try:
lineage_trait_states[lineage_label].append(symbol)
except KeyError:
lineage_trait_states[lineage_label] = [ symbol ]
if output_format == "csv":
lineage_separator = ","
char_separator = ","
elif output_format == "phylip":
lineage_separator = "\t"
char_separator = "\t"
else:
raise ValueError("Unrecognized format: '{}'".format(output_format))
with open(traits_data_file_path, "w") as dataf:
if output_format == "phylip":
dataf.write("{}\t{}\n".format(len(tree.taxon_namespace), num_trait_types))
for lineage_label in lineage_trait_states:
traits = char_separator.join(lineage_trait_states[lineage_label])
dataf.write("{}{}{}\n".format(lineage_label, lineage_separator, traits))
dataf.flush()
dataf.close()
def create_range_data_file(output_path, tree, output_format):
sep = "\t"
area_labels = ["a{}".format(idx+1) for idx, a in enumerate(tree.taxon_namespace[0].distribution_vector)]
# dataf.write("{num_lineages}\t{num_areas}\t({area_labels})\n".format(
# num_lineages=len(tree.taxon_namespace),
# num_areas=len(area_labels),
# area_labels=" ".join(area_labels),
# ))
if output_format == "csv":
lineage_separator = ","
char_separator = ","
elif output_format == "phylip":
lineage_separator = "\t"
char_separator = "\t"
else:
raise ValueError("Unrecognized format: '{}'".format(output_format))
with open(output_path, "w") as dataf:
if output_format == "phylip":
dataf.write("{num_lineages}\t{num_areas}\n".format(
num_lineages=len(tree.taxon_namespace),
num_areas=len(area_labels),
))
for taxon in tree.taxon_namespace:
incidences = [str(i) for i in taxon.distribution_vector]
dataf.write("{}{}{}\n".format(taxon.label, lineage_separator, char_separator.join(incidences)))
dataf.flush()
dataf.close()
def main():
parser = argparse.ArgumentParser()
source_options = parser.add_argument_group("Source Options")
source_options.add_argument(
"source_paths",
nargs="+",
help="Path(s) to tree files.")
source_options.add_argument("-f", "--input-format",
dest="input_schema",
type=str,
default="newick",
choices=["nexus", "newick"],
help="Input data format (default: '%(default)s').")
output_options = parser.add_argument_group("Output Options")
output_options.add_argument("--range-data-format",
type=str,
default="phylip",
choices=["phylip", "csv"],
help="Range data format (default: '%(default)s').")
output_options.add_argument("--traits-data-format",
type=str,
default="phylip",
choices=["phylip", "csv"],
help="Traits data format (default: '%(default)s').")
# output_options.add_argument("-o", "--output-filepath",
# default=None,
# help="Path to profile_results file (default: standard output)."
# )
# output_options.add_argument("-l", "--labels",
# action="append",
# help="Labels to append to output (in format <FIELD-NAME>:value;)")
# output_options.add_argument( "--no-header-row",
# action="store_true",
# default=False,
# help="Do not write a header row.")
# output_options.add_argument( "--append",
# action="store_true",
# default=False,
# help="Append to output file if it already exists instead of overwriting.")
args = parser.parse_args()
source_filepaths = list(args.source_paths)
tree_yielder = dendropy.Tree.yield_from_files(
files=source_filepaths,
schema=args.input_schema,
suppress_internal_node_taxa=True,
suppress_external_node_taxa=True,
)
source_tree_idx = 0
prev_source_idx = None
for tree_idx, tree in enumerate(tree_yielder):
if prev_source_idx != tree_yielder.current_file_index:
prev_source_idx = tree_yielder.current_file_index
source_tree_idx = 0
else:
source_tree_idx += 1
sys.stderr.write("-archipelago- Source {source_idx} of {num_sources} ({source_filepath}), Tree #{tree_idx}\n".format(
source_idx=tree_yielder.current_file_index+1,
num_sources=len(source_filepaths),
source_filepath=source_filepaths[tree_yielder.current_file_index],
tree_idx=source_tree_idx+1,
))
model.ArchipelagoModel.set_lineage_data(
tree=tree,
leaf_nodes_only=True,
lineage_data_source="node",
traits_filepath=None,
areas_filepath=None,
)
tree.original_taxon_namespace = tree.taxon_namespace
tree.taxon_namespace = dendropy.TaxonNamespace()
for node_idx, node in enumerate(tree.leaf_node_iter()):
node.original_taxon = node.taxon
node.taxon = tree.taxon_namespace.new_taxon(label=node.label)
node.taxon.traits_vector = node.traits_vector
node.taxon.distribution_vector = node.distribution_vector
output_file_stem = "{}.{:04d}".format(source_filepaths[tree_yielder.current_file_index], source_tree_idx+1)
create_range_data_file(output_path=output_file_stem + ".ranges", tree=tree, output_format=args.range_data_format)
num_trait_types = len(tree.taxon_namespace[0].traits_vector)
create_traits_data_file(
traits_data_file_path=output_file_stem + ".traits",
tree=tree,
num_trait_types=num_trait_types,
output_format=args.traits_data_format,
)
if __name__ == "__main__":
main()
```
#### File: archipelago/bin/archipelago-profile-trees.py
```python
import sys
import os
import argparse
import collections
import re
from archipelago import model
from archipelago import profile
from archipelago import utility
def parse_fieldname_and_value(labels):
if not labels:
return collections.OrderedDict()
fieldname_value_map = collections.OrderedDict()
for label in labels:
match = re.match(r"\s*(.*?)\s*:\s*(.*)\s*", label)
if not match:
raise ValueError("Cannot parse fieldname and label (format required: fieldname:value): {}".format(label))
fieldname, value = match.groups(0)
fieldname_value_map[fieldname] = value
return fieldname_value_map
def main():
parser = argparse.ArgumentParser(
parents=[
profile.ArchipelagoProfiler.get_profile_options_parser(),
],
)
source_options = parser.add_argument_group("Source Options")
source_options.add_argument(
"source_paths",
nargs="+",
help="Path(s) to tree files.")
source_options.add_argument("-f", "--format",
dest="schema",
type=str,
default="newick",
choices=["nexus", "newick"],
help="Input data format (default: '%(default)s').")
source_options.add_argument("--no-preserve-underscores",
action="store_true",
default=False,
help="Convert unquoted underscores to spaces, as dictated by the Newick/NEXUS standards.")
source_options.add_argument("-r", "--range-data-file",
default=None,
help="Lineage range data file (in Phylip/BayArea/BioGeoBEARS format)."
" If not provided, lineage range data must be encoded in tip labels."
" If provided, lienage traits data file must be provided as well using"
" '-t'/'--traits-data-file' option."
)
source_options.add_argument("-t", "--traits-data-file",
default=None,
help="Lineage traits data file (in Phylip format)."
" If not provided, lineage traits data must be encoded in tip labels."
" If provided, lienage range data file must be provided as well using"
" '-t'/'--range-data-file' option."
)
source_options.add_argument("-m", "--model-file",
default=None,
help="Model file(s) for the input tree file(s)."
" Parameters of the model will be added to the"
" profile profile_results to facilitate analysis."
)
source_options.add_argument("-M", "--model-file-type",
default="json",
choices=("json", "python"),
help="Model file format (default: %(default)s)."
)
output_options = parser.add_argument_group("Output Options")
output_options.add_argument("-o", "--output-filepath",
default=None,
help="Path to profile_results file (default: standard output)."
)
output_options.add_argument("-l", "--labels",
action="append",
help="Labels to append to output (in format <FIELD-NAME>:value;)")
output_options.add_argument( "--no-header-row",
action="store_true",
default=False,
help="Do not write a header row.")
output_options.add_argument( "--append",
action="store_true",
default=False,
help="Append to output file if it already exists instead of overwriting.")
args = parser.parse_args()
extra_fields = parse_fieldname_and_value(args.labels)
source_filepaths = list(args.source_paths)
if args.range_data_file is not None and args.traits_data_file is None:
sys.exit("If specifying '--range-data-file', must also specify '--traits-data-file'")
elif args.range_data_file is None and args.traits_data_file is not None:
sys.exit("If specifying '--traits-data-file', must also specify '--range-data-file'")
elif args.range_data_file is not None and args.traits_data_file is not None:
lineage_data_source="filepath"
else:
lineage_data_source="node"
if args.model_file:
if args.model_file_type == "json":
model_file_type = "json-filepath"
elif args.model_file_type == "python":
model_file_type = "python-filepath"
else:
raise ValueError(args.model_file_type)
archipelago_model = model.ArchipelagoModel.create(
model_definition_source=args.model_file,
model_definition_type=model_file_type,
)
else:
archipelago_model = None
profiler = profile.ArchipelagoProfiler.from_option_args(args)
profiles = []
for source_idx, source_filepath in enumerate(source_filepaths):
if args.verbosity >= 2:
sys.stderr.write("-profiler- Source {source_idx} of {num_sources}: {source_filepath}\n".format(
source_idx=source_idx+1,
num_sources=len(source_filepaths),
source_filepath=source_filepath,
))
results = profiler.profile_trees_from_path(
trees_filepath=source_filepath,
schema=args.schema,
preserve_underscores=not args.no_preserve_underscores,
generating_model=archipelago_model,
lineage_data_source=lineage_data_source,
traits_filepath=args.traits_data_file,
areas_filepath=args.range_data_file,
)
if extra_fields:
for r in results:
r.update(extra_fields)
profiles.extend(results)
out = utility.open_output_file_for_csv_writer(
filepath=args.output_filepath,
append=args.append)
profiler.write_profiles(
dest=out,
profiles=profiles,
suppress_headers=False)
if __name__ == "__main__":
main()
```
#### File: archipelago/bin/archipelago-summarize.py
```python
import sys
import os
import argparse
import json
import collections
import csv
import dendropy
import re
from archipelago import summarize
from archipelago import utility
from archipelago.utility import USER_SPECIFIED_TRAIT_TYPE_INDEX_START_VALUE
def parse_trait_states(labels):
if not labels:
return collections.OrderedDict()
trait_states = []
for label in labels:
match = re.match(r"\s*(.*?)\s*:\s*(.*)\s*", label)
if not match:
raise ValueError("Cannot parse fieldname and label (format required: fieldname:value): {}".format(label))
fieldname, value = match.groups(0)
# The trait states need to be an integer if
# ArchipelagoModel.decode_label coerces the labels to integers.
# The reason we do NOT want it parsed to an integer value
# is to allow null traits 'NA', 'null', etc.
trait_states.append( (int(fieldname), value,) )
return trait_states
def parse_fieldname_and_value(labels):
if not labels:
return collections.OrderedDict()
fieldname_value_map = collections.OrderedDict()
for label in labels:
match = re.match(r"\s*(.*?)\s*:\s*(.*)\s*", label)
if not match:
raise ValueError("Cannot parse fieldname and label (format required: fieldname:value): {}".format(label))
fieldname, value = match.groups(0)
fieldname_value_map[fieldname] = value
return fieldname_value_map
def main():
parser = argparse.ArgumentParser()
source_options = parser.add_argument_group("Source Options")
source_options.add_argument(
"source_paths",
nargs="+",
help="Path(s) to simulated tree files.")
source_options.add_argument("-f", "--format",
dest="schema",
type=str,
default="newick",
choices=["nexus", "newick"],
help="Input data format (default: '%(default)s').")
source_options.add_argument("--no-preserve-underscores",
action="store_true",
default=False,
help="Convert unquoted underscores to spaces, as dictated by the Newick/NEXUS standards.")
summarization_options = parser.add_argument_group("Summarization Options")
summarization_options.add_argument("-x", "--exclude-trait",
action="append",
help="Index of trait to exclude, with first trait indexed with value {}; multiple traits can be specified by repeating the option (e.g., '--exclude-trait {} --ingore-trait {}').".format(
USER_SPECIFIED_TRAIT_TYPE_INDEX_START_VALUE,
USER_SPECIFIED_TRAIT_TYPE_INDEX_START_VALUE,
USER_SPECIFIED_TRAIT_TYPE_INDEX_START_VALUE+1,
))
summarization_options.add_argument("-X", "--exclude-trait-state",
action="append",
help="States of traits to exclude, (in format <TRAIT-INDEX:STATE-INDEX>. Not that traits are {}-based indexed, and states are 0-based indexed. E.g. '--exclude-trait-state 1:0 --exclude-trait-state 1:3').".format(
USER_SPECIFIED_TRAIT_TYPE_INDEX_START_VALUE,
))
summarization_options.add_argument("--no-drop-trees-not-spanning-all-areas",
action="store_true",
default=False,
help="Do NOT skip trees that do not span all areas.")
summarization_options.add_argument("--drop-trees-with-single-lineage-areas",
action="store_true",
default=False,
help="Skip trees that have areas with only one lineage.")
summarization_options.add_argument("--drop-trees-with-single-lineage-trait-states",
action="store_true",
default=False,
help="Skip trees that have trait states with only one lineage.")
output_options = parser.add_argument_group("Output Options")
output_options.add_argument("-l", "--labels",
action="append",
help="Labels to append to output (in format <FIELD-NAME>:value;)")
output_options.add_argument(
"-o", "--output-filepath",
default=None,
help="Path to output file.")
output_options.add_argument( "--no-header-row",
action="store_true",
default=False,
help="Do not write a header row.")
output_options.add_argument( "--append",
action="store_true",
default=False,
help="Append to output file if it already exists instead of overwriting.")
run_options = parser.add_argument_group("Run Options")
run_options.add_argument("-q", "--quiet",
action="store_true",
default=False,
help="Suppress progress messages.")
args = parser.parse_args()
args.group_processed_trees_by_model = False
if args.quiet:
run_logger = None
else:
run_logger = utility.RunLogger(
name="archipelago",
stderr_logging_level="info",
log_to_file=False,
)
# log_frequency_percentage = 1
# def _progress_update_fn(current_idx, total):
if args.exclude_trait:
trait_indexes_to_exclude = [int(i) - USER_SPECIFIED_TRAIT_TYPE_INDEX_START_VALUE for i in args.exclude_trait]
assert -1 not in trait_indexes_to_exclude
else:
trait_indexes_to_exclude = None
trait_states_to_exclude = parse_trait_states(args.exclude_trait_state)
tree_summarizer = summarize.TreeSummarizer(
drop_trees_not_spanning_all_areas=not args.no_drop_trees_not_spanning_all_areas,
trait_indexes_to_exclude=trait_indexes_to_exclude,
trait_states_to_exclude=trait_states_to_exclude,
drop_trees_with_single_lineage_areas=args.drop_trees_with_single_lineage_areas,
drop_trees_with_single_lineage_trait_states=args.drop_trees_with_single_lineage_trait_states,
run_logger=run_logger,
)
summary_results = []
output_root_dir = "."
output_dir = output_root_dir
# if not os.path.exists(output_dir):
# os.makedirs(output_dir)
extra_fields = parse_fieldname_and_value(args.labels)
stats_fields = set()
try:
for source_idx, tree_filepath in enumerate(args.source_paths):
if not args.quiet:
sys.stderr.write("Processing job {} of {}: {}\n".format(source_idx+1, len(args.source_paths), tree_filepath))
trees = dendropy.TreeList.get_from_path(
tree_filepath,
schema=args.schema,
preserve_underscores=not args.no_preserve_underscores,
suppress_internal_node_taxa=True,
suppress_external_node_taxa=True,
)
processed_trees, sub_stats_fields, sub_results = tree_summarizer.summarize_trees(
trees,
# progress_update_fn=_progress_update_fn,
# lineage_data_source=lineage_data_source,
# traits_filepath=traits_filepath,
# areas_filepath=areas_filepath,
)
stats_fields.update(sub_stats_fields)
if extra_fields:
for r in sub_results:
r.update(extra_fields)
summary_results.extend(sub_results)
except KeyboardInterrupt:
pass
stats_fields = sorted(list(stats_fields))
all_fields = list(extra_fields.keys()) + stats_fields
out = utility.open_output_file_for_csv_writer(
filepath=args.output_filepath,
append=args.append)
with out:
writer = csv.DictWriter(
out,
fieldnames=all_fields,
restval="NA",
delimiter=",",
lineterminator=os.linesep,
)
if not args.no_header_row:
writer.writeheader()
writer.writerows(summary_results)
if __name__ == "__main__":
main()
```
#### File: examples/executable_model_file/model1.py
```python
import sys
import random
import archipelago
## functions to return rates
def birth_weight1(**kwargs):
"""
Speciation rate is proportional to the number of areas occupied.
"""
lineage = kwargs["lineage"]
num_areas = len(lineage.areas)
r = float(num_areas) / (num_areas + 1)
return r
def area_gain_weight1(**kwargs):
"""
Something makes dispersal out of area s1 very high relative to the others
if the lineage's trait state is 0.
"""
lineage = kwargs["lineage"]
from_area = kwargs["from_area"]
to_area = kwargs["to_area"]
if from_area.index == 0 and lineage.trait_state("q1") == 0:
return 100.0
else:
return 1.0
def area_loss_weight1(**kwargs):
"""
Another very convoluted condition: if the current lineage has trait state
1, and there are lineages with trait state 0 in the area, then loss weight
is really high.
"""
lineage = kwargs["lineage"]
area = kwargs["area"]
if lineage.trait_state("q1") == 1:
for other_lineage in area.lineages:
if other_lineage.trait_state("q1") == 0:
return 100.0
return 1.0
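# the weight functions above receive the simulation state through keyword
# arguments (lineage, area, from_area, to_area) and return a weight; they are
# hooked into the model below via the "function_object" definition_type entries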
## model definition
model_definition_source = {
"areas": [
{'is_supplemental': True, 'label': 's1'},
{'is_supplemental': False, 'label': 'a1'},
{'is_supplemental': False, 'label': 'a2'},
{'is_supplemental': False, 'label': 'a3'},
{'is_supplemental': False, 'label': 'a4'},
],
"traits" : [
{"label": "q1", "nstates": 3, "transition_rate": 0.01, },
],
"diversification": {
"mean_birth_rate" : 1.0,
"lineage_birth_weight": {
"definition_type": "function_object",
"definition": birth_weight1,
},
"mean_death_rate" : 0.0,
"lineage_death_weight": {
"definition_type": "fixed_value",
"definition": 1.0
},
},
"anagenetic_range_evolution": {
"global_area_gain_rate": 0.01,
"lineage_area_gain_weight": {
"definition_type": "function_object",
"definition": area_gain_weight1,
},
"mean_area_loss_rate": 1.0,
"lineage_area_loss_weight": {
"definition_type": "function_object",
"definition": area_loss_weight1,
},
},
"cladogenetic_range_evolution": {
"sympatric_subset_speciation_weight": 1.0,
"single_area_vicariance_speciation_weight": 1.0,
"widespread_vicariance_speciation_weight": 1.0,
"founder_event_speciation_weight": 0.0
},
"termination_conditions": {
"target_focal_area_lineages": 20,
}
}
# execute simulations
archipelago.run(
output_prefix="results/m1",
nreps=10,
model_definition_source=model_definition_source,
model_definition_type="python-dict",
random_seed=random.randint(0, sys.maxsize))
```
|
{
"source": "jeetsukumaran/delineate",
"score": 2
}
|
#### File: src/delineate/control.py
```python
import itertools
import csv
import json
import sys
from delineate import model
from delineate import utility
from delineate import summarize
from dendropy.utility.container import OrderedCaselessDict
from dendropy.model import birthdeath
SPECIES_LEAFSET_CONSTRAINTS_KEY = "species_leafset_constraints"
LINEAGE_ID_FIELDNAME = "lineage"
SPECIES_ID_FIELDNAME = "species"
STATUS_FIELDNAME = "status"
CONFIGURATION_REQUIRED_FIELDS = [
LINEAGE_ID_FIELDNAME,
SPECIES_ID_FIELDNAME,
STATUS_FIELDNAME,
]
def get_controller(
args,
name,
logger=None,
logger_kwargs=None):
controller = Controller(
name=name,
is_case_sensitive=getattr(args, "is_case_sensitive", False),
is_fail_on_extra_tree_lineages=getattr(args, "is_fail_on_extra_tree_lineages", True),
is_fail_on_extra_configuration_lineages=getattr(args, "is_fail_on_extra_configuration_lineages", True),
logger=logger,
logger_kwargs=logger_kwargs)
controller.read_tree(
tree_filepath=args.tree_file,
schema=args.tree_format,
preserve_underscores=getattr(args, "preserve_underscores", True),
underflow_protection=getattr(args, "underflow_protection", False),
)
controller.parse_configuration_file(
constraints_filepath=args.constraints_file,
delimiter=None)
for param in (
"speciation_completion_rate_estimation_initial",
"speciation_completion_rate_estimation_min",
"speciation_completion_rate_estimation_max",
):
value = None
if param in controller.config_d:
value = controller.config_d[param]
av = getattr(args, param, None)
if av is not None:
value = av
setattr(controller, param, value)
controller.tree.birth_rate = birthdeath.fit_pure_birth_model_to_tree(tree=controller.tree)["birth_rate"]
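# unless explicit values were supplied via the configuration or command line,
# the speciation completion rate search defaults are tied to a pure-birth
# (Yule) rate fitted to the tree: initial = 1% of that rate, minimum = 1e-8,
# maximum = 10x that rate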
if controller.speciation_completion_rate_estimation_initial is None:
controller.speciation_completion_rate_estimation_initial = 0.01 * controller.tree.birth_rate
if controller.speciation_completion_rate_estimation_min is None:
controller.speciation_completion_rate_estimation_min = 1e-8
if controller.speciation_completion_rate_estimation_max is None:
controller.speciation_completion_rate_estimation_max = controller.tree.birth_rate * 10
controller.register_names()
return controller
class Registry(object):
def __init__(self, **kwargs):
self.tree_lineage_names = None
self.config_lineage_names = None
self.is_case_sensitive = kwargs.pop("is_case_sensitive", False)
self.is_fail_on_extra_tree_lineages = kwargs.pop("is_fail_on_extra_tree_lineages", True)
self.is_fail_on_extra_configuration_lineages = kwargs.pop("is_fail_on_extra_configuration_lineages", True)
self.logger = kwargs.pop("logger")
self.original_to_normalized_lineage_name_map = {}
self.config_name_normalization_report = {}
self.preanalysis_constrained_species_lineages_map = {}
if self.is_case_sensitive:
self.normalized_tree_lineage_names = {}
self.normalized_config_lineage_names = {}
self.normalized_species_names = {}
self.preanalysis_constrained_lineage_species_map = {}
else:
self.normalized_tree_lineage_names = OrderedCaselessDict()
self.normalized_config_lineage_names = OrderedCaselessDict()
self.normalized_species_names = OrderedCaselessDict()
self.preanalysis_constrained_lineage_species_map = OrderedCaselessDict()
self.extra_tree_lineage_names = []
self.extra_configuration_lineages = []
def normalize_lineage_names(self):
tree_lineage_set = set(self.tree_lineage_names)
# tree lineages give the canonical orthography
for lineage in self.tree_lineage_names:
self.normalized_tree_lineage_names[lineage] = lineage
self.original_to_normalized_lineage_name_map[lineage] = lineage
normalized_configuration_lineages = {}
extra_configuration_lineages = set()
for lineage in self.config_lineage_names:
self.normalized_config_lineage_names[lineage] = lineage
try:
normalized_name = self.normalized_tree_lineage_names[lineage]
self.original_to_normalized_lineage_name_map[lineage] = normalized_name
if normalized_name != lineage:
self.config_name_normalization_report[lineage] = "(NORMALIZED TO: '{}')".format(normalized_name)
normalized_configuration_lineages[lineage] = normalized_name
else:
self.config_name_normalization_report[lineage] = ""
except KeyError as e:
# This is a serious error: it means that the configuration file
# has a taxon that is not on the tree. But we handle this issue
# later so a full report can be shown
self.config_name_normalization_report[lineage] = "(NOT FOUND ON TREE)"
extra_configuration_lineages.add(lineage)
self.normalization_report(
normalized_configuration_lineages=normalized_configuration_lineages,
extra_configuration_lineages=extra_configuration_lineages)
def read_configuration_table_species(self,
conf_lineage_species_map,
conf_constrained_lineages):
if self.is_case_sensitive:
nccl = {}
else:
nccl = OrderedCaselessDict()
for ln in conf_constrained_lineages:
nccl[ln] = True
for lineage_name in conf_lineage_species_map:
if lineage_name not in nccl:
continue
species_name = conf_lineage_species_map[lineage_name]
if species_name not in self.normalized_species_names:
self.normalized_species_names[species_name] = species_name
else:
species_name = self.normalized_species_names[species_name]
try:
normalized_lineage_name = self.original_to_normalized_lineage_name_map[lineage_name]
except KeyError:
utility.error_exit(
msg="Lineage '{}' not defined (missing on tree?)".format(lineage_name),
logger=self.logger)
if normalized_lineage_name in self.preanalysis_constrained_lineage_species_map:
utility.error_exit(
msg="Duplicate lineage species assignment: '{}'".format(normalized_lineage_name),
logger=self.logger)
self.preanalysis_constrained_lineage_species_map[normalized_lineage_name] = species_name
try:
self.preanalysis_constrained_species_lineages_map[species_name].add(normalized_lineage_name)
except KeyError:
self.preanalysis_constrained_species_lineages_map[species_name] = set([normalized_lineage_name])
self.preanalysis_constrained_species_report()
def compile_configuration_species_groupings(self, species_leafset_constraints):
for spi, sp in enumerate(species_leafset_constraints):
lineages = []
species_name = "ConstrainedSp{:03d}".format(spi+1)
self.normalized_species_names[species_name] = species_name
for lineage_name in sp:
try:
normalized_lineage_name = self.original_to_normalized_lineage_name_map[lineage_name]
except KeyError:
utility.error_exit(
msg="Lineage '{}' not defined (missing on tree?)".format(lineage_name),
logger=self.logger)
self.preanalysis_constrained_lineage_species_map[normalized_lineage_name] = species_name
try:
self.preanalysis_constrained_species_lineages_map[species_name].add(normalized_lineage_name)
except KeyError:
self.preanalysis_constrained_species_lineages_map[species_name] = set([normalized_lineage_name])
self.preanalysis_constrained_species_report()
def preanalysis_constrained_species_report(self):
species_names = sorted(self.preanalysis_constrained_species_lineages_map.keys())
num_lineages = ["({} lineages)".format(len(self.preanalysis_constrained_species_lineages_map[n])) for n in species_names]
stbl = utility.compose_table(
columns=[
species_names,
num_lineages,
],
prefixes=["", ""],
quoted=[True, False],
is_indexed=True,
indent=" ")
self.logger.info("{} species defined in configuration constraints, with {} lineages assigned:\n{}".format(
len(species_names),
len(self.preanalysis_constrained_lineage_species_map),
stbl,
))
constrained_lineages = sorted(self.preanalysis_constrained_lineage_species_map.keys(), key=lambda n: (self.preanalysis_constrained_lineage_species_map[n], n))
species_assignments = ["(SPECIES: '{}')".format(self.preanalysis_constrained_lineage_species_map[n]) for n in constrained_lineages]
lntbl = utility.compose_table(
columns=[
constrained_lineages,
species_assignments,
],
prefixes=["", ""],
quoted=[True, False],
is_indexed=True,
indent=" ")
self.logger.info("{} out of {} lineages assigned by constraints to {} species:\n{}".format(
len(constrained_lineages),
len(self.tree_lineage_names),
len(species_names),
lntbl,
))
unconstrained_lineages = sorted(n for n in self.tree_lineage_names if n not in self.preanalysis_constrained_lineage_species_map)
lntbl = utility.compose_table(
columns=[
unconstrained_lineages,
],
prefixes=[""],
quoted=[True],
is_indexed=True,
indent=" ")
self.logger.info("{} out of {} lineages not constrained by species assignments:\n{}".format(
len(unconstrained_lineages),
len(self.tree_lineage_names),
lntbl,
))
assert len(unconstrained_lineages) + len(constrained_lineages) == len(self.tree_lineage_names)
def normalization_report(self,
normalized_configuration_lineages,
extra_configuration_lineages):
treetbl = utility.compose_table(
columns=[self.tree_lineage_names,
["(NOT FOUND IN CONFIGURATION)" if lineage not in self.normalized_config_lineage_names else "" for lineage in self.tree_lineage_names],
],
prefixes=["", ""],
quoted=[True, False],
is_indexed=True,
indent=" ")
self.logger.info("{} lineages found on population tree:\n{}".format(
len(self.tree_lineage_names),
treetbl,
))
if extra_configuration_lineages:
cfntbl = utility.compose_table(
columns=[self.config_lineage_names,
[self.config_name_normalization_report[n] for n in self.config_lineage_names]
],
prefixes=["", ""],
quoted=[True, False],
is_indexed=True,
indent=" ")
self.logger.info("{} lineages found in configuration file:\n{}".format(
len(self.config_lineage_names),
cfntbl,
))
elif normalized_configuration_lineages:
n1 = list(normalized_configuration_lineages.keys())
n2 = [normalized_configuration_lineages[k] for k in n1]
cfntbl = utility.compose_table(
columns=[n1, n2, ],
prefixes=["", "NORMALIZED TO: "],
quoted=[True, True],
is_indexed=True,
indent=" ")
self.logger.info("{} lineages found in configuration file, with the following normalized for concordance with tree lineages:\n{}".format(
len(self.config_lineage_names),
cfntbl,
))
else:
self.logger.info("{} lineages found in configuration file fully concordant with tree lineages".format(
len(self.config_lineage_names),
))
def validate_lineage_names(self):
for lineage in self.config_lineage_names:
if lineage not in self.normalized_tree_lineage_names:
self.extra_configuration_lineages.append(lineage)
for lineage in self.tree_lineage_names:
if lineage not in self.normalized_config_lineage_names:
self.extra_tree_lineage_names.append(lineage)
if self.extra_tree_lineage_names:
s1_error_msg = ["{}: {} lineages found on tree but not in configuration data:".format(
"ERROR" if self.is_fail_on_extra_tree_lineages else "WARNING",
len(self.extra_tree_lineage_names))]
s1_error_msg.append(self.compose_name_list(self.extra_tree_lineage_names))
s1_error_msg = "\n".join(s1_error_msg)
else:
s1_error_msg = ""
if self.extra_configuration_lineages:
s2_error_msg = ["{}: {} lineages found in configuration data but not on tree:".format(
"ERROR" if self.is_fail_on_extra_configuration_lineages else "WARNING",
len(self.extra_configuration_lineages))]
s2_error_msg.append(self.compose_name_list(self.extra_configuration_lineages))
s2_error_msg = "\n".join(s2_error_msg)
else:
s2_error_msg = ""
is_fail = []
if self.extra_tree_lineage_names and self.is_fail_on_extra_tree_lineages:
self.logger.error(s1_error_msg)
is_fail.append("1")
elif s1_error_msg:
self.logger.warning(s1_error_msg)
if self.extra_configuration_lineages and self.is_fail_on_extra_configuration_lineages:
self.logger.error(s2_error_msg)
is_fail.append("2")
elif s2_error_msg:
self.logger.warning(s2_error_msg)
if is_fail:
utility.error_exit(
msg="Lineage identity errors found ({})".format(", ".join(is_fail)),
logger=self.logger)
def compose_name_list(self, names):
s = utility.compose_table(
columns=[names],
prefixes=[""],
quoted=[True],
is_indexed=True,
indent=" ")
return s
def compose_report(self):
msg = []
msg.append("{} terminal lineages on population tree".format(len(self.tree_lineage_names)))
msg.append("{} lineages described in configuration file".format(len(self.config_lineage_names)))
class Controller(object):
def __init__(self,
name,
is_case_sensitive=True,
is_fail_on_extra_tree_lineages=False,
is_fail_on_extra_configuration_lineages=True,
logger=None,
logger_kwargs=None):
self.name = name
self.is_case_sensitive = is_case_sensitive
self.is_fail_on_extra_tree_lineages = is_fail_on_extra_tree_lineages
self.is_fail_on_extra_configuration_lineages = is_fail_on_extra_configuration_lineages
self._logger = logger
if logger_kwargs:
self._logger_kwargs = dict(logger_kwargs)
else:
self._logger_kwargs = {}
self._speciation_completion_rate = None
self._species_leafset_constraint_labels = None
self._constrained_lineage_leaf_labels = None
self.registry = Registry(
is_case_sensitive=self.is_case_sensitive,
is_fail_on_extra_tree_lineages=self.is_fail_on_extra_tree_lineages,
is_fail_on_extra_configuration_lineages=self.is_fail_on_extra_configuration_lineages,
logger=self.logger,
)
self.config_d = {}
self.tree = None
@property
def logger(self):
if self._logger is None:
self._logger = utility.RunLogger(
name=self._logger_kwargs.pop("name", self.name),
is_include_name=self._logger_kwargs.pop("is_include_name", True),
is_include_timestamp=self._logger_kwargs.pop("is_include_timestamp", True),
is_log_to_stderr=self._logger_kwargs.pop("is_log_to_stderr", True),
stderr_logging_level=utility.logging.INFO,
is_log_to_file=self._logger_kwargs.pop("is_log_file", False),
file_logging_level=utility.logging.INFO,
**self._logger_kwargs,
)
return self._logger
@logger.setter
def logger(self, value):
self._logger = value
@logger.deleter
def logger(self):
del self._logger
@property
def speciation_completion_rate(self):
if self._speciation_completion_rate is None:
try:
self._speciation_completion_rate = self.config_d["speciation_completion_rate"]
except KeyError as e:
return None
return self._speciation_completion_rate
@speciation_completion_rate.setter
def speciation_completion_rate(self, value):
self._speciation_completion_rate = value
@speciation_completion_rate.deleter
def speciation_completion_rate(self):
del self._speciation_completion_rate
@property
def species_leafset_constraint_labels(self):
if self._species_leafset_constraint_labels is None:
try:
self._species_leafset_constraint_labels = self.config_d[SPECIES_LEAFSET_CONSTRAINTS_KEY]
except KeyError as e:
pass
return self._species_leafset_constraint_labels
@species_leafset_constraint_labels.setter
def species_leafset_constraint_labels(self, value):
self._species_leafset_constraint_labels = value
@species_leafset_constraint_labels.deleter
def species_leafset_constraint_labels(self):
del self._species_leafset_constraint_labels
@property
def has_species_constraints(self):
return SPECIES_LEAFSET_CONSTRAINTS_KEY in self.config_d
@property
def constrained_lineage_leaf_labels(self):
if self._constrained_lineage_leaf_labels is None:
self._constrained_lineage_leaf_labels = list(itertools.chain(*self.species_leafset_constraint_labels))
return self._constrained_lineage_leaf_labels
def read_tree(self,
tree_filepath,
schema,
preserve_underscores=True,
underflow_protection=True,
):
self.tree = model.LineageTree.get(
path=tree_filepath,
schema=schema,
preserve_underscores=preserve_underscores,
)
self.tree.is_use_decimal_value_type = underflow_protection
# self.tree.birth_rate = birthdeath.fit_pure_birth_model_to_tree(
# tree=self.tree)["birth_rate"]
return self.tree
def parse_configuration_file(self,
constraints_filepath,
delimiter=None,
):
if constraints_filepath:
with open(constraints_filepath) as src:
if constraints_filepath.endswith("json"):
self.config_d = json.load(src)
else:
self.config_d = self.parse_configuration_table_file(
src=src,
delimiter=delimiter)
else:
self.config_d = {}
return self.config_d
def parse_configuration_table_file(
self,
src,
delimiter=None,
):
self.config_d = {}
if delimiter is None:
dialect = csv.Sniffer().sniff(src.read(), delimiters=",\t")
src.seek(0)
# delimiter = dialect.delimiter
src_data = csv.DictReader(src,
dialect=dialect)
else:
src_data = csv.DictReader(src,
delimiter=delimiter,
quoting=csv.QUOTE_NONE,
)
fieldname_set = set(src_data.fieldnames)
for required_field in CONFIGURATION_REQUIRED_FIELDS:
if required_field not in fieldname_set:
utility.error_exit(
msg="Required field '{}' not found in configuration source".format(required_field),
logger=self.logger)
species_constrained_lineage_map = {}
lineage_species_map = {}
known = []
unknown = []
for entry in src_data:
if entry[STATUS_FIELDNAME] == "1":
try:
species_constrained_lineage_map[entry[SPECIES_ID_FIELDNAME]].append(entry[LINEAGE_ID_FIELDNAME])
except KeyError:
species_constrained_lineage_map[entry[SPECIES_ID_FIELDNAME]] = [entry[LINEAGE_ID_FIELDNAME]]
known.append(entry[LINEAGE_ID_FIELDNAME])
lineage_species_map[entry[LINEAGE_ID_FIELDNAME]] = entry[SPECIES_ID_FIELDNAME]
elif entry[STATUS_FIELDNAME] == "0":
unknown.append(entry[LINEAGE_ID_FIELDNAME])
lineage_species_map[entry[LINEAGE_ID_FIELDNAME]] = entry[SPECIES_ID_FIELDNAME]
pass
else:
utility.error_exit(
msg="Unrecognized status: '{}'".format(entry[STATUS_FIELDNAME]),
logger=self.logger)
species_leafset_constraints = []
for key in species_constrained_lineage_map:
species_leafset_constraints.append(species_constrained_lineage_map[key])
assert len(species_leafset_constraints) == len(species_constrained_lineage_map)
self.config_d = {}
self.config_d[SPECIES_LEAFSET_CONSTRAINTS_KEY] = species_leafset_constraints
self.config_d["configuration_table"] = {}
self.config_d["configuration_table"]["lineages"] = known + unknown
self.config_d["configuration_table"]["constrained_lineages"] = known
self.config_d["configuration_table"]["unconstrained_lineages"] = unknown
self.config_d["configuration_table"]["lineage_species_map"] = lineage_species_map
self.config_d["configuration_table"]["constrained_lineage_species_map"] = {lineage_name:lineage_species_map[lineage_name] for lineage_name in known}
self.config_d["configuration_table"]["species_constrained_lineage_map"] = species_constrained_lineage_map
return self.config_d
def register_names(self):
self.register_lineage_names()
self.register_preanalysis_species_names()
def register_lineage_names(self):
if self.tree is None:
raise ValueError("'tree' not set")
self.registry.tree_lineage_names = [t.label for t in self.tree.taxon_namespace]
if "configuration_table" in self.config_d and "lineages" in self.config_d["configuration_table"]:
self.registry.config_lineage_names = list(self.config_d["configuration_table"]["lineages"])
else:
self.registry.config_lineage_names = list(self.registry.tree_lineage_names)
# self.registry.config_lineage_names = []
if not self.registry.config_lineage_names:
return
self.registry.normalize_lineage_names()
self.registry.validate_lineage_names()
def register_preanalysis_species_names(self):
species_constrained_lineage_map = {}
constrained_lineage_species_map = {}
full_lineage_species_map = {}
seen_lineages = set()
if "configuration_table" in self.config_d:
self.registry.read_configuration_table_species(
conf_lineage_species_map=self.config_d["configuration_table"]["lineage_species_map"],
conf_constrained_lineages=self.config_d["configuration_table"]["constrained_lineages"],
)
elif SPECIES_LEAFSET_CONSTRAINTS_KEY in self.config_d:
self.registry.compile_configuration_species_groupings(species_leafset_constraints=self.config_d[SPECIES_LEAFSET_CONSTRAINTS_KEY])
def write_configuration(
self,
output_file,
output_format,
output_delimiter="\t",):
if output_format == "json":
d = {}
names = self.registry.preanalysis_constrained_species_lineages_map
d[SPECIES_LEAFSET_CONSTRAINTS_KEY] = [list(self.registry.preanalysis_constrained_species_lineages_map[n]) for n in names]
d["species_names"] = list(names)
            if False: # output_format == "json-compact" (compact JSON output currently disabled)
                json.dump(d, output_file)
else:
json.dump(d, output_file, indent=4, separators=(',', ': '))
else:
output_file.write("{}\n".format(output_delimiter.join(CONFIGURATION_REQUIRED_FIELDS)))
for lineage_name in self.registry.normalized_tree_lineage_names:
row = []
row.append(lineage_name)
row.append(self.registry.preanalysis_constrained_lineage_species_map.get(lineage_name, "?"))
if lineage_name in self.registry.preanalysis_constrained_lineage_species_map:
row.append("1")
else:
row.append("0")
output_file.write("{}\n".format(output_delimiter.join(row)))
def compile_postanalysis_lineage_species_name_map(self, postanalysis_species_leafset_labels):
return summarize.compile_postanalysis_lineage_species_name_map(
preanalysis_constrained_lineage_species_map=self.registry.preanalysis_constrained_lineage_species_map,
postanalysis_species_leafset_labels=postanalysis_species_leafset_labels,
)
```
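The block above defines the configuration/tree registration machinery (`Registry`, `Controller`). A minimal usage sketch follows; the import path, file names, and delimiter are assumptions for illustration only and are not taken from the source, so the real package layout and inputs may differ.
```python
import sys
from delineate.control import Controller  # assumed import path

controller = Controller(
        name="delineate-example",
        is_case_sensitive=False,
        is_fail_on_extra_tree_lineages=False,
        is_fail_on_extra_configuration_lineages=True)
# Hypothetical input files: a lineage tree and a constraint table with the
# required lineage/species/status columns.
controller.read_tree(tree_filepath="lineages.nex", schema="nexus")
controller.parse_configuration_file(constraints_filepath="constraints.tsv", delimiter="\t")
controller.register_names()
# Echo the normalized constraint table back out (any non-"json" format
# produces the tab-delimited table form).
controller.write_configuration(
        output_file=sys.stdout,
        output_format="table",
        output_delimiter="\t")
```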
#### File: delineate-docs/source/conf.py
```python
project = 'DELINEATE'
copyright = '2020, <NAME> and <NAME>'
author = '<NAME> and <NAME>'
# The full version, including alpha/beta/rc tags
release = '1.2.3'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.githubpages",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
html_sidebars = { '**': ['globaltoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html'] }
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'bizstyle'
def setup(app):
app.add_stylesheet("css/docs.css")
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
rst_epilog = """
.. |Anaconda| replace:: :program:`Anaconda`
.. _Anaconda: https://www.anaconda.com/
.. |BPP| replace:: :program:`BP&P`
.. _BPP: https://github.com/bpp/bpp
.. |delineate| replace:: DELINEATE
.. |pip| replace:: :program:`pip`
.. _pip: https://docs.python.org/3/installing/index.html
.. |Python| replace:: :program:`Python`
.. _Python: https://www.python.org/
.. |StarBeast2| replace:: :program:`StarBeast2`
.. _StarBeast2: https://taming-the-beast.org/tutorials/starbeast2-tutorial/
.. |SumTrees| replace:: :program:`SumTrees`
.. _SumTrees: https://dendropy.org/programs/sumtrees.html
.. |DendroPy| replace:: :program:`DendroPy`
.. _DendroPy: https://dendropy.org/
.. |bppsum| replace:: :program:`delineate-bppsum`
.. _bppsum: https://delineate.org/
.. |FigTree| replace:: :program:`FigTree`
.. _FigTree: http://tree.bio.ed.ac.uk/software/figtree/
.. |Tracer| replace:: :program:`Tracer`
.. _Tracer: http://tree.bio.ed.ac.uk/software/tracer/
"""
```
#### File: tests/supplemental/marginal_prob.py
```python
from __future__ import print_function
import dendropy
import math
import sys
class SF(object):
"""Enum class to name bits associated with being anc of none, some and all selected tips."""
UNSET = 0 # has no descendants that are selected
SEL_DES = 1 # anc to some selected tips
CA_BIT = 2 # bit representing "is a common anc to all of the selected tips"
CA_FLAG = 3 # flag for a common ancestor of all selected tips
def calc_marginal_probability_of_species(tree, selected_tip_labels, good_sp_rate):
"""Calculates the marginal probability that there is a "good" species with the tip labels
that correspond to the set `selected_tip_labels`.
"""
num_sel = len(selected_tip_labels)
sel_as_flag = SF.CA_FLAG if num_sel == 1 else SF.SEL_DES
total_prob = 0.0
for nd in tree.postorder_node_iter():
if nd.is_leaf():
if nd.taxon.label in selected_tip_labels:
nd.num_sel = 1
nd.anc_status = sel_as_flag
nd.accum_prob = 1.0
else:
nd.num_sel = 0
nd.anc_status = SF.UNSET
nd.accum_prob = 0.0
else:
nd.num_sel = 0
for c in nd.child_nodes():
nd.num_sel += c.num_sel
if nd.num_sel == 0:
nd.anc_status = SF.UNSET
elif nd.num_sel == num_sel:
nd.anc_status = SF.CA_FLAG
else:
nd.anc_status = SF.SEL_DES
total_prob += accum_prob(nd, good_sp_rate)
total_prob += tree.seed_node.accum_prob
return total_prob
def accum_prob(nd, good_sp_rate):
"""Fills in the accum_prob slot for nd, and returns any contribution to the probability of
the selected taxa being a good species.
"""
ap = 1.0
ret = 0.0
for child in nd.child_nodes():
c_brlen = child.edge.length
scaled_brlen = c_brlen * good_sp_rate
prob_no_sp = math.exp(-scaled_brlen)
prob_sp = 1.0 - prob_no_sp
if child.anc_status & SF.SEL_DES:
if child.anc_status & SF.CA_BIT:
ret = prob_sp * child.accum_prob
contrib = prob_no_sp * child.accum_prob
else:
contrib = prob_sp + prob_no_sp * child.accum_prob
ap *= contrib
nd.accum_prob = ap
return ret
def main(tree_filename, good_sp_rate, selected_tip_labels):
tree = dendropy.Tree.get(path=tree_filename, schema="newick")
selected_nodes = []
labels_found = set()
for tip in tree.leaf_node_iter():
tl = tip.taxon.label
if tl in selected_tip_labels:
if tl in labels_found:
sys.exit('Tip label "{}" occurred twice in the tree!\n'.format(tl))
labels_found.add(tl)
selected_nodes.append(tip)
if labels_found != selected_tip_labels:
d = selected_tip_labels - labels_found
sys.exit('Not all tips were found. Missing: "{}"\n'.format('", "'.join(list(d))))
prob_good = calc_marginal_probability_of_species(tree, selected_tip_labels, good_sp_rate)
stn = list(selected_tip_labels)
stn.sort()
stl = ','.join(stn)
print('Pr({' + stl + '}) = ' + str(prob_good))
if __name__ == '__main__':
import sys
try:
rate = 1.0
filename = sys.argv[1]
assert len(sys.argv) > 3
rate = float(sys.argv[2])
assert rate > 0.0
selected_tip_label_list = sys.argv[3:]
selected_tip_label_set = set(selected_tip_label_list)
if len(selected_tip_label_set) < len(selected_tip_label_list):
sys.stderr.write('WARN: some tip labels were repeated in the command line.\n')
except:
        sys.exit('''Expecting at least 3 arguments:
1. the filepath to a rooted newick tree with branch lengths,
2. a rate of good speciation events (branch length multiplier), and
3. a series of taxa labels that designate the possible conspecific lineages.
''')
main(filename, rate, selected_tip_label_set)
```
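Because `marginal_prob.py` is a stand-alone supplemental script, its core routine can also be exercised directly on a toy tree. The snippet below is illustrative only: the newick string, tip labels, and rate are made up, and it assumes the definitions above are already in scope (e.g., the file has been imported or exec'd).
```python
import dendropy

toy_tree = dendropy.Tree.get(
    data="((A:1.0,B:1.0):1.0,C:2.0);",
    schema="newick")
# Marginal probability that {A, B} together form a single "good" species,
# given a good-speciation rate of 0.5.
prob = calc_marginal_probability_of_species(toy_tree, {"A", "B"}, 0.5)
print(prob)
```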
|
{
"source": "jeetsukumaran/gerenuk",
"score": 3
}
|
#### File: gerenuk/bin/gerenuk-filter-columns.py
```python
import os
import sys
import argparse
import traceback
import time
import tempfile
from gerenuk import utility
def main():
parser = argparse.ArgumentParser(
description="GERENUK Simultaneous Divergence Time Analysis -- Post-Process Columns",
)
parser.add_argument(
"target_data_filepath",
nargs="+",
help="Path to target or observed data file.")
filter_options = parser.add_argument_group("Filter Options")
filter_options.add_argument(
"--master-column-filepath",
help="If specified, then only columns with names"
" in this file will be retained in the target"
" file(s).")
run_options = parser.add_argument_group("Run Options")
run_options.add_argument('--field-delimiter',
type=str,
default='\t',
help="Field delimiter (default: <TAB>').")
run_options.add_argument(
"-q", "--quiet",
action="store_true",
help="Work silently.")
args = parser.parse_args()
if not (sys.version_info.major >= 3 and sys.version_info.minor >= 4):
open = utility.pre_py34_open
if args.master_column_filepath is not None:
if not args.quiet:
sys.stderr.write("-gerenuk- Retaining only columns defined in '{}'\n".format(args.master_column_filepath))
with open(args.master_column_filepath) as master_src:
columns_to_retain = utility.extract_fieldnames_from_file(
src=master_src,
field_delimiter=args.field_delimiter)
for filepath in args.target_data_filepath:
with open(filepath) as src:
with open(filepath + ".filtered", "w") as dest:
# with tempfile.NamedTemporaryFile() as dest:
utility.filter_columns_from_file(
src=src,
dest=dest,
columns_to_retain=columns_to_retain,
field_delimiter=args.field_delimiter)
else:
raise NotImplementedError
if __name__ == "__main__":
main()
```
|
{
"source": "jeetsukumaran/SisterBayes",
"score": 3
}
|
#### File: SisterBayes/bin/sisterbayes-reject.py
```python
import math
import csv
import os
import sys
import argparse
import heapq
import collections
import sisterbayes
from sisterbayes import utility
class SisterBayesRejectorStatsVectorSizeException(Exception):
pass
class SisterBayesRejectorStatsVectorValueException(ValueError):
pass
class SisterBayesRejector(object):
def __init__(self,
rejection_criteria_type,
rejection_criteria_value,
run_logger,
stats_field_prefix="stat",
logging_frequency=1000,
field_delimiter="\t",
is_write_summary_stats=False,
is_write_rejection_score=False,
is_output_target_params=False,
is_suppress_checks=False,
is_ignore_invalid_priors_data_vectors=False,
):
self.rejection_criteria_type = rejection_criteria_type
self.rejection_criteria_value = rejection_criteria_value
self.run_logger = run_logger
self.stats_field_prefix = stats_field_prefix
self.logging_frequency = logging_frequency
self.field_delimiter = field_delimiter
self.is_write_summary_stats = is_write_summary_stats
self.is_write_rejection_score = is_write_rejection_score
self.is_output_target_params = is_output_target_params
self.is_suppress_checks = is_suppress_checks
self.is_ignore_invalid_priors_data_vectors = is_ignore_invalid_priors_data_vectors
self.stat_fieldnames = None
self.stat_fieldnames_check = None
self.non_stat_fieldnames = None
self.distance_score_fieldname = "rejection.score"
self.normalized_distance_score_fieldname = "rejection.score.normalized"
def euclidean_distance(self, vector1, vector2):
assert len(vector1) == len(vector2)
dist = [(a - b)**2 for a, b in zip(vector1, vector2)]
dist = math.sqrt(sum(dist))
return dist
def extract_stat_fieldnames(self, fieldnames):
stat_fieldnames = []
non_stat_fieldnames = []
for fieldname in fieldnames:
if fieldname.startswith(self.stats_field_prefix):
stat_fieldnames.append(fieldname)
else:
non_stat_fieldnames.append(fieldname)
assert len(stat_fieldnames) == len(set(stat_fieldnames))
assert len(non_stat_fieldnames) == len(set(non_stat_fieldnames))
return stat_fieldnames, non_stat_fieldnames
def extract_stats_data_vector_from_csv_row(self, row):
try:
data_vector = [float(v) for v in (row[k] for k in self.stat_fieldnames)]
except ValueError:
raise SisterBayesRejectorStatsVectorValueException()
if len(data_vector) != len(self.stat_fieldnames_set):
raise SisterBayesRejectorStatsVectorSizeException()
return data_vector
def process(self,
target_data_filepath,
priors_data_filepaths,
output_name_prefix,
output_directory,
output_suffix):
if output_name_prefix is None:
output_name_prefix = os.path.splitext(os.path.basename(target_data_filepath))[0]
if output_suffix is None:
output_suffix = ""
else:
output_suffix = "." + output_suffix
with utility.universal_open(target_data_filepath) as src:
target_data_reader = csv.DictReader(
src,
delimiter=self.field_delimiter,
quoting=csv.QUOTE_NONE)
for target_row_idx, target_row in enumerate(target_data_reader):
if target_row_idx == 0:
self.stat_fieldnames, self.non_stat_fieldnames = self.extract_stat_fieldnames(target_data_reader.fieldnames)
self.stat_fieldnames_set = set(self.stat_fieldnames)
self.run_logger.info("Scoring target data {}".format(target_row_idx+1))
target_data_vector = self.extract_stats_data_vector_from_csv_row(target_row)
posteriors_filepath = os.path.join(output_directory, "{}.posterior.{:03d}.samples{}.tsv".format(output_name_prefix, target_row_idx+1, output_suffix))
self.accept_reject(
target_data_vector=target_data_vector,
priors_data_filepaths=priors_data_filepaths,
output_filepath=posteriors_filepath)
if self.is_output_target_params:
target_params_filepath = os.path.join(output_directory, "{}.posterior.{:03d}.target{}.tsv".format(output_name_prefix, target_row_idx+1, output_suffix))
with open(target_params_filepath, "w") as target_params_f:
target_params_f.write(self.field_delimiter.join(self.non_stat_fieldnames))
target_params_f.write("\n")
target_params_f.write(self.field_delimiter.join(str(target_row[k]) for k in self.non_stat_fieldnames))
target_params_f.write("\n")
def accept_reject(self,
target_data_vector,
priors_data_filepaths,
output_filepath):
if self.rejection_criteria_type == "num":
num_to_retain = self.rejection_criteria_value
else:
num_to_retain = None
dest = utility.universal_open(output_filepath, "w")
all_prior_fieldnames = []
all_prior_fieldnames_set = None
accepted_heap = []
for fidx, priors_data_filepath in enumerate(priors_data_filepaths):
self.run_logger.info("Reading simulation file {} of {}: '{}'".format(fidx+1, len(priors_data_filepaths), priors_data_filepath))
with utility.universal_open(priors_data_filepath) as src:
priors_data_reader = csv.DictReader(
src,
delimiter=self.field_delimiter,
quoting=csv.QUOTE_NONE)
for row_idx, row in enumerate(priors_data_reader):
if self.logging_frequency and row_idx > 0 and row_idx % self.logging_frequency == 0:
self.run_logger.info("Reading simulation file {} of {}, row {}".format(fidx+1, len(priors_data_filepaths), row_idx+1))
if row_idx == 0:
if fidx == 0:
all_prior_fieldnames = list(priors_data_reader.fieldnames)
all_prior_fieldnames_set = set(all_prior_fieldnames)
current_file_stat_fieldnames = set(self.extract_stat_fieldnames(priors_data_reader.fieldnames)[0])
s1 = current_file_stat_fieldnames - self.stat_fieldnames_set
if s1:
raise ValueError("File '{}': Following summary statistics fields not found in target: {}".format(
priors_data_filepath, ", ".join(s1)))
s2 = self.stat_fieldnames_set - current_file_stat_fieldnames
if s2:
raise ValueError("File '{}': Following summary statistics fields given in target but not found here: {}".format(
priors_data_filepath, ", ".join(s2)))
header_row = []
for fnidx, fn in enumerate(all_prior_fieldnames):
if self.is_write_summary_stats or fn not in self.stat_fieldnames_set:
header_row.append(fn)
if self.is_write_rejection_score:
header_row.append(self.distance_score_fieldname)
header_row.append(self.normalized_distance_score_fieldname)
dest.write("{}\n".format(self.field_delimiter.join(header_row)))
else:
current_file_fieldnames = set(priors_data_reader.fieldnames)
s1 = current_file_fieldnames - all_prior_fieldnames_set
if s1:
raise ValueError("File '{}': Following fields found, but not found in previous files: {}".format(
priors_data_filepath, ", ".join(s1)))
s2 = all_prior_fieldnames_set - current_file_fieldnames
if s2:
raise ValueError("File '{}': Following fields found in previous files, but not found here: {}".format(
priors_data_filepath, ", ".join(s2)))
try:
prior_data_vector = self.extract_stats_data_vector_from_csv_row(row)
except SisterBayesRejectorStatsVectorValueException:
if self.is_ignore_invalid_priors_data_vectors:
continue
else:
raise
except SisterBayesRejectorStatsVectorSizeException:
if self.is_ignore_invalid_priors_data_vectors:
continue
else:
raise
distance_score = self.euclidean_distance(target_data_vector, prior_data_vector)
row_values = self.field_delimiter.join(row[fn] for fn in priors_data_reader.fieldnames if self.is_write_summary_stats or fn not in self.stat_fieldnames_set)
if self.is_write_rejection_score:
row_values = "{}{}{}".format(row_values, self.field_delimiter, distance_score)
# Normalize by number of comparisons
# How do we get this?
# Consider the following vectors:
# > x1 = c(3.1, 3.1, 3.1)
# > x2 = c(5.1, 5.1, 5.1)
# > y1 = c(3.1, 3.1, 3.1, 3.1, 3.1)
# > y2 = c(5.1, 5.1, 5.1, 5.1, 5.1)
# The naive/raw Euclidean distances are different, due to the different number of comparisons:
# > sqrt(sum((x2-x1)**2))
# [1] 3.464102
# > sqrt(sum((y2-y1)**2))
# [1] 4.472136
# But dividing the be sqrt of the length of the vectors makes them equal:
# > sqrt(sum((x2-x1)**2)) / sqrt(3)
# [1] 2
# > sqrt(sum((y2-y1)**2)) / sqrt(5)
# [1] 2
normalized_distance_score = distance_score / math.sqrt(len(target_data_vector))
row_values = "{}{}{}".format(row_values, self.field_delimiter, normalized_distance_score)
heap_score = -1 * (distance_score)
heap_entry = (heap_score, row_values)
if self.rejection_criteria_type == "distance":
if distance_score <= self.rejection_criteria_value:
accepted_heap.append(heap_entry)
elif self.rejection_criteria_type == "num":
if len(accepted_heap) < num_to_retain:
accepted_heap.append(heap_entry)
if len(accepted_heap) == num_to_retain:
heapq.heapify(accepted_heap)
else:
heapq.heappushpop(accepted_heap, heap_entry)
else:
raise NotImplementedError(self.rejection_criteria_type)
# for fnidx, fn in enumerate(all_prior_fieldnames):
# value = row[fn]
# if self.is_write_summary_stats or fn not in self.stat_fieldnames_set:
# dest.write("{}{}".format(value, self.field_delimiter))
# dest.write("{}\n".format(distance))
accepted_heap.sort(reverse=True)
for hidx, heap_entry in enumerate(accepted_heap):
heap_entry = accepted_heap[hidx]
dest.write(heap_entry[1])
dest.write("\n")
dest.flush()
dest.close()
# def accept_posteriors(self, distanced_scored_params_filepath, num_samples, output_filepath):
# dest = utility.universal_open(output_filepath, "w")
# if self.rejection_criteria_type == "num":
# num_to_retain = self.rejection_criteria_value
# elif self.rejection_criteria_type == "proportion":
# num_to_retain = int(self.rejection_criteria_value * num_samples)
# accepted_heap = []
# self.run_logger.info("Accepting/rejecting simulations from the prior ...")
# with utility.universal_open(distanced_scored_params_filepath) as src:
# priors_data_reader = csv.DictReader(
# src,
# delimiter=self.field_delimiter,
# quoting=csv.QUOTE_NONE)
# for row_idx, row in enumerate(priors_data_reader):
# if self.logging_frequency and row_idx > 0 and row_idx % self.logging_frequency == 0:
# self.run_logger.info("Accepting/rejecting: row {}".format(row_idx+1))
# if row_idx == 0:
# dest.write(self.field_delimiter.join(priors_data_reader.fieldnames))
# dest.write("\n")
# distance_score = float(row[self.distance_score_fieldname])
# row_values = self.field_delimiter.join(row[k] for k in priors_data_reader.fieldnames)
# if self.rejection_criteria_type == "distance":
# if float(distance_score) <= self.rejection_criteria_value:
# dest.write(row_values)
# dest.write("\n")
# else:
# # heap_score = (1.0/(distance_score + 1))
# # heap_score = -1 * (distance_score + 1)
# heap_score = -1 * (distance_score)
# heap_entry = (heap_score, row_values)
# if len(accepted_heap) < num_to_retain:
# accepted_heap.append( heap_entry )
# if len(accepted_heap) == num_to_retain:
# heapq.heapify(accepted_heap)
# else:
# heapq.heappushpop(accepted_heap, heap_entry)
# if self.rejection_criteria_type != "distance":
# accepted_heap.sort(reverse=True)
# for hidx, heap_entry in enumerate(accepted_heap):
# heap_entry = accepted_heap[hidx]
# dest.write(heap_entry[1])
# dest.write("\n")
# dest.flush()
# dest.close()
def main():
package_id = sisterbayes.package_id()
parser = argparse.ArgumentParser(
description="SISTERBAYES Rejection Sampler",
)
parser.add_argument("--version", action="version", version=package_id)
parser.add_argument(
"target_data_filepath",
help="Path to target or observed data file.")
parser.add_argument(
"simulations_data_filepaths",
nargs="+",
help="Path to samples from the prior data files.")
rejection_criteria = parser.add_argument_group("Rejection Criteria")
rejection_criteria.add_argument(
"-n", "--max-num",
type=int,
metavar="#",
default=None,
help="Retain this number of samples from the prior into the posterior.")
# rejection_criteria.add_argument(
# "-p", "--max-proportion",
# type=float,
# metavar="0.##",
# default=None,
# help="Retain this proportion (0 > 'p' > 1.0) of samples from the prior into the posterior.")
rejection_criteria.add_argument(
"-d", "--max-distance",
type=float,
metavar="#.##",
default=None,
help="Retain samples this distance or lower from the prior into the posterior.")
processing_options = parser.add_argument_group("Processing Options")
processing_options.add_argument("--field-delimiter",
type=str,
default="\t",
help="Field delimiter (default: <TAB>).")
processing_options.add_argument("--stats-field-prefix",
type=str,
default="stat",
help="Prefix identifying summary statistic fields (default: '%(default)s').")
processing_options.add_argument(
"--ignore-invalid-priors-data-vectors",
dest="is_ignore_invalid_priors_data_vectors",
action="store_true",
default=False,
help="Ignore invalid vectors (in priors).")
output_options = parser.add_argument_group("Output Options")
output_options.add_argument('-o', '--output-name-prefix',
action='store',
dest='output_name_prefix',
type=str,
default=None,
metavar='NAME-PREFIX',
help="Prefix for output filenames (default: same as configuration filename stem).")
output_options.add_argument('-O', '--output-directory',
action='store',
dest='output_directory',
type=str,
default=None,
metavar='DIRECTORY',
help="Directory for output files (default: current working directory).")
output_options.add_argument('--output-suffix',
action='store',
type=str,
default=None,
metavar='NAME-SUFFIX',
help="Suffix for output filename.")
output_options.add_argument(
"--write-summary-stats",
action="store_true",
help="Include summary stats in the samples from the posterior.")
output_options.add_argument(
"--write-rejection-score",
action="store_true",
help="Include rejection score in the output.")
output_options.add_argument(
"--output-target-params",
action="store_true",
help="For each row in the target data file processed, output a file of non-summary stats fields found.")
run_options = parser.add_argument_group("Run Options")
# run_options.add_argument(
# "-L", "--large-file",
# dest="limit_memory",
# action="store_true",
# default=False,
# help="Use two-pass processing that reduces memory footprint by not requiring entire simulations/priors file(s) to be read into memory at once.")
run_options.add_argument(
"-q", "--quiet",
action="store_true",
help="Work silently.")
run_options.add_argument('--log-to-file',
action='store_true',
dest='log_to_file',
default=None,
help="Save log to file.")
args = parser.parse_args()
num_non_Nones = sum([1 for i in (args.max_num, args.max_distance) if i is not None])
if num_non_Nones == 0:
sys.exit("Require exactly one of '-n'/'--max-num', or '-d'/'--max-distance' to be specified.")
elif num_non_Nones > 1:
sys.exit("Require only one of '-n'/'--max-num', or '-d'/'--max-distance' to be specified.")
    if args.max_num is not None:
        rejection_criteria_type = "num"
        rejection_criteria_value = args.max_num
    elif args.max_distance is not None:
        rejection_criteria_type = "distance"
        rejection_criteria_value = args.max_distance
run_logger = utility.RunLogger(
name="sisterbayes-estimate",
stderr_logging_level="info",
log_to_stderr=not args.quiet,
log_to_file=args.log_to_file,
)
run_logger.info("Running: {}".format(package_id))
rejector = SisterBayesRejector(
rejection_criteria_type=rejection_criteria_type,
rejection_criteria_value=rejection_criteria_value,
run_logger=run_logger,
stats_field_prefix=args.stats_field_prefix,
field_delimiter=args.field_delimiter,
is_write_summary_stats=args.write_summary_stats,
is_write_rejection_score=args.write_rejection_score,
is_ignore_invalid_priors_data_vectors=args.is_ignore_invalid_priors_data_vectors,
is_output_target_params=args.output_target_params,
)
rejector.process(
target_data_filepath=args.target_data_filepath,
priors_data_filepaths=args.simulations_data_filepaths,
output_name_prefix=args.output_name_prefix,
output_directory=args.output_directory,
output_suffix=args.output_suffix)
if __name__ == "__main__":
main()
```
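The reasoning given in R in the comments of `SisterBayesRejector.accept_reject()` (dividing the raw Euclidean distance by the square root of the vector length to make scores comparable across vectors of different lengths) can be reproduced in a few lines of Python. This is a standalone illustration, not part of the original script.
```python
import math

def euclidean(v1, v2):
    # Raw Euclidean distance between two equal-length vectors.
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(v1, v2)))

x1, x2 = [3.1] * 3, [5.1] * 3
y1, y2 = [3.1] * 5, [5.1] * 5
print(euclidean(x1, x2))                 # ~3.4641 (3-element vectors)
print(euclidean(y1, y2))                 # ~4.4721 (5-element vectors)
print(euclidean(x1, x2) / math.sqrt(3))  # ~2.0
print(euclidean(y1, y2) / math.sqrt(5))  # ~2.0
```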
#### File: SisterBayes/bin/sisterbayes-summarize.py
```python
from __future__ import division
import math
import bisect
import csv
import os
import sys
import argparse
import collections
import re
import sisterbayes
from sisterbayes import utility
from sisterbayes import calclib
class SisterBayesSummarizer(object):
def __init__(self,
cluster_criteria=None,
cluster_criteria_value=None,
field_delimiter="\t",
exclude_field_patterns=None,
include_only_field_patterns=None,
output_name_prefix=None,
output_directory=None,
):
self.cluster_criteria = cluster_criteria
self.cluster_criteria_value = cluster_criteria_value
self.field_delimiter = field_delimiter
self.all_fieldnames = None
self.other_fieldnames = None
self.stat_fieldnames = None
self.stat_fieldnames_check = None
self.other_fieldname_check = None
self.output_name_prefix = output_name_prefix
self.output_directory = output_directory
if self.output_directory is not None:
self.output_directory = os.path.realpath(self.output_directory)
self.stat_values = []
self.other_values = []
def cluster_by_absolute_difference_threshold(self,
sp_labels,
realized_div_time_samples,
absolute_difference_threshold):
model_counter = collections.Counter()
for row_idx, realized_div_time_sample in enumerate(realized_div_time_samples):
div_time_model_desc = [None for i in sp_labels]
group_idx = 1
for sp_idx, sp_label in enumerate(sp_labels):
if sp_idx == 0:
div_time_model_desc[sp_idx] = str(group_idx)
group_idx += 1
else:
current_dt = realized_div_time_sample[sp_label]
                    for prev_sp_idx, prev_sp_label in enumerate(sp_labels[:sp_idx]):
ref_dt = realized_div_time_sample[prev_sp_label]
if abs(current_dt - ref_dt) < (absolute_difference_threshold):
div_time_model_desc[sp_idx] = div_time_model_desc[prev_sp_idx]
break
else:
div_time_model_desc[sp_idx] = str(group_idx)
group_idx += 1
model_name = "M"+"".join(div_time_model_desc)
try:
model_counter[model_name] += 1
except KeyError:
model_counter[model_name] = 1
return model_counter
# post-hoc derivation of div time model, clustering species that have
# population splits +/- t time units from each other.
def cluster_by_relative_difference_threshold(self,
sp_labels,
realized_div_time_samples,
all_div_times,
relative_difference_threshold):
v0 = min(all_div_times)
v1 = max(all_div_times)
absolute_difference_threshold = (v1 - v0) * relative_difference_threshold
return self.cluster_by_absolute_difference_threshold(
sp_labels=sp_labels,
realized_div_time_samples=realized_div_time_samples,
absolute_difference_threshold=absolute_difference_threshold)
def cluster_by_bin_size(self,
sp_labels,
realized_div_time_samples,
all_div_times,
bin_size):
bin_size = float(bin_size)
model_counter = collections.Counter()
for row_idx, realized_div_time_sample in enumerate(realized_div_time_samples):
div_time_model_desc = [None for i in sp_labels]
group_idx = 1
bin_idx_group_idx_map = {}
for sp_idx, sp_label in enumerate(sp_labels):
current_dt = realized_div_time_sample[sp_label]
assigned_bin_idx = calclib.bin_index(current_dt, bin_size)
try:
group_idx = bin_idx_group_idx_map[assigned_bin_idx]
except KeyError:
group_idx = len(bin_idx_group_idx_map) + 1
bin_idx_group_idx_map[assigned_bin_idx] = group_idx
div_time_model_desc[sp_idx] = str(group_idx)
model_name = "M"+"".join(div_time_model_desc)
try:
model_counter[model_name] += 1
except KeyError:
model_counter[model_name] = 1
return model_counter
def cluster_by_num_bins(self,
sp_labels,
realized_div_time_samples,
all_div_times,
num_bins):
v0 = min(all_div_times)
v1 = max(all_div_times)
bin_size = (v1-v0)/float(num_bins)
return self.cluster_by_bin_size(
sp_labels=sp_labels,
realized_div_time_samples=realized_div_time_samples,
all_div_times=all_div_times,
bin_size=bin_size)
def summarize(self, target_data_filepath,):
if self.output_name_prefix is None:
self.output_name_prefix = os.path.splitext(os.path.basename(target_data_filepath))[0]
if self.output_directory is None:
self.output_directory = "."
self.output_directory = os.path.realpath(self.output_directory)
output_prefix = os.path.join(self.output_directory, self.output_name_prefix)
with utility.universal_open(target_data_filepath) as src:
reader = csv.DictReader(
src,
delimiter=self.field_delimiter,
quoting=csv.QUOTE_NONE)
categorical_params = collections.OrderedDict()
continuous_params = collections.OrderedDict()
realized_div_time_samples = []
all_div_times = []
sp_labels = []
for row_idx, row in enumerate(reader):
realized_div_time_samples.append({})
for key_idx, key in enumerate(reader.fieldnames):
if key in categorical_params:
categorical_params[key][row[key]] += 1
elif key in continuous_params:
continuous_params[key].append(float(row[key]))
else:
if key in ("param.divTimeModel", "param.numDivTimes"):
val = row[key]
is_categorical = True
else:
try:
val = float(row[key])
is_categorical = False
except ValueError:
val = row[key]
is_categorical = True
if is_categorical:
categorical_params[key] = collections.Counter()
categorical_params[key][val] += 1
else:
continuous_params[key] = [val]
if key.startswith("param.divTime."):
sp_label = key.replace("param.divTime.", "")
realized_div_time_samples[-1][sp_label] = continuous_params[key][-1]
all_div_times.append(val)
if row_idx == 0:
sp_labels.append(sp_label)
### EXPERIMENTAL ###
# categorical_params["param.effectiveDivTimeModel"] = self.cluster_by_relative_difference_threshold(
# sp_labels=sp_labels,
# realized_div_time_samples=realized_div_time_samples,
# all_div_times=all_div_times,
# relative_difference_threshold=0.01)
if self.cluster_criteria is not None:
if self.cluster_criteria == "bin_size":
cluster_results = self.cluster_by_bin_size(
sp_labels=sp_labels,
realized_div_time_samples=realized_div_time_samples,
all_div_times=all_div_times,
bin_size=self.cluster_criteria_value)
elif self.cluster_criteria == "num_bins":
cluster_results = self.cluster_by_num_bins(
sp_labels=sp_labels,
realized_div_time_samples=realized_div_time_samples,
all_div_times=all_div_times,
num_bins=self.cluster_criteria_value)
elif self.cluster_criteria == "absolute_difference_threshold":
cluster_results = self.cluster_by_absolute_difference_threshold(
sp_labels=sp_labels,
realized_div_time_samples=realized_div_time_samples,
absolute_difference_threshold=self.cluster_criteria_value)
elif self.cluster_criteria == "relative_difference_threshold":
cluster_results = self.cluster_by_relative_difference_threshold(
sp_labels=sp_labels,
realized_div_time_samples=realized_div_time_samples,
all_div_times=all_div_times,
relative_difference_threshold=self.cluster_criteria_value)
else:
raise ValueError("Unrecognized cluster criteria: '{}'".format(self.cluster_criteria))
categorical_params["param.effectiveDivTimeModel"] = cluster_results
### EXPERIMENTAL ###
with utility.universal_open(output_prefix + ".summary.continuous.tsv", "w") as dest:
row_results = collections.OrderedDict()
for param_idx, param_name in enumerate(continuous_params):
values = continuous_params[param_name]
row_results["param"] = param_name
summary = calclib.summarize(values)
row_results["mean"] = summary["mean"]
row_results["var"] = summary["var"]
row_results["sd"] = summary["sd"]
row_results["min"] = summary["range"][0]
row_results["max"] = summary["range"][1]
try:
row_results["hpd5"] = summary["hpd95"][0]
row_results["hpd95"] = summary["hpd95"][1]
except TypeError:
row_results["hpd5"] = "NA"
row_results["hpd95"] = "NA"
try:
row_results["quant5"] = summary["quant_5_95"][0]
row_results["quant95"] = summary["quant_5_95"][1]
except TypeError:
row_results["quant5"] = "NA"
row_results["quant95"] = "NA"
if param_idx == 0:
dest.write(self.field_delimiter.join(row_results.keys()) + "\n")
dest.write(self.field_delimiter.join("{}".format(v) for v in row_results.values()) + "\n")
for param_idx, param_name in enumerate(categorical_params):
with utility.universal_open(output_prefix + ".summary.{:02d}.{}.tsv".format(param_idx+1, param_name), "w") as dest:
param_counter = categorical_params[param_name]
total = float(sum(param_counter.values()))
for category_idx, (category_name, category_count) in enumerate(param_counter.most_common()):
row_results = collections.OrderedDict()
row_results["label"] = category_name
row_results["freq"] = category_count/total
row_results["count"] = category_count
if category_idx == 0:
dest.write(self.field_delimiter.join(row_results.keys()) + "\n")
dest.write(self.field_delimiter.join("{}".format(v) for v in row_results.values()) + "\n")
def main():
parser = argparse.ArgumentParser(
description="SISTERBAYES Summarizer",
)
parser.add_argument(
"posteriors_filepath",
help="Path to posteriors parameter file.")
package_id = sisterbayes.package_id()
parser.add_argument("--version", action="version", version=package_id)
clustering_options = parser.add_argument_group("Clustering Options",
"Calculate an 'effective' divergence time model based on clustering lineage pairs into simultaneous diverging groups using actual divergence times rather than labeled generating model.")
# clustering_options = parser.add_mutually_exclusive_group(required=True)
clustering_options.add_argument("--bin-size",
type=float,
default=None,
help="Cluster using divergence times bins of the specified size.")
clustering_options.add_argument("--num-bins",
type=float,
default=None,
help="Cluster by splitting the range of divergence times uniformly into the specified number of bins.")
clustering_options.add_argument("--absolute-difference-threshold",
type=float,
default=None,
help="Cluster by grouping together divergence times less than the specified threshold.")
clustering_options.add_argument("--relative-difference-threshold",
type=float,
default=None,
help="Cluster by grouping together divergence times less than the specified threshold, expressed as a proportion of the range of divergence times.")
processing_options = parser.add_argument_group("Processing Options")
processing_options.add_argument("--field-delimiter",
type=str,
default="\t",
help="Field delimiter (default: <TAB>).")
output_options = parser.add_argument_group("Output Options")
output_options.add_argument('-o', '--output-name-prefix',
action='store',
dest='output_name_prefix',
type=str,
default=None,
metavar='NAME-PREFIX',
help="Prefix for output filenames (default: same as configuration filename stem).")
output_options.add_argument('-O', '--output-directory',
action='store',
dest='output_directory',
type=str,
default=None,
metavar='DIRECTORY',
help="Directory for output files (default: current working directory).")
run_options = parser.add_argument_group("Run Options")
run_options.add_argument(
"-q", "--quiet",
action="store_true",
help="Work silently.")
args = parser.parse_args()
num_selected_clustering_options = 0
cluster_criteria = None
cluster_criteria_value = None
for a in ("bin_size", "num_bins", "absolute_difference_threshold", "relative_difference_threshold"):
if getattr(args, a) is not None:
num_selected_clustering_options += 1
cluster_criteria = a
cluster_criteria_value = getattr(args, a)
if num_selected_clustering_options > 1:
sys.exit("Multiple clustering strategies selected: plese select just one")
summarizer = SisterBayesSummarizer(
cluster_criteria=cluster_criteria,
cluster_criteria_value=cluster_criteria_value,
field_delimiter=args.field_delimiter,
output_name_prefix=args.output_name_prefix,
output_directory=args.output_directory,
)
summarizer.summarize(args.posteriors_filepath)
if __name__ == "__main__":
main()
```
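The bin-based clustering above can be driven directly, outside the command-line wrapper. The sketch below is illustrative only: it assumes the sisterbayes package (and hence `calclib.bin_index`) is installed and that `SisterBayesSummarizer` is in scope; the lineage-pair labels, divergence times, and bin size are made up.
```python
summarizer = SisterBayesSummarizer()
sp_labels = ["pair1", "pair2", "pair3"]
samples = [
    {"pair1": 0.10, "pair2": 0.11, "pair3": 0.90},
    {"pair1": 0.50, "pair2": 0.52, "pair3": 0.51},
]
all_div_times = [t for row in samples for t in row.values()]
model_counts = summarizer.cluster_by_bin_size(
    sp_labels=sp_labels,
    realized_div_time_samples=samples,
    all_div_times=all_div_times,
    bin_size=0.05)
# Counter mapping divergence-time model labels (e.g. "M112") to sample counts.
print(model_counts)
```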
#### File: SisterBayes/tests/test_binning.py
```python
import unittest
import bisect
from sisterbayes import calclib
class BisectTestCase(unittest.TestCase):
def validateBinIndex(self, value, bin_size):
obs = calclib.bin_index(value, bin_size)
factor = 1e6
check_array = [i/float(factor) for i in range(0, int(value * 2 * factor), int(bin_size * factor))]
self.assertEqual(bisect.bisect_left(check_array, value), obs)
def test_int(self):
for value in [3, 4, 7, 9, 17, 1, 0, 15, 2012, 23, 1, 83]:
for bin_size in [1,2,3,4,5,7,11,13,100]:
self.validateBinIndex(value, bin_size)
def test_float(self):
for value in [3.14, 0.04, 0.12, 9.112, 0.0017, 0.00511, 0.12, 15.173741, 2.18182, 0.123, 0.101, 0.00283]:
for bin_size in [0.1, 0.001, 0.5, 1.0, 0.015, 0.13, 0.00001]:
self.validateBinIndex(value, bin_size)
if __name__ == "__main__":
unittest.main()
```
|
{
"source": "jeetsukumaran/spdw",
"score": 2
}
|
#### File: spdw/bin/spdw-evaluate-delineate-jobs.py
```python
import math
import os
import collections
import json
import sys
import argparse
import inspect
import itertools
import io
import dendropy
class Partition(object):
@staticmethod
def build_species_leafsets(d):
if d is None:
return None
return frozenset(frozenset(s) for s in d)
def __init__(self, **kwargs):
self.part_id = kwargs.get("id", None)
self.log_probability = kwargs.get("log_probability", None)
self.probability = kwargs.get("probability", None)
self.probability_given_constraints = kwargs.get("probability_given_constraints", None)
self.log_probability_given_constraints = kwargs.get("log_probability_given_constraints", None)
self.cumulative_probability = kwargs.get("cumulative_probability", None)
self.cumulative_probability_given_constraints = kwargs.get("cumulative_probability_given_constraints", None)
self.is_in_confidence_interval = kwargs.get("is_in_confidence_interval", None)
self.species_leafsets = Partition.build_species_leafsets(kwargs.get("species_leafsets", None))
def is_conspecific(self, lineage1, lineage2):
for sp in self.species_leafsets:
if lineage1 in sp and lineage2 in sp:
return True
return False
def __len__(self):
return len(self.species_leafsets)
class Evaluator(object):
def __init__(self):
pass
def execute(self, args):
self.config_path = args.configuration_filepath
if not self.config_path:
sys.exit("Path to configuration file needs to be specified")
self.analysis_results_filepath = args.analysis_results_filepath
if not self.analysis_results_filepath:
sys.exit("Path to analysis results file needs to be specified")
self.report_dest = sys.stdout
self.load_data()
perf_data_rows = []
if args.is_evaluate_marginal:
if args.lineage_tree_filepath is None:
sys.exit("Require path to lineage tree filepath to analyze marginal probabilities")
with open(os.path.expandvars(os.path.expanduser(args.lineage_tree_filepath))) as src:
self.read_lineage_tree(src, schema="nexus")
self.load_estimated_partitions()
for lineage_pair in self.all_distinct_pairs_of_unconstrained_lineages():
perf_data = collections.OrderedDict()
self.store_basic_features(perf_data)
perf_data.update(self.calc_lineage_pair_features(*lineage_pair))
results = self.calc_marginal_probability_of_conspecificity(*lineage_pair)
for key in [
"lineage_pair_is_true_conspecific",
"lineage_pair_conspecificity_marginal_probability",
"lineage_pair_conspecificity_marginal_probability_given_constraints",
"lineage_pair_nonconspecificity_marginal_probability",
"lineage_pair_nonconspecificity_marginal_probability_given_constraints",
]:
perf_data[key] = results[key]
perf_data_rows.append(perf_data)
else:
perf_data = collections.OrderedDict()
self.store_basic_features(perf_data)
self.standard_performance_assessment(perf_data)
perf_data_rows.append(perf_data)
assert perf_data_rows
self.report(perf_data_rows)
def load_data(self):
with open(self.config_path) as src:
self.config = json.load(src)
with open(self.analysis_results_filepath) as src:
self.estimation_results = json.load(src)
self.set_true_partition(species_leafsets=self.config["test_info"]["true_species_leafsets"])
def set_true_partition(self, species_leafsets):
self.true_partition = Partition(species_leafsets=species_leafsets)
def load_estimated_partitions(self):
self.partitions = [Partition(**p) for p in self.estimation_results["partitions"]]
return self.partitions
def read_lineage_tree(self, src, schema="nexus"):
self.set_lineage_tree(
file=src,
schema=schema,
rooting="force-rooted")
def set_lineage_tree(self, **kwargs):
self.lineage_tree = dendropy.Tree.get(**kwargs)
self.lineage_tree.encode_bipartitions()
self.lineage_tree.calc_node_ages()
# self.lineage_tree_label_node_map = {taxon.label:taxon for taxon in self.lineage_tree.taxon_namespace}
self.lineage_tree_label_node_map = {nd.taxon.label:nd for nd in self.lineage_tree.leaf_node_iter()}
self.phylogenetic_distance_matrix = self.lineage_tree.phylogenetic_distance_matrix(is_store_path_edges=False)
def all_distinct_pairs_of_unconstrained_lineages(self):
unconstrained_lineages = self.config["test_info"]["unconstrained_lineages"]
for x in itertools.combinations(unconstrained_lineages, 2):
yield x
def is_span_root(self, lineage1, lineage2):
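        # True if the two lineages fall on opposite sides of the root split,
        # i.e., their MRCA is the root of the lineage tree.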
n1 = self.lineage_tree_label_node_map[lineage1]
n2 = self.lineage_tree_label_node_map[lineage2]
assert n1 is not n2
r_left, r_right = self.lineage_tree.seed_node._child_nodes
if n1.bipartition.is_nested_within(r_left.bipartition):
return n2.bipartition.is_nested_within(r_right.bipartition)
else:
return n2.bipartition.is_nested_within(r_left.bipartition)
def calc_lineage_pair_features(self, lineage1, lineage2):
result = {}
result["lineage_pair_unnormalized_weighted_distance"] = self.phylogenetic_distance_matrix.distance(
self.lineage_tree_label_node_map[lineage1].taxon,
self.lineage_tree_label_node_map[lineage2].taxon,
is_weighted_edge_distances=True,
is_normalize_by_tree_size=False)
result["lineage_pair_normalized_weighted_distance"] = self.phylogenetic_distance_matrix.distance(
self.lineage_tree_label_node_map[lineage1].taxon,
self.lineage_tree_label_node_map[lineage2].taxon,
is_weighted_edge_distances=True,
is_normalize_by_tree_size=True)
result["lineage_pair_unnormalized_unweighted_distance"] = self.phylogenetic_distance_matrix.distance(
self.lineage_tree_label_node_map[lineage1].taxon,
self.lineage_tree_label_node_map[lineage2].taxon,
is_weighted_edge_distances=False,
is_normalize_by_tree_size=False)
result["lineage_pair_normalized_unweighted_distance"] = self.phylogenetic_distance_matrix.distance(
self.lineage_tree_label_node_map[lineage1].taxon,
self.lineage_tree_label_node_map[lineage2].taxon,
is_weighted_edge_distances=False,
is_normalize_by_tree_size=True)
mrca = self.phylogenetic_distance_matrix.mrca(
self.lineage_tree_label_node_map[lineage1].taxon,
self.lineage_tree_label_node_map[lineage2].taxon)
result["lineage_pair_mrca_age"] = mrca.age
result["lineage_pair_is_span_root"] = self.is_span_root(lineage1, lineage2)
return result
def calc_marginal_probability_of_conspecificity(self, lineage1, lineage2):
results = {
"lineage_pair_conspecificity_marginal_probability": 0.0,
"lineage_pair_conspecificity_marginal_probability_given_constraints": 0.0,
"lineage_pair_nonconspecificity_marginal_probability": 0.0,
"lineage_pair_nonconspecificity_marginal_probability_given_constraints": 0.0,
"lineage_pair_conspecific_partitions": [],
}
for partition in self.partitions:
if partition.is_conspecific(lineage1, lineage2):
results["lineage_pair_conspecific_partitions"].append(partition)
results["lineage_pair_conspecificity_marginal_probability"] += partition.probability
results["lineage_pair_conspecificity_marginal_probability_given_constraints"] += partition.probability_given_constraints
else:
results["lineage_pair_nonconspecificity_marginal_probability"] += partition.probability
results["lineage_pair_nonconspecificity_marginal_probability_given_constraints"] += partition.probability_given_constraints
results["lineage_pair_is_true_conspecific"] = self.true_partition.is_conspecific(lineage1, lineage2)
return results
def store_basic_features(self, perf_data):
perf_data["batch_id"] = self.estimation_results["batch_id"]
perf_data["root_age"] = self.estimation_results["root_age"]
perf_data["num_tips"] = self.estimation_results["num_tips"]
perf_data["total_num_partitions"] = self.estimation_results["num_partitions"]
perf_data["true_speciation_completion_rate"] = self.config["test_info"]["true_speciation_completion_rate"]
perf_data["true_num_species"] = len(self.true_partition)
perf_data["num_constrained_species"] = self.config["test_info"]["species_partition_estimation_num_constrained_species"] # number of species defined (may not include all lineages)
perf_data["num_constrained_lineages"] = self.config["test_info"]["species_partition_estimation_num_constrained_lineages"] # number of lineages assigned to species
perf_data["num_unconstrained_lineages"] = self.config["test_info"]["species_partition_estimation_num_unconstrained_lineages"] # number of lineages not assigned to species
perf_data["estimated_speciation_completion_rate"] = self.estimation_results["speciation_completion_rate"]
perf_data["speciation_completion_rate_source"] = self.estimation_results["speciation_completion_rate_source"]
def standard_performance_assessment(self, perf_data):
perf_data["total_num_partitions_in_confidence_interval"] = self.estimation_results["num_partitions_in_confidence_interval"]
perf_data["inferred_partition_num_species"] = len(self.estimation_results["partitions"][0]["species_leafsets"])
# perf_data["inferred_partition_log_probability"] = self.estimation_results["partitions"][0]["log_probability"]
perf_data["inferred_partition_probability"] = self.estimation_results["partitions"][0]["probability"]
# perf_data["inferred_partition_log_probability_given_constraints"] = self.estimation_results["partitions"][0]["log_probability_given_constraints"]
perf_data["inferred_partition_probability_given_constraints"] = self.estimation_results["partitions"][0]["probability_given_constraints"]
for partition_idx, partition_info in enumerate(self.estimation_results["partitions"]):
current_partition = Partition(**partition_info)
if current_partition.species_leafsets == self.true_partition.species_leafsets:
# perf_data["true_partition_log_probability"] = current_partition.log_probability
perf_data["true_partition_probability"] = current_partition.probability
perf_data["true_partition_cumulative_probability"] = current_partition.cumulative_probability
# perf_data["true_partition_log_probability_given_constraints"] = current_partition.log_probability_given_constraints
perf_data["true_partition_probability_given_constraints"] = current_partition.probability_given_constraints
perf_data["true_partition_cumulative_probability_given_constraints"] = current_partition.cumulative_probability_given_constraints
if partition_idx == 0:
perf_data["is_true_partition_preferred"] = True
else:
perf_data["is_true_partition_preferred"] = False
perf_data["is_true_partition_in_confidence_interval"] = current_partition.is_in_confidence_interval
break
else:
raise ValueError("True partition not found in results")
return perf_data
def report(self, perf_data_rows):
# json.dump(perf_data, sys.stdout, indent=4, separators=(',', ': '))
delimiter = "\t"
self.report_dest.write(delimiter.join(perf_data_rows[0].keys()))
self.report_dest.write("\n")
for perf_data in perf_data_rows:
value_row = []
for idx, v in enumerate(perf_data.values()):
if isinstance(v, bool):
value_row.append(str(v).upper()) # for R
else:
value_row.append(str(v))
self.report_dest.write(delimiter.join(value_row))
self.report_dest.write("\n")
class TestRunner(object):
def __init__(self):
self.test_log = lambda msg: sys.stdout.write("-[{}]: {}\n".format(inspect.stack()[1][3], msg))
        self.test_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "_test_data")
def run_tests(self):
self.test_is_conspecific()
self.test_marginal_probability_of_conspecificity()
self.test_is_span_root()
self.test_lineage_pair_distances()
self.test_all_distinct_pairs_of_unconstrained_lineages()
def test_is_conspecific(self):
d = {
"species_leafsets": [
["a", "b", "c"],
["d", "e", "f"],
["g"],
["h"],
],
}
partition = Partition(**d)
assert partition.is_conspecific("a", "b")
assert partition.is_conspecific("a", "c")
assert not partition.is_conspecific("a", "d")
assert not partition.is_conspecific("a", "e")
assert not partition.is_conspecific("a", "f")
assert not partition.is_conspecific("a", "g")
assert not partition.is_conspecific("a", "h")
assert partition.is_conspecific("b", "a")
assert partition.is_conspecific("b", "c")
assert not partition.is_conspecific("b", "d")
assert not partition.is_conspecific("b", "e")
assert not partition.is_conspecific("b", "f")
assert not partition.is_conspecific("b", "g")
assert not partition.is_conspecific("b", "h")
assert partition.is_conspecific("c", "a")
assert partition.is_conspecific("c", "b")
assert not partition.is_conspecific("c", "d")
assert not partition.is_conspecific("c", "e")
assert not partition.is_conspecific("c", "f")
assert not partition.is_conspecific("c", "g")
assert not partition.is_conspecific("c", "h")
assert not partition.is_conspecific("d", "a")
assert not partition.is_conspecific("d", "b")
assert not partition.is_conspecific("d", "c")
assert partition.is_conspecific("d", "e")
assert partition.is_conspecific("d", "f")
assert not partition.is_conspecific("d", "g")
assert not partition.is_conspecific("d", "h")
assert not partition.is_conspecific("e", "a")
assert not partition.is_conspecific("e", "b")
assert not partition.is_conspecific("e", "c")
assert partition.is_conspecific("e", "d")
assert partition.is_conspecific("e", "f")
assert not partition.is_conspecific("e", "g")
assert not partition.is_conspecific("e", "h")
assert not partition.is_conspecific("f", "a")
assert not partition.is_conspecific("f", "b")
assert not partition.is_conspecific("f", "c")
assert partition.is_conspecific("f", "d")
assert partition.is_conspecific("f", "e")
assert not partition.is_conspecific("f", "g")
assert not partition.is_conspecific("f", "h")
assert not partition.is_conspecific("g", "a")
assert not partition.is_conspecific("g", "b")
assert not partition.is_conspecific("g", "c")
assert not partition.is_conspecific("g", "d")
assert not partition.is_conspecific("g", "e")
assert not partition.is_conspecific("g", "f")
assert not partition.is_conspecific("g", "h")
assert not partition.is_conspecific("h", "a")
assert not partition.is_conspecific("h", "b")
assert not partition.is_conspecific("h", "c")
assert not partition.is_conspecific("h", "d")
assert not partition.is_conspecific("h", "e")
assert not partition.is_conspecific("h", "f")
assert not partition.is_conspecific("h", "g")
self.test_log("OK")
def test_is_span_root(self):
tree_src = io.StringIO("(((a:1.25, b:1.25):1.25, c:2.5):1.5, (d:2.25, (e:0.5,f:0.5):1.75):1.75):2.5;")
ev = Evaluator()
ev.read_lineage_tree(src=tree_src, schema="newick")
expected = {
"ab": False,
"ac": False,
"ad": True,
"ae": True,
"af": True,
"ba": False,
"bc": False,
"bd": True,
"be": True,
"bf": True,
"ca": False,
"cb": False,
"cd": True,
"ce": True,
"cf": True,
"da": True,
"db": True,
"dc": True,
"de": False,
"df": False,
"ea": True,
"eb": True,
"ec": True,
"ed": False,
"ef": False,
"fa": True,
"fb": True,
"fc": True,
"fd": False,
"fe": False,
}
for k, val in expected.items():
assert ev.is_span_root(k[0], k[1]) is val
self.test_log("OK")
def test_lineage_pair_distances(self):
ev = Evaluator()
tree_src = io.StringIO("(((a:1.25, b:1.25):1.25, c:2.5):1.5, (d:2.25, (e:0.5,f:0.5):1.75):1.75):2.5;")
ev.read_lineage_tree(src=tree_src, schema="newick")
expected = {
frozenset({'a', 'b'}): {'lineage_pair_unnormalized_weighted_distance': 2.5, 'lineage_pair_normalized_weighted_distance': 0.14705882352941177, 'lineage_pair_unnormalized_unweighted_distance': 2, 'lineage_pair_normalized_unweighted_distance': 0.18181818181818182, 'lineage_pair_mrca_age': 1.25, 'lineage_pair_is_span_root': False},
frozenset({'a', 'c'}): {'lineage_pair_unnormalized_weighted_distance': 5.0, 'lineage_pair_normalized_weighted_distance': 0.29411764705882354, 'lineage_pair_unnormalized_unweighted_distance': 3, 'lineage_pair_normalized_unweighted_distance': 0.2727272727272727, 'lineage_pair_mrca_age': 2.5, 'lineage_pair_is_span_root': False},
frozenset({'a', 'd'}): {'lineage_pair_unnormalized_weighted_distance': 8.0, 'lineage_pair_normalized_weighted_distance': 0.47058823529411764, 'lineage_pair_unnormalized_unweighted_distance': 5, 'lineage_pair_normalized_unweighted_distance': 0.45454545454545453, 'lineage_pair_mrca_age': 4.0, 'lineage_pair_is_span_root': True},
frozenset({'a', 'e'}): {'lineage_pair_unnormalized_weighted_distance': 8.0, 'lineage_pair_normalized_weighted_distance': 0.47058823529411764, 'lineage_pair_unnormalized_unweighted_distance': 6, 'lineage_pair_normalized_unweighted_distance': 0.5454545454545454, 'lineage_pair_mrca_age': 4.0, 'lineage_pair_is_span_root': True},
frozenset({'a', 'f'}): {'lineage_pair_unnormalized_weighted_distance': 8.0, 'lineage_pair_normalized_weighted_distance': 0.47058823529411764, 'lineage_pair_unnormalized_unweighted_distance': 6, 'lineage_pair_normalized_unweighted_distance': 0.5454545454545454, 'lineage_pair_mrca_age': 4.0, 'lineage_pair_is_span_root': True},
frozenset({'b', 'c'}): {'lineage_pair_unnormalized_weighted_distance': 5.0, 'lineage_pair_normalized_weighted_distance': 0.29411764705882354, 'lineage_pair_unnormalized_unweighted_distance': 3, 'lineage_pair_normalized_unweighted_distance': 0.2727272727272727, 'lineage_pair_mrca_age': 2.5, 'lineage_pair_is_span_root': False},
frozenset({'b', 'd'}): {'lineage_pair_unnormalized_weighted_distance': 8.0, 'lineage_pair_normalized_weighted_distance': 0.47058823529411764, 'lineage_pair_unnormalized_unweighted_distance': 5, 'lineage_pair_normalized_unweighted_distance': 0.45454545454545453, 'lineage_pair_mrca_age': 4.0, 'lineage_pair_is_span_root': True},
frozenset({'b', 'e'}): {'lineage_pair_unnormalized_weighted_distance': 8.0, 'lineage_pair_normalized_weighted_distance': 0.47058823529411764, 'lineage_pair_unnormalized_unweighted_distance': 6, 'lineage_pair_normalized_unweighted_distance': 0.5454545454545454, 'lineage_pair_mrca_age': 4.0, 'lineage_pair_is_span_root': True},
frozenset({'b', 'f'}): {'lineage_pair_unnormalized_weighted_distance': 8.0, 'lineage_pair_normalized_weighted_distance': 0.47058823529411764, 'lineage_pair_unnormalized_unweighted_distance': 6, 'lineage_pair_normalized_unweighted_distance': 0.5454545454545454, 'lineage_pair_mrca_age': 4.0, 'lineage_pair_is_span_root': True},
frozenset({'c', 'd'}): {'lineage_pair_unnormalized_weighted_distance': 8.0, 'lineage_pair_normalized_weighted_distance': 0.47058823529411764, 'lineage_pair_unnormalized_unweighted_distance': 4, 'lineage_pair_normalized_unweighted_distance': 0.36363636363636365, 'lineage_pair_mrca_age': 4.0, 'lineage_pair_is_span_root': True},
frozenset({'c', 'e'}): {'lineage_pair_unnormalized_weighted_distance': 8.0, 'lineage_pair_normalized_weighted_distance': 0.47058823529411764, 'lineage_pair_unnormalized_unweighted_distance': 5, 'lineage_pair_normalized_unweighted_distance': 0.45454545454545453, 'lineage_pair_mrca_age': 4.0, 'lineage_pair_is_span_root': True},
frozenset({'f', 'c'}): {'lineage_pair_unnormalized_weighted_distance': 8.0, 'lineage_pair_normalized_weighted_distance': 0.47058823529411764, 'lineage_pair_unnormalized_unweighted_distance': 5, 'lineage_pair_normalized_unweighted_distance': 0.45454545454545453, 'lineage_pair_mrca_age': 4.0, 'lineage_pair_is_span_root': True},
frozenset({'e', 'd'}): {'lineage_pair_unnormalized_weighted_distance': 4.5, 'lineage_pair_normalized_weighted_distance': 0.2647058823529412, 'lineage_pair_unnormalized_unweighted_distance': 3, 'lineage_pair_normalized_unweighted_distance': 0.2727272727272727, 'lineage_pair_mrca_age': 2.25, 'lineage_pair_is_span_root': False},
frozenset({'f', 'd'}): {'lineage_pair_unnormalized_weighted_distance': 4.5, 'lineage_pair_normalized_weighted_distance': 0.2647058823529412, 'lineage_pair_unnormalized_unweighted_distance': 3, 'lineage_pair_normalized_unweighted_distance': 0.2727272727272727, 'lineage_pair_mrca_age': 2.25, 'lineage_pair_is_span_root': False},
frozenset({'f', 'e'}): {'lineage_pair_unnormalized_weighted_distance': 1.0, 'lineage_pair_normalized_weighted_distance': 0.058823529411764705, 'lineage_pair_unnormalized_unweighted_distance': 2, 'lineage_pair_normalized_unweighted_distance': 0.18181818181818182, 'lineage_pair_mrca_age': 0.5, 'lineage_pair_is_span_root': False},
}
for lineage1, lineage2 in itertools.combinations("abcdef", 2):
d = ev.calc_lineage_pair_features(lineage1, lineage2)
key = frozenset([lineage1,lineage2])
# print("{}: {}".format(key, d))
for field in d:
assert expected[key][field] == d[field]
self.test_log("OK")
def test_marginal_probability_of_conspecificity(self):
results_d = {
"partitions": [
{ "id": 0, "conspecifics": set(["ab", "cd"]), "species_leafsets": [["a", "b"], ["c", "d"], ["e"]], "probability": 2, "probability_given_constraints": 3, }, # ab, cd
{ "id": 1, "conspecifics": set(["ab", ]), "species_leafsets": [["a", "b", "c",], ["d"], ["e"]], "probability": 4, "probability_given_constraints": 5, }, # ab
{ "id": 2, "conspecifics": set(["ab", "cd"]), "species_leafsets": [["a", "b", "c", "d"], ["e"]], "probability": 6, "probability_given_constraints": 7, }, # ab, cd
{ "id": 3, "conspecifics": set(["ab", "cd"]), "species_leafsets": [["a", "b", "e"], ["c", "d"]], "probability": 8, "probability_given_constraints": 9, }, # ab, cd
{ "id": 4, "conspecifics": set(["ab", "cd"]), "species_leafsets": [["a", "b", "c", "d"], ["e"]], "probability": 10, "probability_given_constraints": 11, }, # ab, cd
{ "id": 5, "conspecifics": set([ "cd"]), "species_leafsets": [["a", "e"], ["c", "d"], ["b"]], "probability": 12, "probability_given_constraints": 13, }, # cd
{ "id": 6, "conspecifics": set([ "cd"]), "species_leafsets": [["a"], ["b", "c", "d"], ["e"]], "probability": 14, "probability_given_constraints": 15, }, # cd
{ "id": 7, "conspecifics": set([ ]), "species_leafsets": [["a", "e", "d"], ["b", "c"]], "probability": 16, "probability_given_constraints": 17, },
{ "id": 8, "conspecifics": set([ ]), "species_leafsets": [["a", "c"], ["b", "d"], ["e"]], "probability": 18, "probability_given_constraints": 19, },
{ "id": 9, "conspecifics": set(["ab" ]), "species_leafsets": [["b", "d"], ["b", "c", "a"]], "probability": 20, "probability_given_constraints": 21, }, # ab
]
}
id_partition_map = { p["id"]: p for p in results_d["partitions"] }
pair_keys = [
"ab",
"cd",
"ec",
]
ev = Evaluator()
ev.estimation_results = results_d
# true conspecifics out of pair keys == "ab" only
# ev.set_true_partition([["a", "b", "c",], ["d"], ["e"]])
ev.set_true_partition(id_partition_map[1]["species_leafsets"])
true_conspecifics = set(["ab",])
ev.load_estimated_partitions()
for pk in pair_keys:
partition_ids = [pid for pid in id_partition_map if pk in id_partition_map[pid]["conspecifics"]]
marginal_probability = sum([ id_partition_map[pid]["probability"] for pid in partition_ids ])
marginal_probability_given_constraints = sum([ id_partition_map[pid]["probability_given_constraints"] for pid in partition_ids ])
r = ev.calc_marginal_probability_of_conspecificity(pk[0], pk[1])
obs_pids = [p.part_id for p in r["lineage_pair_conspecific_partitions"]]
assert set(obs_pids) == set(partition_ids)
assert abs(marginal_probability - r["lineage_pair_conspecificity_marginal_probability"]) <= 1e-6
assert abs(marginal_probability_given_constraints - r["lineage_pair_conspecificity_marginal_probability_given_constraints"]) <= 1e-6
assert (pk in true_conspecifics) is (r["lineage_pair_is_true_conspecific"])
self.test_log("OK")
def test_all_distinct_pairs_of_unconstrained_lineages(self):
config_d = {
"test_info": {
"unconstrained_lineages": [
"S1.25",
"S1.29",
"S1.31",
"S1.48",
"S2.39"
],
}
}
expected = set({
frozenset({'S1.25', 'S1.29'}),
frozenset({'S1.31', 'S1.25'}),
frozenset({'S1.25', 'S1.48'}),
frozenset({'S2.39', 'S1.25'}),
frozenset({'S1.31', 'S1.29'}),
frozenset({'S1.29', 'S1.48'}),
frozenset({'S2.39', 'S1.29'}),
frozenset({'S1.31', 'S1.48'}),
frozenset({'S2.39', 'S1.31'}),
frozenset({'S2.39', 'S1.48'}),
})
ev = Evaluator()
ev.config = config_d
observed = set()
for p in ev.all_distinct_pairs_of_unconstrained_lineages():
observed.add(frozenset(p))
assert expected == observed
self.test_log("OK")
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"configuration_filepath",
metavar="CONFIGURATION-FILEPATH",
nargs="?",
help="Path to analysis configuration file (JSON format).")
parser.add_argument(
"analysis_results_filepath",
metavar="ANALYSIS-RESULTS-FILEPATH",
nargs="?",
help="Path to analysis results file (JSON format).")
parser.add_argument(
"-m", "--marginal",
action="store_true",
dest="is_evaluate_marginal",
default=False,
help="Evaluate marginal probabilities of conspecificity of lineage pairs.")
parser.add_argument(
"-t", "--lineage-tree-filepath",
metavar="TREE-FILEPATH",
default=None,
help="Path to file with lineage tree.")
parser.add_argument(
"--test",
action="store_true",
dest="is_run_tests",
default=False,
help="Run tests.")
args = parser.parse_args()
if args.is_run_tests:
test_runner = TestRunner()
test_runner.run_tests()
else:
ev = Evaluator()
ev.execute(args)
if __name__ == "__main__":
main()
```
#### File: spdw/bin/spdw-fix-bpp-traces.py
```python
import sys
import os
import argparse
__prog__ = os.path.basename(__file__)
__version__ = "1.0.0"
__description__ = __doc__
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2019 <NAME>.'
def get_max_cols(src_path):
max_cols = 0
with open(src_path) as src:
for row in src:
row = row.strip()
cols = row.split("\t")
if len(cols) > max_cols:
max_cols = len(cols)
return max_cols
def main():
"""
Main CLI handler.
"""
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument("input_files",
action="store",
type=str,
nargs="+",
help="Path to BPP 'mcmc.txt' files")
args = parser.parse_args()
for src_path in args.input_files:
output_path = src_path + ".traces"
out = open(output_path, "w")
max_cols = get_max_cols(src_path)
with open(src_path) as src:
first_row = src.readline()
col_names = first_row.strip().split("\t")
for new_col_idx in range(max_cols - len(col_names)):
col_names.insert(-2, "col{}".format(new_col_idx+1))
out.write("\t".join(col_names))
out.write("\n")
for row in src:
row = row.strip()
cols = row.split("\t")
for new_col_idx in range(max_cols - len(cols)):
cols.insert(-2, "0.0000")
new_row = "\t".join(cols)
out.write("{}".format(new_row))
out.write("\n")
if __name__ == '__main__':
main()
```
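The padding rule above inserts filler columns just before the last two fields of each short row. A small standalone sketch (the sample row and column count are made up) shows the effect:

```python
# Standalone illustration of the padding rule used above; the row contents and
# max_cols value are hypothetical.
row = "Gen\ttheta1\ttheta2\tlnL"   # a 4-column row
max_cols = 6                        # widest row seen in the file
cols = row.strip().split("\t")
for new_col_idx in range(max_cols - len(cols)):
    cols.insert(-2, "col{}".format(new_col_idx + 1))
print("\t".join(cols))              # Gen  theta1  col1  col2  theta2  lnL
```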
#### File: spdw/bin/spdw-plotcoaltimes.py
```python
import sys
import os
import random
import argparse
import dendropy
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import scipy
from scipy import stats
from scipy.optimize import curve_fit
import spdw
def fit_exponential(df):
def func(x, a, b, c):
return a * np.exp(-b * x) + c
x = df["coalescent_event_idx"]
y = df["waiting_time"]
yn = y + 0.2*np.random.normal(size=len(x))
popt, pcov = curve_fit(func, x, yn)
plt.figure()
plt.plot(x, yn, 'ko', label="Original Noised Data")
plt.plot(x, func(x, *popt), 'r-', label="Fitted Curve")
plt.legend()
plt.show()
__prog__ = os.path.basename(__file__)
__version__ = "1.0.0"
__description__ = __doc__
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2019 <NAME>.'
def main():
"""
Main CLI handler.
"""
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument("--version", action="version", version="%(prog)s " + __version__)
parser.add_argument("tree_files",
action="store",
type=str,
nargs="+",
metavar="TREEFILE",
help="Path to tree files (default: read from standard input).")
parser.add_argument("-f", "--input-format",
type=str,
default="newick",
choices=["nexus", "newick"],
help="Input data format (default='%(default)s')")
args = parser.parse_args()
args.output_prefix = None
args.show_plot_on_screen = True
fig, ax = plt.subplots()
for src_idx, src_path in enumerate(args.tree_files):
if src_path == "-":
src = sys.stdin
else:
src = open(src_path)
try:
src_id = src.name
except AttributeError:
src_id = "<stdin>"
with src:
data = []
for tree in dendropy.Tree.yield_from_files(
files=[src],
schema=args.input_format):
ages = tree.calc_node_ages(is_return_internal_node_ages_only=True)
coalescence_events = sorted([nd for nd in tree if not nd.is_leaf()],
key=lambda nd:nd.age,
reverse=True)
num_genes = len(coalescence_events) + 1
# assert num_genes == len(tree.taxon_namespace)
previous_age = 0.0
coalescent_event_idx = 0
while coalescence_events:
num_genes -= 1
coalescent_event_idx += 1
nd = coalescence_events.pop()
age = nd.age
# print(age)
assert nd.age >= previous_age
waiting_time = nd.age - previous_age
data.append({
# "src_id": "I{:03d}".format(src_idx+1),
"src_id": src_id,
"num_genes": num_genes,
"coalescent_event_idx": coalescent_event_idx,
"age": age,
"waiting_time": waiting_time,
                    })
                    previous_age = age
df = pd.DataFrame(data)
sns.distplot(
df["waiting_time"],
bins=20,
ax=ax,
hist=True,
kde=False,
# fit=stats.expon,
label=src_id,
)
# sns.kdeplot(
# df["waiting_time"],
# # bins=range(1, 110, 10),
# ax=ax,
# # bw=0.2,
# label=src_id,
# )
# kwargs = {}
# if len(args.tree_files) > 1:
# kwargs["hue"] = "src_id"
# ax = sns.scatterplot(
# x="coalescent_event_idx",
# y="waiting_time",
# data=df,
# **kwargs
# )
# ax = sns.kdeplot(df["waiting_time"], **kwargs)
fig.legend()
spdw.render_output(args, "Age")
if __name__ == '__main__':
main()
```
|
{
"source": "jeetsukumaran/Spectrasophy",
"score": 3
}
|
#### File: Spectrasophy/bin/spectrasophy-summarize.py
```python
import math
import csv
import os
import sys
import argparse
import collections
import re
from dendropy.calculate import statistics
from spectrasophy import utility
class SpectrasophySummarizer(object):
def __init__(self,
field_delimiter="\t",
exclude_field_patterns=None,
include_only_field_patterns=None,
):
self.field_delimiter = field_delimiter
self.all_fieldnames = None
self.other_fieldnames = None
self.stat_fieldnames = None
self.stat_fieldnames_check = None
self.other_fieldname_check = None
self.stat_values = []
self.other_values = []
def summarize(self, target_data_filepath,):
with utility.universal_open(target_data_filepath) as src:
reader = csv.DictReader(
src,
delimiter=self.field_delimiter,
quoting=csv.QUOTE_NONE)
categorical_params = collections.OrderedDict()
continuous_params = collections.OrderedDict()
for row_idx, row in enumerate(reader):
for key_idx, key in enumerate(reader.fieldnames):
if key in categorical_params:
categorical_params[key][row[key]] += 1
elif key in continuous_params:
continuous_params[key].append(float(row[key]))
else:
if key in ("param.DivTimeModel", "param.numDivTimes"):
val = row[key]
is_categorical = True
else:
try:
val = float(row[key])
is_categorical = False
except ValueError:
val = row[key]
is_categorical = True
if is_categorical:
categorical_params[key] = collections.Counter()
categorical_params[key][val] += 1
else:
continuous_params[key] = [val]
output_prefix = os.path.splitext(os.path.basename(target_data_filepath))[0]
with utility.universal_open(output_prefix + ".summary.continuous.tsv", "w") as dest:
row_results = collections.OrderedDict()
for param_idx, param_name in enumerate(continuous_params):
values = continuous_params[param_name]
row_results["param"] = param_name
summary = statistics.summarize(values)
row_results["mean"] = summary["mean"]
row_results["var"] = summary["var"]
row_results["sd"] = summary["sd"]
row_results["min"] = summary["range"][0]
row_results["max"] = summary["range"][1]
row_results["hpd5"] = summary["hpd95"][0]
row_results["hpd95"] = summary["hpd95"][1]
try:
row_results["quant5"] = summary["quant_5_95"][0]
row_results["quant95"] = summary["quant_5_95"][1]
except TypeError:
row_results["quant5"] = "NA"
row_results["quant95"] = "NA"
if param_idx == 0:
dest.write(self.field_delimiter.join(row_results.keys()) + "\n")
dest.write(self.field_delimiter.join("{}".format(v) for v in row_results.values()) + "\n")
for param_idx, param_name in enumerate(categorical_params):
with utility.universal_open(output_prefix + ".summary.{:02d}.{}.tsv".format(param_idx+1, param_name), "w") as dest:
param_counter = categorical_params[param_name]
total = float(sum(param_counter.values()))
for category_idx, (category_name, category_count) in enumerate(param_counter.most_common()):
row_results = collections.OrderedDict()
row_results["label"] = category_name
row_results["freq"] = category_count/total
row_results["count"] = category_count
if category_idx == 0:
dest.write(self.field_delimiter.join(row_results.keys()) + "\n")
dest.write(self.field_delimiter.join("{}".format(v) for v in row_results.values()) + "\n")
def main():
parser = argparse.ArgumentParser(
            description="SPECTRASOPHY Summarizer",
)
parser.add_argument(
"posteriors_filepath",
help="Path to posteriors parameter file.")
processing_options = parser.add_argument_group("Processing Options")
processing_options.add_argument("--field-delimiter",
type=str,
default="\t",
help="Field delimiter (default: <TAB>).")
run_options = parser.add_argument_group("Run Options")
run_options.add_argument(
"-q", "--quiet",
action="store_true",
help="Work silently.")
args = parser.parse_args()
summarizer = SpectrasophySummarizer(field_delimiter=args.field_delimiter)
summarizer.summarize(args.posteriors_filepath)
if __name__ == "__main__":
main()
```
#### File: Spectrasophy/bin/spectrasophy-sumstats.py
```python
import os
import sys
import argparse
import traceback
import time
import spectrasophy
from spectrasophy import sumstats
from spectrasophy import utility
def main():
parser = argparse.ArgumentParser()
package_id = spectrasophy.package_id()
parser.add_argument("--version", action="version", version=package_id)
simulator_options = parser.add_argument_group("Configuration")
simulator_options.add_argument("configuration_filepath",
metavar="CONFIGURATION-FILE",
help="Path to the configuration file listing the data.")
output_options = parser.add_argument_group("Output Options")
output_options.add_argument('-o', '--output-name-prefix',
action='store',
dest='output_name_prefix',
type=str,
default=None,
metavar='NAME-PREFIX',
help="Prefix for output filenames (default: same as configuration filename stem).")
output_options.add_argument('-O', '--output-directory',
action='store',
dest='output_directory',
type=str,
default=None,
metavar='DIRECTORY',
help="Directory for output files (default: current working directory).")
output_options.add_argument(
"-U",
"--unfolded-site-frequency-spectrum",
"--derived-site-frequency-spectrum",
action="store_true",
default=False,
help="Calculate the unfolded or derived site frequency spectrum."
" Otherwise, defaults to the folded or minor site frequency"
" spectrum."
)
output_options.add_argument(
"--calculate-single-population-site-frequency-spectrum",
action="store_true",
default=False,
help="Calculate the single (within) population site frequency"
" spectrum in addition to the joint."
)
output_options.add_argument("-l", "--labels",
action="append",
            help="Additional field/value pairs to add to the output (in format <FIELD-NAME>:value;)")
output_options.add_argument('--field-delimiter',
type=str,
default='\t',
            help="Delimiter string separating fields in output (default: <TAB>).")
output_options.add_argument('--summary-stats-label-prefix',
type=str,
default='stat',
metavar='PREFIX',
            help="Prefix for summary statistic field labels (default: '%(default)s').")
output_options.add_argument( "--append",
action="store_true",
default=False,
help="Append instead of overwriting output file(s).")
output_options.add_argument( "--no-write-header",
action="store_true",
default=False,
            help="Do not write header row.")
args = parser.parse_args()
config_d = {}
utility.parse_legacy_configuration(
filepath=args.configuration_filepath,
config_d=config_d)
config_d["output_prefix"] = utility.output_prefix(
primary_source_filepath=args.configuration_filepath,
output_name_prefix=args.output_name_prefix,
output_directory=args.output_directory)
config_d["is_unfolded_site_frequency_spectrum"] = args.unfolded_site_frequency_spectrum
config_d["is_calculate_single_population_sfs"] = args.calculate_single_population_site_frequency_spectrum
config_d["is_calculate_joint_population_sfs"] = True
config_d["stat_label_prefix"] = args.summary_stats_label_prefix
config_d["supplemental_labels"] = utility.parse_fieldname_and_value(args.labels)
config_d["alignment_directory_head"] = os.path.dirname(os.path.abspath(args.configuration_filepath))
config_d["field_delimiter"] = args.field_delimiter
sscalc = sumstats.SpectrasophySummaryStatsCalculator(**config_d)
filepath = config_d["output_prefix"] + ".obs.sumstats.tsv"
# dest = utility.open_destput_file_for_csv_writer(
# filepath=filepath,
# is_append=args.append)
dest = utility.universal_open(filepath, "a" if args.append else "w")
if args.append or args.no_write_header:
is_write_header = False
else:
is_write_header = True
with dest:
# writer = utility.get_csv_writer(
# dest=dest,
# delimiter=args.field_delimiter)
try:
results = sscalc.write_summary_stats(
dest=dest,
results_store=None,
is_write_header=is_write_header)
except Exception as e:
sys.stderr.write("Traceback (most recent call last):\n {}{}\n".format(
" ".join(traceback.format_tb(sys.exc_info()[2])),
e))
sys.exit(1)
if __name__ == "__main__":
main()
```
#### File: spectrasophy/test/test_post_process.py
```python
import sys
import csv
import unittest
import os
from spectrasophy import utility
from spectrasophy.utility import StringIO
from spectrasophy.test import TESTS_DATA_DIR
class FilterColumnsTestCase(unittest.TestCase):
def test_filter_columns_from_template_file(self):
master = (
"c1|c2|c3|c4|c5|c6|c7|c8",
"11|12|13|14|15|16|17|18",
"21|22|23|24|25|26|27|28",
"31|32|33|34|35|36|37|38",
"41|42|43|44|45|46|47|48",
"51|52|53|54|55|56|57|58",
"61|62|63|64|65|66|67|68",
"71|72|73|74|75|76|77|78",
"81|82|83|84|85|86|87|78",
)
target = (
"c1|x2|c3|x4|c5|x6|c7|x8",
"11|12|13|14|15|16|17|18",
"21|22|23|24|25|26|27|28",
"31|32|33|34|35|36|37|38",
"41|42|43|44|45|46|47|48",
"51|52|53|54|55|56|57|58",
"61|62|63|64|65|66|67|68",
"71|72|73|74|75|76|77|78",
"81|82|83|84|85|86|87|78",
)
expected = (
"c1|c3|c5|c7",
"11|13|15|17",
"21|23|25|27",
"31|33|35|37",
"41|43|45|47",
"51|53|55|57",
"61|63|65|67",
"71|73|75|77",
"81|83|85|87",
)
dest = StringIO()
master_file = StringIO("\n".join(master).replace("|", "\t"))
target_file = StringIO("\n".join(target).replace("|", "\t"))
utility.filter_columns_using_master_template_file(
dest=dest,
master_file=master_file,
source_file=target_file)
result = dest.getvalue().strip()
expected_str = "\n".join(expected).replace("|", "\t")
self.assertEqual(result, expected_str)
if __name__ == "__main__":
unittest.main()
```
|
{
"source": "jeetsukumaran/yakherd",
"score": 2
}
|
#### File: jeetsukumaran/yakherd/setup.py
```python
import os
import sys
import re
from setuptools import setup, find_packages
def _read(path_components, **kwargs):
path = os.path.join(os.path.dirname(__file__), *path_components)
if sys.version_info.major < 3:
return open(path, "rU").read()
else:
with open(path, encoding=kwargs.get("encoding", "utf8")) as src:
s = src.read()
return s
def _read_requirements(path):
return [
line.strip()
for line in _read([path]).split("\n")
if not line.startswith(('"', "#", "-", "git+"))
]
project_init = _read(["src", "yakherd", "__init__.py"])
__version__ = re.match(r".*^__version__\s*=\s*['\"](.*?)['\"]\s*$.*", project_init, re.S | re.M).group(1)
__project__ = re.match(r".*^__project__\s*=\s*['\"](.*?)['\"]\s*$.*", project_init, re.S | re.M).group(1)
setup(
name=__project__,
version=__version__,
author="<NAME>",
author_email="<EMAIL>",
packages=find_packages("src"),
package_dir={"": "src"},
entry_points={
"console_scripts": [
# "name-of-executable = module.with:function_to_execute"
"yakherd = yakherd.application.yakherd:main",
]
},
include_package_data=True,
# MANIFEST.in: only used in source distribution packaging.
# ``package_data``: only used in binary distribution packaging.
package_data={
"": [
"*.txt",
"*.md",
"*.rst",
],
"yakherd": [
# For files in this package's direct namespace
# (e.g., "src/{normalized_project_name}/*.json")
# "*.json",
# For files in a (non-subpackage) subdirectory direct namespace
# (e.g., "src/{normalized_project_name}/resources/config/*.json")
# "resources/config/*.json",
# For files located in 'src/yakherd-data/'
# "../yakherd-data/*.json",
# For files located in 'resources'/'
# "../../resources/*.json",
],
},
test_suite = "tests",
# url="http://pypi.python.org/pypi/yakherd",
url="https://github.com/jeetsukumaran/yakherd",
license="LICENSE",
description="A Project",
long_description=_read(["README.md"]),
long_description_content_type="text/markdown",
# long_description_content_type="text/x-rst",
install_requires=_read_requirements("requirements.txt"),
extras_require={"test": _read_requirements("requirements-test.txt")},
)
```
|
{
"source": "JeetThakare/goodread",
"score": 3
}
|
#### File: crawler/crawler_utils/spider.py
```python
from urllib.request import urlopen
from urllib.request import Request as req
from crawler.crawler_utils.link_finder import LinkFinder
from crawler.models import Link
from crawler.crawler_utils.utils import get_domain_name
import requests
# Adapted version of https://github.com/buckyroberts/Spider
class Spider:
project_name = ''
base_url = ''
domain_name = ''
queue = set()
crawled = set()
def __init__(self, project_name, base_url, domain_name):
Spider.project_name = project_name
Spider.base_url = base_url
Spider.domain_name = domain_name
self.boot()
self.crawl_page('First spider', Spider.base_url)
    # Loads the crawl queue and crawled set from the database on first run
@staticmethod
def boot():
q = Link.objects.filter(visited=False).all()
Spider.queue = set([l.url for l in q])
c = Link.objects.filter(visited=True).all()
Spider.crawled = set([l.url for l in c])
    # Prints crawl status, fills the queue and persists progress to the database
@staticmethod
def crawl_page(thread_name, page_url):
if page_url not in Spider.crawled:
print(thread_name + ' now crawling ' + page_url)
print('Queue ' + str(len(Spider.queue)) +
' | Crawled ' + str(len(Spider.crawled)))
Spider.add_links_to_queue(Spider.gather_links(page_url))
Spider.queue.remove(page_url)
Spider.crawled.add(page_url)
Spider.update_db()
# Converts raw response data into readable information and checks for proper html formatting
@staticmethod
def gather_links(page_url):
html_string = ''
try:
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
my_url = page_url
headers = {'User-Agent': user_agent}
complete_request = req(my_url, None, headers)
response = urlopen(complete_request)
# response = requests.get(page_url)
if 'text/html' in response.getheader('Content-Type'):
html_bytes = response.read()
html_string = html_bytes.decode("utf-8")
finder = LinkFinder(Spider.base_url, response.url)
finder.feed(html_string)
except Exception as e:
print(str(e))
return set()
return finder.page_links()
    # Adds newly discovered same-domain links to the queue
@staticmethod
def add_links_to_queue(links):
for url in links:
if (url in Spider.queue) or (url in Spider.crawled):
continue
if Spider.domain_name != get_domain_name(url):
continue
Spider.queue.add(url)
@staticmethod
def update_db():
for a in Spider.queue.copy():
l = Link(url=a, visited=False)
try:
l.save()
except:
pass
for a in Spider.crawled.copy():
l = Link(url=a, visited=True)
try:
l.save()
except:
pass
```
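A hypothetical way to kick off a crawl with the class above; it assumes Django is configured so the `crawler.models.Link` table exists, and the settings module name and start URL are placeholders:

```python
import os

import django

# Placeholder settings module; adjust to the actual Django project.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "goodread.settings")
django.setup()

from crawler.crawler_utils.spider import Spider
from crawler.crawler_utils.utils import get_domain_name

HOMEPAGE = "https://example.com/"  # placeholder start URL
Spider("example-project", HOMEPAGE, get_domain_name(HOMEPAGE))
while Spider.queue:
    next_url = next(iter(Spider.queue))
    Spider.crawl_page("worker-1", next_url)
```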
#### File: goodread/crawler/models.py
```python
from django.db import models
from django.urls import reverse
# Create your models here.
class Link(models.Model):
url = models.CharField(max_length=256, unique=True)
visited = models.BooleanField(default=False)
article_fetched = models.BooleanField(default=False)
class Meta:
verbose_name = "Link"
verbose_name_plural = "Links"
indexes = [
models.Index(fields=['url', 'visited']),
models.Index(fields=['visited'], name='visited_idx'),
]
def __str__(self):
return self.url
def get_absolute_url(self):
return reverse("Link_detail", kwargs={"pk": self.pk})
class Topic(models.Model):
topic = models.IntegerField()
keyword = models.CharField(max_length=50, blank=True)
probability = models.FloatField(blank=True, null=True)
class Meta:
verbose_name = "Topic"
verbose_name_plural = "Topics"
# def __str__(self):
# return self.name
def get_absolute_url(self):
return reverse("Topic_detail", kwargs={"pk": self.pk})
class Article(models.Model):
article = models.TextField()
summary = models.TextField()
topic = models.IntegerField(blank=True,null=True)
# topic = models.ForeignKey(
# Topic, on_delete=models.PROTECT, blank=True, null=True)
url = models.CharField(max_length=256, unique=True)
title = models.CharField(max_length=50, blank=True)
class Meta:
verbose_name = "Article"
verbose_name_plural = "Articles"
def __str__(self):
return self.url
def get_absolute_url(self):
return reverse("Article_detail", kwargs={"pk": self.pk})
class ArticleTopic(models.Model):
articleId = models.ForeignKey(Article, on_delete=models.PROTECT, blank=True, null=True)
topicId = models.IntegerField(blank=True, null=True)
probability = models.FloatField(blank=True, null=True)
class Meta:
verbose_name = "ArticleTopic"
verbose_name_plural = "ArticleTopics"
# def __str__(self):
# return self.name
def get_absolute_url(self):
return reverse("ArticleTopic_detail", kwargs={"pk": self.pk})
```
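For reference, a hypothetical ORM snippet using these models (assumes a configured Django project with migrations applied; the URL is a placeholder):

```python
# Hypothetical usage of the models above from a Django shell or management command.
from crawler.models import Article, Link

Link.objects.get_or_create(url="https://example.com/", defaults={"visited": False})
pending = Link.objects.filter(visited=False, article_fetched=False).count()
print(f"{pending} links still to process, {Article.objects.count()} articles stored")
```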
|
{
"source": "jeettilva/python",
"score": 3
}
|
#### File: jeettilva/python/fun.py
```python
def add(a=12, b=16):
    print(a + b)

add()
```
|
{
"source": "jeetu7/imia",
"score": 2
}
|
#### File: imia/imia/impersonation.py
```python
import typing as t
from starlette.requests import HTTPConnection
from starlette.types import ASGIApp, Receive, Scope, Send
from .exceptions import AuthenticationError
from .protocols import UserLike
from .user_providers import UserProvider
from .user_token import LoginState, UserToken
IMPERSONATION_SESSION_KEY = '_impersonated_user_id'
class ImpersonationNotAllowedError(AuthenticationError):
"""Raised when the user is not allowed to perform impersonation."""
class ImpersonationNotActiveError(AuthenticationError):
"""Raised when you try to access impersonation related data
but the impersonation is not active."""
def impersonate(request: HTTPConnection, user: UserLike) -> None:
"""Activate impersonation."""
request.scope['auth'] = UserToken(user, state=LoginState.IMPERSONATOR, original_user_token=request.scope['auth'])
if 'session' in request.scope:
request.scope['session'][IMPERSONATION_SESSION_KEY] = user.get_id()
def exit_impersonation(request: HTTPConnection) -> None:
"""Exit the impersonation session (restores to an original user)."""
if 'session' in request.scope:
request.scope['session'].pop(IMPERSONATION_SESSION_KEY, None)
def impersonation_is_active(request: HTTPConnection) -> bool:
return request.scope['auth'].original_user_id is not None
def get_original_user(request: HTTPConnection) -> UserLike:
"""Get the original user when the impersonation is active."""
return (
request.scope['auth'].original_user_token.user
if request.scope['auth'].original_user_token
else request.scope['auth'].user
)
class ImpersonationMiddleware:
"""A middleware used to temporary impersonate another user."""
def __init__(
self,
app: ASGIApp,
user_provider: UserProvider,
guard_fn: t.Callable[[UserToken, HTTPConnection], bool] = None,
enter_query_param: str = "_impersonate",
exit_user_name: str = "__exit__",
scope: str = 'auth:impersonate_others',
) -> None:
self._app = app
self._user_provider = user_provider
self._guard_fn = guard_fn
self._enter_query_param = enter_query_param
self._exit_user_name = exit_user_name
self._scope = scope
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
if scope["type"] not in ["http", "websocket"]: # pragma: no cover
await self._app(scope, receive, send)
return
request = HTTPConnection(scope)
action, impersonation_target = self._detect_action(request)
if action == 'ignore':
# user haven't asked anything we can offer
return await self._app(scope, receive, send)
if 'auth' not in request.scope:
raise ValueError('ImpersonationMiddleware needs AuthenticationMiddleware to be installed.')
if not self._can_enter_impersonation(request):
return await self._app(scope, receive, send)
if action == 'enter':
user_id = request.query_params[self._enter_query_param]
await self._enter_impersonation(request, user_id)
if action == 'exit':
await self._exit_impersonation(request)
if action == 'activate':
await self._enter_impersonation(request, impersonation_target)
await self._app(scope, receive, send)
async def _enter_impersonation(self, request: HTTPConnection, user_id: str) -> None:
user = await self._user_provider.find_by_id(user_id)
if user:
impersonate(request, user)
async def _exit_impersonation(self, request: HTTPConnection) -> None:
exit_impersonation(request)
def _can_enter_impersonation(self, request: HTTPConnection) -> bool:
"""Test if current user can impersonate other.
Here are two checks. The first one to lookup a presence of self._scope in token scopes.
The other one is to provide guard functions that must return boolean value.
The guard function take the precedence when available."""
if self._guard_fn:
# forbid impersonation if guard function returns False
return self._guard_fn(request.auth, request)
# user must have "can_impersonate" scope
return self._scope in request.auth
def _detect_action(self, request: HTTPConnection) -> t.Tuple[str, str]:
username = request.query_params.get(self._enter_query_param)
if username is None:
impersonation_target = request.scope.get('session', {}).get(IMPERSONATION_SESSION_KEY)
if impersonation_target is not None:
return 'activate', impersonation_target
return 'ignore', ''
if username == self._exit_user_name:
return 'exit', ''
return 'enter', username
```
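A hypothetical wiring sketch for the middleware above in a Starlette app; it assumes session and authentication middleware (not shown here) already populate `request.scope['auth']` with a `UserToken`, as `ImpersonationMiddleware` requires:

```python
from starlette.applications import Starlette
from starlette.middleware import Middleware

from imia.impersonation import ImpersonationMiddleware
from imia.user_providers import InMemoryProvider

user_provider = InMemoryProvider({})  # fill with {user_id: UserLike} entries

app = Starlette(
    routes=[],  # application routes go here
    middleware=[
        # ... session + authentication middleware would be listed first ...
        Middleware(
            ImpersonationMiddleware,
            user_provider=user_provider,
            enter_query_param="_impersonate",  # ?_impersonate=<user id> starts impersonation
            exit_user_name="__exit__",         # ?_impersonate=__exit__ ends it
        ),
    ],
)
```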
#### File: imia/imia/user_providers.py
```python
import abc
import typing as t
from .protocols import UserLike
class UserProvider(abc.ABC): # pragma: no cover
"""User provides perform user look ups over data storages.
These classes are consumed by Authenticator instances
and are not designed to be a part of login or logout process."""
async def find_by_id(self, identifier: t.Any) -> t.Optional[UserLike]:
"""Look up a user by ID."""
raise NotImplementedError()
async def find_by_username(self, username_or_email: str) -> t.Optional[UserLike]:
"""Look up a user by it's identity. Where identity may be an email address, or username."""
raise NotImplementedError()
async def find_by_token(self, token: str) -> t.Optional[UserLike]:
"""Look up a user using API token."""
raise NotImplementedError()
class InMemoryProvider(UserProvider):
"""A user provides that uses a predefined map of users."""
def __init__(self, user_map: t.Mapping[str, UserLike]) -> None:
self.user_map = user_map
async def find_by_id(self, identifier: str) -> t.Optional[UserLike]:
return self.user_map.get(identifier)
async def find_by_username(self, username_or_email: str) -> t.Optional[UserLike]:
return self.user_map.get(username_or_email)
async def find_by_token(self, token: str) -> t.Optional[UserLike]:
return self.user_map.get(token)
```
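A minimal usage sketch for `InMemoryProvider`; the user object is assumed to satisfy the `UserLike` protocol (for example, the dataclass used in the tests below):

```python
import asyncio

from imia.user_providers import InMemoryProvider

async def demo(provider: InMemoryProvider) -> None:
    user = await provider.find_by_username("root@localhost")
    print(user)

# provider = InMemoryProvider({"root@localhost": some_user_like_object})
# asyncio.run(demo(provider))
```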
#### File: imia/tests/test_user_token.py
```python
import dataclasses
import typing as t
from imia import AnonymousUser, LoginState, UserToken
@dataclasses.dataclass
class User:
identifier: str = 'root@localhost'
password: str = '<PASSWORD>'
scopes: t.List[str] = dataclasses.field(default_factory=list)
name: str = 'Root'
def get_display_name(self):
return 'Root'
def get_id(self):
return self.identifier
def get_hashed_password(self):
return self.password
def get_scopes(self):
return self.scopes
def test_user_token():
user = User(scopes=['a'])
token = UserToken(user, state=LoginState.FRESH)
assert token.is_authenticated
assert not token.is_anonymous
assert token.original_user_token is None
assert token.original_user_id is None
assert token.scopes == ['a']
assert token.user_id == 'root@localhost'
assert token.user == user
assert token.display_name == 'Root'
assert token.state == LoginState.FRESH
assert bool(token)
assert str(token) == 'Root'
assert 'a' in token
def test_anon_user_token():
user = AnonymousUser()
token = UserToken(user, state=LoginState.ANONYMOUS)
assert not token.is_authenticated
assert token.is_anonymous
assert token.original_user_token is None
assert token.original_user_id is None
assert token.scopes == []
assert token.user_id is None
assert token.user == user
assert token.display_name == 'Anonymous'
assert token.state == LoginState.ANONYMOUS
assert not bool(token)
assert str(token) == 'Anonymous'
def test_impersonated_user_token():
user = User()
root_token = UserToken(user, state=LoginState.FRESH)
    customer = User(identifier='customer@localhost', name='Customer')
token = UserToken(customer, state=LoginState.IMPERSONATOR, original_user_token=root_token)
assert token.user_id == 'customer@localhost'
assert token.original_user_token == root_token
assert token.original_user_id == 'root@localhost'
```
|
{
"source": "Jeetu95/Knapsack-Problem",
"score": 3
}
|
#### File: Jeetu95/Knapsack-Problem/genetic_knapsack.py
```python
import os
import math
import numpy as np
def generate_population(size, n):
return np.array([np.random.randint(0, 2, n) for _ in range(size)])
def fitness_score(pop, w, wt, val):
score = np.multiply(pop, wt).sum()
if score <= w:
return np.multiply(pop, val).sum()
else:
return -1
def stop(score, pop, wt):
pack = zip(score, pop)
sort = sorted(pack, key=lambda x: x[0], reverse=True)
sort = np.array(sort[0][1])
return np.multiply(sort, wt).sum(), sort
def get_selection(score, pop):
pack = zip(score, pop)
sort = sorted(pack, key=lambda x: x[0], reverse=True)
sort = np.array([j for _, j in sort[0:SELECTION_SIZE]])
return sort
def get_crossover(pop, n):
child = list()
it = iter(pop)
for i, j in zip(it, it):
cr = np.random.rand()
if cr > CROSSOVER_RATE:
idx = np.random.randint(0, n)
ch1 = list(i[0:idx]) + list(j[idx:n])
child.append(ch1)
ch1 = list(j[0:idx]) + list(i[idx:n])
child.append(ch1)
else:
child.append(i)
child.append(j)
return np.array(child)
def get_mutation(pop, n):
bits = int(math.ceil(n*MUTATION_RATE))
for p in pop:
#print(f"Pop : {p}")
for _ in range(bits):
idx = np.random.randint(0, n)
p[idx] = 0 if p[idx] == 1 else 1
return pop
def knapSack(w, wt, val, n):
print("\tGenerating popullation :- 0")
pop = generate_population(size=POPULATION_SIZE, n=n)
for gen_num in range(1, GENERATIONS+1):
        # Calculating the fitness score for each member of the population.
score = np.array([fitness_score(i, w, wt, val) for i in pop])
# Checking to stop or not.
stp = stop(score, pop, wt)
if stp[0] == w:
print(f"\t\tSolution found at generation {gen_num-1}")
return stp[1]
        # Selection process for the current generation's population.
        selected_pop = get_selection(score, pop)
        # Crossover process for the selected population. (One-point crossover)
        cross_pop = get_crossover(selected_pop, n)
        # Mutation process for the selected population. (Random bit flips)
        mutate_pop = get_mutation(cross_pop, n)
        # Generating the new population.
        print(f"\tGenerating population : {gen_num}")
pop = list(generate_population(
size=POPULATION_SIZE-len(mutate_pop), n=n))
pop = pop + list(mutate_pop)
pop = np.array(pop)
return np.zeros(n,dtype=int)
def main():
'''Entry point to the program'''
    val = list(map(int, input(
        "Enter the values for Knapsack Problem, each separated with a space\n").split(' ')))
    wt = list(map(int, input(
        f"\nEnter the weights for {val} values, each separated with a space\n").split(' ')))
w = int(input("\nEnter the total weight of the Knapsack Bag :\t"))
n = len(val)
res =knapSack(w, wt, val, n)
print(
f"\n\nTotal value which the Knapsack Bag of weight {w} can hold is:\t{np.multiply(res,val).sum()}")
if __name__ == '__main__':
os.system('clear')
global GENERATIONS, POPULATION_SIZE, SELECTION_SIZE, CROSSOVER_RATE, MUTATION_RATE
MUTATION_RATE = 0.1
CROSSOVER_RATE = 0.50
POPULATION_SIZE = 8
SELECTION_SIZE = int(POPULATION_SIZE * 0.25)
GENERATIONS = 50
main()
# print(generate_population(10,5))
'''
60 100 120
10 20 30
50
'''
'''
20 40 60 80 100
10 20 30 40 50
70
'''
```
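A non-interactive driver for the solver above, using the sample values from the comments at the end of the file; the GA parameters are set on the module because the original only defines them under `__main__`:

```python
import numpy as np

import genetic_knapsack as gk  # assumes the script above is importable as a module

gk.MUTATION_RATE = 0.1
gk.CROSSOVER_RATE = 0.50
gk.POPULATION_SIZE = 8
gk.SELECTION_SIZE = int(gk.POPULATION_SIZE * 0.25)
gk.GENERATIONS = 50

val, wt, w = [60, 100, 120], [10, 20, 30], 50
res = gk.knapSack(w, wt, val, len(val))
print(np.multiply(res, val).sum())  # total value of the selected items
```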
|
{
"source": "Jeetu95/My-Tensorflow",
"score": 3
}
|
#### File: Jeetu95/My-Tensorflow/my_tensorflow.py
```python
import numpy as np
class Operation(object):
"""Base Operation class from which other higher level operations will inherit"""
def __init__(self, input_nodes=[]):
""" Constructior.
After object creation we append it to respective list of _default_graph.
Args :
input_nodes - List of all input nodes
"""
self.input_nodes = input_nodes
self.output_nodes = []
self.inputs = []
self.output= None
for node in input_nodes:
node.output_nodes.append(self)
_default_graph.operations.append(self)
def compute(self):
"""Placeholder method to be overwritten by the inherited class."""
pass
class add(Operation):
"""Add operation """
def __init__(self, x, y):
""" Constructior.
Args :
x, y - Actual data to perform add operation on
"""
super(add, self).__init__([x, y])
def compute(self, x_var, y_var):
""" Actual addition is done here.
Args :
x_var, y_var - Data to perform add operation on.
Return:
summation of args .
"""
self.inputs = [x_var, y_var]
return x_var+y_var
class multiply(Operation):
"""Multiply operation """
def __init__(self, x, y):
""" Constructior.
Args :
x, y - Actuall data to perform multiply operation on
"""
super(multiply, self).__init__([x, y])
def compute(self, x_var, y_var):
""" Actual multiply is done here.
Args :
x_var, y_var - Data to perform multiply operation on.
Return:
Product of args .
"""
self.inputs = [x_var, y_var]
return x_var * y_var
class matmul(Operation):
"""Matrix - Multiplication operation """
def __init__(self, x, y):
""" Constructior.
Args :
x, y - Actuall data to perform operation on
"""
super(matmul, self).__init__([x, y])
def compute(self, x_var, y_var):
""" Actua operation is done here.
Args :
x_var, y_var - Data to perform operation on.
Return:
dot product of args .
"""
self.inputs = [x_var, y_var]
return x_var.dot(y_var)
class Variable():
"""Implementation of tensorflow's placeholder class"""
def __init__(self, initial_value=None):
"""Constructor.
After this object creation we append it to respective list of _default_graph."""
self.value = initial_value
self.output_nodes = []
self.output = None
# _default_graph - It is the default graph object connecting Placeholders and Variables to Operations.
_default_graph.variables.append(self)
class Placeholder():
"""Implementation of tensorflow's placeholder class"""
def __init__(self):
"""Constructor.
After this object creation we append it to respective list of _default_graph."""
self.output_nodes = []
self.output = None
# _default_graph - It is the default graph object connecting Placeholders and Variables to Operations.
_default_graph.placeholders.append(self)
class Session():
""" Implementation of tensorflow's Session class."""
def traverse_postorder(self, operation):
"""PostOrder Traversal of nodes.
This function makes sure that all operations are done in right order.
Args:
operation : the operation whose postorder form is required.
Returns:
List of operations in postorder form
"""
nodes_postorder = []
def recurse(node):
if isinstance(node, type(operation)) or isinstance(node,Operation):
for input_node in node.input_nodes:
recurse(input_node)
nodes_postorder.append(node)
recurse(operation)
return nodes_postorder
def run(self, operation, feed_dict={}):
"""Running the session to produce output
Args:
operation - The operation to compute.
feed_dict - A dictionary to map values to placeholders.
Return:
output of the operation.
"""
nodes_postorder = self.traverse_postorder(operation)
for node in nodes_postorder:
if type(node) == Placeholder:
node.output = feed_dict[node]
elif type(node) == Variable:
node.output = node.value
else: # Operation
node.inputs = [
input_node.output for input_node in node.input_nodes]
node.output = node.compute(*node.inputs)
if type(node.output) == list:
node.output = np.array(node.output)
return operation.output
class Graph():
"""The main thing connecting every operation placeholder and variable."""
def __init__(self):
""" Constructor."""
self.operations = []
self.placeholders = []
self.variables = []
def set_as_default(self):
"""We call this method to set the current graph as the default graph.
In this way we can have multiple computation graph """
global _default_graph
_default_graph = self
```
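A quick usage sketch of the classes above, building and evaluating z = A*x + b (assumes the module is importable as `my_tensorflow` or the snippet is appended to the file):

```python
from my_tensorflow import Graph, Placeholder, Session, Variable, add, multiply

g = Graph()
g.set_as_default()

A = Variable(10)
b = Variable(1)
x = Placeholder()

y = multiply(A, x)
z = add(y, b)

sess = Session()
print(sess.run(operation=z, feed_dict={x: 10}))  # 10 * 10 + 1 = 101
```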
|
{
"source": "jeetyadav98/LPP-Solver-CLI",
"score": 4
}
|
#### File: jeetyadav98/LPP-Solver-CLI/LPModel.py
```python
import sys, os
import pulp as p
from termcolor import colored
'''
Description:
Command line tool to solve linear programming problems. Run the main script to enter the problem statement interactively. The solution is displayed in the terminal.
Author: <NAME>
License: MIT License
Notes:
    This script may be updated in the future for further convenience; possible additions include optional input/output from files, support for mixed integer problems, etc. The latest version can be found at https://github.com/jeetyadav98/LPP-Solver-CLI.
'''
def cyanc(string):
return (colored(string, 'cyan'))
def make_variable_dictionary(n_var):
print(cyanc('\nEnter variable names '))
    print(colored('Use alphabetic names; any name is allowed except the word var_dict', 'red'))
var_dict= {}
for i in range(n_var):
var_string= input()
var_dict[var_string]= p.LpVariable(var_string, lowBound = 0)
return var_dict
def get_objective(var_dict):
obj_string= input(cyanc('\nEnter the objective function: '))
for key,value in var_dict.items():
var_key = "var_dict['" + key + "']"
obj_string = obj_string.replace(key, var_key)
return obj_string
def get_constraint(var_dict):
con_string= input()
for key,value in var_dict.items():
var_key = "var_dict['" + key + "']"
con_string = con_string.replace(key, var_key)
return con_string
def print_intro():
print(cyanc('\nCreate an LP Model from user inputs. Requirements:'))
print(cyanc('-- Objective function (to minimize or maximize)'))
print(cyanc('-- Nonnegative decision variables (N)'))
print(cyanc('-- Inequality or Equality constraints (M)'))
def get_args():
#Print command line introduction
print_intro()
#Get type of optimization and create LpProblem
type_string= input(cyanc('\nType of optimization [max/min]: '))
if type_string=='min':
Lp_prob= p.LpProblem('Problem', p.LpMinimize)
elif type_string=='max':
Lp_prob= p.LpProblem('Problem', p.LpMaximize)
else:
print('Error: bad syntax, optimization type not recognized \nExiting..')
exit()
print(colored('Optimization type selected is ', 'red') + type_string)
# Get number of variables and constraints
n_var= int(input(cyanc('\nNumber of decision variables: ',)))
n_con= int(input(cyanc('Number of constraints (excluding nonnegative): ',)))
#Get variable names and make dictionary
var_dict = make_variable_dictionary(n_var)
#Get objective function and add to LP
Lp_prob += eval(get_objective(var_dict))
#Get constraints and add to LP
    print(cyanc('\nEnter constraints ') + '[of form a1*x1 + a2*x2 .. (<=, ==, >=) RHS]')
for j in range(n_con):
Lp_prob += eval(get_constraint(var_dict))
return Lp_prob, var_dict
def main():
Lp_model= get_args()
Lp_prob= Lp_model[0]
var_dict= Lp_model[1]
#Print problem summary
print(cyanc('\nProblem Summary: ',))
print(Lp_prob)
print(cyanc('------------------------------------------'))
Lp_solve= Lp_prob.solve()
print(cyanc('------------------------------------------'))
print(cyanc('Solution status'), p.LpStatus[Lp_solve])
#Print solution
for key,value in var_dict.items():
print('Value of ' + key + ' is: ', p.value(value))
print('Objective function value: ', p.value(Lp_prob.objective))
if __name__ == "__main__":
main()
```
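For reference, a hypothetical PuLP model equivalent to what the prompts above would assemble (maximizing 3x + 5y subject to two constraints):

```python
import pulp as p

prob = p.LpProblem('Problem', p.LpMaximize)
x = p.LpVariable('x', lowBound=0)
y = p.LpVariable('y', lowBound=0)

prob += 3 * x + 5 * y            # objective function
prob += 2 * x + 3 * y <= 12      # constraint 1
prob += -x + y <= 3              # constraint 2

status = prob.solve()
print(p.LpStatus[status], p.value(x), p.value(y), p.value(prob.objective))
```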
|
{
"source": "jeev20/pigstack",
"score": 3
}
|
#### File: jeev20/pigstack/pingServer.py
```python
import os
from random import randint
def server_status(ip):
"""
Ping the given server and read the stdout
return: a json formatted string required by influxDB
"""
ping = os.popen(f"ping {ip}").read()
if "Sent = 4" and "Received = 4" in ping:
server = ip
value = 100
elif "Sent = 3" and "Received = 3" in ping:
server = ip
value = 75
elif "Sent = 2" and "Received = 2" in ping:
server = ip
value = 50
elif "Sent = 1" and "Received = 1" in ping:
server = ip
value = 25
else:
server = ip
value = 0
json_body = [
{
"measurement": server,
"tags": {
"tag1" :"pingstatus"
},
"fields": {
"value": value
}
}
]
return json_body
```
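A hypothetical sketch of pushing the measurement into InfluxDB with the influxdb-python client; the host, port and database name are placeholders:

```python
from influxdb import InfluxDBClient

from pingServer import server_status

client = InfluxDBClient(host="localhost", port=8086, database="pingstack")
client.write_points(server_status("8.8.8.8"))
```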
|
{
"source": "jeevan449/GtesT",
"score": 2
}
|
#### File: GtesT/gtest/Test_Chaitu.py
```python
class Test2:
def __init__(self):
pass
def Prerequests(self):
print('this is Prerequests chaitu')
def TestCase1_Chai(self):
print('Test Case1')
    def TestCase2_Chai(self):
        print('Test Case2')
    def TestCase3_Chai(self):
        print('Test Case3')
    def TestCase4_Chai(self):
        print('Test Case4')
    def closer(self):
        print('closing all connections')
```
|
{
"source": "Jeevananthamcse/Palanisamy",
"score": 3
}
|
#### File: Palanisamy/ai-car-simulation-master/newcar.py
```python
import math
import random
import sys
import os
import neat
import pygame
# Constants
# WIDTH = 1600
# HEIGHT = 880
WIDTH = 1920
HEIGHT = 1080
CAR_SIZE_X = 60
CAR_SIZE_Y = 60
BORDER_COLOR = (255, 255, 255, 255) # Color To Crash on Hit
current_generation = 0 # Generation counter
class Car:
def __init__(self):
# Load Car Sprite and Rotate
self.sprite = pygame.image.load('car.png').convert() # Convert Speeds Up A Lot
self.sprite = pygame.transform.scale(self.sprite, (CAR_SIZE_X, CAR_SIZE_Y))
self.rotated_sprite = self.sprite
# self.position = [690, 740] # Starting Position
self.position = [830, 920] # Starting Position
self.angle = 0
self.speed = 0
self.speed_set = False # Flag For Default Speed Later on
self.center = [self.position[0] + CAR_SIZE_X / 2, self.position[1] + CAR_SIZE_Y / 2] # Calculate Center
self.radars = [] # List For Sensors / Radars
self.drawing_radars = [] # Radars To Be Drawn
self.alive = True # Boolean To Check If Car is Crashed
self.distance = 0 # Distance Driven
self.time = 0 # Time Passed
def draw(self, screen):
screen.blit(self.rotated_sprite, self.position) # Draw Sprite
self.draw_radar(screen) #OPTIONAL FOR SENSORS
def draw_radar(self, screen):
# Optionally Draw All Sensors / Radars
for radar in self.radars:
position = radar[0]
pygame.draw.line(screen, (0, 255, 0), self.center, position, 1)
pygame.draw.circle(screen, (0, 255, 0), position, 5)
def check_collision(self, game_map):
self.alive = True
for point in self.corners:
# If Any Corner Touches Border Color -> Crash
# Assumes Rectangle
if game_map.get_at((int(point[0]), int(point[1]))) == BORDER_COLOR:
self.alive = False
break
def check_radar(self, degree, game_map):
length = 0
x = int(self.center[0] + math.cos(math.radians(360 - (self.angle + degree))) * length)
y = int(self.center[1] + math.sin(math.radians(360 - (self.angle + degree))) * length)
# While We Don't Hit BORDER_COLOR AND length < 300 (just a max) -> go further and further
while not game_map.get_at((x, y)) == BORDER_COLOR and length < 300:
length = length + 1
x = int(self.center[0] + math.cos(math.radians(360 - (self.angle + degree))) * length)
y = int(self.center[1] + math.sin(math.radians(360 - (self.angle + degree))) * length)
# Calculate Distance To Border And Append To Radars List
dist = int(math.sqrt(math.pow(x - self.center[0], 2) + math.pow(y - self.center[1], 2)))
self.radars.append([(x, y), dist])
def update(self, game_map):
# Set The Speed To 20 For The First Time
# Only When Having 4 Output Nodes With Speed Up and Down
if not self.speed_set:
self.speed = 20
self.speed_set = True
# Get Rotated Sprite And Move Into The Right X-Direction
# Don't Let The Car Go Closer Than 20px To The Edge
self.rotated_sprite = self.rotate_center(self.sprite, self.angle)
self.position[0] += math.cos(math.radians(360 - self.angle)) * self.speed
self.position[0] = max(self.position[0], 20)
self.position[0] = min(self.position[0], WIDTH - 120)
# Increase Distance and Time
self.distance += self.speed
self.time += 1
# Same For Y-Position
self.position[1] += math.sin(math.radians(360 - self.angle)) * self.speed
self.position[1] = max(self.position[1], 20)
        self.position[1] = min(self.position[1], HEIGHT - 120)
# Calculate New Center
self.center = [int(self.position[0]) + CAR_SIZE_X / 2, int(self.position[1]) + CAR_SIZE_Y / 2]
# Calculate Four Corners
# Length Is Half The Side
length = 0.5 * CAR_SIZE_X
left_top = [self.center[0] + math.cos(math.radians(360 - (self.angle + 30))) * length, self.center[1] + math.sin(math.radians(360 - (self.angle + 30))) * length]
right_top = [self.center[0] + math.cos(math.radians(360 - (self.angle + 150))) * length, self.center[1] + math.sin(math.radians(360 - (self.angle + 150))) * length]
left_bottom = [self.center[0] + math.cos(math.radians(360 - (self.angle + 210))) * length, self.center[1] + math.sin(math.radians(360 - (self.angle + 210))) * length]
right_bottom = [self.center[0] + math.cos(math.radians(360 - (self.angle + 330))) * length, self.center[1] + math.sin(math.radians(360 - (self.angle + 330))) * length]
self.corners = [left_top, right_top, left_bottom, right_bottom]
# Check Collisions And Clear Radars
self.check_collision(game_map)
self.radars.clear()
# From -90 To 120 With Step-Size 45 Check Radar
for d in range(-90, 120, 45):
self.check_radar(d, game_map)
def get_data(self):
# Get Distances To Border
radars = self.radars
return_values = [0, 0, 0, 0, 0]
for i, radar in enumerate(radars):
return_values[i] = int(radar[1] / 30)
return return_values
def is_alive(self):
# Basic Alive Function
return self.alive
def get_reward(self):
# Calculate Reward (Maybe Change?)
# return self.distance / 50.0
return self.distance / (CAR_SIZE_X / 2)
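    # Illustrative note (not from the original repo): with CAR_SIZE_X = 60 the reward is
    # distance / 30, so a car that has driven 600 pixels earns a fitness reward of 20.0.
    # Likewise get_data() scales each radar distance by 1/30, so the five inputs fed to
    # the network roughly range from 0 (touching a wall) to 10 (the 300 px sensor cap).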
def rotate_center(self, image, angle):
# Rotate The Rectangle
rectangle = image.get_rect()
rotated_image = pygame.transform.rotate(image, angle)
rotated_rectangle = rectangle.copy()
rotated_rectangle.center = rotated_image.get_rect().center
rotated_image = rotated_image.subsurface(rotated_rectangle).copy()
return rotated_image
def run_simulation(genomes, config):
# Empty Collections For Nets and Cars
nets = []
cars = []
# Initialize PyGame And The Display
pygame.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT), pygame.FULLSCREEN)
# For All Genomes Passed Create A New Neural Network
for i, g in genomes:
net = neat.nn.FeedForwardNetwork.create(g, config)
nets.append(net)
g.fitness = 0
cars.append(Car())
# Clock Settings
# Font Settings & Loading Map
clock = pygame.time.Clock()
generation_font = pygame.font.SysFont("Arial", 30)
alive_font = pygame.font.SysFont("Arial", 20)
game_map = pygame.image.load('map.png').convert() # Convert Speeds Up A Lot
global current_generation
current_generation += 1
# Simple Counter To Roughly Limit Time (Not Good Practice)
counter = 0
while True:
# Exit On Quit Event
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit(0)
        # For Each Car Get The Action It Takes
for i, car in enumerate(cars):
output = nets[i].activate(car.get_data())
choice = output.index(max(output))
if choice == 0:
car.angle += 10 # Left
elif choice == 1:
car.angle -= 10 # Right
elif choice == 2:
if(car.speed - 2 >= 12):
car.speed -= 2 # Slow Down
else:
car.speed += 2 # Speed Up
# Check If Car Is Still Alive
# Increase Fitness If Yes And Break Loop If Not
still_alive = 0
for i, car in enumerate(cars):
if car.is_alive():
still_alive += 1
car.update(game_map)
genomes[i][1].fitness += car.get_reward()
if still_alive == 0:
break
counter += 1
if counter == 30 * 40: # Stop After About 20 Seconds
break
# Draw Map And All Cars That Are Alive
screen.blit(game_map, (0, 0))
for car in cars:
if car.is_alive():
car.draw(screen)
# Display Info
text = generation_font.render("Generation: " + str(current_generation), True, (0,0,0))
text_rect = text.get_rect()
text_rect.center = (900, 450)
screen.blit(text, text_rect)
text = alive_font.render("Still Alive: " + str(still_alive), True, (0, 0, 0))
text_rect = text.get_rect()
text_rect.center = (900, 490)
screen.blit(text, text_rect)
pygame.display.flip()
clock.tick(60) # 60 FPS
if __name__ == "__main__":
# Load Config
config_path = "voice.txt"
config = neat.config.Config(neat.DefaultGenome,
neat.DefaultReproduction,
neat.DefaultSpeciesSet,
neat.DefaultStagnation,
config_path)
# Create Population And Add Reporters
population = neat.Population(config)
population.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
population.add_reporter(stats)
# Run Simulation For A Maximum of 1000 Generations
population.run(run_simulation, 1000)
```
|
{
"source": "Jeevananthamcse/Python-programs",
"score": 4
}
|
#### File: Jeevananthamcse/Python-programs/sumpy.py
```python
def fizzbuzz(b):
    # Check the combined multiple-of-15 case first; if it came after the b%3 test,
    # that branch would always win and "fizzbuzz" could never be printed.
    if(b%3==0 and b%5==0):
        print("fizzbuzz")
    elif(b%3==0):
        print("fizz")
    elif(b%5==0):
        print("buzz")
b=int(input("enter the number"))
fizzbuzz(b)
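# Illustrative sample runs (not in the original file):
#   b = 9  -> prints "fizz"
#   b = 10 -> prints "buzz"
#   b = 15 -> prints "fizzbuzz"
#   b = 7  -> prints nothing, since no branch matches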
```
|
{
"source": "JEEVANARANDE/Data-Structures",
"score": 3
}
|
#### File: Data-Structures/Recursion/recursion.py
```python
import logging
import os
logging_str = "[%(asctime)s: %(levelname)s: %(module)s:] %(message)s"
log_dir = "logs"
os.makedirs(log_dir,exist_ok=True)
logging.basicConfig(filename=os.path.join(log_dir,"running.log"),level=logging.INFO,
format=logging_str,filemode='a')
def Factorial(n):
try:
        assert n>=0 and int(n)==n, "The number must be a positive integer only!"
if n in [0,1]:
return 1
else:
return n * Factorial(n-1)
except Exception as e:
return e
logging.info(Factorial(3))
logging.info(Factorial(4))
logging.info(Factorial(5))
logging.info(Factorial(-9))
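# Illustrative expected log output (not part of the original script):
#   Factorial(3) -> 6, Factorial(4) -> 24, Factorial(5) -> 120.
# Factorial(-9) does not raise: the except block returns the AssertionError
# instance, so its message is what gets written to the log.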
print("Hello, World!")
```
|
{
"source": "JeevanCd/woid",
"score": 2
}
|
#### File: services/migrations/0016_remove_medium.py
```python
from django.db import migrations
def remove_medium(apps, schema_editor):
Service = apps.get_model('services', 'Service')
try:
medium = Service.objects.get(slug='medium')
medium.delete()
except Service.DoesNotExist:
pass
class Migration(migrations.Migration):
dependencies = [
('services', '0015_auto_20190109_0805'),
]
operations = [
migrations.RunPython(remove_medium),
]
```
#### File: apps/services/wrappers.py
```python
import logging
import re
from django.conf import settings
from django.utils import timezone
import requests
from bs4 import BeautifulSoup
logger = logging.getLogger(__name__)
class AbstractBaseClient:
def __init__(self):
self.headers = {'user-agent': 'woid/1.0'}
class HackerNewsClient(AbstractBaseClient):
base_url = 'https://hacker-news.firebaseio.com'
def request(self, endpoint):
r = requests.get(endpoint, headers=self.headers)
result = r.json()
return result
def get_top_stories(self):
endpoint = '%s/v0/topstories.json' % self.base_url
return self.request(endpoint)
def get_story(self, code):
endpoint = '%s/v0/item/%s.json' % (self.base_url, code)
return self.request(endpoint)
def get_max_item(self):
endpoint = '%s/v0/maxitem.json' % self.base_url
return self.request(endpoint)
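    # Illustrative note (not from the original source): the Firebase-backed HN API
    # returns plain JSON, e.g. get_top_stories() yields a list of story ids such as
    # [38571093, 38570987, ...] and get_story(id) returns a dict with keys like
    # "title", "by", "score" and "url".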
class RedditClient(AbstractBaseClient):
def get_front_page_stories(self):
stories = list()
try:
r = requests.get('https://www.reddit.com/.json', headers=self.headers)
result = r.json()
stories = result['data']['children']
except ValueError:
logger.exception('An error occurred while executing RedditClient.get_front_page_stories')
return stories
class GithubClient(AbstractBaseClient):
def get_today_trending_repositories(self):
r = requests.get('https://github.com/trending?since=daily', headers=self.headers)
html = r.text
soup = BeautifulSoup(html, 'html.parser')
repos = soup.select('ol.repo-list li')
data = list()
for repo in repos:
repo_data = dict()
repo_data['name'] = repo.h3.a.get('href')
description = repo.p.text
if description:
description = description.strip()
else:
description = ''
repo_data['description'] = description
lang = repo.find(attrs={'itemprop': 'programmingLanguage'})
if lang:
repo_data['language'] = lang.text.strip()
else:
repo_data['language'] = ''
stars_text = repo.findAll(text=re.compile('stars today'))
stars_numbers_only = re.findall(r'\d+', stars_text[0])
repo_data['stars'] = int(stars_numbers_only[0])
data.append(repo_data)
return data
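    # Illustrative shape of one scraped entry (values are made up, and the CSS
    # selectors above depend on GitHub's trending-page markup at the time of writing):
    #   {'name': '/octocat/hello-world', 'description': 'Example repo',
    #    'language': 'Python', 'stars': 123}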
class NYTimesClient(AbstractBaseClient):
base_url = 'http://api.nytimes.com/svc/mostpopular/v2/'
def get_most_popular_stories(self):
data = dict()
mostviewed_endpoint = '{0}mostviewed/all-sections/1.json?api-key={1}'.format(
self.base_url,
settings.NYTIMES_API_KEY
)
r = requests.get(mostviewed_endpoint, headers=self.headers)
json_data = r.json()
data['mostviewed'] = json_data['results']
mostemailed_endpoint = '{0}mostemailed/all-sections/1.json?api-key={1}'.format(
self.base_url,
settings.NYTIMES_API_KEY
)
r = requests.get(mostemailed_endpoint, headers=self.headers)
json_data = r.json()
data['mostemailed'] = json_data['results']
mostshared_endpoint = '{0}mostshared/all-sections/1.json?api-key={1}'.format(
self.base_url,
settings.NYTIMES_API_KEY
)
r = requests.get(mostshared_endpoint, headers=self.headers)
json_data = r.json()
data['mostshared'] = json_data['results']
return data
class ProductHuntClient(AbstractBaseClient):
def __init__(self):
super().__init__()
extra_headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % settings.PRODUCT_HUNT_TOKEN,
'Host': 'api.producthunt.com'
}
self.headers.update(extra_headers)
def get_top_posts(self):
today = timezone.now().strftime('%Y-%m-%d')
r = requests.get('https://api.producthunt.com/v1/posts?day=%s' % today, headers=self.headers)
data = r.json()
return data['posts']
```
|
{
"source": "jeevan-exa/haystack",
"score": 2
}
|
#### File: haystack/test/test_weaviate.py
```python
import numpy as np
import pytest
from haystack import Document
from conftest import get_document_store
import uuid
embedding_dim = 768
def get_uuid():
return str(uuid.uuid4())
DOCUMENTS = [
{"content": "text1", "id":get_uuid(), "key": "a", "embedding": np.random.rand(embedding_dim).astype(np.float32)},
{"content": "text2", "id":get_uuid(), "key": "b", "embedding": np.random.rand(embedding_dim).astype(np.float32)},
{"content": "text3", "id":get_uuid(), "key": "b", "embedding": np.random.rand(embedding_dim).astype(np.float32)},
{"content": "text4", "id":get_uuid(), "key": "b", "embedding": np.random.rand(embedding_dim).astype(np.float32)},
{"content": "text5", "id":get_uuid(), "key": "b", "embedding": np.random.rand(embedding_dim).astype(np.float32)},
]
DOCUMENTS_XS = [
# current "dict" format for a document
{"content": "My name is Carla and I live in Berlin", "id":get_uuid(), "meta": {"metafield": "test1", "name": "filename1"}, "embedding": np.random.rand(embedding_dim).astype(np.float32)},
# meta_field at the top level for backward compatibility
{"content": "My name is Paul and I live in New York", "id":get_uuid(), "metafield": "test2", "name": "filename2", "embedding": np.random.rand(embedding_dim).astype(np.float32)},
# Document object for a doc
Document(content="My name is Christelle and I live in Paris", id=get_uuid(), meta={"metafield": "test3", "name": "filename3"}, embedding=np.random.rand(embedding_dim).astype(np.float32))
]
@pytest.fixture(params=["weaviate"])
def document_store_with_docs(request):
document_store = get_document_store(request.param)
document_store.write_documents(DOCUMENTS_XS)
yield document_store
document_store.delete_documents()
@pytest.fixture(params=["weaviate"])
def document_store(request):
document_store = get_document_store(request.param)
yield document_store
document_store.delete_documents()
@pytest.mark.weaviate
@pytest.mark.parametrize("document_store_with_docs", ["weaviate"], indirect=True)
def test_get_all_documents_without_filters(document_store_with_docs):
documents = document_store_with_docs.get_all_documents()
assert all(isinstance(d, Document) for d in documents)
assert len(documents) == 3
assert {d.meta["name"] for d in documents} == {"filename1", "filename2", "filename3"}
assert {d.meta["metafield"] for d in documents} == {"test1", "test2", "test3"}
@pytest.mark.weaviate
def test_get_all_documents_with_correct_filters(document_store_with_docs):
documents = document_store_with_docs.get_all_documents(filters={"metafield": ["test2"]})
assert len(documents) == 1
assert documents[0].meta["name"] == "filename2"
documents = document_store_with_docs.get_all_documents(filters={"metafield": ["test1", "test3"]})
assert len(documents) == 2
assert {d.meta["name"] for d in documents} == {"filename1", "filename3"}
assert {d.meta["metafield"] for d in documents} == {"test1", "test3"}
@pytest.mark.weaviate
def test_get_all_documents_with_incorrect_filter_name(document_store_with_docs):
documents = document_store_with_docs.get_all_documents(filters={"incorrectmetafield": ["test2"]})
assert len(documents) == 0
@pytest.mark.weaviate
def test_get_all_documents_with_incorrect_filter_value(document_store_with_docs):
documents = document_store_with_docs.get_all_documents(filters={"metafield": ["incorrect_value"]})
assert len(documents) == 0
@pytest.mark.weaviate
def test_get_documents_by_id(document_store_with_docs):
documents = document_store_with_docs.get_all_documents()
doc = document_store_with_docs.get_document_by_id(documents[0].id)
assert doc.id == documents[0].id
assert doc.content == documents[0].content
@pytest.mark.weaviate
@pytest.mark.parametrize("document_store", ["weaviate"], indirect=True)
def test_get_document_count(document_store):
document_store.write_documents(DOCUMENTS)
assert document_store.get_document_count() == 5
assert document_store.get_document_count(filters={"key": ["a"]}) == 1
assert document_store.get_document_count(filters={"key": ["b"]}) == 4
@pytest.mark.weaviate
@pytest.mark.parametrize("document_store", ["weaviate"], indirect=True)
@pytest.mark.parametrize("batch_size", [2])
def test_weaviate_write_docs(document_store, batch_size):
# Write in small batches
for i in range(0, len(DOCUMENTS), batch_size):
document_store.write_documents(DOCUMENTS[i: i + batch_size])
documents_indexed = document_store.get_all_documents()
assert len(documents_indexed) == len(DOCUMENTS)
documents_indexed = document_store.get_all_documents(batch_size=batch_size)
assert len(documents_indexed) == len(DOCUMENTS)
@pytest.mark.weaviate
@pytest.mark.parametrize("document_store", ["weaviate"], indirect=True)
def test_get_all_document_filter_duplicate_value(document_store):
documents = [
Document(
content="Doc1",
meta={"fone": "f0"},
id = get_uuid(),
embedding= np.random.rand(embedding_dim).astype(np.float32)
),
Document(
content="Doc1",
meta={"fone": "f1", "metaid": "0"},
id = get_uuid(),
embedding = np.random.rand(embedding_dim).astype(np.float32)
),
Document(
content="Doc2",
meta={"fthree": "f0"},
id = get_uuid(),
embedding=np.random.rand(embedding_dim).astype(np.float32)
)
]
document_store.write_documents(documents)
documents = document_store.get_all_documents(filters={"fone": ["f1"]})
assert documents[0].content == "Doc1"
assert len(documents) == 1
assert {d.meta["metaid"] for d in documents} == {"0"}
@pytest.mark.weaviate
@pytest.mark.parametrize("document_store", ["weaviate"], indirect=True)
def test_get_all_documents_generator(document_store):
document_store.write_documents(DOCUMENTS)
assert len(list(document_store.get_all_documents_generator(batch_size=2))) == 5
@pytest.mark.weaviate
@pytest.mark.parametrize("document_store", ["weaviate"], indirect=True)
def test_write_with_duplicate_doc_ids(document_store):
id = get_uuid()
documents = [
Document(
content="Doc1",
id=id,
embedding=np.random.rand(embedding_dim).astype(np.float32)
),
Document(
content="Doc2",
id=id,
embedding=np.random.rand(embedding_dim).astype(np.float32)
)
]
document_store.write_documents(documents, duplicate_documents="skip")
with pytest.raises(Exception):
document_store.write_documents(documents, duplicate_documents="fail")
@pytest.mark.weaviate
@pytest.mark.parametrize("document_store", ["weaviate"], indirect=True)
@pytest.mark.parametrize("update_existing_documents", [True, False])
def test_update_existing_documents(document_store, update_existing_documents):
id = uuid.uuid4()
original_docs = [
{"content": "text1_orig", "id": id, "metafieldforcount": "a", "embedding": np.random.rand(embedding_dim).astype(np.float32)},
]
updated_docs = [
{"content": "text1_new", "id": id, "metafieldforcount": "a", "embedding": np.random.rand(embedding_dim).astype(np.float32)},
]
document_store.update_existing_documents = update_existing_documents
document_store.write_documents(original_docs)
assert document_store.get_document_count() == 1
if update_existing_documents:
document_store.write_documents(updated_docs, duplicate_documents="overwrite")
else:
with pytest.raises(Exception):
document_store.write_documents(updated_docs, duplicate_documents="fail")
stored_docs = document_store.get_all_documents()
assert len(stored_docs) == 1
if update_existing_documents:
assert stored_docs[0].content == updated_docs[0]["content"]
else:
assert stored_docs[0].content == original_docs[0]["content"]
@pytest.mark.weaviate
@pytest.mark.parametrize("document_store", ["weaviate"], indirect=True)
def test_write_document_meta(document_store):
uid1 = get_uuid()
uid2 = get_uuid()
uid3 = get_uuid()
uid4 = get_uuid()
documents = [
{"content": "dict_without_meta", "id": uid1, "embedding": np.random.rand(embedding_dim).astype(np.float32)},
{"content": "dict_with_meta", "metafield": "test2", "name": "filename2", "id": uid2, "embedding": np.random.rand(embedding_dim).astype(np.float32)},
Document(content="document_object_without_meta", id=uid3, embedding=np.random.rand(embedding_dim).astype(np.float32)),
Document(content="document_object_with_meta", meta={"metafield": "test4", "name": "filename3"}, id=uid4, embedding=np.random.rand(embedding_dim).astype(np.float32)),
]
document_store.write_documents(documents)
documents_in_store = document_store.get_all_documents()
assert len(documents_in_store) == 4
assert not document_store.get_document_by_id(uid1).meta
assert document_store.get_document_by_id(uid2).meta["metafield"] == "test2"
assert not document_store.get_document_by_id(uid3).meta
assert document_store.get_document_by_id(uid4).meta["metafield"] == "test4"
@pytest.mark.weaviate
@pytest.mark.parametrize("document_store", ["weaviate"], indirect=True)
def test_write_document_index(document_store):
documents = [
{"content": "text1", "id": uuid.uuid4(), "embedding": np.random.rand(embedding_dim).astype(np.float32)},
{"content": "text2", "id": uuid.uuid4(), "embedding": np.random.rand(embedding_dim).astype(np.float32)},
]
document_store.write_documents([documents[0]], index="Haystackone")
assert len(document_store.get_all_documents(index="Haystackone")) == 1
document_store.write_documents([documents[1]], index="Haystacktwo")
assert len(document_store.get_all_documents(index="Haystacktwo")) == 1
assert len(document_store.get_all_documents(index="Haystackone")) == 1
assert len(document_store.get_all_documents()) == 0
@pytest.mark.weaviate
@pytest.mark.parametrize("retriever", ["dpr", "embedding"], indirect=True)
@pytest.mark.parametrize("document_store", ["weaviate"], indirect=True)
def test_update_embeddings(document_store, retriever):
documents = []
for i in range(6):
documents.append({"content": f"text_{i}", "id": str(uuid.uuid4()), "metafield": f"value_{i}", "embedding": np.random.rand(embedding_dim).astype(np.float32)})
documents.append({"content": "text_0", "id": str(uuid.uuid4()), "metafield": "value_0", "embedding": np.random.rand(embedding_dim).astype(np.float32)})
document_store.write_documents(documents, index="HaystackTestOne")
document_store.update_embeddings(retriever, index="HaystackTestOne", batch_size=3)
documents = document_store.get_all_documents(index="HaystackTestOne", return_embedding=True)
assert len(documents) == 7
for doc in documents:
assert type(doc.embedding) is np.ndarray
documents = document_store.get_all_documents(
index="HaystackTestOne",
filters={"metafield": ["value_0"]},
return_embedding=True,
)
assert len(documents) == 2
for doc in documents:
assert doc.meta["metafield"] == "value_0"
np.testing.assert_array_almost_equal(documents[0].embedding, documents[1].embedding, decimal=4)
documents = document_store.get_all_documents(
index="HaystackTestOne",
filters={"metafield": ["value_1", "value_5"]},
return_embedding=True,
)
np.testing.assert_raises(
AssertionError,
np.testing.assert_array_equal,
documents[0].embedding,
documents[1].embedding
)
doc = {"content": "text_7", "id": str(uuid.uuid4()), "metafield": "value_7",
"embedding": retriever.embed_queries(texts=["a random string"])[0]}
document_store.write_documents([doc], index="HaystackTestOne")
doc_before_update = document_store.get_all_documents(index="HaystackTestOne", filters={"metafield": ["value_7"]})[0]
embedding_before_update = doc_before_update.embedding
document_store.update_embeddings(
retriever, index="HaystackTestOne", batch_size=3, filters={"metafield": ["value_0", "value_1"]}
)
doc_after_update = document_store.get_all_documents(index="HaystackTestOne", filters={"metafield": ["value_7"]})[0]
embedding_after_update = doc_after_update.embedding
np.testing.assert_array_equal(embedding_before_update, embedding_after_update)
# test update all embeddings
document_store.update_embeddings(retriever, index="HaystackTestOne", batch_size=3, update_existing_embeddings=True)
assert document_store.get_document_count(index="HaystackTestOne") == 8
doc_after_update = document_store.get_all_documents(index="HaystackTestOne", filters={"metafield": ["value_7"]})[0]
embedding_after_update = doc_after_update.embedding
np.testing.assert_raises(AssertionError, np.testing.assert_array_equal, embedding_before_update, embedding_after_update)
@pytest.mark.weaviate
@pytest.mark.parametrize("document_store_with_docs", ["weaviate"], indirect=True)
def test_query_by_embedding(document_store_with_docs):
docs = document_store_with_docs.query_by_embedding(np.random.rand(embedding_dim).astype(np.float32))
assert len(docs) == 3
docs = document_store_with_docs.query_by_embedding(np.random.rand(embedding_dim).astype(np.float32),
top_k=1)
assert len(docs) == 1
docs = document_store_with_docs.query_by_embedding(np.random.rand(embedding_dim).astype(np.float32),
filters = {"name": ['filename2']})
assert len(docs) == 1
@pytest.mark.weaviate
@pytest.mark.parametrize("document_store_with_docs", ["weaviate"], indirect=True)
def test_query(document_store_with_docs):
query_text = 'My name is Carla and I live in Berlin'
with pytest.raises(Exception):
docs = document_store_with_docs.query(query_text)
docs = document_store_with_docs.query(filters = {"name": ['filename2']})
assert len(docs) == 1
docs = document_store_with_docs.query(filters={"content":[query_text.lower()]})
assert len(docs) == 1
docs = document_store_with_docs.query(filters={"content":['live']})
assert len(docs) == 3
@pytest.mark.weaviate
@pytest.mark.parametrize("document_store_with_docs", ["weaviate"], indirect=True)
def test_delete_all_documents(document_store_with_docs):
assert len(document_store_with_docs.get_all_documents()) == 3
document_store_with_docs.delete_documents()
documents = document_store_with_docs.get_all_documents()
assert len(documents) == 0
@pytest.mark.weaviate
@pytest.mark.parametrize("document_store_with_docs", ["weaviate"], indirect=True)
def test_delete_documents_with_filters(document_store_with_docs):
document_store_with_docs.delete_all_documents(filters={"metafield": ["test1", "test2"]})
documents = document_store_with_docs.get_all_documents()
assert len(documents) == 1
assert documents[0].meta["metafield"] == "test3"
```
|
{
"source": "jeevangelista/HashClose",
"score": 3
}
|
#### File: HashClose/InClose2/__init__.py
```python
from bitarray import bitarray
from anytree import Node, LevelOrderGroupIter
class InClose2:
def __init__(self):
self.intents = []
self.extents = []
self.rnew = 1
def process_matrix_to_bitarray(self, key_list, matrix):
'''Processes the list of list to a list of bitarrays'''
bit_matrix = []
for key in key_list:
bitrep = bitarray()
for l in matrix:
if key in l:
bitrep.append(True)
else:
bitrep.append(False)
bit_matrix.append(bitrep)
# for l in matrix:
# bitrep = bitarray()
# for key in key_list:
# if key in l:
# bitrep.append(True)
# else:
# bitrep.append(False)
# bit_matrix.append(bitrep)
return bit_matrix
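    # Illustrative example (not in the original): with key_list = ['a', 'b'] and
    # matrix = [['a'], ['a', 'b']] this returns [bitarray('11'), bitarray('01')],
    # i.e. one bitarray per attribute, with one bit per object/row of the context.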
    def count_true(self, bitarr):
        counts = 0
        for i in bitarr:
            if i:
                counts+=1
        return counts
    def get_counts_per_key(self, key_list, bit_matrix):
        bit_key_dict = {}
        bit_key_counts_dict = {}
        for i in range(len(key_list)):
            bit_key_dict[key_list[i]] = bit_matrix[i]
        for k,v in bit_key_dict.items():
            bit_key_counts_dict[k] = self.count_true(v)
        return bit_key_dict, bit_key_counts_dict
    def sort_bitmatrix(self, bit_key_dict, bit_key_counts_dict):
        sorted_keys_by_counts = [i[0] for i in sorted(bit_key_counts_dict.items(), key=lambda kv: kv[1], reverse=True)]
        bit_matrix_sorted = []
        for i in sorted_keys_by_counts:
            bit_matrix_sorted.append(bit_key_dict[i])
        return sorted_keys_by_counts, bit_matrix_sorted
def InClose2(self, r, y, bit_matrix, key_list, parent):
jchildren = []
rchildren = []
for j in range(y,len(bit_matrix)):
if not self.intents[r][j]:
new_extent = self.extents[r] & bit_matrix[j]
if new_extent == self.extents[r]:
                    self.intents[r][j] = True
else:
if new_extent.any() and self.IsCanonical(r, j, new_extent, bit_matrix):
self.extents.append(new_extent)
jchildren.append(j)
rchildren.append(self.rnew)
self.intents[self.rnew] = self.intents[r].copy()
self.intents[self.rnew][j] = True
self.intents.append(bitarray([False]*len(key_list)))
self.rnew += 1
for k in range(0,len(jchildren)):
child = Node(rchildren[k], parent=parent)
self.InClose2(rchildren[k], jchildren[k] + 1, bit_matrix, key_list, child)
def IsCanonical(self, r, j, new_extent, bit_matrix):
for k in range(0, j):
if not self.intents[r][k]:
intersect = new_extent & bit_matrix[k]
if intersect == new_extent:
return False
return True
def initialize(self, key_list, matrix):
r = 0
self.extents.append(bitarray([True]*len(matrix))) # equal to the number of entries in matrix list
self.intents.append(bitarray([False]*len(key_list)))
self.intents.append(bitarray([False]*len(key_list)))
bit_matrix = self.process_matrix_to_bitarray(key_list, matrix)
return bit_matrix, Node(r)
def process_output(self, key_list):
len_matrix = len(self.extents[0])
result = []
for r in range(self.rnew):
extent = self.extents[r]
intent = self.intents[r]
intent_list = []
extent_list = []
index = 0
for i in intent:
if i:
intent_list.append(key_list[index])
index += 1
index = 0
for e in extent:
if e:
extent_list.append(index)
index += 1
result.append({"intent":intent_list,
"extent": extent_list})
return result
```
|
{
"source": "Jeevan-Guduru/ETL_Marketing_Campaign",
"score": 3
}
|
#### File: SparkNetworks_dwh/dwh/ETL_MySql.py
```python
import os
import sys
import pandas as pd
from sql_wrapper import db_connector
import dask.dataframe as dd
from io import StringIO
from sqlalchemy import create_engine, func
import logging
import logging.config
#logger is configured from 'logger.conf' file in the project directory
logging.config.fileConfig(os.path.dirname(__file__)+r"\logger.conf", disable_existing_loggers=False)
logger = logging.getLogger(__name__)
#creating a list of file names that we use in the upcoming ETL functions below
file_names=['user_data.csv','event_data.csv']
def file_check():
'''
Parameters: None
Returns: None
Definition:
Checks if source files are present in the input path and exits if file is not present.
'''
file_exist = [file for file in file_names if os.path.isfile(os.path.dirname(__file__)+"\input\{}".format(file))];
#sorting the lists
file_exist.sort()
file_names.sort()
if file_exist==file_names:
pass
else:
        logger.error("Source file is not present in the input path. Please check!!")
sys.exit()
# In[87]:
def user_data_extract(Filepath):
'''
Parameter: Filepath (string)
Returns: user_data_df (dataframe)
Description:
1.Function to extract the User data from source CSV file 'user_data.csv'.
Used dask dataframe dd.read_csv for efficient and fast data read.
2.Also calls Data masking method that anonymises 'user_id' and 'email' fields of 'user_data.csv' as per the requirement.
'''
logger.info("Extracting user data from source file user_data.csv")
user_data_df=dd.read_csv(Filepath,delimiter=';',usecols=['user_id','email'])
user_data_df=user_data_df.compute() #converting dask dataframe to pandas dataframe for further processing
user_data_df=data_masking(user_data_df)
count_user_data=len(user_data_df)
logger.info("{} records fetched from user_data.csv!".format(count_user_data))
return user_data_df
# In[102]:
def event_data_extract(Filepath):
'''
Parameters: Filepath(String)
Returns: event_data_df (dataframe)
Description:
1.Function to extract the event data from source CSV file 'event_data.csv' into dataframe 'event_data_df'.
Used dask dataframe dd.read_csv for efficient and fast data read.
    2.Converts the event_date field of the 'event_data_df' dataframe to a standard date format using the pandas.to_datetime() method.
    3.Week_number is retrieved from 'event_date' and added as a new column in the 'event_data_df' dataframe using the insert() method.
    4.Also calls the data masking method to anonymise the user_id field of 'event_data.csv' as per the requirement.
'''
logger.info("Extracting event data from source file event_data.csv")
event_data_df=dd.read_csv(Filepath,delimiter=';',usecols=['event_date','event_id','user_id'])
event_data_df=event_data_df.compute() #converting dask dataframe to pandas dataframe for further processing
event_data_df.event_date= pd.to_datetime(event_data_df.event_date,format='%d.%m.%y')
#inserting week number column
event_data_df.insert(3,'week_number',pd.Series([date.strftime("%V") for date in list(event_data_df.event_date)]))
event_data_df=data_masking(event_data_df)
count_user_data=len(event_data_df)
logger.info("{} records fetched from event_data.csv!".format(count_user_data))
return event_data_df
# In[103]:
def data_masking(data_df):
'''
    Parameters: data_df (dataframe) - dataframe obtained from the source files in the methods above
    Returns : data_df (dataframe) - with masked data.
    Description:
    1.This function anonymises the user-sensitive information as follows:
    user_id: multiplied by 5 and the result converted to octal;
    the 'o' marker of Python's octal literal is stripped before casting back to an integer.
    email: only the domain name after '@' is retained; the user name is removed.
'''
data_df.user_id=pd.Series([int(oct(x*5).replace('o','')) for x in data_df.user_id])
if 'email' in data_df.columns:
data_df.email =pd.Series([x[1] for x in data_df.email.str.split('@')])
return data_df
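# Illustrative walk-through of the masking (values chosen here, not from the source data):
#   user_id 123 -> 123 * 5 = 615 -> oct(615) = '0o1147' -> '01147' -> stored as 1147
#   email 'alice@example.com' -> only 'example.com' is kept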
# In[113]:
@db_connector
def ingest_to_temp_tables(connection,event_data_df,user_data_df,event_table_name,user_table_name):
'''
Parameters:
    1.connection - DB connection string, received from the sql_wrapper.py module via @db_connector
2.user_data_df - Dataframe formed from user_data.csv post preprocessing in above methods
3.event_data_df - Dataframe formed from event_data.csv post preprocessing in above methods
4.user_table_name - User_data DB table name
5.event_table_name - Event_data DB table name
Returns: none
Description:
    1.The MySQL file-upload folder path is retrieved using the query - "SHOW VARIABLES LIKE \"secure_file_priv\";".
Replace \\ with / which is the path format accepted by LOAD INFILE .. statement.
2.Above source dataframes - user_data_df,event_data_df are written to corresponding temp csv files.
3.Forming LOAD INFILE sql query that can be executed in mysql DB.
4.Truncating tables before load.
    5.Executing LOAD DATA INFILE .. statements for both user_data and event_data.
'''
#1
default_path_rows=connection.execute("SHOW VARIABLES LIKE \"secure_file_priv\";").fetchall()
load_infile_path=[row[1].replace('\\','/') for row in default_path_rows]
#2
user_data_df.to_csv(load_infile_path[0]+r"user_data_temp.csv",index=False,chunksize=1000)
event_data_df.index+=1
event_data_df.to_csv(load_infile_path[0]+r"event_data_temp.csv",index_label='id',chunksize=1000)
logger.info("Ingesting source data to temp tables in DB ... ")
#3
user_data_load_sql="LOAD DATA INFILE "+"\""+load_infile_path[0]+r"user_data_temp.csv"+"\""+" INTO TABLE "+user_table_name+" FIELDS TERMINATED BY ',' ENCLOSED BY '\"' IGNORE 1 LINES;"
event_data_load_sql="LOAD DATA INFILE "+"\""+load_infile_path[0]+r"event_data_temp.csv"+"\""+" INTO TABLE "+event_table_name+" FIELDS TERMINATED BY ',' ENCLOSED BY '\"' IGNORE 1 LINES;"
#4
connection.execute("Truncate table "+ user_table_name+";")
connection.execute("Truncate table "+ event_table_name+";")
#5
connection.execute(user_data_load_sql)
connection.execute(event_data_load_sql)
logger.info("Source Data ingested to DB successfully!!")
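# Illustrative example of the statement built above (the Uploads path comes from
# secure_file_priv and will differ per installation):
#   LOAD DATA INFILE "C:/ProgramData/MySQL/MySQL Server 8.0/Uploads/user_data_temp.csv"
#   INTO TABLE user_data FIELDS TERMINATED BY ',' ENCLOSED BY '"' IGNORE 1 LINES;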
@db_connector
def transform_and_load(connection):
'''
    Parameters: connection - DB connection string, received from the sql_wrapper.py module via @db_connector
Returns: none
Description:
1.Executes the stored procedure that has sql query for applying transformations as part of Task1.
This procedure also loads the final target table campaign_performance.
2.Executes the stored procedure that has sql query for applying transformations as part of Task2.
This procedure also loads the final target table active_users.
'''
logger.info("Applying Transformations..")
logger.info("Loading Target tables..")
with connection.begin() as conn:
conn.execute("CALL `Load_Active_Users`")
conn.execute("CALL `Load_Mail_Campaign_performance`")
logger.info("Transformations applied and Target tables loaded successfully.")
logger.info('EXIT 0')
# In[112]:
'''
Description:
1. Main module, from here all the above functions are called.
2. Please note that input source files are to be placed inside 'input' folder of the project space.
'''
if __name__ == '__main__':
file_check()
filepath=os.path.dirname(__file__)+"\input"
for file in file_names:
if file=='event_data.csv':
event_data_df=event_data_extract(filepath+r"\{}".format(file))
else:
user_data_df=user_data_extract(filepath+r"\{}".format(file))
ingest_to_temp_tables(event_data_df,user_data_df,file_names[0].split('.')[0],file_names[1].split('.')[0])
transform_and_load()
# In[ ]:
```
|
{
"source": "JeevanjotS/argoverse-api",
"score": 3
}
|
#### File: argoverse/utils/camera_stats.py
```python
import logging
from typing import List, Optional, Tuple
"""
Since we use images of different sizes (ring vs. stereo), we cannot
fix the image size throughout -- must be adaptive.
"""
STEREO_IMG_WIDTH = 2464
STEREO_IMG_HEIGHT = 2056
RING_IMG_WIDTH = 1600
RING_IMG_HEIGHT = 900
RING_CAMERA_LIST = [
"ring_front_center",
"ring_front_left",
"ring_front_right",
# "ring_rear_left",
# "ring_rear_right",
"ring_side_left",
"ring_side_right",
]
STEREO_CAMERA_LIST = [] # ["stereo_front_left", "stereo_front_right"]
CAMERA_LIST = RING_CAMERA_LIST + STEREO_CAMERA_LIST
logger = logging.getLogger(__name__)
def get_image_dims_for_camera(camera_name: str) -> Tuple[Optional[int], Optional[int]]:
""" Get image dimensions for camera.
Args:
camera_name: Camera name.
Returns:
Tuple of [img_width, image_height] in pixels
"""
if camera_name in RING_CAMERA_LIST:
img_width = RING_IMG_WIDTH
img_height = RING_IMG_HEIGHT
elif camera_name in STEREO_CAMERA_LIST:
img_width = STEREO_IMG_WIDTH
img_height = STEREO_IMG_HEIGHT
else:
logger.error(f"{camera_name} not recognized")
return None, None
return img_width, img_height
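# Illustrative usage, not part of the original module; relies only on the
# definitions above.
if __name__ == "__main__":
    print(get_image_dims_for_camera("ring_front_center"))  # -> (1600, 900)
    # Stereo names return (None, None) here because STEREO_CAMERA_LIST is empty,
    # and an error is logged instead.
    print(get_image_dims_for_camera("stereo_front_left"))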
```
|
{
"source": "Jeevan-J/Python_Funcode",
"score": 2
}
|
#### File: Python_Funcode/plugins/github.py
```python
app_emailharvester = None
def search(domain, limit):
all_emails = []
app_emailharvester.show_message("[+] Searching in Github")
yahooUrl = "http://search.yahoo.com/search?p=site%3Agithub.com+%40{word}&n=100&ei=UTF-8&va_vt=any&vo_vt=any&ve_vt=any&vp_vt=any&vd=all&vst=0&vf=all&vm=p&fl=0&fr=yfp-t-152&xargs=0&pstart=1&b={counter}"
app_emailharvester.init_search(yahooUrl, domain, limit, 1, 100, 'Yahoo + Github')
app_emailharvester.process()
all_emails += app_emailharvester.get_emails()
bingUrl = "http://www.bing.com/search?q=site%3Agithub.com+%40{word}&count=50&first={counter}"
app_emailharvester.init_search(bingUrl, domain, limit, 0, 50, 'Bing + Github')
app_emailharvester.process()
all_emails += app_emailharvester.get_emails()
googleUrl = 'https://www.google.com/search?num=100&start={counter}&hl=en&q=site%3Agithub.com+"%40{word}"'
app_emailharvester.init_search(googleUrl, domain, limit, 0, 100, 'Google + Github')
app_emailharvester.process()
all_emails += app_emailharvester.get_emails()
url = 'http://www.baidu.com/search/s?wd=site%3Agithub.com+"%40{word}"&pn={counter}'
app_emailharvester.init_search(url, domain, limit, 0, 10, 'Baidu + Github')
app_emailharvester.process()
all_emails += app_emailharvester.get_emails()
url = "http://www.exalead.com/search/web/results/?q=site%3Agithub.com+%40{word}&elements_per_page=10&start_index={counter}"
app_emailharvester.init_search(url, domain, limit, 0, 50, 'Exalead + Github')
app_emailharvester.process()
all_emails += app_emailharvester.get_emails()
#dogpile seems to not support site:
return all_emails
class Plugin:
def __init__(self, app, conf):#
global app_emailharvester, config
#config = conf
app.register_plugin('github', {'search': search})
app_emailharvester = app
```
|
{
"source": "Jeevan-kumar-Raj/Diet-e-mentance-App",
"score": 3
}
|
#### File: Jeevan-kumar-Raj/Diet-e-mentance-App/Marksheet_Frontend.py
```python
from tkinter import *
import random
import Marksheet_Backend
import tkinter.messagebox
from tkinter import ttk
def marksheet():
root = Tk()
root.title('Marksheet')
root.geometry('1350x750')
root.config(bg = 'Navajo white')
#================================================Variables======================================================
name = StringVar()
roll = StringVar()
fname = StringVar()
mname = StringVar()
DOB = StringVar()
gender = StringVar()
scl = StringVar()
email = StringVar()
m1 = DoubleVar()
m2 = DoubleVar()
m3 = DoubleVar()
m4 = DoubleVar()
m5 = DoubleVar()
gt = DoubleVar()
per = DoubleVar()
cgpa = DoubleVar()
grade = StringVar()
div = StringVar()
result = StringVar()
#==============================================Functions==========================================================
def Add():
if (len(roll.get()) != 0):
Marksheet_Backend.insert(name.get(),roll.get(),fname.get(),mname.get(),DOB.get(),gender.get(), \
scl.get(),email.get(),m1.get(),m2.get(),m3.get(),m4.get(),m5.get(), \
gt.get(),per.get(),cgpa.get(),grade.get(),div.get(),result.get())
def Update():
if (len(roll.get()) != 0):
Marksheet_Backend.update(name.get(),roll.get(),fname.get(),mname.get(),DOB.get(),gender.get(), \
scl.get(),email.get(),m1.get(),m2.get(),m3.get(),m4.get(),m5.get(), \
gt.get(),per.get(),cgpa.get(),grade.get(),div.get(),result.get())
def Exit():
Exit = tkinter.messagebox.askyesno('Marksheet','Confirm if you want to Exit')
if Exit > 0:
root.destroy()
return
def Compute():
x1 = (m1.get()); x2 = (m2.get()); x3 = (m3.get()); x4 = (m4.get()); x5 = (m5.get())
if x1 > 100:
tkinter.messagebox.askokcancel('Attention','Please enter Correct Marks')
return
if x2 > 100:
tkinter.messagebox.askokcancel('Attention','Please enter Correct Marks')
return
if x3 > 100:
tkinter.messagebox.askokcancel('Attention','Please enter Correct Marks')
return
if x4 > 100:
tkinter.messagebox.askokcancel('Attention','Please enter Correct Marks')
return
if x5 > 100:
tkinter.messagebox.askokcancel('Attention','Please enter Correct Marks')
return
tot = x1+x2+x3+x4+x5
gt.set(tot)
Per = ((x1+x2+x3+x4+x5) * 100)/500
per.set(Per)
cg = (((x1+x2+x3+x4+x5) * 100)/500) / 9.5
cgpa.set(round(cg,1))
if cg > 10:
cgpa.set(10)
if (((x1+x2+x3+x4+x5) * 100)/500) <= 40:
grd = 'G'
elif (((x1+x2+x3+x4+x5) * 100)/500) <= 50:
grd = 'F'
elif (((x1+x2+x3+x4+x5) * 100)/500) <= 60:
grd = 'E'
elif (((x1+x2+x3+x4+x5) * 100)/500) <= 70:
grd = 'D'
elif (((x1+x2+x3+x4+x5) * 100)/500) <= 80:
grd = 'C'
elif (((x1+x2+x3+x4+x5) * 100)/500) <= 90:
grd = 'B'
else:
grd = 'A'
grade.set(grd)
count = 0
if x1 < 33:
count = count + 1
if x2 < 33:
count = count + 1
if x3 < 33:
count = count + 1
if x4 < 33:
count = count + 1
if x5 < 33:
count = count + 1
if (count == 0):
result.set('PASS')
elif (count == 1 or count == 2 ):
result.set('SUPPLY')
else:
result.set('FAIL')
        # result is a tkinter StringVar, so compare its current value via .get()
        if Per <= 45 and result.get() != "FAIL":
            div.set('THIRD')
        elif Per <= 60 and result.get() != "FAIL":
            div.set('SECOND')
        elif Per <= 100:
            div.set('FIRST')
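    # Illustrative worked example (values chosen here, not from the app): marks of
    # 80, 70, 60, 50 and 90 give a grand total of 350, percentage 70.0,
    # CGPA round(70/9.5, 1) = 7.4, grade 'D', result 'PASS' and division 'FIRST'.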
def Reset():
name.set(' ')
roll.set(' ')
fname.set(' ')
mname.set(' ')
DOB.set(' ')
gender.set(' ')
scl.set(' ')
email.set(' ')
m1.set(' ')
m2.set(' ')
m3.set(' ')
m4.set(' ')
m5.set(' ')
gt.set(' ')
per.set(' ')
cgpa.set(' ')
grade.set(' ')
div.set(' ')
result.set(' ')
#========================================================Frame_1===============================================================
Frame_1 = LabelFrame(root, width = 1200, height = 400, font = ('arial',20,'bold'), bg = 'Navajo white', bd = 10, \
text = 'Student Details', relief = 'ridge')
Frame_1.grid(row = 1, column = 0, pady = 20, padx = 20)
#=================================================Labels and Entries for Frame_1===============================================
Label_Name = Label(Frame_1, text = 'Name', font = ('arial',15,'bold'), bg = 'Navajo white')
Label_Name.grid(row = 0, column = 0, padx = 80)
Entry_Name = Entry(Frame_1, font = ('arial',15), width = 25, textvariable = name)
Entry_Name.grid(row = 0, column = 1, padx = 5, pady = 5)
Label_Roll_no = Label(Frame_1, text = 'Roll Number', font = ('arial',15,'bold'), bg = 'Navajo white')
Label_Roll_no.grid(row = 0, column = 3, padx = 80)
Entry_Roll_no = Entry(Frame_1, font = ('arial',15), width = 25, textvariable = roll)
Entry_Roll_no.grid(row = 0, column = 4, padx = 40)
Label_Father_Name = Label(Frame_1, text = 'Father Name', font = ('arial',15,'bold'), bg = 'Navajo white')
Label_Father_Name.grid(row = 1, column = 0, padx = 80)
Entry_Father_Name = Entry(Frame_1, font = ('arial',15), width = 25, textvariable = fname)
Entry_Father_Name.grid(row = 1, column = 1, padx = 5, pady = 10)
Label_Mother_Name = Label(Frame_1, text = 'Mother Name', font = ('arial',15,'bold'), bg = 'Navajo white')
Label_Mother_Name.grid(row = 1, column = 3, padx = 80)
Entry_Mother_Name = Entry(Frame_1, font = ('arial',15), width = 25, textvariable = mname)
Entry_Mother_Name.grid(row = 1, column = 4, padx = 5)
Label_DOB = Label(Frame_1, text = 'Date of Birth', font = ('arial',15,'bold'), bg = 'Navajo white')
Label_DOB.grid(row = 2, column = 0, padx = 80)
Entry_DOB = Entry(Frame_1, font = ('arial',15), width = 25, textvariable = DOB)
Entry_DOB.grid(row = 2, column = 1, padx = 5, pady = 5)
Label_Gender = Label(Frame_1, text = 'Gender', font = ('arial',15,'bold'), bg = 'Navajo white')
Label_Gender.grid(row = 2, column = 3, padx = 80)
Entry_Gender = ttk.Combobox(Frame_1, values = (' ','Male','Female','Others'), font = ('arial',15), width = 23, textvariable = gender)
Entry_Gender.grid(row = 2, column = 4, padx = 5, pady = 5)
Label_School = Label(Frame_1, text = 'School Name', font = ('arial',15,'bold'), bg = 'Navajo white')
Label_School.grid(row = 3, column = 0, padx = 80)
Entry_School = Entry(Frame_1, font = ('arial',15), width = 25, textvariable = scl)
Entry_School.grid(row = 3, column = 1, padx = 5, pady = 5)
Label_Email = Label(Frame_1, text = 'Email ID', font = ('arial',15,'bold'), bg = 'Navajo white')
Label_Email.grid(row = 3, column = 3, padx = 80)
Entry_Email = Entry(Frame_1, font = ('arial',15), width = 25, textvariable = email)
Entry_Email.grid(row = 3, column = 4, padx = 5, pady = 5)
#========================================================Frame_2==================================================================
Frame_2 = LabelFrame(root, width = 1200, height = 400, font = ('arial',20,'bold'), bg = 'Navajo white', bd = 10 \
, text = 'Grades Point Obtained', relief = 'ridge')
Frame_2.grid(row = 3, column = 0)
#======================================================Labels of Frame_2===========================================================
Label_Subject = Label(Frame_2, text = 'SUBJECT', font = ('arial',16,'bold'), bg = 'Navajo white')
Label_Subject.grid(row = 3, column = 0, padx = 50, pady = 10)
Label_obt_Marks = Label(Frame_2, text = 'MARKS OBTAINED', font = ('arial',16,'bold'), bg = 'Navajo white')
Label_obt_Marks.grid(row = 3, column = 1, padx = 20)
Label_Subject = Label(Frame_2, text = 'PASSING MARKS', font = ('arial',16,'bold'), bg = 'Navajo white')
Label_Subject.grid(row = 3, column = 2, padx = 20)
Label_obt_Marks = Label(Frame_2, text = 'TOTAL MARKS', font = ('arial',16,'bold'), bg = 'Navajo white')
Label_obt_Marks.grid(row = 3, column = 3, padx = 20)
Label_1 = Label(Frame_2, text = 'MATHEMATICS', font = ('arial',14), bg = 'Navajo white')
Label_1.grid(row = 4, column = 0)
Label_2 = Label(Frame_2, text = 'PHYSICS', font = ('arial',14), bg = 'Navajo white')
Label_2.grid(row = 5, column = 0)
Label_3 = Label(Frame_2, text = 'CHEMISTRY', font = ('arial',14), bg = 'Navajo white')
Label_3.grid(row = 6, column = 0)
Label_4 = Label(Frame_2, text = 'HINDI', font = ('arial',14), bg = 'Navajo white')
Label_4.grid(row = 7, column = 0)
Label_5 = Label(Frame_2, text = 'ENGLISH', font = ('arial',14), bg = 'Navajo white')
Label_5.grid(row = 8, column = 0)
Label_6 = Label(Frame_2, text = 'GRAND TOTAL', font = ('arial',16), bg = 'Navajo white')
Label_6.grid(row = 9, column = 0)
Label_7 = Label(Frame_2, text = 'PERCENTAGE', font = ('arial',16,'bold'), bg = 'Navajo white')
Label_7.grid(row = 10, column = 0)
Label_8 = Label(Frame_2, text = 'CGPA', font = ('arial',16,'bold'), bg = 'Navajo white')
Label_8.grid(row = 10, column = 2)
Label_9 = Label(Frame_2, text = 'GRADE', font = ('arial',16,'bold'), bg = 'Navajo white')
Label_9.grid(row = 10, column = 4)
Label_10 = Label(Frame_2, text = 'DIVISION', font = ('arial',16,'bold'), bg = 'Navajo white')
Label_10.grid(row = 11, column = 0)
Label_10 = Label(Frame_2, text = 'RESULT', font = ('arial',16,'bold'), bg = 'Navajo white')
Label_10.grid(row = 11, column = 2)
#======================================================Entries of Frame_2===========================================================
var_1 = StringVar(Frame_2, value = '33')
var_2 = StringVar(Frame_2, value = '100')
var_3 = StringVar(Frame_2, value = '500')
Entry__1 = Entry(Frame_2, font = ('arial',16), width = 5, textvariable = m1)
Entry__1.grid(row = 4, column = 1)
Entry__2 = Entry(Frame_2, font = ('arial',16), width = 5, textvariable = m2)
Entry__2.grid(row = 5, column = 1)
Entry__3 = Entry(Frame_2, font = ('arial',16), width = 5, textvariable = m3)
Entry__3.grid(row = 6, column = 1)
Entry__4 = Entry(Frame_2, font = ('arial',16), width = 5, textvariable = m4)
Entry__4.grid(row = 7, column = 1)
Entry__5 = Entry(Frame_2, font = ('arial',16), width = 5, textvariable = m5)
Entry__5.grid(row = 8, column = 1)
Entry__6 = Entry(Frame_2, font = ('arial',14), width = 5, textvariable = gt, state = 'readonly')
Entry__6.grid(row = 9, column = 1, pady = 8)
Entry__7 = Entry(Frame_2, font = ('arial',14,'bold'), width = 5, textvariable = per, state = 'readonly')
Entry__7.grid(row = 10, column = 1, pady = 8)
Entry__8 = Entry(Frame_2, font = ('arial',14,'bold'), width = 5, textvariable = cgpa, state = 'readonly')
Entry__8.grid(row = 10, column = 3, pady = 8)
Entry__9 = Entry(Frame_2, font = ('arial',14,'bold'), width = 5, textvariable = grade, state = 'readonly')
Entry__9.grid(row = 10, column = 5, padx = 20, pady = 8)
Entry__10 = Entry(Frame_2, font = ('arial',14,'bold'), width = 8, textvariable = div, state = 'readonly')
Entry__10.grid(row = 11, column = 1, padx = 20, pady = 8)
Entry__11 = Entry(Frame_2, font = ('arial',14,'bold'), width = 7, textvariable = result, state = 'readonly')
Entry__11.grid(row = 11, column = 3, padx = 20, pady = 8)
Entry_1_2 = Entry(Frame_2, textvariable = var_1, font = ('arial',16), width = 5, state = 'readonly')
Entry_1_2.grid(row = 4, column = 2, pady = 5)
Entry_1_3 = Entry(Frame_2, textvariable = var_2, font = ('arial',16), width = 5, state = 'readonly')
Entry_1_3.grid(row = 4, column = 3)
Entry_2_2 = Entry(Frame_2, textvariable = var_1, font = ('arial',16), width = 5, state = 'readonly')
Entry_2_2.grid(row = 5, column = 2, pady = 5)
Entry_2_3 = Entry(Frame_2, textvariable = var_2, font = ('arial',16), width = 5, state = 'readonly')
Entry_2_3.grid(row = 5, column = 3)
Entry_3_2 = Entry(Frame_2, textvariable = var_1, font = ('arial',16), width = 5, state = 'readonly')
Entry_3_2.grid(row = 6, column = 2, pady = 5)
Entry_3_3 = Entry(Frame_2, textvariable = var_2, font = ('arial',16), width = 5, state = 'readonly')
Entry_3_3.grid(row = 6, column = 3)
Entry_4_2 = Entry(Frame_2, textvariable = var_1, font = ('arial',16), width = 5, state = 'readonly')
Entry_4_2.grid(row = 7, column = 2, pady = 5)
Entry_4_3 = Entry(Frame_2, textvariable = var_2, font = ('arial',16), width = 5, state = 'readonly')
Entry_4_3.grid(row = 7, column = 3)
Entry_5_2 = Entry(Frame_2, textvariable = var_1, font = ('arial',16), width = 5, state = 'readonly')
Entry_5_2.grid(row = 8, column = 2, pady = 5)
Entry_5_3 = Entry(Frame_2, textvariable = var_2, font = ('arial',16), width = 5, state = 'readonly')
Entry_5_3.grid(row = 8, column = 3)
Entry_6_3 = Entry(Frame_2, textvariable = var_3, font = ('arial',16), width = 5, state = 'readonly')
Entry_6_3.grid(row = 9, column = 3)
#======================================================Buttons===========================================================
Btn_Compute = Button(Frame_2, text = 'COMPUTE', font = ('arial',12,'bold'), width = 10, command = Compute)
Btn_Compute.grid(row = 4, column = 4, padx = 50, pady = 6)
Btn_Save = Button(Frame_2, text = 'SAVE', font = ('arial',12,'bold'), width = 10, command = Add)
Btn_Save.grid(row = 5, column = 4, padx = 50, pady = 6)
Btn_Update = Button(Frame_2, text = 'UPDATE', font = ('arial',12,'bold'), width = 10, command = Update)
Btn_Update.grid(row = 6, column = 4, padx = 50, pady = 6)
Btn_Cancel = Button(Frame_2, text = 'RESET', font = ('arial',12,'bold'), width = 10, command = Reset)
Btn_Cancel.grid(row = 7, column = 4, padx = 50, pady = 6)
Btn_Exit = Button(Frame_2, text = 'EXIT', font = ('arial',12,'bold'), width = 10, command = Exit)
Btn_Exit.grid(row = 8, column = 4, padx = 50, pady = 6)
root.mainloop()
def search_result_marksheet(row):
root = Tk()
root.title('Marksheet')
root.geometry('1350x750')
root.config(bg = 'Navajo white')
#==============================================Functions==========================================================
def Compute():
x1 = (m1.get()); x2 = (m2.get()); x3 = (m3.get()); x4 = (m4.get()); x5 = (m5.get())
tot = x1+x2+x3+x4+x5
gt.set(tot)
Per = ((x1+x2+x3+x4+x5) * 100)/500
per.set(Per)
cg = (((x1+x2+x3+x4+x5) * 100)/500) / 9.5
cgpa.set(round(cg,1))
if (((x1+x2+x3+x4+x5) * 100)/500) <= 40:
grd = 'G'
elif (((x1+x2+x3+x4+x5) * 100)/500) <= 50:
grd = 'F'
elif (((x1+x2+x3+x4+x5) * 100)/500) <= 60:
grd = 'E'
elif (((x1+x2+x3+x4+x5) * 100)/500) <= 70:
grd = 'D'
elif (((x1+x2+x3+x4+x5) * 100)/500) <= 80:
grd = 'C'
elif (((x1+x2+x3+x4+x5) * 100)/500) <= 90:
grd = 'B'
else:
grd = 'A'
grade.set(grd)
count = 0
if x1 < 33:
count = count + 1
if x2 < 33:
count = count + 1
if x3 < 33:
count = count + 1
if x4 < 33:
count = count + 1
if x5 < 33:
count = count + 1
if (count == 0):
result.set('PASS')
elif (count == 1 or count == 2 ):
result.set('SUPPLY')
else:
result.set('FAIL')
        # result is a tkinter StringVar, so compare its current value via .get()
        if Per <= 45 and result.get() != "FAIL":
            div.set('THIRD')
        elif Per <= 60 and result.get() != "FAIL":
            div.set('SECOND')
        elif Per <= 100:
            div.set('FIRST')
#========================================================Frame_1===============================================================
Frame_1 = LabelFrame(root, width = 1200, height = 400, font = ('arial',20,'bold'), bg = 'Navajo white', bd = 10, \
text = 'Student Details', relief = 'ridge')
Frame_1.grid(row = 1, column = 0, pady = 20, padx = 20)
name = StringVar(Frame_1,value=row[0][1])
roll = StringVar(Frame_1,value=row[0][2])
fname = StringVar(Frame_1,value=row[0][3])
mname = StringVar(Frame_1,value=row[0][4])
DOB = StringVar(Frame_1,value=row[0][5])
gender = StringVar(Frame_1,value=row[0][6])
scl = StringVar(Frame_1,value=row[0][7])
email = StringVar(Frame_1,value=row[0][8])
#=================================================Labels and Entries for Frame_1===============================================
Label_Name = Label(Frame_1, text = 'Name', font = ('arial',15,'bold'), bg = 'Navajo white')
Label_Name.grid(row = 0, column = 0, padx = 80)
Entry_Name = Entry(Frame_1, font = ('arial',15), width = 25, textvariable = name)
Entry_Name.grid(row = 0, column = 1, padx = 5, pady = 5)
Label_Roll_no = Label(Frame_1, text = 'Roll Number', font = ('arial',15,'bold'), bg = 'Navajo white')
Label_Roll_no.grid(row = 0, column = 3, padx = 80)
Entry_Roll_no = Entry(Frame_1, font = ('arial',15), width = 25, textvariable = roll)
Entry_Roll_no.grid(row = 0, column = 4, padx = 40)
Label_Father_Name = Label(Frame_1, text = 'Father Name', font = ('arial',15,'bold'), bg = 'Navajo white')
Label_Father_Name.grid(row = 1, column = 0, padx = 80)
Entry_Father_Name = Entry(Frame_1, font = ('arial',15), width = 25, textvariable = fname)
Entry_Father_Name.grid(row = 1, column = 1, padx = 5, pady = 10)
Label_Mother_Name = Label(Frame_1, text = 'Mother Name', font = ('arial',15,'bold'), bg = 'Navajo white')
Label_Mother_Name.grid(row = 1, column = 3, padx = 80)
Entry_Mother_Name = Entry(Frame_1, font = ('arial',15), width = 25, textvariable = mname)
Entry_Mother_Name.grid(row = 1, column = 4, padx = 5)
Label_DOB = Label(Frame_1, text = 'Date of Birth', font = ('arial',15,'bold'), bg = 'Navajo white')
Label_DOB.grid(row = 2, column = 0, padx = 80)
Entry_DOB = Entry(Frame_1, font = ('arial',15), width = 25, textvariable = DOB)
Entry_DOB.grid(row = 2, column = 1, padx = 5, pady = 5)
Label_Gender = Label(Frame_1, text = 'Gender', font = ('arial',15,'bold'), bg = 'Navajo white')
Label_Gender.grid(row = 2, column = 3, padx = 80)
Entry_Gender = Entry(Frame_1, font = ('arial',15), width = 25, textvariable = gender)
Entry_Gender.grid(row = 2, column = 4, padx = 5, pady = 5)
Label_School = Label(Frame_1, text = 'School Name', font = ('arial',15,'bold'), bg = 'Navajo white')
Label_School.grid(row = 3, column = 0, padx = 80)
Entry_School = Entry(Frame_1, font = ('arial',15), width = 25, textvariable = scl)
Entry_School.grid(row = 3, column = 1, padx = 5, pady = 5)
Label_Email = Label(Frame_1, text = 'Email ID', font = ('arial',15,'bold'), bg = 'Navajo white')
Label_Email.grid(row = 3, column = 3, padx = 80)
Entry_Email = Entry(Frame_1, font = ('arial',15), width = 25, textvariable = email)
Entry_Email.grid(row = 3, column = 4, padx = 5, pady = 5)
#========================================================Frame_2==================================================================
Frame_2 = LabelFrame(root, width = 1200, height = 400, font = ('arial',20,'bold'), bg = 'Navajo white', bd = 10 \
, text = 'Grades Point Obtained', relief = 'ridge')
Frame_2.grid(row = 3, column = 0)
m1 = DoubleVar(Frame_2,row[0][9])
m2 = DoubleVar(Frame_2,row[0][10])
m3 = DoubleVar(Frame_2,row[0][11])
m4 = DoubleVar(Frame_2,row[0][12])
m5 = DoubleVar(Frame_2,row[0][13])
gt = DoubleVar(Frame_2,row[0][14])
per = DoubleVar(Frame_2,row[0][15])
cgpa = DoubleVar(Frame_2,row[0][16])
grade = StringVar(Frame_2,row[0][17])
div = StringVar(Frame_2,row[0][18])
result = StringVar(Frame_2,row[0][19])
#======================================================Labels of Frame_2===========================================================
Label_Subject = Label(Frame_2, text = 'SUBJECT', font = ('arial',16,'bold'), bg = 'Navajo white')
Label_Subject.grid(row = 3, column = 0, padx = 50, pady = 10)
Label_obt_Marks = Label(Frame_2, text = 'MARKS OBTAINED', font = ('arial',16,'bold'), bg = 'Navajo white')
Label_obt_Marks.grid(row = 3, column = 1, padx = 20)
Label_Subject = Label(Frame_2, text = 'PASSING MARKS', font = ('arial',16,'bold'), bg = 'Navajo white')
Label_Subject.grid(row = 3, column = 2, padx = 20)
Label_obt_Marks = Label(Frame_2, text = 'TOTAL MARKS', font = ('arial',16,'bold'), bg = 'Navajo white')
Label_obt_Marks.grid(row = 3, column = 3, padx = 20)
Label_1 = Label(Frame_2, text = 'MATHEMATICS', font = ('arial',14), bg = 'Navajo white')
Label_1.grid(row = 4, column = 0)
Label_2 = Label(Frame_2, text = 'PHYSICS', font = ('arial',14), bg = 'Navajo white')
Label_2.grid(row = 5, column = 0)
Label_3 = Label(Frame_2, text = 'CHEMISTRY', font = ('arial',14), bg = 'Navajo white')
Label_3.grid(row = 6, column = 0)
Label_4 = Label(Frame_2, text = 'HINDI', font = ('arial',14), bg = 'Navajo white')
Label_4.grid(row = 7, column = 0)
Label_5 = Label(Frame_2, text = 'ENGLISH', font = ('arial',14), bg = 'Navajo white')
Label_5.grid(row = 8, column = 0)
Label_6 = Label(Frame_2, text = 'GRAND TOTAL', font = ('arial',16), bg = 'Navajo white')
Label_6.grid(row = 9, column = 0)
Label_7 = Label(Frame_2, text = 'PERCENTAGE', font = ('arial',16,'bold'), bg = 'Navajo white')
Label_7.grid(row = 10, column = 0)
Label_8 = Label(Frame_2, text = 'CGPA', font = ('arial',16,'bold'), bg = 'Navajo white')
Label_8.grid(row = 10, column = 2)
Label_9 = Label(Frame_2, text = 'GRADE', font = ('arial',16,'bold'), bg = 'Navajo white')
Label_9.grid(row = 10, column = 4)
Label_10 = Label(Frame_2, text = 'DIVISION', font = ('arial',16,'bold'), bg = 'Navajo white')
Label_10.grid(row = 11, column = 0)
Label_10 = Label(Frame_2, text = 'RESULT', font = ('arial',16,'bold'), bg = 'Navajo white')
Label_10.grid(row = 11, column = 2)
#======================================================Entries of Frame_2===========================================================
var_1 = StringVar(Frame_2, value = '33')
var_2 = StringVar(Frame_2, value = '100')
var_3 = StringVar(Frame_2, value = '500')
Entry__1 = Entry(Frame_2, font = ('arial',16), width = 5, textvariable = m1)
Entry__1.grid(row = 4, column = 1)
Entry__2 = Entry(Frame_2, font = ('arial',16), width = 5, textvariable = m2)
Entry__2.grid(row = 5, column = 1)
Entry__3 = Entry(Frame_2, font = ('arial',16), width = 5, textvariable = m3)
Entry__3.grid(row = 6, column = 1)
Entry__4 = Entry(Frame_2, font = ('arial',16), width = 5, textvariable = m4)
Entry__4.grid(row = 7, column = 1)
Entry__5 = Entry(Frame_2, font = ('arial',16), width = 5, textvariable = m5)
Entry__5.grid(row = 8, column = 1)
Entry__6 = Entry(Frame_2, font = ('arial',14), width = 5, textvariable = gt)
Entry__6.grid(row = 9, column = 1, pady = 8)
Entry__7 = Entry(Frame_2, font = ('arial',14,'bold'), width = 5, textvariable = per)
Entry__7.grid(row = 10, column = 1, pady = 8)
Entry__8 = Entry(Frame_2, font = ('arial',14,'bold'), width = 5, textvariable = cgpa)
Entry__8.grid(row = 10, column = 3, pady = 8)
Entry__9 = Entry(Frame_2, font = ('arial',14,'bold'), width = 5, textvariable = grade)
Entry__9.grid(row = 10, column = 5, padx = 20, pady = 8)
Entry__10 = Entry(Frame_2, font = ('arial',14,'bold'), width = 8, textvariable = div)
Entry__10.grid(row = 11, column = 1, padx = 20, pady = 8)
Entry__11 = Entry(Frame_2, font = ('arial',14,'bold'), width = 7, textvariable = result)
Entry__11.grid(row = 11, column = 3, padx = 20, pady = 8)
Entry_1_2 = Entry(Frame_2, textvariable = var_1, font = ('arial',16), width = 5)
Entry_1_2.grid(row = 4, column = 2, pady = 5)
Entry_1_3 = Entry(Frame_2, textvariable = var_2, font = ('arial',16), width = 5)
Entry_1_3.grid(row = 4, column = 3)
Entry_2_2 = Entry(Frame_2, textvariable = var_1, font = ('arial',16), width = 5)
Entry_2_2.grid(row = 5, column = 2, pady = 5)
Entry_2_3 = Entry(Frame_2, textvariable = var_2, font = ('arial',16), width = 5)
Entry_2_3.grid(row = 5, column = 3)
Entry_3_2 = Entry(Frame_2, textvariable = var_1, font = ('arial',16), width = 5)
Entry_3_2.grid(row = 6, column = 2, pady = 5)
Entry_3_3 = Entry(Frame_2, textvariable = var_2, font = ('arial',16), width = 5)
Entry_3_3.grid(row = 6, column = 3)
Entry_4_2 = Entry(Frame_2, textvariable = var_1, font = ('arial',16), width = 5)
Entry_4_2.grid(row = 7, column = 2, pady = 5)
Entry_4_3 = Entry(Frame_2, textvariable = var_2, font = ('arial',16), width = 5)
Entry_4_3.grid(row = 7, column = 3)
Entry_5_2 = Entry(Frame_2, textvariable = var_1, font = ('arial',16), width = 5)
Entry_5_2.grid(row = 8, column = 2, pady = 5)
Entry_5_3 = Entry(Frame_2, textvariable = var_2, font = ('arial',16), width = 5)
Entry_5_3.grid(row = 8, column = 3)
Entry_6_3 = Entry(Frame_2, textvariable = var_3, font = ('arial',16), width = 5)
Entry_6_3.grid(row = 9, column = 3)
#======================================================Buttons===========================================================
Btn_Exit = Button(Frame_2, text = 'EXIT', font = ('arial',12,'bold'), width = 10, command = root.destroy)
Btn_Exit.grid(row = 8, column = 4, padx = 50, pady = 6)
root.mainloop()
if __name__ == '__main__':
marksheet()
```
#### File: Jeevan-kumar-Raj/Diet-e-mentance-App/Std_info_FrontEnd.py
```python
from tkinter import *
import tkinter.messagebox
import random
import Std_info_BackEnd
from tkinter import ttk
class Std_info():
def __init__(self, master):
self.master = master
self.master.title('Student Information')
self.master.geometry('1350x750')
self.master.config(bg = 'navajowhite')
def information():
#========================================================Variables=====================================================================
self.name = StringVar()
self.fname = StringVar()
self.mname = StringVar()
self.address = StringVar()
self.mobno = StringVar()
self.email = StringVar()
self.dob = StringVar()
self.gender = StringVar()
#==========================================================Functions====================================================================
def StudentRec(event):
try:
global selected_tuple
index = self.listbox.curselection()[0]
selected_tuple = self.listbox.get(index)
self.Entry_name.delete(0, END)
self.Entry_name.insert(END, selected_tuple[1])
self.Entry_fname.delete(0, END)
self.Entry_fname.insert(END, selected_tuple[2])
self.Entry_mname.delete(0, END)
self.Entry_mname.insert(END, selected_tuple[3])
self.Entry_address.delete(0, END)
self.Entry_address.insert(END, selected_tuple[4])
self.Entry_mobno.delete(0, END)
self.Entry_mobno.insert(END, selected_tuple[5])
self.Entry_emailID.delete(0, END)
self.Entry_emailID.insert(END, selected_tuple[6])
self.Entry_dob.delete(0, END)
self.Entry_dob.insert(END, selected_tuple[7])
self.Entry_gender.delete(0, END)
self.Entry_gender.insert(END, selected_tuple[8])
except IndexError:
pass
def Add():
if(len(self.name.get()) != 0):
Std_info_BackEnd.insert(self.name.get(), self.fname.get(), self.mname.get(), self.address.get(), self.mobno.get(), self.email.get(), self.dob.get(), \
self.gender.get())
self.listbox.delete(0, END)
self.listbox.insert(END, (self.name.get(), self.fname.get(), self.mname.get(), self.address.get(), self.mobno.get(), self.email.get(), self.dob.get(), \
self.gender.get()))
def Display():
self.listbox.delete(0, END)
for row in Std_info_BackEnd.view():
self.listbox.insert(END, row, str(' '))
def Exit():
Exit = tkinter.messagebox.askyesno("Login System", "Confirm if you want to Exit")
if Exit > 0:
self.master.destroy()
return
def Reset():
self.name.set('')
self.fname.set('')
self.mname.set('')
self.address.set('')
self.mobno.set('')
self.email.set('')
self.dob.set('')
self.gender.set('')
self.listbox.delete(0, END)
def Delete():
if(len(self.name.get()) != 0):
Std_info_BackEnd.delete(selected_tuple[0])
Reset()
Display()
def Search():
self.listbox.delete(0, END)
for row in Std_info_BackEnd.search(self.name.get(), self.fname.get(), self.mname.get(), self.address.get(), self.mobno.get(), self.email.get(), self.dob.get(),self.gender.get()):
self.listbox.insert(END, row, str(' '))
def Update():
if(len(self.name.get()) != 0):
Std_info_BackEnd.delete(selected_tuple[0])
if(len(self.name.get()) != 0):
Std_info_BackEnd.insert(self.name.get(), self.fname.get(), self.mname.get(), self.address.get(), self.mobno.get(), self.email.get(), self.dob.get(), \
self.gender.get())
self.listbox.delete(0, END)
self.listbox.insert(END, (self.name.get(), self.fname.get(), self.mname.get(), self.address.get(), self.mobno.get(), self.email.get(), self.dob.get(), \
self.gender.get()))
#============================================================Frames=====================================================================
self.Main_Frame = LabelFrame(self.master, width = 1300, height = 500, font = ('arial',20,'bold'), \
bg = 'navajowhite',bd = 15, relief = 'ridge')
self.Main_Frame.grid(row = 0, column = 0, padx = 10, pady = 20)
self.Frame_1 = LabelFrame(self.Main_Frame, width = 600, height = 400, font = ('arial',15,'bold'), \
relief = 'ridge', bd = 10, bg = 'navajowhite', text = 'STUDENT INFORMATION ')
self.Frame_1.grid(row = 1, column = 0, padx = 10)
self.Frame_2 = LabelFrame(self.Main_Frame, width = 750, height = 400, font = ('arial',15,'bold'), \
relief = 'ridge', bd = 10, bg = 'navajowhite', text = 'STUDENT DATABASE')
self.Frame_2.grid(row = 1, column = 1, padx = 5)
self.Frame_3 = LabelFrame(self.master, width = 1200, height = 100, font = ('arial',10,'bold'), \
bg = 'navajowhite', relief = 'ridge', bd = 13)
self.Frame_3.grid(row = 2, column = 0, pady = 10)
#========================================================Labels of Frame_1========================================================
self.Label_name = Label(self.Frame_1, text = 'Name', font = ('arial',20,'bold'), bg = 'navajowhite')
self.Label_name.grid(row = 0, column = 0, sticky = W, padx = 20, pady = 10)
self.Label_fname = Label(self.Frame_1, text = 'Father Name', font = ('arial',20,'bold'), bg = 'navajowhite')
self.Label_fname.grid(row = 1, column = 0, sticky = W, padx = 20)
self.Label_mname = Label(self.Frame_1, text = 'Mother Name', font = ('arial',20,'bold'), bg = 'navajowhite')
self.Label_mname.grid(row = 2, column = 0, sticky = W, padx = 20)
self.Label_address = Label(self.Frame_1, text = 'Address', font = ('arial',20,'bold'), bg = 'navajowhite')
self.Label_address.grid(row = 3, column = 0, sticky = W, padx = 20)
self.Label_mobno = Label(self.Frame_1, text = 'Mobile Number', font = ('arial',20,'bold'), bg = 'navajowhite')
self.Label_mobno.grid(row = 4, column = 0, sticky = W, padx = 20)
self.Label_emailID = Label(self.Frame_1, text = 'Email ID', font = ('arial',20,'bold'), bg = 'navajowhite')
self.Label_emailID.grid(row = 5, column = 0, sticky = W, padx = 20)
self.Label_dob = Label(self.Frame_1, text = 'Date of Birth', font = ('arial',20,'bold'), bg = 'navajowhite')
self.Label_dob.grid(row = 6, column = 0, sticky = W, padx = 20)
self.Label_gender = Label(self.Frame_1, text = 'Gender', font = ('arial',20,'bold'), bg = 'navajowhite')
self.Label_gender.grid(row = 7, column = 0, sticky = W, padx = 20, pady = 10)
#========================================================Entries of Frame_1========================================================
self.Entry_name = Entry(self.Frame_1, font = ('arial',17,'bold'), textvariable = self.name)
self.Entry_name.grid(row = 0, column = 1, padx = 10, pady = 5)
self.Entry_fname = Entry(self.Frame_1, font = ('arial',17,'bold'), textvariable = self.fname)
self.Entry_fname.grid(row = 1, column = 1, padx = 10, pady = 5)
self.Entry_mname = Entry(self.Frame_1, font = ('arial',17,'bold'), textvariable = self.mname)
self.Entry_mname.grid(row = 2, column = 1, padx = 10, pady = 5)
self.Entry_address = Entry(self.Frame_1, font = ('arial',17,'bold'), textvariable = self.address)
self.Entry_address.grid(row = 3, column = 1, padx = 10, pady = 5)
self.Entry_mobno = Entry(self.Frame_1, font = ('arial',17,'bold'), textvariable = self.mobno)
self.Entry_mobno.grid(row = 4, column = 1, padx = 10, pady = 5)
self.Entry_emailID = Entry(self.Frame_1, font = ('arial',17,'bold'), textvariable = self.email)
self.Entry_emailID.grid(row = 5, column = 1, padx = 10, pady = 5)
self.Entry_dob = Entry(self.Frame_1, font = ('arial',17,'bold'), textvariable = self.dob)
self.Entry_dob.grid(row = 6, column = 1, padx = 10, pady = 5)
self.Entry_gender = ttk.Combobox(self.Frame_1, values = (' ','Male','Female','Others'),\
font = ('arial',17,'bold'), textvariable = self.gender, width = 19)
self.Entry_gender.grid(row = 7, column = 1, padx = 10, pady = 5)
#========================================================Buttons of self.Frame_3=========================================================
self.btnSave = Button(self.Frame_3, text = 'SAVE', font = ('arial',17,'bold'), width = 8, command = Add)
self.btnSave.grid(row = 0, column = 0, padx = 10, pady = 10)
self.btnDisplay = Button(self.Frame_3, text = 'DISPLAY', font = ('arial',17,'bold'), width = 8, command = Display)
self.btnDisplay.grid(row = 0, column = 1, padx = 10, pady = 10)
self.btnReset = Button(self.Frame_3, text = 'RESET', font = ('arial',17,'bold'), width = 8, command = Reset)
self.btnReset.grid(row = 0, column = 2, padx = 10, pady = 10)
self.btnUpdate = Button(self.Frame_3, text = 'UPDATE', font = ('arial',17,'bold'), width = 8, command = Update)
self.btnUpdate.grid(row = 0, column = 3, padx = 10, pady = 10)
self.btnDelete = Button(self.Frame_3, text = 'DELETE', font = ('arial',17,'bold'), width = 8, command = Delete)
self.btnDelete.grid(row = 0, column = 4, padx = 10, pady = 10)
self.btnSearch = Button(self.Frame_3, text = 'SEARCH', font = ('arial',17,'bold'), width = 8, command = Search )
self.btnSearch.grid(row = 0, column = 5, padx = 10, pady = 10)
self.btnExit = Button(self.Frame_3, text = 'EXIT', font = ('arial',17,'bold'), width = 8, command = Exit)
self.btnExit.grid(row = 0, column = 6, padx = 10, pady = 10)
#===============================================List Box and self.scrollbar========================================================
self.scrollbar = Scrollbar(self.Frame_2)
self.scrollbar.grid(row = 0, column = 1, sticky = 'ns')
self.listbox = Listbox(self.Frame_2, width = 75, height = 20 , font = ('arial',12,'bold'))
self.listbox.bind('<<ListboxSelect>>', StudentRec)
self.listbox.grid(row = 0, column = 0)
self.scrollbar.config(command = self.listbox.yview)
information()
root = Tk()
obj = Std_info(root)
root.mainloop()
```
|
{
"source": "Jeevan-kumar-Raj/September-LeetCoding-Challenge",
"score": 4
}
|
#### File: September-LeetCoding-Challenge/Week -2/Day-4 Maximum Product Subarray.py
```python
# Day-4 Maximum Product Subarray.py
"""
Given an integer array nums, find the contiguous subarray within an array (containing at least one number) which has the largest product.
Example 1:
Input: [2,3,-2,4]
Output: 6
Explanation: [2,3] has the largest product 6.
Example 2:
Input: [-2,0,-1]
Output: 0
Explanation: The result cannot be 2, because [-2,-1] is not a subarray.
"""
class Solution(object):
# @param A, a list of integers
# @return an integer
def maxProduct(self, A):
global_max, local_max, local_min = float("-inf"), 1, 1
for x in A:
local_max = max(1, local_max)
if x > 0:
local_max, local_min = local_max * x, local_min * x
else:
local_max, local_min = local_min * x, local_max * x
global_max = max(global_max, local_max)
return global_max
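# Illustrative check (not part of the original solution file): runs the two
# examples from the problem statement above through maxProduct.
if __name__ == "__main__":
    solver = Solution()
    print(solver.maxProduct([2, 3, -2, 4]))  # expected: 6
    print(solver.maxProduct([-2, 0, -1]))    # expected: 0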
```
|
{
"source": "jeevannavar/tic-tac-toe",
"score": 4
}
|
#### File: jeevannavar/tic-tac-toe/main_functions.py
```python
def new_board():
'''
This function creates a list of lists of shape (3,3) containing " "s, single spaces
Inputs:
None
Outputs:
list of lists of size (3,3)
'''
empty_board = [[" "]*3 for _ in range(3)]
return empty_board
def render_board(board):
'''
This function takes a 3x3 board as input and prints it to console
Input:
board = list of lists of size (3,3)
Output:
None
'''
print("_"*25)
print("|", " "*5, "|", " "*5, "|", " "*5, "|")
print("|", board[0][0], "|", board[0][1], "|", board[0][2], "|")
print("|", " "*5, "|", " "*5, "|", " "*5, "|")
print("—"*25)
print("|", " "*5, "|", " "*5, "|", " "*5, "|")
print("|", board[1][0], "|", board[1][1], "|", board[1][2], "|")
print("|", " "*5, "|", " "*5, "|", " "*5, "|")
print("—"*25)
print("|", " "*5, "|", " "*5, "|", " "*5, "|")
print("|", board[2][0], "|", board[2][1], "|", board[2][2], "|")
print("|", " "*5, "|", " "*5, "|", " "*5, "|")
print("—"*25)
return None
def get_move(board):
'''
    This function prompts the player for a tile number (1 through 9) and validates it against the tiles that are still empty
Input:
board = current state of the board
Output:
move = int in the range of 1 through 9
'''
print("Choose the tile you would like to mark: (Enter number between 1 and 9)")
valid_moves = [i+1 for i in range(9) if board[i//3][i%3] == " "] # List of valid moves
try:
move = int(input())
assert move in valid_moves
    except (ValueError, AssertionError):
print("\nInvalid move! Try again!")
move = get_move(board)
return move
def make_move(board, marker, coords):
'''
This function makes the move on the board.
Currently it can take only fixed player names and has fixed player markers.
Input:
board = list of lists of size (3,3)
marker = str, either "X" or "O"
        coords = int in the range of 1 through 9
Output:
board = list of lists of size (3,3)
'''
coords -= 1 #This is to bring the coordinate to zero indexing
board[coords//3][coords%3] = marker
return board
def winner(board):
'''
This function checks if a winner can be declared
Input:
board = list of lists of size (3,3)
Output:
winner = boolean, True if winner can be declared
'''
for i in range(3):
if board[i][0] == board[i][1] == board[i][2] != " ":
return True
if board[0][i] == board[1][i] == board[2][i] != " ":
return True
if board[0][0] == board[1][1] == board[2][2] != " ":
return True
if board[0][2] == board[1][1] == board[2][0] != " ":
return True
return False
def isBoardFull(board):
'''
This function checks if the board if full
Input:
board = list of lists of size (3,3)
Output:
fullness = boolean, True if the board is full
'''
if any(" " in row for row in board):
return False
return True
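# Minimal two-player driver (illustrative sketch, not part of the original
# module) showing how the helpers above fit together; the marker symbols and
# turn-taking scheme used here are assumptions.
if __name__ == "__main__":
    board = new_board()
    markers = ["X", "O"]
    turn = 0
    while True:
        render_board(board)
        move = get_move(board)
        board = make_move(board, markers[turn % 2], move)
        if winner(board):
            render_board(board)
            print("Player", markers[turn % 2], "wins!")
            break
        if isBoardFull(board):
            render_board(board)
            print("It is a draw!")
            break
        turn += 1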
```
|
{
"source": "jeevanpuchakay/BTP",
"score": 2
}
|
#### File: Codes/DataAugmentation/main.py
```python
import os
import json
from PIL import Image
from pathlib import Path
import torch
from torchvision import transforms
def rotate_images(input_folder, output_folder, rotation_angle, old_annotations, new_annotations):
    failed_files = []
    for file_name in os.listdir(input_folder):
        if not file_name.endswith(".jpg"):
continue
img = Image.open(input_folder + file_name)
width, height = img.width, img.height
rotated_image = img.rotate(rotation_angle, expand=False)
rotated_image.save(output_folder + file_name) # , file_name.split('.')[-1].lower())
        x_center, y_center = width / 2, height / 2
try:
if rotation_angle == -90:
old_annotation = old_annotations["imgs"][file_name.split('.')[0]]
new_annotation = old_annotation
new_annotation["objects"] = []
for object in old_annotation["objects"]:
bbox = object["bbox"]
bbox["xmin"] -= x_center
bbox["ymin"] -= y_center
bbox["xmax"] -= x_center
bbox["ymax"] -= y_center
bbox["xmin"], bbox["ymin"] = bbox["ymin"], -bbox["xmin"]
bbox["xmax"], bbox["ymax"] = bbox["ymax"], -bbox["xmax"]
bbox["xmin"], bbox["ymin"] = bbox["xmin"] + x_center, bbox["ymin"] + y_center
bbox["xmax"], bbox["ymax"] = bbox["xmax"] + x_center, bbox["ymax"] + y_center
new_annotation["objects"].append(bbox)
elif rotation_angle == -180:
old_annotation = old_annotations["imgs"][file_name.split('.')[0]]
new_annotation = old_annotation
new_annotation["objects"] = []
for object in old_annotation["objects"]:
bbox = object["bbox"]
bbox["xmin"] -= x_center
bbox["ymin"] -= y_center
bbox["xmax"] -= x_center
bbox["ymax"] -= y_center
bbox["xmin"], bbox["ymin"] = -bbox["xmin"], -bbox["ymin"]
bbox["xmax"], bbox["ymax"] = -bbox["xmax"], -bbox["ymax"]
bbox["xmin"], bbox["ymin"] = bbox["xmin"] + x_center, bbox["ymin"] + y_center
bbox["xmax"], bbox["ymax"] = bbox["xmax"] + x_center, bbox["ymax"] + y_center
new_annotation["objects"].append(bbox)
elif rotation_angle == -270:
old_annotation = old_annotations["imgs"][file_name.split('.')[0]]
new_annotation = old_annotation
new_annotation["objects"] = []
for object in old_annotation["objects"]:
bbox = object["bbox"]
bbox["xmin"] -= x_center
bbox["ymin"] -= y_center
bbox["xmax"] -= x_center
bbox["ymax"] -= y_center
bbox["xmin"], bbox["ymin"] = bbox["ymin"], -bbox["xmin"]
bbox["xmax"], bbox["ymax"] = bbox["ymax"], -bbox["xmax"]
bbox["xmin"], bbox["ymin"] = bbox["xmin"] + x_center, bbox["ymin"] + y_center
bbox["xmax"], bbox["ymax"] = bbox["xmax"] + x_center, bbox["ymax"] + y_center
new_annotation["objects"].append(bbox)
except KeyError:
print(file_name + " File details not found in source annotation.")
failed_files.append(file_name)
return new_annotations, failed_files
def change_brightness(new_brightness_level, input_folder, output_folder):
for file_name in os.listdir(input_folder):
if not file_name.endswith(".jpg"):
continue
img = Image.open(input_folder + file_name)
new_image = transforms.ColorJitter(brightness=new_brightness_level)(img)
new_image.save(output_folder + file_name)
return
def change_contrast(new_contrast, input_folder, output_folder):
for file_name in os.listdir(input_folder):
if not file_name.endswith(".jpg"):
continue
img = Image.open(input_folder + file_name)
new_image = transforms.ColorJitter(contrast=new_contrast)(img)
new_image.save(output_folder + file_name)
return
def change_hue(new_hue, input_folder, output_folder):
for file_name in os.listdir(input_folder):
if not file_name.endswith(".jpg"):
continue
img = Image.open(input_folder + file_name)
new_image = transforms.ColorJitter(hue=new_hue)(img)
new_image.save(output_folder + file_name)
return
def resize_pictures(new_size, input_folder, output_folder):
for file_name in os.listdir(input_folder):
if not file_name.endswith(".jpg"):
continue
img = Image.open(input_folder + file_name)
resized_image = img.resize(new_size)
resized_image.save(output_folder + file_name)
def read_json_file(file_path):
with open(file_path, "r") as file:
return json.load(file)
def makedir(path):
try:
path = Path(path)
path.mkdir(parents=True)
print("Directory created")
except FileExistsError as e:
print("Output directory already exists.")
def write_to_json(new_annotations, new_annotation_path):
with open(new_annotation_path, 'w') as f:
f.write(json.dumps(new_annotations))
def write_to_txt(array, txt_file_path):
with open(txt_file_path, 'w') as f:
json.dump(array, f)
if __name__ == "__main__":
base_path = "/mnt/g/Drive/BTP/TSD"
input_folder = base_path + "/tt100k_2021/TSD/"
dataset_name = "tt100k_2021_hu_0_4"
output_folder = base_path + "/HueVariations/" + dataset_name + "/TSD/"
annotations_file_path = base_path + "/tt100k_2021/annotations_all.json"
new_annotations_file_path = base_path + "/HueVariations/" + dataset_name + "/annotations_all.json"
failed_files_list_path = base_path + "/HueVariations/" + dataset_name + "/failed_files.txt"
old_annotations = read_json_file(annotations_file_path)
makedir(output_folder)
new_annotations = {"types": old_annotations["types"], "imgs": {}}
# resize_pictures((1024,1024),input_folder=input_folder, output_folder=output_folder)
change_hue(new_hue=0.4, input_folder=input_folder, output_folder=output_folder)
# change_contrast(new_contrast=2.5, input_folder=input_folder, output_folder=output_folder)
# change_brightness(new_brightness_level=3.5, input_folder=input_folder, output_folder=output_folder)
# new_annotations, failed_files = rotate_images(input_folder, output_folder, -180, old_annotations, new_annotations)
# write_to_json(new_annotations, new_annotations_file_path)
# print(failed_files)
# write_to_txt(failed_files, failed_files_list_path)
```
#### File: Codes/SuperResolution/ImproveResolution.py
```python
import numpy as np
from PIL import Image
from ISR.models import RDN
import os
home_path = "/mnt/g/Drive/BTP/Pictures/"
def get_unrecognised_pictures_list():
unrecognised_pictures_path = home_path + "Det/PicturesWithUnRecognizedSigns/"
unrecognised_pictures_list = []
for file_name in os.listdir(unrecognised_pictures_path):
unrecognised_pictures_list.append(file_name.split('_det')[0]+".png")
return unrecognised_pictures_list
if __name__ == "__main__":
unrecognised_pictures_list = get_unrecognised_pictures_list()
all_pictures_path = home_path + "AllPicturesFromVideos/"
output_directory = "/mnt/g/Results/"
failed_pictures_list = []
for file_name in unrecognised_pictures_list:
try:
print(all_pictures_path+file_name)
img = Image.open(all_pictures_path+file_name)
img = img.convert("RGB")
lr_img = np.array(img)
rdn = RDN(weights='psnr-small')
sr_img = rdn.predict(lr_img, by_patch_of_size=50)
enhanced_image = Image.fromarray(sr_img)
enhanced_image.save(
output_directory+file_name.split('.')[0]+"_res"+".jpg")
        except Exception:
print("Processing "+file_name+" is Failed.\n")
failed_pictures_list.append(file_name)
with open('failed.txt', 'w') as f:
for item in failed_pictures_list:
f.write("%s\n" % item)
```
|
{
"source": "jeevan-revaneppa-hirethanad/audio-to-speech-pipeline",
"score": 2
}
|
#### File: ekstep_data_pipelines/audio_analysis/analyse_speaker.py
```python
from ekstep_data_pipelines.common.utils import get_logger
from ekstep_data_pipelines.audio_analysis.speaker_analysis.file_cluster_mapping import (
speaker_to_file_name_map,
)
from ekstep_data_pipelines.audio_analysis.speaker_analysis.speaker_clustering import (
create_speaker_clusters,
)
Logger = get_logger("analyse_speakers")
def analyse_speakers(
embed_file_path,
source,
min_cluster_size,
partial_set_size,
min_samples,
fit_noise_on_similarity,
):
file_map_dict, noise_file_map_dict = create_speaker_clusters(
embed_file_path,
source,
min_cluster_size,
partial_set_size,
min_samples,
fit_noise_on_similarity,
)
    Logger.info("Noise count: %s", len(noise_file_map_dict))
speaker_to_file_name = speaker_to_file_name_map(file_map_dict)
Logger.info("total speakers:%s", str(len(speaker_to_file_name)))
return speaker_to_file_name
```
#### File: ekstep_data_pipelines/audio_analysis/audio_analysis.py
```python
import sys
import multiprocessing
import os
from ekstep_data_pipelines.audio_analysis.analyse_speaker import analyse_speakers
from ekstep_data_pipelines.audio_analysis.analyse_gender import analyse_gender
from ekstep_data_pipelines.audio_analysis.constants import (
CONFIG_NAME,
REMOTE_PROCESSED_FILE_PATH,
AUDIO_ANALYSIS_PARAMS,
ANALYSIS_OPTIONS,
)
from ekstep_data_pipelines.common.utils import get_logger
from ekstep_data_pipelines.common import BaseProcessor, CatalogueDao
from ekstep_data_pipelines.audio_analysis.speaker_analysis.create_embeddings import (
concatenate_embed_files,
)
MIN_SAMPLES = 1
PARTIAL_SET_SIZE = 11122
MIN_CLUSTER_SIZE = 5
FIT_NOISE_ON_SIMILARITY = 0.80
ESTIMATED_CPU_SHARE = 0.1
LOGGER = get_logger("AudioSpeakerClusteringProcessor")
class AudioAnalysis(BaseProcessor):
"""
Class to identify speaker for each utterance in a source
"""
DEFAULT_DOWNLOAD_PATH = "./audio_speaker_cluster"
@staticmethod
def get_instance(data_processor, **kwargs):
return AudioAnalysis(data_processor, **kwargs)
def __init__(self, data_processor, **kwargs):
self.data_processor = data_processor
self.audio_analysis_config = None
self.catalogue_dao = CatalogueDao(self.data_processor)
super().__init__(**kwargs)
def handle_termination_gracefully(self, signum, frame):
LOGGER.info(
f"SIGINT/SIGTERM invoked with the following information {signum}/{frame}"
)
sys.exit(1)
def process(self, **kwargs):
"""
Function for mapping utterance to speakers
"""
self.audio_analysis_config = self.data_processor.config_dict.get(CONFIG_NAME)
remote_base_path = self.audio_analysis_config.get("path_for_embeddings")
source = self.get_source_from_config(**kwargs)
parameters = self.get_speaker_analysis_params()
path_for_embeddings = f'{remote_base_path}/{source}'
embed_file_path = (
f"{AudioAnalysis.DEFAULT_DOWNLOAD_PATH}/{source}_embed_file.npz"
)
local_audio_download_path = f"{AudioAnalysis.DEFAULT_DOWNLOAD_PATH}/{source}/"
self.ensure_path(local_audio_download_path)
local_embeddings_path = f'{self.DEFAULT_DOWNLOAD_PATH}/embeddings/'
LOGGER.info(f"Ensured {local_audio_download_path} exists")
remote_download_path = self.get_full_path(source)
LOGGER.info("Total available cpu count:" + str(multiprocessing.cpu_count()))
LOGGER.info("Running speaker clustering using parameters: " + str(parameters))
min_cluster_size = parameters.get("min_cluster_size", MIN_CLUSTER_SIZE)
partial_set_size = parameters.get("partial_set_size", PARTIAL_SET_SIZE)
min_samples = parameters.get("min_samples", MIN_SAMPLES)
# path_for_embeddings = parameters.get("path_for_embeddings")
fit_noise_on_similarity = parameters.get(
"fit_noise_on_similarity", FIT_NOISE_ON_SIMILARITY
)
npz_destination_path = f"{remote_download_path}/{source}_embed_file.npz"
analysis_options = self.get_analysis_options()
speaker_to_file_name = None
file_to_speaker_gender_mapping = None
self.ensure_path(local_embeddings_path)
self.download_all_embedding(path_for_embeddings,local_embeddings_path)
self.merge_embeddings(
embed_file_path,
local_embeddings_path,
npz_destination_path
)
if analysis_options.get("speaker_analysis") == 1:
speaker_to_file_name = analyse_speakers(
embed_file_path,
source,
min_cluster_size,
partial_set_size,
min_samples,
fit_noise_on_similarity,
)
if analysis_options.get("gender_analysis") == 1:
file_to_speaker_gender_mapping = analyse_gender(embed_file_path)
self.update_info_in_db(
self.catalogue_dao,
speaker_to_file_name,
file_to_speaker_gender_mapping,
source,
)
def download_all_embedding(self,full_path,local_embeddings_path):
all_blobs = self.fs_interface.list_blobs_in_a_path(full_path)
bucket_name = full_path.split('/')[0]
for blob in all_blobs:
print(blob.name)
if '.npz' in blob.name:
print(f'{local_embeddings_path}{os.path.basename(blob.name)}{blob.name},{blob}')
self.fs_interface.download_file_to_location(
f'{bucket_name}/{blob.name}', f'{local_embeddings_path}{os.path.basename(blob.name)}'
)
def merge_embeddings(
self,
embed_file_path,
local_embeddings_path,
npz_bucket_destination_path,
):
if self.fs_interface.path_exists(npz_bucket_destination_path):
LOGGER.info("npz file is already present in bucket.")
self.fs_interface.download_file_to_location(
npz_bucket_destination_path, embed_file_path
)
else:
            LOGGER.info("Start merging embedding files")
concatenate_embed_files(embed_file_path, local_embeddings_path)
is_uploaded = self.fs_interface.upload_to_location(
embed_file_path, npz_bucket_destination_path
)
if is_uploaded:
LOGGER.info("npz file uploaded to :" + npz_bucket_destination_path)
else:
LOGGER.info(
"npz file could not be uploaded to :" + npz_bucket_destination_path
)
def update_info_in_db(
self,
catalogue_dao,
speaker_to_file_name,
file_to_speaker_gender_mapping,
source,
):
if speaker_to_file_name:
self._update_speaker_count_info(catalogue_dao, speaker_to_file_name, source)
if file_to_speaker_gender_mapping:
self._update_speaker_gender_mapping(
catalogue_dao, file_to_speaker_gender_mapping
)
def _update_speaker_gender_mapping(
self, catalogue_dao, file_speaker_gender_mapping
):
male_files = []
female_files = []
for file, gender in file_speaker_gender_mapping.items():
utterance_name = file.split("/")[-1]
if gender == "m":
male_files.append(utterance_name)
else:
female_files.append(utterance_name)
catalogue_dao.update_utterance_speaker_gender(male_files, "m")
        LOGGER.info(f"Updating the {male_files} with the value male")
catalogue_dao.update_utterance_speaker_gender(female_files, "f")
        LOGGER.info(f"Updating the {female_files} with the value female")
def _update_speaker_count_info(self, catalogue_dao, speaker_to_file_name, source):
for speaker in speaker_to_file_name:
speaker_id = catalogue_dao.select_speaker(speaker, source)
if speaker_id == -1:
speaker_inserted = catalogue_dao.insert_speaker(source, speaker)
else:
LOGGER.info("Speaker already exists:" + speaker)
speaker_inserted = True
if not speaker_inserted:
                # do nothing in case the speaker_inserted is false
continue
LOGGER.info("updating utterances for speaker:" + speaker)
utterances = speaker_to_file_name.get(speaker)
LOGGER.info("utterances:" + str(utterances))
to_file_name = lambda u: u[0]
was_noise_utterances = list(
map(to_file_name, (filter(lambda u: u[1] == 1, utterances)))
)
fitted_utterances = list(
map(to_file_name, (filter(lambda u: u[1] == 0, utterances)))
)
if len(was_noise_utterances) > 0:
catalogue_dao.update_utterance_speaker(
was_noise_utterances, speaker, True
)
if len(fitted_utterances) > 0:
catalogue_dao.update_utterance_speaker(
fitted_utterances, speaker, False
)
def get_full_path(self, source):
remote_file_path = self.audio_analysis_config.get(REMOTE_PROCESSED_FILE_PATH)
remote_download_path = f"{remote_file_path}/{source}"
return remote_download_path
def ensure_path(self, path):
os.makedirs(path, exist_ok=True)
def get_source_from_config(self, **kwargs):
source = kwargs.get("source")
if source is None:
raise Exception("filter by source is mandatory")
return source
def get_speaker_analysis_params(self):
return self.audio_analysis_config.get(AUDIO_ANALYSIS_PARAMS)
def get_analysis_options(self):
return self.audio_analysis_config.get(ANALYSIS_OPTIONS)
```
#### File: audio_analysis/speaker_analysis/file_cluster_mapping.py
```python
import json
def save_json(file_path, mappings):
with open(file_path, "w+") as file:
json.dump(mappings, file)
def file_to_speaker_map(speaker_to_file_map):
file_to_speaker = {}
for speaker in speaker_to_file_map:
files = speaker_to_file_map.get(speaker)
for file in files:
file_name = file.split("/")[-1]
file_to_speaker[file_name] = speaker
return file_to_speaker
def speaker_to_file_name_map(speaker_to_file_map):
speaker_to_utterances = {}
for speaker in speaker_to_file_map:
utterances = list(
map(lambda f: (f[0].split("/")[-1], f[1]), speaker_to_file_map.get(speaker))
)
speaker_to_utterances[speaker] = utterances
return speaker_to_utterances
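# Shape illustration (not part of the original file): in the speaker analysis
# step each cluster maps to a list of (utterance path, was_noise flag) tuples,
# so speaker_to_file_name_map reduces the paths to bare file names. The sample
# values below are made up.
if __name__ == "__main__":
    clusters = {"speaker_0": [("/tmp/source/utt1.wav", 0), ("/tmp/source/utt2.wav", 1)]}
    print(speaker_to_file_name_map(clusters))
    # -> {'speaker_0': [('utt1.wav', 0), ('utt2.wav', 1)]}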
```
#### File: common/audio_commons/chunking_conversion_util.py
```python
import collections
import contextlib
import glob
import os
import subprocess
import sys
import wave
import sox
import webrtcvad
from ekstep_data_pipelines.common.utils import get_logger
Logger = get_logger("Chunking Util")
class ChunkingConversionUtil:
re_chunking_aggressiveness = 3
@staticmethod
def get_instance():
return ChunkingConversionUtil()
def convert_to_wav(self, input_dir, output_dir=None, ext="mp4"):
Logger.info("Convert all the files in %s to wav", input_dir)
audio_paths = glob.glob(input_dir + "/*." + ext)
Logger.info("Files to be completed: %s", audio_paths)
if len(audio_paths) < 1:
return None, False
input_file_name = audio_paths[0]
output_file_name = input_file_name.split("/")[-1].split(".")[0] + ".wav"
if output_dir is None:
output_file_path = (
"/".join(input_file_name.split("/")[:-1]) + "/" + output_file_name
)
else:
output_file_path = output_dir + "/" + output_file_name
Logger.info("Output path for converted wav file is:%s", output_file_name)
if os.path.exists(output_file_path) and os.path.isfile(output_file_path):
Logger.info(
"WAV file at %s already exists, not doing anything", output_file_name
)
return output_file_path, True
Logger.info("No file exists on %s, running the command", output_file_name)
command = (
f"ffmpeg -i {input_file_name} -ar 16000 -ac 1 -bits_per_raw_sample 16 -vn "
f"{output_file_path}"
)
subprocess.call(command, shell=True)
        Logger.info("Finished running conversion command, expected output at %s", output_file_path)
return output_file_path, True
def create_audio_clips(
self,
aggressiveness,
max_duration,
wav_file_path,
dir_to_save_chunks,
vad_output_file_path,
base_chunk_name,
is_rechunking=True,
):
audio, sample_rate = self.read_wave(wav_file_path)
vad = webrtcvad.Vad(int(aggressiveness))
frames = self.frame_generator(30, audio, sample_rate)
frames = list(frames)
file = open(vad_output_file_path, "w+")
segments = self.vad_collector(
sample_rate, 30, 300, vad, frames, vad_output_file_path, file
)
for i, segment in enumerate(segments):
path = f"{dir_to_save_chunks}/{i}_{base_chunk_name}"
file.write("\nWriting %s" % (path,))
file.write("\n")
self.write_wave(path, segment, sample_rate)
file.close()
if is_rechunking:
self.rechunking_acc_to_duration(
max_duration, dir_to_save_chunks, vad_output_file_path
)
def rechunking_acc_to_duration(
self, max_duration, dir_of_chunks, vad_output_file_path
):
file_list = glob.glob(dir_of_chunks + "/*.wav")
for file_path in file_list:
duration = self.calculate_duration(file_path)
if duration > max_duration:
base_chunk_name = file_path.split("/").pop()
Logger.info(
"rechunking of file %s and duration of file is: %s",
base_chunk_name,
duration,
)
self.create_audio_clips(
ChunkingConversionUtil.re_chunking_aggressiveness,
max_duration,
file_path,
dir_of_chunks,
vad_output_file_path,
base_chunk_name,
False,
)
os.remove(file_path)
def calculate_duration(self, input_filepath):
duration = sox.file_info.duration(input_filepath)
Logger.info(
"Duration for input_filepath:%s : %s", input_filepath, str(duration)
)
return duration
def read_wave(self, path):
"""Reads a .wav file.
Takes the path, and returns (PCM audio data, sample rate).
"""
with contextlib.closing(wave.open(path, "rb")) as wave_file:
num_channels = wave_file.getnchannels()
assert num_channels == 1
sample_width = wave_file.getsampwidth()
assert sample_width == 2
sample_rate = wave_file.getframerate()
assert sample_rate in (8000, 16000, 32000, 48000)
pcm_data = wave_file.readframes(wave_file.getnframes())
return pcm_data, sample_rate
def write_wave(self, path, audio, sample_rate):
"""Writes a .wav file.
Takes path, PCM audio data, and sample rate.
"""
with contextlib.closing(wave.open(path, "wb")) as wave_file:
wave_file.setnchannels(1)
wave_file.setsampwidth(2)
wave_file.setframerate(sample_rate)
wave_file.writeframes(audio)
def frame_generator(self, frame_duration_ms, audio, sample_rate):
"""Generates audio frames from PCM audio data.
Takes the desired frame duration in milliseconds, the PCM data, and
the sample rate.
Yields Frames of the requested duration.
"""
n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)
offset = 0
timestamp = 0.0
duration = (float(n) / sample_rate) / 2.0
while offset + n < len(audio):
# print("offset, offset+n: ", offset, offset+n)
# print("timestamp:", timestamp)
# print("duration:", duration)
yield Frame(audio[offset : offset + n], timestamp, duration)
timestamp += duration
offset += n
def vad_collector(
self,
sample_rate,
frame_duration_ms,
padding_duration_ms,
vad,
frames,
vad_output_file_path,
file,
):
"""Filters out non-voiced audio frames.
Given a webrtcvad.Vad and a source of audio frames, yields only
the voiced audio.
Uses a padded, sliding window algorithm over the audio frames.
When more than 90% of the frames in the window are voiced (as
reported by the VAD), the collector triggers and begins yielding
audio frames. Then the collector waits until 90% of the frames in
the window are unvoiced to detrigger.
The window is padded at the front and back to provide a small
amount of silence or the beginnings/endings of speech around the
voiced frames.
Arguments:
sample_rate - The audio sample rate, in Hz.
frame_duration_ms - The frame duration in milliseconds.
padding_duration_ms - The amount to pad the window, in milliseconds.
vad - An instance of webrtcvad.Vad.
frames - a source of audio frames (sequence or generator).
Returns: A generator that yields PCM audio data.
"""
num_padding_frames = int(padding_duration_ms / frame_duration_ms)
# We use a deque for our sliding window/ring buffer.
ring_buffer = collections.deque(maxlen=num_padding_frames)
# We have two states: TRIGGERED and NOTTRIGGERED. We start in the
# NOTTRIGGERED state.
triggered = False
voiced_frames = []
for frame in frames:
is_speech = vad.is_speech(frame.bytes, sample_rate)
sys.stdout.write("1" if is_speech else "0")
if not triggered:
ring_buffer.append((frame, is_speech))
num_voiced = len([f for f, speech in ring_buffer if speech])
# If we're NOTTRIGGERED and more than 90% of the frames in
# the ring buffer are voiced frames, then enter the
# TRIGGERED state.
if num_voiced > 0.9 * ring_buffer.maxlen:
triggered = True
sys.stdout.write("+(%s)" % (ring_buffer[0][0].timestamp,))
file.write("+(%s)" % (ring_buffer[0][0].timestamp,))
# We want to yield all the audio we see from now until
# we are NOTTRIGGERED, but we have to start with the
# audio that's already in the ring buffer.
for f, s in ring_buffer:
voiced_frames.append(f)
ring_buffer.clear()
else:
# We're in the TRIGGERED state, so collect the audio data
# and add it to the ring buffer.
voiced_frames.append(frame)
ring_buffer.append((frame, is_speech))
num_unvoiced = len([f for f, speech in ring_buffer if not speech])
# If more than 90% of the frames in the ring buffer are
# unvoiced, then enter NOTTRIGGERED and yield whatever
# audio we've collected.
if num_unvoiced > 0.9 * ring_buffer.maxlen:
sys.stdout.write("-(%s)" % (frame.timestamp + frame.duration))
file.write("-(%s)" % (frame.timestamp + frame.duration))
# file.write('\n')
triggered = False
yield b"".join([f.bytes for f in voiced_frames])
ring_buffer.clear()
voiced_frames = []
# W0631: Using possibly undefined loop variable 'frame' (undefined-loop-variable)
# out of scope frame loop variable
if triggered:
sys.stdout.write("-(%s)" % (frame.timestamp + frame.duration))
file.write("-(%s)" % (frame.timestamp + frame.duration))
sys.stdout.write("\n")
# file.write('\n')
# If we have any leftover voiced audio when we run out of input,
# yield it.
if voiced_frames:
yield b"".join([f.bytes for f in voiced_frames])
class Frame(object):
"""Represents a "frame" of audio data."""
def __init__(self, bytes, timestamp, duration):
self.bytes = bytes
self.timestamp = timestamp
self.duration = duration
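# Illustrative driver (not part of the original module) mirroring how
# convert_to_wav and create_audio_clips are chained by the pipeline; every
# path and parameter value below is an assumption.
if __name__ == "__main__":
    chunker = ChunkingConversionUtil.get_instance()
    wav_path, converted = chunker.convert_to_wav("./downloads", "./converted", ext="mp4")
    if converted:
        chunker.create_audio_clips(
            aggressiveness=2,
            max_duration=15,
            wav_file_path=wav_path,
            dir_to_save_chunks="./chunks",
            vad_output_file_path="./chunks/vad_output.txt",
            base_chunk_name="sample.wav",
        )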
```
#### File: ekstep_data_pipelines/common/postgres_db_client.py
```python
import numpy as np
from psycopg2._json import Json
from psycopg2.extensions import register_adapter, AsIs
from sqlalchemy import create_engine, text
def addapt_numpy_float64(numpy_float64):
return AsIs(numpy_float64)
def addapt_numpy_int64(numpy_int64):
return AsIs(numpy_int64)
def addapt_numpy_float32(numpy_float32):
return AsIs(numpy_float32)
def addapt_numpy_int32(numpy_int32):
return AsIs(numpy_int32)
def addapt_numpy_array(numpy_array):
return AsIs(tuple(numpy_array))
register_adapter(np.float64, addapt_numpy_float64)
register_adapter(np.int64, addapt_numpy_int64)
register_adapter(np.float32, addapt_numpy_float32)
register_adapter(np.int32, addapt_numpy_int32)
register_adapter(np.ndarray, addapt_numpy_array)
register_adapter(dict, Json)
class PostgresClient:
"""
PostgresClient for DB related operations
    1. Load configuration
2. execute select
3. execute update
4. execute batch updates
"""
GET_UNIQUE_ID = "SELECT nextval('audio_id_seq');"
IS_EXIST = (
"select exists(select 1 from media_metadata_staging where raw_file_name= "
":file_name or media_hash_code = :hash_code);"
)
@staticmethod
def get_instance(config_dict, **kwargs):
data_processor = PostgresClient(config_dict, **kwargs)
data_processor.setup_peripherals()
return data_processor
def __init__(self, config_dict, **kwargs):
self.config_dict = config_dict
self.db = None
self._connection = None
def setup_peripherals(self):
self.setup_db_access()
@property
def connection(self):
if self._connection:
return self._connection
if not self.db:
self.setup_db_access()
self._connection = self.db.connect()
return self._connection
def setup_db_access(self):
"""
Function for setting up the database access
"""
db_configuration = self.config_dict.get("common", {}).get(
"db_configuration", {}
)
db_name = db_configuration.get("db_name")
db_user = db_configuration.get("db_user")
db_pass = db_configuration.get("db_pass")
cloud_sql_connection_name = db_configuration.get("cloud_sql_connection_name")
port = db_configuration.get("port",5432)
valid_config = all([db_name, db_user, db_pass, cloud_sql_connection_name])
if not valid_config:
# TODO: Raise DB config missing exception
pass
self.db = create_engine(
f"postgresql://{db_user}:{db_pass}@{cloud_sql_connection_name}:{port}/{db_name}"
)
def execute_query(self, query, **parm_dict):
return self.connection.execute(text(query), **parm_dict).fetchall()
def execute_update(self, query, **parm_dict):
return self.connection.execute(text(query), **parm_dict)
def execute_batch(self, query, data_list):
conn = self.db.raw_connection()
cur = conn.cursor()
cur.executemany(query, data_list)
updated_rows = cur.rowcount
conn.commit()
cur.close()
return updated_rows
def get_unique_id(self):
return self.connection.execute(self.GET_UNIQUE_ID).fetchall()[0][0]
def check_file_exist_in_db(self, file_name, hash_code):
return self.connection.execute(
text(self.IS_EXIST), file_name=file_name, hash_code=hash_code
).fetchall()[0][0]
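# Illustrative usage sketch (not part of the original module). The nested
# config keys mirror what setup_db_access() expects; the connection values
# and the query are made-up examples.
if __name__ == "__main__":
    sample_config = {
        "common": {
            "db_configuration": {
                "db_name": "catalogue",
                "db_user": "postgres",
                "db_pass": "postgres",
                "cloud_sql_connection_name": "localhost",
                "port": 5432,
            }
        }
    }
    client = PostgresClient.get_instance(sample_config)
    print(client.execute_query("select 1 as ok"))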
```
#### File: audio_commons/transcription_clients_tests/azure_transcription_client_tests.py
```python
import unittest
from unittest import mock
import sys
from azure.cognitiveservices.speech import speech
from ekstep_data_pipelines.common.audio_commons.transcription_clients.azure_transcription_client import (
AzureTranscriptionClient,
)
from ekstep_data_pipelines.common.audio_commons.transcription_clients.transcription_client_errors import (
AzureTranscriptionClientError,
)
class TestAzureTranscriptionClient(unittest.TestCase):
def setUp(self):
super(TestAzureTranscriptionClient, self).setUp()
config = {"speech_key": "dummy_key", "service_region": "centralindia"}
self.azure_client = AzureTranscriptionClient(**config)
@mock.patch("azure.cognitiveservices.speech.SpeechRecognizer")
def test__speech_to_text_success(self, mock_speechrecongnizer):
result = mock.Mock()
result.text = "कोरोना के प्रभाव से हमारी मन की बात भी अछूती नहीं रही है।"
result.reason = speech.ResultReason.RecognizedSpeech
mock_speechrecongnizer.return_value.recognize_once.return_value = result
audio_file_path = "chunk-2.wav"
actual_value = self.azure_client.generate_transcription(
"hi-IN", audio_file_path
)
self.assertEqual(
"कोरोना के प्रभाव से हमारी मन की बात भी अछूती नहीं रही है।", actual_value
)
@mock.patch("azure.cognitiveservices.speech.SpeechRecognizer")
def test__speech_to_text_no_match(self, mock_speechrecongnizer):
result = mock.Mock()
result.text = None
result.reason = speech.ResultReason.NoMatch
result.no_match_details = "test_api_error"
mock_speechrecongnizer.return_value.recognize_once.return_value = result
audio_file_path = "chunk-2.wav"
self.assertRaises(
AzureTranscriptionClientError,
self.azure_client.generate_transcription,
"hi-IN",
audio_file_path,
)
@mock.patch("azure.cognitiveservices.speech.SpeechRecognizer")
def test__speech_to_text_cancelled(self, mock_speechrecongnizer):
result = mock.Mock()
result.text = None
result.reason = speech.ResultReason.Canceled
mock_speechrecongnizer.return_value.recognize_once.return_value = result
audio_file_path = "chunk-2.wav"
self.assertRaises(
AzureTranscriptionClientError,
self.azure_client.generate_transcription,
"hi-IN",
audio_file_path,
)
```
#### File: audio_commons/transcription_clients_tests/ekstepmodel_transcription_client_tests.py
```python
import unittest
from unittest import mock
from unittest.mock import Mock
from ekstep_data_pipelines.common.audio_commons.transcription_clients.azure_transcription_client import (
EkstepTranscriptionClient,
)
class TestEkstepTranscriptionClient(unittest.TestCase):
def setUp(self):
super(TestEkstepTranscriptionClient, self).setUp()
config = {"server_host": '127.0.0.1', "port": '50051', "language": "hi"}
self.ekstep_client = EkstepTranscriptionClient(**config)
@mock.patch("pickle.dump")
def test_call_speech_to_text_ekstep(self, mock_dump):
mock_client = Mock()
self.ekstep_client.client = mock_client
mock_new_result = Mock()
mock_client.recognize.return_value = mock_new_result
mock_new_result.transcript = (
" कोरोना के प्रभाव से हमारी मन की बात भी अछूती नहीं रही है।"
)
actual_result = self.ekstep_client.generate_transcription(
"test_language", "input_file_path"
)
self.assertEqual(mock_client.recognize.call_count, 1)
self.assertEqual(
actual_result, " कोरोना के प्रभाव से हमारी मन की बात भी अछूती नहीं रही है।"
)
```
#### File: common/infra_commons/google_storage_tests.py
```python
from unittest import TestCase
class GoogleStorage(TestCase):
def setUp(self):
pass
def test__given_a_existing_path__when_path_exists_invoked_with_the_existing_path__then_return_true(
self,
):
pass
def test__given_non_existent_path__when_path_exists_invoked_with_the_existing_path__then_return_true(
self,
):
pass
```
#### File: packages/ekstep_pipelines_tests/transcription_sanitizer_tests.py
```python
import unittest
from ekstep_data_pipelines.audio_transcription.transcription_sanitizers import (
get_transcription_sanitizers,
)
from ekstep_data_pipelines.audio_transcription.transcription_sanitizers.audio_transcription_errors import (
TranscriptionSanitizationError,
)
class TestTrancriptionSanitizer(unittest.TestCase):
def setUp(self):
transcription_sanitizers = get_transcription_sanitizers()
self.hindi_transcription_sanitizers = transcription_sanitizers.get("hindi")
def test_transcription_containing_empty_string_should_raise_runtime_exception(self):
transcript_obj = self.hindi_transcription_sanitizers
transcript = " "
with self.assertRaises(TranscriptionSanitizationError):
transcript_obj.sanitize(transcription=transcript)
def test_transcription_containing_space_in_start_should_return_None(self):
transcript_obj = self.hindi_transcription_sanitizers
transcript = " अलग अलग होते हैं"
self.assertEqual(transcript_obj.sanitize(transcript), "अलग अलग होते हैं")
def test_transcription_punctuations_are_being_removed(self):
transcript_obj = self.hindi_transcription_sanitizers
transcript = "अलग-अलग होते है!\"#%&'()*+,./;<=>?@[\\]^_`{|}~।"
self.assertEqual(transcript_obj.replace_bad_char(transcript), "अलग अलग होते है")
def test_transcription_containing_numbers_0123456789_should_be_accepted(self):
transcript_obj = self.hindi_transcription_sanitizers
transcript = "लेकिन मैक्सिमॅम 0123456789"
self.assertEqual(transcript_obj.shouldReject(transcript), False)
def test_transcription_containing_english_character_should_give_runtime_exception(
self,
):
transcript_obj = self.hindi_transcription_sanitizers
transcriptions = "4K की स्पीड थी"
self.assertEqual(transcript_obj.shouldReject(transcriptions), True)
def test_transcription_should_pass_for_given_samples(self):
transcript_obj = self.hindi_transcription_sanitizers
transcripts = [
("अलग-अलग होते हैं ", "अलग अलग होते हैं"),
("इफ यू हॅव ठीक थी", "इफ यू हॅव ठीक थी"),
("डिस्कॅशंस", "डिस्कॅशंस"),
("लेकिन मैक्सिमॅम ", "लेकिन मैक्सिमॅम"),
("फ्लैट चलाते-चलाते", "फ्लैट चलाते चलाते"),
("1126 वॅन", "1126 वॅन"),
(
"दो बच्चे हो गए दोनों का दो-दो बच्चे",
"दो बच्चे हो गए दोनों का दो दो बच्चे",
),
(
"कॅन्फ़्यूज़न हो जाता है कि मैं कौनसा लू ",
"कॅन्फ़्यूज़न हो जाता है कि मैं कौनसा लू",
),
]
for each_transcript, correct_response in transcripts:
self.assertEqual(transcript_obj.sanitize(each_transcript), correct_response)
def test_transcription_containing_time_should_fail(self):
transcript_obj = self.hindi_transcription_sanitizers
with self.assertRaises(TranscriptionSanitizationError):
transcript_obj.sanitize(transcription="8:00 से")
def test_transcription_should_fail_for_given_samples(self):
transcript_obj = self.hindi_transcription_sanitizers
transcriptions = [
"8:00 से",
"टेक्स्ट टू दीपा वन ऍफ़ टू",
"रजिस्ट्री ऍफ़",
"3dmili",
"x-ray निकाल के दिखाते हैं ",
"e-filing आ जाती है ",
"B.Ed कॉलेज ",
"m.a. B.Ed पूरी कर ",
"दिनभर patient-centered ",
"₹300",
"$500",
]
for each_transcription in transcriptions:
with self.assertRaises(TranscriptionSanitizationError):
transcript_obj.sanitize(each_transcription)
if __name__ == "__main__":
unittest.main()
```
#### File: python/dags/ulca_dataset.py
```python
import json
import datetime
from airflow import DAG
from airflow.models import Variable
from airflow.contrib.kubernetes import secret
from airflow.contrib.operators import kubernetes_pod_operator
ulca_dataset_config = json.loads(Variable.get("ulca_dataset_config"))
bucket_name = Variable.get("bucket")
env_name = Variable.get("env")
composer_namespace = Variable.get("composer_namespace")
YESTERDAY = datetime.datetime.now() - datetime.timedelta(days=1)
project = Variable.get("project")
secret_file = secret.Secret(
deploy_type="volume",
deploy_target="/tmp/secrets/google",
secret="gc-storage-rw-key",
key="key.json",
)
def create_dag(ulca_dataset_config, default_args):
dag = DAG(
dag_id="ulca_dataset_pipeline",
schedule_interval=datetime.timedelta(days=1),
default_args=default_args,
start_date=YESTERDAY,
)
with dag:
for source in ulca_dataset_config.keys():
source_config = ulca_dataset_config.get(source)
language = source_config.get("language").lower()
print(f"Language for source is {language}")
ulca_dataset_task = kubernetes_pod_operator.KubernetesPodOperator(
task_id=f"ulca-dataset-{source}",
name="data-dataset",
cmds=[
"python",
"invocation_script.py",
"-b",
bucket_name,
"-a",
"ulca_dataset",
"-rc",
"data/audiotospeech/config/config.yaml",
"-as",
source,
"-ulca_config",
json.dumps(source_config),
"-l",
language,
],
namespace=composer_namespace,
startup_timeout_seconds=300,
secrets=[secret_file],
image=f"us.gcr.io/{project}/ekstep_data_pipelines:{env_name}_1.0.0",
image_pull_policy="Always",
)
ulca_dataset_task
return dag
dag_args = {
"email": ["<EMAIL>"],
}
globals()["ulca_dataset_pipeline"] = create_dag(ulca_dataset_config, dag_args)
```
|
{
"source": "Jeevanshi/sih-2019",
"score": 3
}
|
#### File: sih-2019/apps/agg_eco_activities.py
```python
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
from collections import OrderedDict
import dash_table
from utils import get_excel
import pandas as pd
import time
import math
from app import app
filename = get_excel('aggregate_economic_activities', 'data/2018/aggregates-economic-activity/S7.1.xlsx')
data = pd.read_excel(filename)
years = data.iloc[2:3, 2:-2]
year_set = [year for year in list(OrderedDict.fromkeys(years.values[0]).keys()) if type(year) == str]
process = data[5:]
headers = data.iloc[4][2:-2]
header_set = list(OrderedDict.fromkeys(headers.values).keys())
sections = process.iloc[:, 0]
main_sections = [index for index in sections.index if str(sections[index]).isdigit() or (type(sections[index]) != str and math.isnan(sections[index]))]
section_rows = [data.iloc[idx] for idx in main_sections]
labels = [row.iloc[-1] for row in section_rows]
labelIds = main_sections
def app_layout():
children = [dcc.Tab(label=label, value=labelIds[idx]) for (idx, label) in enumerate(labels)]
return (
html.Div([
html.H2('Aggregated Economic Activities')
]),
html.Div([
dcc.Dropdown(
id='tabs',
options=[{'label': label, 'value': labelIds[idx]} for (idx, label) in enumerate(labels)],
placeholder="Select a category",
value=labelIds[-1]
),
dcc.Graph(id='agc-graph'),
generate_table(data)
], className="container")
)
def generate_table(dataframe, max_rows=10):
data = pd.read_excel('data/2018/aggregates-economic-activity/S7.1.xlsx', header = None)
df = data[3:]
df.columns = df.iloc[0].fillna(value=pd.Series(range(100)))
return(dash_table.DataTable(
data=df.to_dict('rows'),
columns=[{'id': c, 'name': c} for c in df.columns],
style_table={
'height': '400px',
'overflowY': 'scroll',
'border': 'thin lightgrey solid'
}))
layout = app_layout()
@app.callback(Output('agc-graph', 'figure'),
[Input('tabs', 'value')])
def display_content(value):
index = int(value)
year_list = ['Y ' + year for year in year_set]
arrays=[]
for i in range(len(header_set)):
arrays.append([])
rows = data.iloc[index][2:-2]
length = len(header_set)
for (idx, column) in enumerate(rows):
arrays[idx%length].append(column)
graphs = [{
'x': year_list,
'y': array,
'name': header_set[idx],
'line': {
'width': 3,
'shape': 'spline'
}
} for (idx, array) in enumerate(arrays)]
return {
'data': graphs,
'layout': {
'margin': {
'l': 30,
'r': 0,
'b': 30,
't': 0
},
'name': 'Current Price'
}
}
```
#### File: sih-2019/apps/gcf_time_series.py
```python
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import plotly.graph_objs as go
import math
import dash_table
from app import app
import pandas as pd
data = pd.read_excel('data/2018/economic-aggregates/S1.10.xlsx')
years = data.iloc[5:6, 2:-2]
process = data[7:]
sections = process.iloc[:, 0]
main_sections = [index for index in sections.index if str(sections[index]).isdigit() or (type(sections[index]) != str and math.isnan(sections[index]))]
rows = [data.iloc[idx] for idx in main_sections]
labels = [row.iloc[1] for row in rows[0:-1]]
labelIds = main_sections
print(labelIds[1])
def generate_table(dataframe, max_rows=10):
data = pd.read_excel('data/2018/economic-aggregates/S1.10.xlsx', header = None)
df = data[6:]
df.columns = df.iloc[0].fillna(value=pd.Series(range(100)))
return(dash_table.DataTable(
data=df.to_dict('rows'),
columns=[{'id': c, 'name': c} for c in df.columns],
style_table={
'height': '400px',
'overflowY': 'scroll',
'border': 'thin lightgrey solid'
}))
layout = html.Div([
html.H2('Gross Capital Formation Timeseries'),
dcc.Dropdown(
id='gcf-my-dropdown',
options=[{'label': category, 'value': labelIds[idx]} for (idx, category) in enumerate(labels)],
value=labelIds[1],
style={'margin-bottom': '20px'}
),
dcc.Graph(id='gcf-time-series',
style={'padding-top': '20px'}),
generate_table(data)
], className="container")
@app.callback(Output('gcf-time-series', 'figure'),
[Input('gcf-my-dropdown', 'value')])
def update_graph(selected_dropdown_value):
index = int(selected_dropdown_value)
row = data.iloc[index][2:-2]
year_list = ['Y ' + year for year in years.values[0]]
mid = int(len(row) / 2)
return {
'data': [go.Bar(
x=year_list[:mid],
y=row[:mid],
name='Current Price'
), go.Bar(
x=year_list[mid:],
y=row[mid:],
name='Constant Price'
)],
'layout': {
'title': data.iloc[index][1]
}
}
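# Note: each data row holds current-price values in its first half and constant-price
# values in its second half, hence the split at `mid` into two bar traces.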
```
|
{
"source": "jeevansio/cracking-wifi-with-pcap",
"score": 2
}
|
#### File: jeevansio/cracking-wifi-with-pcap/halfHandshake.py
```python
from pcapParser import load_savefile
from cracker import crack
from multiprocessing import Queue
def crackClients(clients, usersMac, SSID, passphraseQ):
clientHandshakes = []
for client in clients:
handshake = []
for message in clients[client]:
if message['message'] == 1:
handshake = [message]
elif len(handshake) == 1:
handshake.append(message)
clientHandshakes.append(handshake)
break
else:
handshake = []
for clientHandshake in clientHandshakes:
if clientHandshake[0]['AP'] == usersMac:
cracked = crack(SSID, clientHandshake[0]['client'], clientHandshake[0]['AP'], clientHandshake[0]['Anonce'], clientHandshake[1]['Snonce'], clientHandshake[1]['mic'], clientHandshake[1]['data'], passphraseQ)
if cracked != False:
return cracked
return False
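# Example invocation (a sketch; this script is written for Python 2, and the -d flag
# accepted by getopt below is currently unused):
#   python2 halfHandshake.py -r capture.pcap -m 00:11:22:33:44:55 -s MySSID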
if __name__ == "__main__":
from sys import argv, exit
import getopt
# read argument from command line
try:
opts, args = getopt.getopt(argv[1:], "r:m:s:d:")
except getopt.GetoptError:
print "bad args"
exit(2)
for opt, arg in opts:
if opt == '-r':
readFile = arg
if opt == '-m':
usersMac = arg.replace(":", "").decode('hex')
if opt == '-s':
SSID = arg
# load dictionary and read passphrase from dictionary
print "loading dictionary..."
f = open('dictionary.txt', 'r')
passphraseQ = Queue()
for passphrase in f.read().split('\n'):
passphraseQ.put(passphrase)
f.close()
# argument missing?
try:
usersMac
SSID
readFile
except NameError:
print "missing args, requires: -m (AP mac address) -s (SSID) -r (PCAP filename)"
exit(2)
# wrong file format
try:
caps, header = load_savefile(open(readFile))
except IOError:
print "Error reading file"
exit(2)
# LINKTYPE_ETHERNET = 1; LINKTYPE_IEEE802_11 = 105
# https://www.tcpdump.org/linktypes.html
# https://community.cisco.com/t5/wireless-mobility-documents/802-11-sniffer-capture-analysis-wpa-wpa2-with-psk-or-eap/ta-p/3116990
if header.ll_type != 1 and header.ll_type != 105:
print "unsupported linklayer type, only supports ethernet and 802.11"
exit(2)
clients = {}
if header.ll_type == 105:
# analyze 802.11 packet
for packet in caps.packets:
auth = packet[1].raw()[32:34]
if auth == '\x88\x8e':
AP = packet[1].raw()[16:22]
dest = packet[1].raw()[4:10]
source = packet[1].raw()[10:16]
part = packet[1].raw()[39:41]
relivent = True
if part == '\x00\x8a':
# from AP to client, handshake 01
message = 1
client = dest
Anonce = packet[1].raw()[51:83]
info = {'AP': AP, 'client': client, 'Anonce': Anonce, 'message': message}
elif part == '\x01\x0a':
# from client to AP, handshake 02
Snonce = packet[1].raw()[51:83]
client = source
mic = packet[1].raw()[115:131]
data = packet[1].raw()[34:115] + "\x00"*16 + packet[1].raw()[131:]
message = 2
info = {'AP': AP, 'data': data, 'client': client, 'Snonce': Snonce, 'mic': mic, 'message': message}
else:
relivent = False
if relivent:
if info['client'] in clients:
                        # this client already has messages recorded; append the new handshake message
clients[info['client']].append(info)
else:
                        # otherwise start a new message list for this previously unseen client
clients[info['client']] = [info]
else:
# analyze ethernet packet
for packet in caps.packets:
auth = packet[1].raw()[12:14]
if auth == '\x88\x8e':
relivent = True
part = packet[1].raw()[19:21]
if part == '\x00\x8a':
# from AP to client, handshake 01
message = 1
client = packet[1].raw()[0:6]
AP = packet[1].raw()[6:12]
Anonce = packet[1].raw()[31:63]
info = {'AP': AP, 'client': client, 'Anonce': Anonce, 'message': message}
elif part == '\x01\x0a':
# from client to AP, handshake 02
Snonce = packet[1].raw()[31:63]
AP = packet[1].raw()[0:6]
client = packet[1].raw()[6:12]
mic = packet[1].raw()[95:111]
data = packet[1].raw()[14:95] + "\x00"*16 + packet[1].raw()[111:]
message = 2
info = {'AP': AP, 'data': data, 'client': client, 'Snonce': Snonce, 'mic': mic, 'message': message}
else:
relivent = False
if relivent:
if info['client'] in clients:
                        # this client already has messages recorded; append the new handshake message
clients[info['client']].append(info)
else:
                        # otherwise start a new message list for this previously unseen client
clients[info['client']] = [info]
# start brute force
cracked = crackClients(clients, usersMac, SSID, passphraseQ)
if cracked == False:
# passphrase isn't in hte dictionary
print "Unable to find passphrase"
else:
# successfully cracked
print "Passphrase found! " + cracked
```
|
{
"source": "jeevatkm/policyuniverse",
"score": 2
}
|
#### File: policyuniverse/policyuniverse/expander_minimizer.py
```python
from __future__ import print_function
from policyuniverse import all_permissions
import json
import fnmatch
import sys
import copy
policy_headers = ["rolepolicies", "grouppolicies", "userpolicies", "policy"]
def expand_minimize_over_policies(policies, activity, **kwargs):
for header in policy_headers:
if header in policies:
output = {header: {}}
for policy in policies[header]:
output[header][policy] = activity(
policy=policies[header][policy], **kwargs
)
return output
return activity(policy=policies, **kwargs)
def _get_prefixes_for_action(action):
"""
:param action: iam:cat
:return: [ "iam:", "iam:c", "iam:ca", "iam:cat" ]
"""
(technology, permission) = action.split(":")
retval = ["{}:".format(technology)]
phrase = ""
for char in permission:
newphrase = "{}{}".format(phrase, char)
retval.append("{}:{}".format(technology, newphrase))
phrase = newphrase
return retval
def _expand_wildcard_action(action):
"""
:param action: 'autoscaling:*'
:return: A list of all autoscaling permissions matching the wildcard
"""
if isinstance(action, list):
expanded_actions = []
for item in action:
expanded_actions.extend(_expand_wildcard_action(item))
return expanded_actions
else:
if "*" in action:
expanded = [
expanded_action.lower()
for expanded_action in all_permissions
if fnmatch.fnmatchcase(expanded_action.lower(), action.lower())
]
# if we get a wildcard for a tech we've never heard of, just return the wildcard
if not expanded:
return [action.lower()]
return expanded
return [action.lower()]
def _get_desired_actions_from_statement(statement):
desired_actions = set()
actions = _expand_wildcard_action(statement["Action"])
for action in actions:
if action not in all_permissions:
raise Exception(
"Desired action not found in master permission list. {}".format(action)
)
desired_actions.add(action)
return desired_actions
def _get_denied_prefixes_from_desired(desired_actions):
denied_actions = all_permissions.difference(desired_actions)
denied_prefixes = set()
for denied_action in denied_actions:
for denied_prefix in _get_prefixes_for_action(denied_action):
denied_prefixes.add(denied_prefix)
return denied_prefixes
def _check_min_permission_length(permission, minchars=None):
if minchars and len(permission) < int(minchars) and permission != "":
print(
"Skipping prefix {} because length of {}".format(
permission, len(permission)
),
file=sys.stderr,
)
return True
return False
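# minimize_statement_actions (below) replaces each allowed action with its shortest
# usable prefix: a prefix qualifies only if it is not also a prefix of any action
# outside the desired set (the denied prefixes computed above), and a '*' is appended
# unless the prefix is itself a complete desired action.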
def minimize_statement_actions(statement, minchars=None):
minimized_actions = set()
if statement["Effect"] != "Allow":
raise Exception("Minification does not currently work on Deny statements.")
desired_actions = _get_desired_actions_from_statement(statement)
denied_prefixes = _get_denied_prefixes_from_desired(desired_actions)
for action in desired_actions:
if action in denied_prefixes:
print("Action is a denied prefix. Action: {}".format(action))
minimized_actions.add(action)
continue
found_prefix = False
prefixes = _get_prefixes_for_action(action)
for prefix in prefixes:
permission = prefix.split(":")[1]
if _check_min_permission_length(permission, minchars=minchars):
continue
if prefix not in denied_prefixes:
if prefix not in desired_actions:
prefix = "{}*".format(prefix)
minimized_actions.add(prefix)
found_prefix = True
break
if not found_prefix:
print("Could not suitable prefix. Defaulting to {}".format(prefixes[-1]))
minimized_actions.add(prefixes[-1])
# sort the actions
minimized_actions_list = list(minimized_actions)
minimized_actions_list.sort()
return minimized_actions_list
def get_actions_from_statement(statement):
allowed_actions = set()
if not type(statement.get("Action", [])) == list:
statement["Action"] = [statement["Action"]]
for action in statement.get("Action", []):
allowed_actions = allowed_actions.union(set(_expand_wildcard_action(action)))
if not type(statement.get("NotAction", [])) == list:
statement["NotAction"] = [statement["NotAction"]]
inverted_actions = set()
for action in statement.get("NotAction", []):
inverted_actions = inverted_actions.union(set(_expand_wildcard_action(action)))
if inverted_actions:
actions = _invert_actions(inverted_actions)
allowed_actions = allowed_actions.union(actions)
return allowed_actions
def _invert_actions(actions):
from policyuniverse import all_permissions
return all_permissions.difference(actions)
def expand_policy(policy=None, expand_deny=False):
# Perform a deepcopy to avoid mutating the input
result = copy.deepcopy(policy)
if type(result["Statement"]) is dict:
result["Statement"] = [result["Statement"]]
for statement in result["Statement"]:
if statement["Effect"].lower() == "deny" and not expand_deny:
continue
actions = get_actions_from_statement(statement)
if "NotAction" in statement:
del statement["NotAction"]
statement["Action"] = sorted(list(actions))
return result
def minimize_policy(policy=None, minchars=None):
str_pol = json.dumps(policy, indent=2)
size = len(str_pol)
for statement in policy["Statement"]:
minimized_actions = minimize_statement_actions(statement, minchars=minchars)
statement["Action"] = minimized_actions
str_end_pol = json.dumps(policy, indent=2)
end_size = len(str_end_pol)
# print str_end_pol
print("Start size: {}. End size: {}".format(size, end_size), file=sys.stderr)
return policy
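# Example round trip (a minimal sketch; the policy below is illustrative, not taken
# from this repo's tests):
# policy = {"Statement": [{"Effect": "Allow", "Action": ["iam:list*"], "Resource": "*"}]}
# minimized = minimize_policy(policy=expand_policy(policy=policy))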
```
#### File: policyuniverse/updater/service_action.py
```python
class ServiceActionConditionKey:
"""Stores a condition key that is associated with a ServiceAction."""
def __init__(self, body):
self.doc_page_rel = body["docPageRel"]
self.name = body["name"]
self.value_type = body["type"]
self.description = body["description"]
class ServiceAction:
"""Stores data on an AWS service permission
Args:
service (str): A python object representing an AWS service
body (dict): Contains data about one permission.
"""
def __init__(self, service, body):
self.service = service
self.description = self._get_description(body)
self.action_groups = self._get_action_groups(body)
self.api_doc = self._get_api_doc(body)
self.doc_page_rel = self._get_doc_page_rel(body)
self.doc_page = self._get_doc_page(body)
self.action_name = self._get_action_name(body)
self._condition_keys = self._get_condition_keys(body)
@property
def condition_keys(self):
"""Simplify access to condition keys."""
return sorted([k.name for k in self._condition_keys])
def calculate_action_groups(self):
"""Convert AWS Action groups into something that makes more sense."""
if "Permissions" in self.action_groups:
return "Permissions"
if "ListOnly" in self.action_groups:
return "List"
if "ReadOnly" in self.action_groups:
return "Read"
if "Tagging" in self.action_groups:
return "Tagging"
if "ReadWrite" in self.action_groups:
return "Write"
return "Unknown"
def toJSON(self):
"""Actually returns a dict."""
return dict(
description=self.description,
aws_action_groups=self.action_groups,
calculated_action_group=self.calculate_action_groups(),
docs=dict(
api_doc=self.api_doc,
doc_page_rel=self.doc_page_rel,
doc_page=self.doc_page,
),
condition_keys=self.condition_keys,
)
def _get_description(self, body):
return body["description"]
def _get_action_groups(self, body):
return body["actionGroups"]
def _get_api_doc(self, body):
return body["apiDoc"]
def _get_doc_page_rel(self, body):
return body["docPageRel"]
def _get_doc_page(self, body):
return body["docPage"]
def _get_action_name(self, body):
return body["id"]
def _get_condition_keys(self, body):
keys = list()
for key_body in body["contextKeys"]:
key = ServiceActionConditionKey(key_body)
keys.append(key)
return keys
```
|
{
"source": "jeevb/awsm",
"score": 2
}
|
#### File: awsm/keys/__init__.py
```python
import os
from .exceptions import KeyNotFoundError
from .validators import KEY_SCHEMA
from awsm.storage.file_storage import USER_KEYS_CFG
from awsm.utils import load_from_yaml
from voluptuous.humanize import validate_with_humanized_errors
class KeyManager(object):
def __init__(self):
super(KeyManager, self).__init__()
self._keys = None
self._load_keys()
def _load_keys(self):
self._keys = {
name: validate_with_humanized_errors(config, KEY_SCHEMA)
for name, config in load_from_yaml(USER_KEYS_CFG).items()
}
def get(self, name):
key = self._keys.get(name)
if key is None:
raise KeyNotFoundError(name)
return key['name'], key['path']
def find_path(self, name):
path = None
try:
_, path = self.get(name)
except KeyNotFoundError:
for key_dict in self._keys.values():
if name == key_dict['name']:
path = key_dict['path']
break
finally:
if path is None:
raise KeyNotFoundError(name)
path = os.path.abspath(os.path.expanduser(path))
if not os.path.exists(path):
raise KeyNotFoundError(
name, 'Key file \'{}\' does not exist.'.format(path))
return path
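# Example usage (a minimal sketch; assumes a 'default' entry is declared in USER_KEYS_CFG):
# km = KeyManager()
# key_name, key_path = km.get('default')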
```
#### File: awsm/awsm/project.py
```python
from ansible import constants
setattr(constants, 'HOST_KEY_CHECKING', False)
import os
import base64
import boto3
import docker
import getpass
import itertools
import json
import random
import socket
import sys
import time
import uuid
import yaml
from .constants import *
from .context_managers import shell_env
from .exceptions import (
ProjectError,
InvalidAvailabilityZone,
InvalidResource,
CannotDetachVolume,
CannotAttachVolume,
CannotFindRunningInstance,
CannotLoginToInstance
)
from .hooks import HooksManager, HookExecutor
from .keys import KeyManager
from .prettytable import InstanceTable, VolumeTable
from .profiles import ProfileManager
from .storage.database import PROJECT_DB as db
from .storage.file_storage import (
PROJECT_CFG_DIR,
PROJECT_IDENTITY,
PROJECT_TAGS
)
from .transport import ssh_tunnel
from .utils import load_from_yaml
from .validators import (
aws_fmt_tag,
IDENTIFIER_SCHEMA,
TAGS_SCHEMA,
CREATE_VOLUME_SCHEMA
)
from botocore.exceptions import ClientError
from inflection import underscore
from fabric.api import settings, hide, local, run
from functools import wraps
from string import ascii_lowercase as letters
from voluptuous import Error
from voluptuous.humanize import validate_with_humanized_errors
class Project(object):
##
## Decorators
##
def requires_project_cfg_dir(func):
@wraps(func)
def wrapper(*args, **kwargs):
os.makedirs(PROJECT_CFG_DIR, exist_ok=True)
return func(*args, **kwargs)
return wrapper
def retry_with_timeout(timeout, interval=1):
def decorator(func):
def wrapper(*args, **kwargs):
start = time.time()
while time.time() < start + timeout:
result = func(*args, **kwargs)
if result:
return result
time.sleep(interval)
return wrapper
return decorator
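    # Note: retry_with_timeout is a decorator factory; the wrapped predicate is
    # retried every `interval` seconds until it returns a truthy value or `timeout`
    # seconds elapse (in which case the wrapper returns None).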
def __init__(self):
super(Project, self).__init__()
# TODO(jeev): Validate proper initialization of boto session
# Required variables for AWS
self._session = boto3.session.Session()
self._client = self._session.client('ec2')
self._resource = self._session.resource('ec2')
# Initialize the project DB
self._load_db()
# Identifier
self._identifier = None
self._load_existing_identity()
# Tags
self._extra_tags = None
self._load_tags()
# Initialize profiles manager
self._profiles = ProfileManager()
# Initialize key manager
self._keys = KeyManager()
# Initialize hooks manager
self._hooks = HooksManager()
##
## Project Attributes
##
@property
def profile_manager(self):
return self._profiles
@property
def identifier(self):
if not self._identifier:
self.create_identity()
return self._identifier
@identifier.setter
def identifier(self, value):
if self._identifier is not None:
raise ProjectError('An identity already exists for this project.')
self._identifier = validate_with_humanized_errors(
value,
IDENTIFIER_SCHEMA
)
@property
def available_zones(self):
response = self._client.describe_availability_zones()
zones = response.get('AvailabilityZones', None) or []
return [i['ZoneName'] for i in zones if i['State'] == 'available']
@property
def identifier_hash(self):
dump = json.dumps(self.identifier, sort_keys=True).encode('utf-8')
return base64.b64encode(dump).decode('utf-8')
@property
def availability_zone(self):
return self.identifier.get('availability_zone')
@property
def uuid(self):
return self.identifier.get('uuid')
@property
def name(self):
return underscore(
'awsm_{hostname}_{user}_{project}'.format(
hostname=self.identifier['hostname'],
user=self.identifier['user'],
project=os.path.basename(self.identifier['working_directory'])
)
)
@property
def filters(self):
return {UUID_TAG_KEY: self.uuid}
##
## Database
##
@requires_project_cfg_dir
def _load_db(self):
db.connect()
##
## Identity
##
@requires_project_cfg_dir
def _load_existing_identity(self):
if os.path.exists(PROJECT_IDENTITY):
with open(PROJECT_IDENTITY) as handle:
self.load_identity(handle.read())
def load_identity(self, identifier):
self.identifier = json.loads(
base64.b64decode(identifier.encode('utf-8')).decode('utf-8'))
with open(PROJECT_IDENTITY, 'w') as handle:
print(self.identifier_hash, file=handle)
def create_identity(self, availability_zone=None):
if (
availability_zone is not None and
availability_zone not in set(self.available_zones)
):
raise InvalidAvailabilityZone(availability_zone)
self.identifier = {
'availability_zone': (
availability_zone or
random.choice(self.available_zones)
),
'hostname': socket.gethostname(),
'user': getpass.getuser(),
'uuid': uuid.uuid4().hex,
'working_directory': os.getcwd()
}
with open(PROJECT_IDENTITY, 'w') as handle:
print(self.identifier_hash, file=handle)
def show_identity(self):
yaml.dump(self.identifier, sys.stdout, default_flow_style=False)
border = '-' * 10
print(border, self.identifier_hash, border, sep='\n')
##
## Tags
##
@property
def tags(self):
tags = {PROJECT_TAG_KEY: self.name}
tags.update(self.filters)
tags.update(self._extra_tags)
return tags
# TODO (jeev): Move project tags to DB
@requires_project_cfg_dir
def _load_tags(self):
self._extra_tags = load_from_yaml(PROJECT_TAGS)
def set_project_tags(self, tags, refresh=False, remove=False):
if refresh:
# Clear existing tags
self._extra_tags.clear()
elif not tags:
if self._extra_tags:
yaml.dump(self._extra_tags,
sys.stdout,
default_flow_style=False)
return
if isinstance(tags, (list, tuple)):
tmp = {}
for tag in tags:
tmp.update(tag)
tags = tmp
if tags:
# Validate tags
tags = validate_with_humanized_errors(tags, TAGS_SCHEMA)
for key, value in tags.items():
if not remove:
if key in RESERVED_TAG_NAMES:
print('\'{}\''.format(key),
'is a reserved tag name.',
file=sys.stderr)
continue
print('Setting tag: {}={}'.format(key, value))
self._extra_tags[key] = value
elif key in self._extra_tags:
print('Deleting tag: {}'.format(key))
del self._extra_tags[key]
# Update project tags
with open(PROJECT_TAGS, 'w') as handle:
yaml.dump(self._extra_tags, handle, default_flow_style=False)
def _fmt_tags(self, tags=None):
tags = tags or self.tags
_tags = []
for k, v in tags.items():
_tags.append({'Key': str(k), 'Value': str(v)})
return _tags
##
## EC2 Helpers
##
def _get_device_info_for_image(self, image):
response = self._client.describe_images(ImageIds=[image])['Images'][0]
root_device_name = response['RootDeviceName']
return (
root_device_name,
EC2_DEVICE_NAME_REGEX.match(root_device_name).group('prefix')
)
def _get_streaming_response(self, api_method, key, **kwargs):
response = api_method(**kwargs)
yield from response.get(key, [])
next_token = response.get('nextToken')
while next_token is not None:
response = api_method(nextToken=next_token, **kwargs)
yield from response.get(key, [])
next_token = response.get('nextToken')
@retry_with_timeout(BOTO3_WAIT_TIMEOUT)
def _wait_for_state(self, resource, **states):
# Reload the resource
resource.load()
# If at least one condition is not met, resource is not ready.
ready = True
for attr, allowed_values in states.items():
value = getattr(resource, attr)
if value not in allowed_values:
ready = False
return ready
def _infer_resource_type(self, resource_id):
# Sanity check to make sure resource ID is a string
if isinstance(resource_id, str):
# Infer the type of resource
if resource_id.startswith('i-'):
return 'instance'
elif resource_id.startswith('vol-'):
return 'volume'
def get_resource_attrs(self, resource_id, restrict_to_project=True):
# Make sure resource ID is valid
resource_type = self._infer_resource_type(resource_id)
if not resource_type:
return
# Update tags with resource type
tags = {RESOURCE_TYPE_KEY: resource_type}
# Get additional tags for resource
response = self._client.describe_tags(
Filters=[{
'Name': 'resource-id',
'Values': [resource_id]
}]
).get('Tags', [])
if response:
# Collapse tags
for tag in response:
tags.update(aws_fmt_tag(tag))
# Ensure that resource belongs to this project
if not restrict_to_project or tags.get(UUID_TAG_KEY) == self.uuid:
return tags
def get_valid_resource_type(self, resource_id):
tags = self.get_resource_attrs(resource_id, restrict_to_project=True)
if tags is not None:
return tags.get(RESOURCE_TYPE_KEY)
def get_resource(self, resource_id, expected_resource_type=None):
resource_type = self.get_valid_resource_type(resource_id)
valid = (
resource_type is not None and
(expected_resource_type is None or
resource_type == expected_resource_type)
)
if not valid:
raise InvalidResource(resource_id, expected_resource_type)
handler = getattr(self, '_get_{}'.format(resource_type), None)
if handler:
return handler(resource_id)
def _get_instance(self, instance_id):
instance = self._resource.Instance(instance_id)
instance.load()
return instance
def _get_volume(self, volume_id):
volume = self._resource.Volume(volume_id)
volume.load()
return volume
##
## EC2 Resource Tags
##
def set_resource_tags(self,
resources,
tags,
refresh=True,
remove=False):
if not tags:
for resource in resources:
tags = self.get_resource_attrs(resource)
if tags is not None:
print(resource)
print('-' * len(resource))
yaml.dump(tags, sys.stdout, default_flow_style=False)
return
if isinstance(tags, (list, tuple)):
tmp = {}
for tag in tags:
tmp.update(tag)
tags = tmp
# Validate tags
tags = validate_with_humanized_errors(tags, TAGS_SCHEMA)
# Remove tags instead of adding
if remove:
tags = [{'Key': k} for k in tags]
self._client.delete_tags(Resources=resources, Tags=tags)
return
if refresh:
# Clear existing tags
self._client.delete_tags(Resources=resources)
# Add tags for this project
new_tags = self.tags
new_tags.update(tags)
tags = new_tags
self._set_resource_tags(*resources, **tags)
def _set_resource_tags(self, *resources, **tags):
self._resource.create_tags(Resources=resources,
Tags=self._fmt_tags(tags))
##
## Ansible Integration
##
def run_hooks_on_instance(self,
*instance_id,
hook=None,
config=None,
task_vars=None):
# Nothing specified to be run
if hook is None and config is None:
return
instance = None
attrs = None
hosts = []
# Validate and collect attributes for every instance
for iid in instance_id:
instance, attrs = self.find_usable_instance(iid)
hosts.append(attrs['host_string'])
# Prepare vars for ansible Play
play_vars = {'gather_facts': False}
executor = HookExecutor(attrs['username'],
attrs['key_filename'],
*hosts,
task_vars=task_vars or {},
play_vars=play_vars)
executor.manager = self._hooks
executor(hook=hook, config=config)
##
## EC2 Instances
##
def provision(self, name, count=1):
print('Provisioning profile:', name)
profile = self._profiles.get(name)
# Get device naming convention for image
root_device, device_prefix = self._get_device_info_for_image(
profile['ami'])
# Construct list of volumes to launch profile with
volumes = profile.get('volumes') or []
for volume in volumes:
volume['device'] = device_prefix + volume['device']
# Update and add definition for root volume
root_volume = profile['root_volume']
root_volume['device'] = root_device
volumes.append(root_volume)
# Retrieve key to use for resource
key_profile = profile['key']
key_name, _ = self._keys.get(key_profile)
request = dict(
ImageId=profile['ami'],
MinCount=count,
MaxCount=count,
KeyName=key_name,
SecurityGroups=profile['security_groups'],
InstanceType=profile['instance_type'],
Placement={'AvailabilityZone': self.availability_zone},
BlockDeviceMappings=[
{
'DeviceName': volume['device'],
'Ebs': {
'VolumeSize': volume['size'],
'VolumeType': volume['type'],
'DeleteOnTermination': volume['delete_on_termination']
}
}
for volume in volumes
]
)
# If a role is specified, use it
role = profile.get('role')
if role is not None:
request.update({'IamInstanceProfile': {'Name': role}})
# Create the instances
response = self._resource.create_instances(**request)
# Make a list of all elements provisioned for this profile
# Properly tag all elements
tags = self.tags
profile_tags = profile.get('tags', {})
profile_name = profile_tags.pop('Name', None) or self.name
profile_uid = uuid.uuid4().hex[:6]
tags.update(profile_tags)
# Profile-specific 'on_provision' hook override
on_provision = profile.get('on_provision')
instance_ids = []
for idx, instance in enumerate(response):
elements = []
# Block until the instance is SSHable
# TODO(jeev): Use coroutines for this
instance.wait_until_running()
self._wait_until_usable(instance)
elements.append(instance.id)
for volume in instance.volumes.all():
elements.append(volume.id)
self._set_resource_tags(
Name='{}_{}_{}_run_{}'.format(
profile_name,
name,
profile_uid,
idx
),
*elements,
**tags
)
instance_ids.append(instance.id)
# Run hooks
self.run_hooks_on_instance(
*instance_ids,
hook='on_provision',
config=on_provision
)
return instance_ids
def ls(self, all=False, quiet=False, verbose=True):
filters = [
{'Name': 'tag:{}'.format(k), 'Values': [v]}
for k, v in self.filters.items()
]
if not all:
filters.append({
'Name': 'instance-state-name',
'Values': ['pending', 'running',]
})
stream = self._get_streaming_response(
self._client.describe_instances,
key='Reservations',
Filters=filters
)
table = InstanceTable(skip_tags=set(self.tags.keys()))
table.load_stream(
itertools.chain.from_iterable(i['Instances'] for i in stream))
if verbose:
output = (
table.get_string(header=False, fields=['ID'])
if quiet else
table.get_string()
)
if output:
print(output)
return table
def enroll(self, resource_id):
resource_type = self._infer_resource_type(resource_id)
if not resource_type:
raise InvalidResource(resource_id)
elements = [resource_id]
# If resource is an instance, enroll all of its volumes
if resource_type == 'instance':
resource = self._get_instance(resource_id)
# Instance must be in the correct availability zone
if (
resource.placement['AvailabilityZone'] !=
self.availability_zone
):
raise InvalidResource(resource_id)
for volume in resource.volumes.all():
elements.append(volume.id)
# If resource is a volume, enroll the instance it is attached to
elif resource_type == 'volume':
resource = self._get_volume(resource_id)
# Volume must be in the correct availability zone
if resource.availability_zone != self.availability_zone:
raise InvalidResource(resource_id)
for attachment in resource.attachments:
elements.append(attachment['InstanceId'])
# Set tags for all related elements
self._set_resource_tags(*elements)
def start(self, instance_id):
print('Starting instance:', instance_id)
instance = self.get_resource(instance_id, 'instance')
instance.start()
# Block until the instance is SSHable
# TODO(jeev): Use coroutines for this
instance.wait_until_running()
self._wait_until_usable(instance)
def rm(self, instance_id, remove_volumes=False):
print('Removing instance:', instance_id)
instance = self.get_resource(instance_id, 'instance')
# Track volumes attached to this instance
attached_volumes = [v.id for v in instance.volumes.all()]
# Terminate instance
instance.terminate()
instance.wait_until_terminated()
# Clean up volumes if necessary
if remove_volumes:
print('Removing volumes for instance.')
for v in attached_volumes:
try:
_v = self._get_volume(v)
_v.delete()
except ClientError:
pass
def stop(self, instance_id):
print('Stopping instance:', instance_id)
instance = self.get_resource(instance_id, 'instance')
instance.stop()
instance.wait_until_stopped()
##
## EC2 Instance Access
##
@retry_with_timeout(BOTO3_WAIT_TIMEOUT, interval=5)
def _find_username(self, host_string, key_filename):
for user in AWS_SHELL_USERS:
try:
with settings(
hide('running',
'warnings',
'aborts',
'stdout',
'stderr'),
host_string=host_string,
user=user,
abort_on_prompts=True,
key_filename=key_filename,
warn_only=True
):
run('ls')
return user
except:
pass
def _get_instance_ssh_attrs(self, instance):
if not isinstance(instance, boto3.resources.base.ServiceResource):
instance = self.get_resource(instance, 'instance')
else:
instance.load()
key_filename = self._keys.find_path(instance.key_name)
host_string = instance.public_ip_address
if not host_string:
raise CannotLoginToInstance(
instance.id, 'No public IP address available.')
username = self._find_username(host_string, key_filename)
if not username:
raise CannotLoginToInstance(
instance.id, 'Cannot find a valid shell user.')
return instance, {
'key_filename': key_filename,
'host_string': host_string,
'username': username
}
def _get_single_running_instance(self):
instance_table = self.ls(verbose=False)
running = [
instance
for instance in instance_table.data
if instance['State'] == 'running'
]
# Handle the case of no running instances
if not running:
raise CannotFindRunningInstance('No running instances found.')
# Handle ambiguity - too many running instances
elif len(running) != 1:
raise CannotFindRunningInstance(
'Multiple running instances found.')
# One running instance found
return running[0]['ID']
def find_usable_instance(self,
instance=None,
raise_exception=True,
verbose=False):
if instance is None:
instance = self._get_single_running_instance()
try:
return self._get_instance_ssh_attrs(instance)
except Exception as e:
if raise_exception:
raise
if verbose:
print(e, file=sys.stderr)
@retry_with_timeout(BOTO3_WAIT_TIMEOUT)
def _wait_until_usable(self, instance):
return self.find_usable_instance(instance, raise_exception=False)
def ssh(self, instance_id):
_, attrs = self.find_usable_instance(instance_id)
with settings(
hide('running', 'warnings',),
warn_only=True
):
local("""
ssh \
-o PreferredAuthentications=publickey \
-o StrictHostKeyChecking=no \
-i {key_filename} \
{username}@{host_string}
""".format(**attrs))
def machine(self, instance_id):
instance, attrs = self.find_usable_instance(instance_id)
tunnel = ssh_tunnel.start(
attrs['host_string'],
attrs['username'],
attrs['key_filename'],
self._hooks.get_var('remote_docker_port', 2375)
)
with shell_env(
DOCKER_HOST='tcp://{}'.format(tunnel.bind_string),
DOCKER_MACHINE_NAME=instance.id
):
with settings(
hide('running', 'warnings',),
warn_only=True
):
local("""
/bin/bash \
--rcfile <(echo \'PS1="[{}] \\w$ "\')
""".format(instance.id), shell='/bin/bash')
##
## EC2 Volumes
##
def volumes(self, all=False, quiet=False, verbose=True):
filters = [
{'Name': 'tag:{}'.format(k), 'Values': [v]}
for k, v in self.filters.items()
]
if not all:
filters.append({
'Name': 'status',
'Values': ['creating', 'available',]
})
stream = self._get_streaming_response(
self._client.describe_volumes,
key='Volumes',
Filters=filters
)
table = VolumeTable(skip_tags=set(self.tags.keys()))
table.load_stream(stream)
if verbose:
output = (
table.get_string(header=False, fields=['ID'])
if quiet else
table.get_string()
)
if output:
print(output)
return table
def create_volume(self,
instance_id,
size,
type='gp2',
snapshot=None):
instance = self.get_resource(instance_id, 'instance')
request = {
'Size': size,
'AvailabilityZone': self.availability_zone,
'VolumeType': type,
'TagSpecifications': [{
'ResourceType': 'volume',
'Tags': instance.tags
}]
}
if snapshot is not None:
request.update({'SnapshotId': snapshot})
request = validate_with_humanized_errors(request, CREATE_VOLUME_SCHEMA)
volume = self._resource.create_volume(**request)
# Wait for volume to become available
self._wait_for_state(volume, state=('available',))
return self.attach_volume(instance_id, volume.id)
def _attach_volume_helper(self, instance_id, volume_id, device_name):
volume = self.get_resource(volume_id, 'volume')
# Find instance this volume is attached to
if volume.attachments:
raise CannotAttachVolume(
volume_id, 'Volume is attached to another instance.')
print('Attaching volume:', volume_id)
volume.attach_to_instance(InstanceId=instance_id, Device=device_name)
def attach_volume(self, instance_id, *volume_ids):
instance = self.get_resource(instance_id, 'instance')
# Get device name prefix for instance
device_prefix = (
EC2_DEVICE_NAME_REGEX
.match(instance.root_device_name)
.group('prefix')
)
# Get the maximum mounted device name ID
curr_char = max(
EC2_DEVICE_NAME_REGEX
.match(i.get('DeviceName'))
.group('partition')
for i in instance.block_device_mappings
)
next_id = max(letters.index('f'), letters.index(curr_char) + 1)
for volume_id in volume_ids:
try:
self._attach_volume_helper(
instance_id,
volume_id,
device_prefix + letters[next_id]
)
except CannotAttachVolume as e:
print(e, file=sys.stderr)
else:
# Increment ID for next device
next_id += 1
# Run hook on attaching volumes
self.run_hooks_on_instance(instance_id, hook='on_attach_volume')
def detach_volume(self, volume_id, force=False):
volume = self.get_resource(volume_id, 'volume')
# Find instance this volume is attached to
if not volume.attachments:
return
instance_id = volume.attachments[0]['InstanceId']
instance = self.get_resource(instance_id, 'instance')
# Instance should be stopped if volume is to be detached
if (
not force
and instance.state['Name'] not in ('stopped', 'terminated',)
):
raise CannotDetachVolume(
volume_id, 'The attached instance is still running.')
print('Detaching volume:', volume_id)
volume.detach_from_instance({
'InstanceId': instance_id,
'Force': force
})
def rm_volume(self, volume_id):
print('Removing volume:', volume_id)
volume = self.get_resource(volume_id, 'volume')
volume.delete()
```
#### File: awsm/transport/__init__.py
```python
import atexit
import bgtunnel
import socket
import time
from .exceptions import SSHTunnelError
class SSHTunnel(object):
def __init__(self):
super(SSHTunnel, self).__init__()
self._cache = {}
@staticmethod
def _check_tunnel(tunnel, timeout=10):
if not tunnel.isAlive():
tunnel.start()
done = False
valid = False
start = time.time()
while not done and time.time() - start < timeout:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(timeout)
try:
s.connect((tunnel.bind_address, tunnel.bind_port))
except ConnectionRefusedError:
time.sleep(.1)
except socket.error:
done = True
else:
done = True
valid = True
finally:
s.close()
if not valid:
raise SSHTunnelError(tunnel.host_address, tunnel.host_port)
def start(self, host, user, private_key, remote_port):
# Try retrieving a valid tunnel from the cache
tunnel = self._cache.get((host, user, private_key, remote_port))
# Create a new tunnel
if tunnel is None:
tunnel = bgtunnel.open(
host,
ssh_user=user,
identity_file=private_key,
host_port=remote_port,
expect_hello=False,
silent=True,
strict_host_key_checking=False
)
# Cache new tunnel
self._cache[host, user, private_key, remote_port] = tunnel
# Check tunnel
self._check_tunnel(tunnel)
return tunnel
def stop(self):
for tunnel in self._cache.values():
tunnel.close()
ssh_tunnel = SSHTunnel()
atexit.register(ssh_tunnel.stop)
```
|
{
"source": "jeevb/prime",
"score": 2
}
|
#### File: prime/bot/bot.py
```python
import traceback
from .command import CommandMgr
from .constants import SYSTEM_USER, SYSTEM_CHANNEL, SHORTHAND_TRIGGER_RE
from .listener import ListenerMgr
from .job import JobsMgr
from .query import Query
from .utils import strip
from gevent import Greenlet, sleep, spawn_raw, spawn_later
from gevent.event import Event
from greenlet import GreenletExit
from prompt_toolkit import prompt, AbortAction
class GenericBot(object):
command_mgr_class = CommandMgr
listener_mgr_class = ListenerMgr
jobs_mgr_class = JobsMgr
query_class = Query
def __init__(self, ping_interval=3):
super(GenericBot, self).__init__()
self._greenlet = None
self._prompt_greenlet = None
self._command_mgr = self.command_mgr_class(self)
self._listener_mgr = self.listener_mgr_class(self)
self._jobs_mgr = self.jobs_mgr_class(self)
self._stop_event = Event()
self._stop_event.set()
# For pinging server
self._ping_interval = ping_interval
# Pattern to determine if incoming messages are targeting bot
self._targeting_me_re = None
def handle_cmd(self, query):
self._command_mgr.handle(query)
def start(self):
self._stop_event.clear()
if not self._greenlet:
self._greenlet = Greenlet(self.run)
self._greenlet.start()
if not self._prompt_greenlet:
self._prompt_greenlet = Greenlet(self.prompt)
self._prompt_greenlet.start()
def join(self, timeout=None):
try:
self._stop_event.wait(timeout)
except KeyboardInterrupt:
pass
def stop(self):
self._stop_event.set()
self._greenlet.kill()
self._greenlet = None
self._prompt_greenlet.kill()
self._prompt_greenlet = None
def prompt(self):
while True:
try:
message = prompt('>>> ')
except (GreenletExit, KeyboardInterrupt, SystemExit):
self.stop()
except:
traceback.print_exc()
else:
if message:
query = Query(SYSTEM_USER, SYSTEM_CHANNEL, message)
query.is_targeting_me = True
query.is_private = True
query.send_handler = (
lambda _, m: self._send_helper(print, m))
spawn_raw(self._listener_mgr.handle, query)
finally:
sleep(.5)
def run(self):
while True:
try:
self._connect()
self.poll()
except (GreenletExit, KeyboardInterrupt, SystemExit):
self.stop()
except:
traceback.print_exc()
sleep(10)
def poll(self):
raise NotImplementedError(
'%r should implement the `poll` method.'
% self.__class__.__name__
)
def _send_helper(self, handler, message):
message = strip(message)
if isinstance(message, (str, bytes)):
if message:
handler(message)
elif hasattr(message, '__iter__'):
for chunk in message:
if chunk:
sleep(.5)
handler(chunk)
def send(self, channel, message):
raise NotImplementedError(
'%r should implement the `send` method.'
% self.__class__.__name__
)
def on_query(self, query):
query.send_handler = self.send
self._listener_mgr.handle(query)
def _ping(self):
self.ping()
spawn_later(self._ping_interval, self._ping)
def _connect(self):
self.connect()
self._ping()
def connect(self):
raise NotImplementedError(
'%r should implement the `connect` method.'
% self.__class__.__name__
)
def ping(self):
raise NotImplementedError(
'%r should implement the `ping` method.'
% self.__class__.__name__
)
def _is_targeting_me(self, message):
targeting_me = self._targeting_me_re.match(message) is not None
if targeting_me:
message = self._targeting_me_re.sub('', message)
shorthand = SHORTHAND_TRIGGER_RE.match(message) is not None
return message, targeting_me, shorthand
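# Concrete bots (for example the Slack and Mattermost backends elsewhere in this
# package) subclass GenericBot and implement poll(), send(), connect() and ping();
# this base class only drives the greenlets, the prompt loop and query routing.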
```
#### File: bot/commands/hello.py
```python
from prime.bot.command import Command
cmd = Command.create(
'hello',
aliases=('hi', 'hey', 'yo', 'hola',),
timeout=10,
description='Say hello.'
)
@cmd.register
def main(command, query, args):
query.reply_with_one_of(
'Hello there!',
'Well, hello!',
'Hi there!',
'Hey you!',
'Hey there!'
)
```
#### File: prime/mattermost/decorators.py
```python
from .exceptions import UnsupportedAPIVersion
from functools import wraps
def api_specific(func):
def wrapper(obj, *args, **kwargs):
handler = getattr(
obj,
'_{}_helper_{}_{}'.format(func.__name__, *obj.api_version),
None
)
if handler is None:
raise UnsupportedAPIVersion(obj._api_version)
return handler(*args, **kwargs)
return wrapper
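# Example dispatch (a sketch; `login` is a hypothetical method name): with
# obj.api_version == (4, 0), a method decorated with @api_specific and named
# `login` resolves to obj._login_helper_4_0(*args, **kwargs).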
```
#### File: prime/mattermost/groups.py
```python
import re
from prime.bot.groups import GroupsMixin
from prime.bot.exceptions import InvalidEntity
MM_LINK_RE = r'[%s](?P<entity>[^\s]+)'
MM_LINK_USER_RE = re.compile(MM_LINK_RE % r'\@')
MM_LINK_CHANNEL_RE = re.compile(MM_LINK_RE % r'\~')
class MMGroupsMixin(GroupsMixin):
database_name = 'mattermost_groups'
def _find_in_user_cache(self, user):
try:
user = self._user_cache[user]
except KeyError:
raise InvalidEntity('Invalid user: %r' % user)
return user
def _find_in_channel_cache(self, channel):
try:
channel = self._channel_cache[channel]
except KeyError:
raise InvalidEntity('Invalid channel: %r' % channel)
return channel
def validate_user(self, user):
match = MM_LINK_USER_RE.match(user)
if match:
user = match.group('entity')
return self._find_in_user_cache(user)['id']
def validate_channel(self, channel):
match = MM_LINK_CHANNEL_RE.match(channel)
if match:
channel = match.group('entity')
return self._find_in_channel_cache(channel)['id']
def _user_display(self, user):
user = self._find_in_user_cache(user)
return '@{0}'.format(user['username'])
def _channel_display(self, channel):
channel = self._find_in_channel_cache(channel)
return '~{0}'.format(channel['name'])
```
#### File: prime/slack/bot.py
```python
import re
import sys
import time
from .client import SlackClient2
from .groups import SlackGroupsMixin
from .query import SlackQuery
from gevent import sleep, spawn_raw
from prime.bot.bot import GenericBot
from prime.bot.constants import SEPARATORS
from prime.bot.utils import strip
class SlackBot(SlackGroupsMixin, GenericBot):
query_class = SlackQuery
def __init__(self, cfg):
super(SlackBot, self).__init__()
self._client = SlackClient2(cfg.slack_token)
@property
def _attrs(self):
return self._client.server.login_data['self']
def _handle_message(self, event):
message = event.get('text')
user = event.get('user')
# Handle case of edited message
event_message = event.get('message')
if not message and event_message:
message = event_message.get('text')
user = event_message.get('user')
if not message:
return
message, is_targeting_me, shorthand = self._is_targeting_me(message)
channel = event.get('channel')
query = self.query_class(user=self._get_user(user),
channel=self._get_channel(channel),
message=message)
query.is_targeting_me = (is_targeting_me or
shorthand or
query.is_direct_message)
if shorthand:
query.is_private = True
return self.on_query(query)
def _handle_user_change(self, event):
user_data = event.get('user')
if user_data:
user_id = user_data.get('id')
tz = user_data.get('tz', 'unknown')
name = user_data.get('name')
real_name = user_data.get('real_name', name)
if not self._update_user(name=name,
id=user_id,
real_name=real_name,
tz=tz):
self._add_user(name, user_id, real_name, tz)
def _handle_channel_left(self, event):
self._remove_channel(event.get('channel'))
def _handle_channel_joined(self, event):
channel_data = event.get('channel')
if channel_data:
channel_id = channel_data.get('id')
name = channel_data.get('name')
members = channel_data.get('members')
if not self._update_channel(name=name,
id=channel_id,
members=members):
self._add_channel(name, channel_id, members)
def _handle_channel_rename(self, event):
channel_data = event.get('channel')
if channel_data:
self._update_channel(name=channel_data.get('name'),
id=channel_data.get('id'))
def _handle_event(self, event):
event_type = event.get('type')
handler = getattr(self, '_handle_{}'.format(event_type), None)
if handler:
spawn_raw(handler, event)
def _add_user(self, *args, **kwargs):
self._client.server.attach_user(*args, **kwargs)
def _update_user(self, **kwargs):
user = self._get_user(kwargs.get('id'))
if not user:
return False
user.__dict__.update(**kwargs)
return True
def _get_user(self, user):
return self._client.server.users.find(user)
def _add_channel(self, *args, **kwargs):
self._client.server.attach_channel(*args, **kwargs)
def _remove_channel(self, channel_id):
channel = self._get_channel(channel_id)
if channel:
self._client.server.channels.remove(channel)
def _update_channel(self, **kwargs):
channel = self._get_channel(kwargs.get('id'))
if not channel:
return False
channel.__dict__.update(**kwargs)
return True
def _get_channel(self, channel):
return self._client.server.channels.find(channel)
def poll(self):
while True:
data = self._client.rtm_read()
for event in data:
self._handle_event(event)
sleep(.5)
def ping(self):
self._client.server.ping()
def connect(self):
self._client.server.rtm_connect()
# Cache pattern to determine if incoming message is targeting bot
link = re.escape('<@{0}>'.format(self._attrs.get('id')))
name = re.escape(self._attrs.get('name'))
self._targeting_me_re = re.compile(
r'^(%s|%s)[%s]+' % (link, name, SEPARATORS), re.I)
def send(self, channel, message):
handler = lambda m: self._client.rtm_send_message(channel, m)
return self._send_helper(handler, message)
```
|
{
"source": "JeevenMann/RedditBot",
"score": 3
}
|
#### File: JeevenMann/RedditBot/reddit.py
```python
import praw
import credentials
import json
from urllib.request import (Request, urlopen, urlretrieve)
import urllib.request, json
import requests
CLIENT_ID,CLIENT_SECRET,USER_AGENT = credentials.get_reddit_info()
REDDIT = praw.Reddit(client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
user_agent=USER_AGENT)
def get_subreddit():
return REDDIT.random_subreddit(False)
def get_post(subreddit):
for value in subreddit.top(limit=1):
title = value.title
img_url = value.url #url of the image
subreddit_url = value.permalink #comes back as /r/subreddit...post
post_url = "https://www.reddit.com"+subreddit_url
json_url = "https://www.reddit.com"+subreddit_url+'.json'
with urllib.request.urlopen(json_url) as url:
data = json.loads(url.read().decode())
return title,post_url,img_url
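# Example usage (a sketch; assumes credentials.py supplies valid Reddit API keys):
# sub = get_subreddit()
# title, post_url, img_url = get_post(sub)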
```
|
{
"source": "jeevers/hyperlapsemb_example",
"score": 3
}
|
#### File: jeevers/hyperlapsemb_example/boilerplate.py
```python
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.gen
import os
import signal
class mainhandler(tornado.web.RequestHandler):
'''
    When a user first goes to /wsgi, the browser sends a GET HTTP
    request to the server, which in turn calls the 'get' method.
    When the user fills out the form and clicks the 'submit' button,
    the browser sends a POST request to the server and calls the 'post'
    method, which reads the submitted arguments and renders
    a new page based on the submitted data.
'''
def get(self):
self.render('form.html')
def post(self):
name = self.get_arguments('name')[0]
#print name
self.render('submit.html', name=name)
class urlhandler(tornado.web.RequestHandler):
'''
This will take any text after '/name/' and render a template with
that string. A regex is used to extract the name.
'''
def get(self, name):
self.render('submit.html', name=name)
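# e.g. a GET request to /name/Alice matches the '/name/' route below and renders
# submit.html with name == 'Alice'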
routes = [
(r'/wsgi', mainhandler),
(r'/name/([0-9A-Za-z ]+)', urlhandler),
]
settings = dict(
static_path=os.path.join(os.path.dirname(__file__), "static"),
template_path=os.path.join(os.path.dirname(__file__), "templates"),
)
application = tornado.web.Application(routes, **settings)
if __name__ == '__main__':
http_server = tornado.httpserver.HTTPServer(application)
port_listen = 8800
http_server.listen(port_listen)
loop = tornado.ioloop.IOLoop.instance()
def sigint_handler(signum, frame):
print('signal handler called with %s, frame %s' % (signum, frame))
#periodic_cbk.stop()
loop.stop()
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGHUP, sigint_handler)
signal.signal(signal.SIGTERM, sigint_handler)
#periodic_cbk = tornado.ioloop.PeriodicCallback(ip_poll,
# poll_interval*60*1000,
# loop)
#periodic_cbk.start()
loop.start()
# vim: set tabstop=4 shiftwidth=4 softtabstop=4 expandtab:
```
|
{
"source": "jeevers/onetimepass",
"score": 3
}
|
#### File: jeevers/onetimepass/auth.py
```python
from passlib.hash import pbkdf2_sha256
import string
import random
def generate_hash(passwd):
return pbkdf2_sha256.encrypt(passwd, rounds=200000, salt_size=16)
def verify_hash(passwd, hash):
return pbkdf2_sha256.verify(passwd, hash)
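# Example (a sketch): round-tripping a password through the helpers above
# h = generate_hash('correct horse battery staple')
# assert verify_hash('correct horse battery staple', h)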
#def generate_otp_secret(size=24, chars=string.ascii_uppercase+string.digits):
# ##turns out the otp secret needs to be base32 encoded
# return ''.join(random.choice(chars) for _ in range(size))
```
#### File: jeevers/onetimepass/db.py
```python
from peewee import *
import pyotp
import auth
USERSDB = 'users.db'
db = SqliteDatabase(USERSDB)
class BaseModel(Model):
class Meta:
database = db
class User(BaseModel):
username = CharField(unique=True)
email = CharField()
passwdhash = CharField()
#otp_enabled = BoolField(default=False)
otp_secret = CharField()
hotp_counter = IntegerField(default=0)
failed_attempts = IntegerField(default=0)
def init_db(tables, database):
"""
Initializes tables. The tables argument must be a list.
"""
database.create_tables(tables)
def create_user(username, password, email):
"""
Adds user to database. The password is hashed using pbkdf2_sha256.
The otp secret is automatically generated, and needs to be base32 encoded.
"""
new_user = User(username=username,
email=email,
passwdhash=auth.generate_hash(password),
otp_secret=pyotp.random_base32())
new_user.save()
def get_user(username):
"""
Queries database for specified username. Will throw a User.DoesNotExist
exception if not found. The username field is unique, so
only one user account will be returned.
"""
user_query = User.select().where(User.username == username)
return user_query.get()
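# Example usage (a minimal sketch; assumes the SQLite file is writable):
# init_db([User], db)
# create_user('alice', 'hunter2', 'alice@example.com')
# user = get_user('alice')
# pyotp.TOTP(user.otp_secret).now()  # current time-based one-time code for this user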
```
|
{
"source": "Jeevesh8/AutoRegressive-MLM",
"score": 2
}
|
#### File: Jeevesh8/AutoRegressive-MLM/finetune.py
```python
import copy
import numpy as np
from functools import partial
from copy import deepcopy
import pickle
import jax
import jax.numpy as jnp
import haiku as hk
from haiku.data_structures import to_immutable_dict, to_mutable_dict
import optax
from transformers import RobertaTokenizer
from src.DataLoaders.xml import load_xml_data
from src.DataLoaders.json import load_reddit_data
from src.Tokenizers.thread_tokenizer import Thread_Tokenizer
from src.model.transformers import TransformerFeaturizer, FineTuningExtendedEncoder
from src.model.utils import logits_to_ar_classifier_params, print_keys, get_pretrained_weights, copy_available_keys
from src.model.purified_jitted_fns import get_fn_to_transform, get_pure_jitted_fn
from src.optimizers.adam import get_adam_opt
from src.Tokenizers.masking_utils import get_masking_func
from config import config
from loss_eval_utils import ft_loss, get_params, get_classification_report, flatten_dict
import wandb
def load_pretrained_tokenizer():
"""Loads Pre-Trained Tokenizers if config['initialize_pretrained'] is specified, into the global config"""
if 'initialize_pretrained' in config and config['initialize_pretrained']!='':
huggingface_tokenizer = RobertaTokenizer.from_pretrained(config['initialize_pretrained'])
config['pt_hf_tokenizer'] = huggingface_tokenizer
def get_dataloaders():
data_loader = load_reddit_data(config)
train_data_loader = load_xml_data(config, split='train/')
valid_data_loader = load_xml_data(config, split='valid/')
test_data_loader = load_xml_data(config, split='test/')
return data_loader, train_data_loader, valid_data_loader, test_data_loader
def train_tokenizer(data_loader):
if config['initialize_pretrained'] == '':
lm_tokeniser = Thread_Tokenizer(config)
lm_tokeniser.train_tokenizer(str_iter=data_loader.get_sentences())
else:
#Will automatically load pre-trained version if config['pt_hf_tokenizer'] is defined.
lm_tokeniser = Thread_Tokenizer(config)
return lm_tokeniser
def update_config(config, train_data_loader):
print("Vocabulary : ", lm_tokeniser.tokenizer.get_vocab())
config['vocab_size'] = lm_tokeniser.tokenizer.get_vocab_size()
#Tokenization ids
config['mask_id'] = lm_tokeniser.tokenizer.token_to_id("<mask>")
config['pad_id'] = lm_tokeniser.tokenizer.token_to_id("<pad>")
config['sos_id'] = lm_tokeniser.tokenizer.token_to_id("<s>")
config['eos_id'] = lm_tokeniser.tokenizer.token_to_id("</s>")
config['dsm_list'] = [lm_tokeniser.tokenizer.token_to_id(token)
for token in lm_tokeniser.dms]
config['total_steps'] = len([0 for thread in train_data_loader.thread_generator()])
print("Total steps: ", config['total_steps'])
return config
def load_pretrained_wts(featurizer_params, ExtendedEncoder_params):
"""Merging pre-trained and initialised parameters"""
if config['params_file']!='':
with open(config['params_file'], 'rb') as f:
pt_wts = pickle.load(f)
featurizer_params = to_mutable_dict(featurizer_params)
featurizer_params = copy_available_keys(pt_wts['comments_encoder'], featurizer_params,)
ExtendedEncoder_params = to_mutable_dict(ExtendedEncoder_params)
ExtendedEncoder_params = copy_available_keys(pt_wts['mlm_predictor'], ExtendedEncoder_params,)
else:
print("No pretrained wts file was provided, initializing with random wts. Provide the pt wts file\
in config['param_file'], if you wish to use pretrained weights.")
params = to_immutable_dict( {'comments_encoder' : featurizer_params,
'ar_classifier' : ExtendedEncoder_params } )
return params
def jit_fns(pure_featurizer_fn, pure_loss_fn, pure_pred_fn):
global featurizer_f, loss_f, eval_featurizer_f, eval_pred_f, loss, accuracy
featurizer_f = get_pure_jitted_fn(pure_featurizer_fn, True, config)
loss_f = get_pure_jitted_fn(pure_loss_fn, True, config)
loss = partial(ft_loss, featurizer_f, loss_f, mode='loss')
eval_featurizer_f = get_pure_jitted_fn(pure_featurizer_fn, False, config)
eval_pred_f = get_pure_jitted_fn(pure_pred_fn, False, config)
accuracy = partial(ft_loss, eval_featurizer_f, eval_pred_f, mode='accuracy')
def update(opt_state, params, key, thread, config):
turn = 0
(batch_loss, remaining_comments), grad = jax.value_and_grad(loss, has_aux=True)(params, key, thread, config, turn)
turn += 1
while remaining_comments:
print("Big tree, turn: ", turn)
tup, grads = jax.value_and_grad(loss, has_aux=True)(params, key, thread, config, turn)
turn += 1
batch_loss += tup[0]
grad = jax.tree_util.tree_multimap(lambda x,y: x+y, grad, grads)
remaining_comments = tup[1]
updates, opt_state = opt.update(grad, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, opt_state, batch_loss
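# Note: `update` accumulates gradients over successive "turns" when a thread is too
# large for a single pass, sums them with jax.tree_util.tree_multimap, and applies
# one optimizer step on the accumulated gradient.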
def thread_accuracy(params, key, thread, config):
turn = 0
all_preds, all_labels = [], []
remaining_comments = True
while remaining_comments:
tup, remaining_comments = accuracy(params, key, thread, config, turn)
all_preds += tup[0]
all_labels += tup[1]
turn += 1
return all_preds, all_labels
def evaluate(config, params, data_loader, key):
all_preds = []
all_labels = []
for step, thread in enumerate(data_loader.thread_generator()):
if step%100==0:
print(f'[Step {step}]')
thread = lm_tokeniser.tokenize_thread(thread)
key, subkey = jax.random.split(key)
tup = thread_accuracy(params, subkey, thread, config)
all_preds += tup[0]
all_labels += tup[1]
return all_preds, all_labels
def train(config, params, train_data_loader, key, opt_state):
losses = []
val_losses = []
for _ in range(config['n_epochs']):
for step, thread in enumerate(train_data_loader.thread_generator()):
if step%(config['total_steps'])==0:
print(f'[Step {step}]')
thread = lm_tokeniser.tokenize_thread(thread)
key, subkey = jax.random.split(key)
params, opt_state, batch_loss = update(opt_state, params, subkey,
thread, config)
losses.append(batch_loss.item())
if step%(config['total_steps'])==0:
print(sum(losses)/len(losses))
losses = []
if step==config['total_steps']-1:
all_preds, all_labels = evaluate(config, params, valid_data_loader, key)
wandb.log(flatten_dict({'Validation' : get_classification_report(config, all_labels, all_preds)}))
all_preds, all_labels = evaluate(config, params, test_data_loader, key)
wandb.log(flatten_dict({'Test' : get_classification_report(config, all_labels, all_preds)}))
return val_losses
if __name__=='__main__' :
global lm_tokenizer, featurizer_f, loss_f, mask_batch_mlm, eval_featurizer_f, eval_pred_f, loss, accuracy, opt
load_pretrained_tokenizer()
data_loader, train_data_loader, valid_data_loader, test_data_loader = get_dataloaders()
lm_tokeniser = train_tokenizer(data_loader)
config = update_config(config, train_data_loader)
wandb.init(project='autoregressive-mlm-ft', config=config)
config = hk.data_structures.to_immutable_dict(config)
pure_featurizer_fn = hk.transform( get_fn_to_transform(TransformerFeaturizer) )
pure_loss_fn = hk.transform( get_fn_to_transform(FineTuningExtendedEncoder) )
pure_pred_fn = hk.transform( get_fn_to_transform(FineTuningExtendedEncoder, training=False) )
key, subkey = jax.random.split( jax.random.PRNGKey(42) )
featurizer_params, ExtendedEncoder_params = get_params(config, key, pure_loss_fn, pure_featurizer_fn)
params = load_pretrained_wts(featurizer_params, ExtendedEncoder_params)
mask_batch_mlm = get_masking_func(config)
jit_fns(pure_featurizer_fn, pure_loss_fn, pure_pred_fn)
lrs = [1e-3]
drs = [0.1]
valid_epoch_losses = []
for lr in lrs:
for dr in drs:
config = hk.data_structures.to_mutable_dict(config)
config['learning_rate'] = lr
config['classifier_drop_rate']= dr
config = hk.data_structures.to_immutable_dict(config)
opt = get_adam_opt(config)
opt_state = opt.init(params)
jit_fns(pure_featurizer_fn, pure_loss_fn, pure_pred_fn)
init_params = copy.deepcopy(params)
val_losses = train(config, init_params, train_data_loader, key, opt_state)
valid_epoch_losses.append( val_losses )
wandb.log({'learning_rate':lr, 'dropout_rate': dr})
print(f"Learning rate={lr}, Dropout Rate={dr} Losses : ", valid_epoch_losses[-1])
```
#### File: AutoRegressive-MLM/preprocess/json_to_xml.py
```python
import argparse
import json, os
import jsonlist
from functools import reduce
def clean_claim(claim_text_lis):
"""
Returns a list of claims with metadata removed.
Don't call multiple times, on same lis.
"""
clean_claims = []
for claim_text in claim_text_lis:
if claim_text is not None:
clean_claim = ' '.join(claim_text.split()[1:]).strip(' ') #Remove initial number
if clean_claim!='':
clean_claims.append( clean_claim )
return clean_claims
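# Illustrative behaviour (hypothetical inputs, not taken from any dataset):
# clean_claim(["1. School uniforms reduce bullying", None, "2."])
#   -> ["School uniforms reduce bullying"]   (leading numbers, None and empty entries are dropped)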
def clean_premise(premise_lis):
"""
Returns a list of premises with meta data removed.
"""
if type(premise_lis) is dict:
premise_lis = reduce(lambda x, y: x+y, [v if v is not None else [] for k,v in premise_lis.items()], [])
clean_premises = []
for lis in premise_lis:
if lis is not None:
clean_premises += clean_claim(lis)
return clean_premises
def mark_comment(comment, claim_lis=None, premise_lis=None):
"""
Adds <claim>/<premise> tags to comment.
"""
comment = ' '.join(comment.split(' '))
comment = ' '+comment+' '
if claim_lis is not None:
for claim in claim_lis:
claim = ' '.join(claim.split(' '))
claim = claim.strip(' ')
print("Replacing CLAIM : ", claim)
comment = comment.replace(claim, '<claim>'+claim+'</claim>')
if premise_lis is not None:
for premise in premise_lis:
premise = ' '.join(premise.split(' '))
premise = premise.strip(' ')
print("Replacing PREMISE : ", premise)
comment = comment.replace(premise, '<premise>'+premise+'</premise>')
return comment[1:-1]
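# Illustrative behaviour (hypothetical strings):
# mark_comment("I believe taxes help society", claim_lis=["taxes help society"])
#   -> "I believe <claim>taxes help society</claim>"   (also prints "Replacing CLAIM : ...")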
def format_annotation(annotation, post_tree):
"""
Modifies annotation to add claim and premise tags and returns xml.
"""
xml_out = ''
comment_ids = [elem['id'] for elem in post_tree['comments']]
comment1_id = annotation['Comment1']
comment2_id = annotation['Comment2']
#Preparing XML for Comment 1
if comment1_id in comment_ids:
cur_comment = post_tree['comments'][comment_ids.index(comment1_id)]
if 'ann_claim_premise' not in cur_comment:
cur_comment['ann_claim_premise'] = mark_comment(cur_comment['body'],
clean_claim(annotation['Claim1']) if 'Claim1' in annotation else None,
clean_premise(annotation['Premise1']) if 'Premise1' in annotation else None)
xml_out += '<reply>'+cur_comment['ann_claim_premise']+'</reply>'
elif comment1_id == post_tree['id']:
if 'ann_claim_premise' not in post_tree:
post_tree['ann_claim_premise'] = mark_comment(post_tree['selftext'],
clean_claim(annotation['Claim1']) if 'Claim1' in annotation else None,
clean_premise(annotation['Premise1']) if 'Premise1' in annotation else None)
xml_out += '<OP>'+post_tree['ann_claim_premise']+'</OP>'
else:
raise AssertionError("Comment id : ", comment1_id, " not found in the post tree : ", post_tree)
#Preparing XML for Comment 2
if comment2_id in comment_ids:
cur_comment = post_tree['comments'][comment_ids.index(comment2_id)]
if 'ann_claim_premise' not in cur_comment:
cur_comment['ann_claim_premise'] = mark_comment(cur_comment['body'],
clean_claim(annotation['Claim2']) if 'Claim2' in annotation else None,
clean_premise(annotation['Premise2']) if 'Premise2' in annotation else None)
xml_out += '<reply>'+cur_comment['ann_claim_premise']+'</reply>'
else:
raise AssertionError("Comment id : ", comment2_id, " not found in the post tree : ", post_tree)
return xml_out
def get_next_file_name(write_dir):
i = 0
file_name = os.path.join(write_dir, str(i)+'.xml')
while True:
while os.path.isfile(file_name):
i+=1
file_name = os.path.join(write_dir, str(i)+'.xml')
yield file_name
def write_xml(thread_xml, write_dir, file_name_iter):
xml_content = """<?xml version="1.0"?> <thread> \n"""
for elem in thread_xml:
xml_content+=(elem+'\n')
xml_content+="</thread>"
with open(next(file_name_iter), 'w') as f:
f.write(xml_content)
if __name__ == '__main__' :
parser = argparse.ArgumentParser()
parser.add_argument('--json_file', type=str, help='Json file with annotations to be converted to XML file')
    parser.add_argument('--reddit_file', type=str, help='Jsonlist File with reddit comments; in the format of data of https://chenhaot.com/pages/changemyview.html.\
This file will be searched for comments matching those in json_file')
parser.add_argument('--write_dir', type=str, help='Directory to which the program should write the generated xml files.')
args = parser.parse_args()
file_name_iter = get_next_file_name(args.write_dir)
with open(args.json_file, 'r') as f:
ft_data = json.load(f)
print("Loaded finetuning data")
    train_data = jsonlist.load_file(args.reddit_file)
annotations = []
for key in ft_data.keys():
for k, annotation in ft_data[key].items():
annotations.append(annotation)
annotations = annotations[:-10] #Remove Last 10 annotations, they have discrepancy b/w Claim1 and Claim2
post_ids = [elem['id'] for elem in train_data]
post_comment_ids = [ elem['id'] for elem in train_data ]
parent_post_ids = { elem : elem for elem in post_comment_ids }
for elem in train_data:
for c in elem['comments']:
post_comment_ids += [ c['id'] ]
parent_post_ids[c['id']] = elem['id']
i = 0
while i<len(annotations) :
annotation = annotations[i]
post_comment_id = annotation['Comment1']
thread = []
thread_xml = []
try :
parent_post = parent_post_ids[post_comment_id]
idx = post_ids.index(parent_post)
except KeyError:
            raise KeyError("Can't find post/comment of id : ", post_comment_id)
comment_ids = [elem['id'] for elem in train_data[idx]['comments']]
while True:
comment_id = annotation['Comment2']
if comment_id in comment_ids:
thread_xml.append( format_annotation(annotation, train_data[idx]) )
thread.append( annotation )
else :
raise ValueError("Invalid comment id: ", comment_id, " for post with id : ", parent_post)
i+=1
if i==len(annotations):
break
annotation = annotations[i]
if annotation['Comment1']!=comment_id or i==len(annotations):
                write_xml(thread_xml, args.write_dir, file_name_iter)
if annotation['Comment1'] not in [a['Comment2'] for a in thread]:
break
while True:
if thread[-1]['Comment2']!=annotation['Comment1']:
thread.pop()
thread_xml.pop()
else:
break
```
#### File: src/DataLoaders/tfds.py
```python
import tensorflow_datasets as tfds
def load_tf_dataset(config, training, split, n_epochs, n_examples, name='imdb_reviews', data_dir='./'):
ds = tfds.load(name,
split=f"{split}[:{n_examples}]",
data_dir=data_dir).cache().repeat(n_epochs)
if training:
ds = ds.shuffle(10*config['batch_size'], seed=0)
ds = ds.batch(config['batch_size'])
return tfds.as_numpy(ds)
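# Minimal usage sketch (assumes a config dict exposing 'batch_size'; imdb_reviews batches expose 'text' and 'label'):
# ds = load_tf_dataset({'batch_size': 8}, training=True, split='train', n_epochs=1, n_examples=1000)
# for batch in ds:
#     print(batch['text'].shape, batch['label'].shape)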
```
|
{
"source": "Jeevesh8/b2b",
"score": 2
}
|
#### File: Jeevesh8/b2b/preprocessing.py
```python
import torch
import pandas as pd
from transformers import XLMTokenizer, XLMWithLMHeadModel, XLMModel
import pickle
tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-ende-1024")
class load_data():
def __init__(self, load_ = True, dataset_path='/', pll_size = 10**5):
paths = [dataset_path+'/train.en',dataset_path+'/train.de']
self.src_lang_path = paths[0]
self.trgt_lang_path = paths[1]
self.pll_size = pll_size
        self.load_ = load_
        self.dataset_path = dataset_path
def load(self):
i = 0
self.src_tokens = []
self.trgt_tokens = []
with open(self.src_lang_path, 'rt') as f:
while(i!=self.pll_size):
input_ids = torch.tensor(tokenizer.encode('<s><s>'+f.readline()+'</s>')[1:-1])
self.src_tokens.append(input_ids)
i = i + 1
with open(self.trgt_lang_path, 'rt') as f:
while(i!=2*self.pll_size):
input_ids = torch.tensor(tokenizer.encode('<s><s>'+f.readline()+'</s>')[1:-1])
self.trgt_tokens.append(input_ids)
i = i + 1
def final_data(self):
if(self.load_):
self.load()
zipped_list = list(zip(self.src_tokens, self.trgt_tokens))
df_prllel = pd.DataFrame(zipped_list, columns = ['en', 'de'], dtype=object)
df_eng = df_prllel.drop('de', axis=1)
df_de = df_prllel.drop('en', axis=1)
d = 0
'''
for df in [df_prllel, df_eng, df_de]:
with open(self.dataset_path+'/file_'+str(d)+'.pkl', 'wb+') as f :
pickle.dump(df,f)
d = d+1
'''
        else:
            # Re-load the previously pickled dataframes. Assigning to the loop
            # variable would not update the list, so collect them explicitly.
            dfs = []
            for d in range(3):
                with open(self.dataset_path+'/file_'+str(d)+'.pkl', 'rb') as f :
                    dfs.append(pickle.load(f))
            df_prllel, df_eng, df_de = dfs
        return df_prllel, df_eng, df_de
```
#### File: Jeevesh8/b2b/train.py
```python
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from dataset import pll_datst, coll, mono_datst
from preprocessing import load_data, tokenizer
from model2 import xlmb2b
from tqdm import tqdm
from os import path
from functools import partial
from nltk.translate.bleu_score import corpus_bleu
import multiprocessing as mp
from Globals import *
import argparse
parser = argparse.ArgumentParser(description= 'Train the Model')
parser.add_argument('--dataset_path')
parser.add_argument('--p', type=float)
parser.add_argument('--ksample', type=int)
parser.add_argument('--batch_size', type=int)
parser.add_argument('--trfrmr_nlayers', type=int)
args = parser.parse_args()
if path.exists(args.dataset_path+"/file_1.csv") :
data_obj = load_data(load_ = False, dataset_path = args.dataset_path)
else:
data_obj = load_data(dataset_path=args.dataset_path)
df_prllel, df_en, df_de = data_obj.final_data()
pll_train_ds = pll_datst(df_prllel)
mono_train_ds_en = mono_datst(df_en)
mono_train_ds_de = mono_datst(df_de, lang='de')
vocab_size = tokenizer.vocab_size
b_sz = args.batch_size
batch_size = args.batch_size
d_model = 1024
model_ed = xlmb2b(trfrmr_nlayers=args.trfrmr_nlayers).double().to(device)
model_de = xlmb2b(trfrmr_nlayers=args.trfrmr_nlayers).double().to(device)
del model_ed.xlm
model_ed.xlm = model_de.xlm
model_ed.p = args.p
model_de.p = args.p
model_ed.beam_size = args.ksample
model_de.beam_size = args.ksample
cpus = mp.cpu_count()
pll_train_loader = DataLoader(pll_train_ds,batch_size=b_sz, collate_fn = partial(coll, pll_dat = True), pin_memory=True, num_workers=cpus)
mono_train_loader_en = DataLoader(mono_train_ds_en, batch_size=b_sz, collate_fn = partial(coll, pll_dat =False), pin_memory=True, num_workers=cpus)
mono_train_loader_de = DataLoader(mono_train_ds_de, batch_size=b_sz, collate_fn = partial(coll, pll_dat =False), pin_memory=True, num_workers=cpus)
optimizer_ed = torch.optim.Adam(model_ed.parameters(), lr = 0.01)
optimizer_de = torch.optim.Adam(model_de.parameters(), lr = 0.01)
mseloss = nn.MSELoss()
cross_entropy_loss = nn.CrossEntropyLoss()
def calculate_bleu(ref, cand, weights = (0.25, 0.25, 0.25, 0.25)):
"""
ref: (batch_size, seq_len, 1)
cand: (batch_size, seq_len, 1)
"""
references = []
candidates = []
dict_ = tokenizer.decoder
for i in range(ref.shape[0]):
refs = []
cands = []
for j in range(ref[i].shape[0]):
refs.append(dict_[ref[i][j]])
cands.append(dict_[cand[i][j]])
references.append([refs])
candidates.append(cands)
return corpus_bleu(references, candidates, weights)
def reshape_n_edit(probs) :
'''returns probs while removing rows with all 0 probs
the rows with all nan probs are due to padding of all
sequences to same length'''
y = probs.reshape(-1,vocab_size)
return y[y==y].reshape(-1,vocab_size)
def assign_features(batch) :
batch['X']['attention_mask'] = (batch['X']['input_ids']==tokenizer.pad_token_id).float()
batch['X']['lengths'] = batch['X']['attention_mask'].sum(dim=1).long()
max_size = int(batch['X']['lengths'].max())
bs = batch['X']['input_ids'].shape[0]
batch['X']['position_ids'] = torch.tensor([[i for i in range(max_size)]*bs], dtype=torch.long)
if (batch['X']['langs']==en_lang_id).sum() == 0 :
batch['X']['langs'] = torch.LongTensor([[en_lang_id]*max_size for i in range(b_sz)])
else :
batch['X']['langs'] = torch.LongTensor([[de_lang_id]*max_size for i in range(b_sz)])
return batch
def swap(batch,sr_embd,tr_embd,pll=True) :
'''Replaces X with Y and input_ids with embeddings for pll data
For mono data , replaces input_ids with predicted tokens'''
if pll:
z2=batch['X']
z = batch['X']['input_ids'].clone()
z1 = batch['Y']['input_ids'].clone()
batch['X'] = batch['Y']
batch['Y'] = z2
batch['X']['input_ids'] = tr_embd
batch['Y']['input_ids'] = sr_embd
return batch, z, z1
else:
batch1 = {}
batch1['X'] = {}
for k, v in batch['X'].items() :
batch1['X'][k] = v.clone()
z = batch1['X']['input_ids']
batch['X']['input_ids'] = tr_embd
batch = assign_features(batch)
return batch, z, batch1
def flip_masks(batch) :
batch['X']['attention_mask'] = (~(batch['X']['attention_mask'].bool())).float()
batch['Y']['attention_mask'] = (~(batch['Y']['attention_mask'].bool())).float()
return batch
def freeze_weights(model) :
for param in model.parameters() :
param.requires_grad = False
def unfreeze_weights(model) :
for param in model.parameters() :
param.requires_grad = True
def remove_pad_tokens(tensorr):
j = tokenizer.pad_token_id
return tensorr[tensorr!=j]
def set_to_eval(model_lis, beam_size=3) :
for model in model_lis :
model.eval()
model.beam_size = beam_size
def send_to_gpu(batch, pll) :
lis =['X', 'Y'] if pll else ['X']
for elem in lis :
for key, value in batch[elem].items() :
batch[elem][key] = value.to(device, non_blocking=True)
return batch
def evaluate(model, i, beam_size=3) :
set_to_eval(model,beam_size)
print(str(i)+"th, Forward Model: ", model[0](c))
print(str(i)+"th, Backward Model: ", model[1](d))
def synchronize() :
if torch.cuda.is_available() :
torch.cuda.synchronize()
def run(model_forward,model_backward,batch,optimizers,pll=True):
probs, sr_embd, tr_embd = model_forward(batch)
if pll : loss_pll = cross_entropy_loss(reshape_n_edit(probs), remove_pad_tokens(batch['Y']['input_ids'].reshape(-1)) )
batch, a, b = swap(batch, sr_embd, tr_embd, pll)
probs_, sr_embd_, tr_embd_ = model_backward(batch, True)
loss_b2b = cross_entropy_loss(reshape_n_edit(probs_), remove_pad_tokens(a.reshape(-1)))
if pll : loss = loss_pll + loss_b2b
else : loss = loss_b2b
for optimizer in optimizers :
optimizer.zero_grad()
loss.backward()
del probs_, sr_embd, sr_embd_, tr_embd, tr_embd_, probs
synchronize()
for optimizer in optimizers :
optimizer.step()
return a,b,loss
def check_thresholds(loss1,loss2,model_ed,model_de, epochs) :
global xlm_freezed
if xlm_freezed and loss1<thresh_for_xlm_weight_freeze and loss2<thresh_for_xlm_weight_freeze:
unfreeze_weights(model_ed.xlm)
xlm_freezed = False
elif not model_de.begin_prgrsiv_real_to_pred and loss1<thresh_to_start_real_to_pred_prgrsiv and loss2<thresh_to_start_real_to_pred_prgrsiv :
model_de.begin_prgrsiv_real_to_pred = True
model_ed.begin_prgrsiv_real_to_pred = True
return
elif model_de.begin_prgrsiv_xlm_to_plt and epochs>thresh_to_stop_xlm_to_plt_prgrsiv :
model_de.begin_prgrsiv_xlm_to_plt = False
model_ed.begin_prgrsiv_xlm_to_plt = False
def save_checkpoint(model_ed, model_de) :
torch.save(model_ed.state_dict(), '../b2b_wts/model_ed.pt')
torch.save(model_de.state_dict(), '../b2b_wts/model_de.pt')
losses_epochs = {"pll" : [], "mono": []}
optimizers = [optimizer_de,optimizer_ed]
freeze_weights(model_de.xlm)
xlm_freezed = True
for epoch in tqdm(range(num_epochs)) :
print(epoch)
model_ed.pll_dat=True
model_de.pll_dat=True
losses = [[], []]
for i, batch in enumerate(pll_train_loader) :
batch = send_to_gpu(batch, pll=True)
batch['Y']['input_ids'], batch['X']['input_ids'], loss1 = run(model_ed,model_de,batch,optimizers)
losses[0].append(loss1.item())
if i>=2000 and i%2000==0 :
            print(i, sum(losses[0][i-2000:i])/2000)
del loss1
synchronize()
batch = flip_masks(batch)
_,_,loss2 = run(model_de,model_ed,batch,optimizers)
losses[1].append(loss2.item())
if i>=2000 and i%2000==0 :
print(i, sum(losses[1][i-2000:i])/2000)
del loss2
synchronize()
check_thresholds(losses[0][-1],losses[1][-1], model_ed, model_de, epoch)
save_checkpoint(model_ed, model_de)
losses_epochs['pll'].append([sum(losses[0])/len(losses[0]), sum(losses[1])/len(losses[1])])
#Training on monolingual data if the above losses are sufficiently low:
    if(losses_epochs['pll'][-1][0]<thresh_for_mono_data or losses_epochs['pll'][-1][1]<thresh_for_mono_data):
print("Going for Monolingual Training")
        model_ed.pll_dat = False
        model_de.pll_dat = False
losses = [[], []]
for i, batch in enumerate(mono_train_loader_en):
batch = send_to_gpu(batch, pll=False)
_,_,loss1 = run(model_ed,model_de,batch,optimizers,pll=False)
losses[0].append(loss1.item())
del loss1
synchronize()
for i, batch in enumerate(mono_train_loader_de):
batch = send_to_gpu(batch, pll=False)
_,_,loss2 = run(model_de,model_ed,batch,optimizers,pll=False)
losses[1].append(loss2.item())
del loss2
synchronize()
losses_epochs['mono'].append([sum(losses[0])/len(losses[0]), sum(losses[1])/len(losses[1])])
```
#### File: Jeevesh8/b2b/utilities.py
```python
from abc import ABC
import numpy as np
import torch
from Globals import *
class model_utils(ABC) :
def __init__(self) :
super().__init__()
    @staticmethod
    def cut_and_paste_down( batch, dim=1) :
return batch.transpose(0,1).reshape(-1)
    @staticmethod
    def cut_and_paste_up( batch, dim=1, beam_size=1) :
'''
batch.size = [batch_size*beam_size, z]
return size = [batch_size,z*beam_size]
'''
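        # Worked example (illustrative): with beam_size=2 and batch = [[a1],[b1],[a2],[b2]]
        # (shape [4,1]), the result is [[a1,a2],[b1,b2]] (shape [2,2]) --
        # the beams belonging to one sample end up in the same row.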
return batch.reshape(beam_size,-1,batch.shape[1]).transpose(0,1).reshape(-1,beam_size*batch.shape[1])
    @staticmethod
    def convert_mask_to_inf( mask):
mask[mask==0] = -np.inf
mask[mask==1] = 0
return mask
def infs_to_zero(self,mask) :
mask[mask==0]=1
mask[mask==-np.inf] = 0
return mask
def get_tgt_mask(self, tr_len, it_no=None) :
x = np.zeros((tr_len,tr_len), dtype=np.float32)
upp_indices = np.triu_indices(tr_len, k=1)
x[upp_indices] = -np.inf
if it_no is not None :
e = torch.tensor(x, dtype = torch.float32, device=device)
e[e!=e[it_no]] = -np.inf
return e
return torch.tensor(x, dtype=torch.float32, device=device)
def final_layer(self, trfrmr_out, mask) :
x = trfrmr_out[mask.bool()]
if self.it_no is not None :
return self.final_linear(x), mask
else :
return self.final_linear(x)
def mask_fr_mask(self) :
m = torch.zeros((self.bs,self.max_tr_seq_len),dtype=torch.bool, device=device)
m[:,self.it_no+1]=1
m[~self.not_done_samples] = 0
return m
def apply_final_layer(self, trfrmr_out, mask) :
if self.it_no is not None :
mask_ = self.tgt_key_pad_mask[self.not_done_samples][:,self.it_no].bool()
mask = torch.zeros((self.bs,self.max_tr_seq_len), dtype=torch.bool, device=device)
mask[self.mask_fr_mask()] = mask_
return self.final_layer(trfrmr_out, mask)
def cycle_dims(self, tensor, clockwise=True) :
dims = torch.arange(-1,len(tensor.shape)-1)
if clockwise :
y = tuple(dims)
return tensor.permute(y)
z = list(dims+2)
z = z+[0]
return tensor.permute(z)
def k_sample_to_flat(self, tokens, langs, positions) :
'''
tokens.size == [b_sz, seq_len, k_sample]
langs.size,positions.size == [b_sz, seq_len]
'''
tokens = self.cycle_dims(tokens)
langs = langs.repeat(tokens.size(0),1)
positions = positions.repeat(tokens.size(0),1)
tokens = tokens.reshape(-1, tokens.size(2))
return tokens, langs, positions
    def flat_to_k_sample(self, plt_embed, k) :
'''plt_embed.shape = [k_sample*b_sz, seq_len, d_model]
return shape = [b_sz, seq_len, k_sample, d_model]'''
plt_embed = plt_embed.reshape(k,-1,plt_embed.size(1),plt_embed.size(2))
return plt_embed.transpose(0,1).transpose(1,2)
def plt_embed(self, tokens, langs, positions) :
'''Returns plt_embdng of shape [b_sz, seq_len, d_model] or
[b_sz, seq_len, k, d_model] if nucleus sampling is done.'''
        k_sampled = len(tokens.shape)==3
        if k_sampled :
            k = tokens.size(2)
            tokens, langs, positions = self.k_sample_to_flat(tokens, langs, positions)
        y = self.xlm.embeddings(tokens)
        z = y + self.xlm.position_embeddings(positions)
        plt_embed = z+self.xlm.lang_embeddings(langs)
        if k_sampled :
            # tokens were flattened by k_sample_to_flat above, so track the k-sampling
            # case explicitly and pass k when restoring the [b_sz, seq_len, k, d_model] shape.
            plt_embed = self.flat_to_k_sample(plt_embed, k)
return plt_embed
def embed_for_decoder(self, output_at_it_no, lang_id) :
y = self.xlm.embeddings(output_at_it_no) #batch_sizeXd_model
z = y + self.xlm.position_embeddings(torch.tensor(self.it_no).long())
return (z+self.xlm.lang_embeddings(lang_id))
def indi(self) :
y = self.not_done_samples.long()
quotients = torch.div(y,self.beam_size)
rems = torch.remainder(y,self.beam_size)
return quotients,rems
def get_msk_fr_prev_probs_entry(self) :
x = torch.zeros((self.actual_bs, self.max_tr_seq_len+1, self.beam_size), dtype=torch.bool, device=device)
x[:,self.it_no,:] = self.not_done_samples.reshape(-1,self.beam_size)
return x
def reform(self, trfrmr_out) :
prev_probs_here = self.prev_probs[:,self.it_no-1,:] if self.it_no!=0 else torch.zeros((self.actual_bs, self.beam_size),device=device)
m = (trfrmr_out.t()+prev_probs_here.reshape(-1)).t()
m[~self.not_done_samples] = 0
m = m.reshape(-1,self.beam_size*self.vocab_size)
msk_fr_prev_probs_entry = self.get_msk_fr_prev_probs_entry()
value, indices = m.topk(self.beam_size, dim=1)
self.prev_probs[msk_fr_prev_probs_entry]=value.reshape(-1)[self.not_done_samples]
indices = torch.remainder(indices, self.vocab_size)
indices = indices.reshape(-1)
return indices
def change_attn_for_xlm(self, dic) :
k='attention_mask'
dic[k]=dic[k].bool()
dic[k]=~dic[k]
dic[k]=dic[k].float()
return dic
def calc_just_now_completed_samples_mask(self,ind) :
self.just_now_completed_samples_mask[:] = False
self.just_now_completed_samples_mask[self.not_done_samples==True] = ~ind
self.not_done_samples[self.not_done_samples==True] = ind
class clone_batch() :
def __init__(self, n, pll_dat=True) :
super().__init__()
self.n = n
self.pll_dat = pll_dat
def transform_xlm_in(self, sample) :
'''Obtains all possible samples from 1 sample
and returns 'sample' with content,position_ids
and langs of size [self.n, z*self.n]
of form (if self.n=3 and z=4 and sample['input_ids']=[abcd]) :-
sample['input_ids'].t():- [[abcd00000000],
[0000abcd0000],
[00000000abcd]]'''
l = ['X', 'Y'] if self.pll_dat else ['X']
for key in l :
z = len(sample[key]['input_ids'])
for subkey in sample[key] :
if subkey != 'lengths' :
sample[key][subkey] = torch.stack([torch.cat([torch.zeros((i*z)), sample[key][subkey], torch.zeros(((self.n-i-1)*z))])
for i in range(self.n)]).t()
return sample
def get_xlm__att_mask(self, batch) :
'''If input :- [[abcd00000000],
[0000abcd0000],
[00000000abcd],other samples]
output:- [[111100000000],
[000011110000],
[000000001111], similarly for other samples]'''
max_size = batch['lengths'].max()
att_mask = []
for elem in batch['lengths'] :
#self.n elements corres. to 'elem' length
att_mask.append( torch.stack([torch.cat([torch.zeros((i*elem)), torch.ones((elem)), torch.zeros((max_size-(i+1)*elem))])
for i in range(self.n)]) )
return torch.cat(att_mask)
```
|
{
"source": "Jeevesh8/bert-syntax",
"score": 3
}
|
#### File: Jeevesh8/bert-syntax/utils.py
```python
import inflect
infl_eng = inflect.engine()
def gen_inflect_from_vocab(vocab_file, freq_threshold=1000):
vbp = {}
vbz = {}
nn = {}
nns = {}
from_pos = {'NNS': nns, 'NN': nn, 'VBP': vbp, 'VBZ': vbz}
    for line in open(vocab_file):
if line.startswith(' '): # empty string token
continue
word, pos, count = line.strip().split()
count = int(count)
if len(word) > 1 and pos in from_pos and count >= freq_threshold:
from_pos[pos][word] = count
verb_infl = {'VBP': 'VBZ', 'VBZ': 'VBP'}
    for word, count in vbz.items():
candidate = infl_eng.plural_verb(word)
if candidate in vbp:
verb_infl[candidate] = word
verb_infl[word] = candidate
noun_infl = {'NN': 'NNS', 'NNS': 'NN'}
    for word, count in nn.items():
candidate = infl_eng.plural_noun(word)
if candidate in nns:
noun_infl[candidate] = word
noun_infl[word] = candidate
return verb_infl, noun_infl
vinfl, ninfl = gen_inflect_from_vocab('wiki.vocab')
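# Illustrative lookups (assuming these words clear the frequency threshold in wiki.vocab):
# ninfl['dog'] -> 'dogs', ninfl['dogs'] -> 'dog'
# vinfl['runs'] -> 'run', vinfl['run'] -> 'runs'
# (the dicts also keep the tag-level entries, e.g. ninfl['NN'] == 'NNS')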
```
|
{
"source": "Jeevesh8/FireFighter-RL",
"score": 3
}
|
#### File: FireFighter-RL/agents/policy_iter.py
```python
import random
from typing import List
import numpy as np
from agents.utils import numpy_dict
class policy_iter_agent():
def __init__(self, n_defend):
self.n_defend = n_defend
self.policy = numpy_dict()
def _get_all_actions(self, defendable, n_defend)->List[np.ndarray]:
if n_defend>=np.sum(defendable):
return [np.copy(defendable)]
elif n_defend<=0:
return [np.zeros(defendable.shape, dtype=np.bool)]
for (i,elem) in enumerate(defendable):
if elem:
return ([np.array(np.concatenate([[0]*i+[0], elem]), dtype=np.bool) for elem in self._get_all_actions(defendable[i+1:], n_defend)]+
[np.array(np.concatenate([[0]*i+[1], elem]), dtype=np.bool) for elem in self._get_all_actions(defendable[i+1:], n_defend-1)])
else:
continue
return []
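    # Illustrative behaviour (hypothetical input): with defendable = [True, True, False] and
    # n_defend = 1, _get_all_actions returns [array([False, True, False]), array([True, False, False])],
    # i.e. the defence budget is always spent whenever enough defendable vertices remain.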
def get_all_actions(self, observation)->List[np.ndarray]:
adj_mat, burned, defended = observation
defendable = np.asarray(np.logical_and(
np.any(adj_mat[burned], axis=0),
np.logical_not(np.logical_or(defended, burned)),
), dtype=np.bool).reshape(-1)
return self._get_all_actions(defendable, self.n_defend)
    def step(self, timestep) -> np.ndarray:
        _, burned, defended = timestep.observation
        try:
            return self.policy[(burned, defended)]
        except KeyError:
            self.policy[(burned, defended)] = random.choice(self.get_all_actions(timestep.observation))
            return self.policy[(burned, defended)]
```
#### File: FireFighter-RL/agents/utils.py
```python
from functools import reduce
import numpy as np
def hash_bool_array(arr):
"""Hashes a boolean ndarray.
Flattens and converts to a packed boolean tuple.
Returns:
A tuple: (packed numpy array, shape of original array)
"""
return tuple(np.packbits(np.reshape(np.copy(arr), (-1)))), arr.shape
def revert_hash(hash):
"""Computes the array given the hash.
Inverse of the function ``hash_bool_array``.
Returns:
np.ndarray, whose hash is the one provided as argument.
"""
total_size = reduce(lambda x,y: x*y, hash[1])
return np.reshape( np.unpackbits(np.array(np.copy(hash[0])))[:total_size], hash[1])
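# Round-trip sketch (hypothetical array):
# arr = np.array([[True, False], [False, True]])
# h = hash_bool_array(arr)      # ((144,), (2, 2)) -- the bits 1001 packed into one byte
# revert_hash(h)                # recovers the original 0/1 pattern with shape (2, 2)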
class numpy_dict(dict):
"""Implements a dictionary with numpy boolean arrays as its keys.
This dictionary implementation is corresponding to the agents and environments,
where the state is a pair of boolean arrays.
"""
def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
def __getitem__(self, burned_n_defended):
burned, defended = burned_n_defended
return super().__getitem__((hash_bool_array(burned), hash_bool_array(defended)))
def __setitem__(self, burned_n_defended, value):
burned, defended = burned_n_defended
return super().__setitem__((hash_bool_array(burned), hash_bool_array(defended)), value)
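# Usage sketch:
# d = numpy_dict()
# burned = np.array([True, False]); defended = np.array([False, False])
# d[(burned, defended)] = 1.5
# d[(burned, defended)]   # -> 1.5 (keys are hashed via hash_bool_array)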
```
#### File: Jeevesh8/FireFighter-RL/env.py
```python
import dm_env
from dm_env import specs
import numpy as np
import networkx as nx
class FireFighter(dm_env.Environment):
"""A FireFighter environment built on the `dm_env.Environment` class.
The agent must choose which vertices to defend at each time step.
The observation is a 3-tuple the first element is a
boolean adjacency matrix of shape (|V|, |V|). The 2nd & 3rd are also boolean arrays,
but with shape (|V|,) corres. to whether a vertex is burnt or defended respectively.
The actions are discrete, and must be a (|V|,) sized boolean vector indicating
which vertices to defend.
The episode terminates when no more vertices can be burnt.
"""
def __init__(
self,
adj_mat: np.ndarray,
initial_fire: np.ndarray,
burn_prob: float = 0.5,
seed=42,
):
"""Initializes a new Catch environment.
Args:
adj_mat: Boolean np.ndarray representing the adjacency matrix
seed: random seed for the RNG.
"""
self.adj_mat = adj_mat
        self.initial_fire = initial_fire
self.burn_prob = burn_prob
self._rng = np.random.RandomState(seed)
self._reset_next_step = True
def reset(self):
"""Returns the first `TimeStep` of a new episode."""
self._reset_next_step = False
        self.burned = self.initial_fire.copy()
self.defended = np.array([False] * self.adj_mat.shape[0], dtype=np.bool)
self.graph = nx.convert_matrix.from_numpy_matrix(self.adj_mat)
self.nodes = self.graph.nodes()
return dm_env.restart(self._observation())
def burn_vertices(self):
"""
Burns any vertex neighboring to a vertex on fire, and not defended/previously burnt,
with probability self.burn_prob
"""
burnable = np.logical_and(
np.any(self.adj_mat[self.burned], axis=0),
np.logical_not(np.logical_or(self.defended, self.burned)),
)
self._reset_next_step = not np.any(burnable)
        to_burn = self._rng.uniform(size=burnable.shape) < self.burn_prob
burn_idx = np.asarray(np.logical_and(burnable, to_burn), dtype=np.bool)
self.burned[np.reshape(burn_idx, -1)] = True
def all_possible_env_states(self, action: np.ndarray):
"""Generator for all possible environment states after taking the given action."""
original_defended = self.defended.copy()
self.defended = np.logical_or(self.defended, action)
burnable = np.logical_and(
np.any(self.adj_mat[self.burned], axis=0),
np.logical_not(np.logical_or(self.defended, self.burned)),
)
self._reset_next_step = not np.any(burnable)
if self._reset_next_step:
yield dm_env.termination(reward=0.0, observation=self._observation())
else:
#Loop over all possible to_burn
for num in range(2**np.sum(burnable)):
to_burn = burnable.copy()
#Make a to_burn according to bits of to_burn
j = 0
for i in range(to_burn.shape[1]):
if to_burn[0, i]==1:
to_burn[0, i] = (num & (1<<j)) >> j
j += 1
#burn vertices & yield timestep
burn_idx = np.asarray(to_burn, dtype=np.bool)
self.burned[np.reshape(burn_idx, -1)] = True
yield dm_env.transition(reward=-1.0, observation=self._observation())
#revert burns for checking other possible burns
self.burned[np.reshape(burn_idx, -1)] = False
self.defended = original_defended
def step(self, action: np.ndarray):
"""Updates the environment according to the action."""
if self._reset_next_step:
return self.reset()
self.defended = np.logical_or(self.defended, action)
self.burn_vertices()
if self._reset_next_step:
return dm_env.termination(reward=0.0, observation=self._observation())
return dm_env.transition(reward=-1.0, observation=self._observation())
def observation_spec(self):
"""Returns the observation spec."""
return (
specs.Array(
shape=self.adj_mat.shape, dtype=np.bool, name="adjacency_matrix"
),
specs.Array(
shape=(self.adj_mat.shape[0],), dtype=np.bool, name="burned"
),
specs.Array(
shape=(self.adj_mat.shape[0],), dtype=np.bool, name="defended"
),
)
def action_spec(self):
"""Returns the action spec."""
return specs.Array(
shape=(self.adj_mat.shape[0],), dtype=np.bool, name="defend_vertices"
)
def _observation(self):
return (self.adj_mat.copy(), self.burned.copy(), self.defended.copy())
def set_state(self, state):
self.burned, self.defended = state[0].copy(), state[1].copy()
burnable = np.logical_and(
np.any(self.adj_mat[self.burned], axis=0),
np.logical_not(np.logical_or(self.defended, self.burned)),
)
self._reset_next_step = not np.any(burnable)
```
|
{
"source": "Jeevesh8/GHI-prediction",
"score": 2
}
|
#### File: Jeevesh8/GHI-prediction/Infer.py
```python
import torch.nn as nn
import torch
import argparse
from torch.utils.data import DataLoader
import multiprocessing as mp
from os import path
n_wrkrs = mp.cpu_count()
abs_loss_fn = nn.L1Loss(reduction='none')
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def define_variables(args_from_train=None) :
global mask_gammas, maximum, gamma_list_len, gammas, real_vals_sum, pred_loss_sum
if args_from_train is not None :
args=args_from_train
maximum = nn.ReLU()
gamma_list_len = max(1,len(args.gamma_list))
if hasattr(args,'mask_gamma_list') and args.mask_gamma_list is not None :
mask_gammas = torch.tensor(args.mask_gamma_list, device=device, dtype=torch.float64)
print(mask_gammas)
else :
mask_gammas = torch.ones(gamma_list_len, device=device, dtype=torch.float64)
mask_gammas = mask_gammas.repeat_interleave(args.final_len)
gammas = torch.tensor(args.gamma_list, dtype=torch.float64, device=device)
gammas = gammas.repeat_interleave(args.final_len)
real_vals_sum = 0 #For q-risk
pred_loss_sum = 0 #For q-risk
def mape_loss(pred,real) :
return torch.div(abs_loss_fn(pred,real),torch.abs(real))
def interval_loss(pred, tar) :
global real_vals_sum, pred_loss_sum
if gamma_list_len!=1 :
tar = torch.cat([tar]*gamma_list_len,dim=1)
tar = mask_gammas*tar
pred = mask_gammas*pred
real_vals_sum += torch.abs(tar).sum().item()
n = tar.shape[0]
m = mask_gammas.sum() #tar.shape[1] #/gamma_list_len
if lossfn_i == 'qr_loss' :
loss = (1-gammas)*maximum(tar-pred)+(gammas)*maximum(pred-tar)
else :
loss = lossfn_i(tar, pred)
pred_loss_sum += loss.sum().item()
return loss.sum()/(n*m)
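# Worked example of the quantile (pinball) term above, following this code's convention
# (illustrative numbers): with gamma = 0.9, tar = 100, pred = 80,
#   loss = (1 - 0.9)*max(100 - 80, 0) + 0.9*max(80 - 100, 0) = 0.1*20 + 0 = 2.0
# so with this sign convention, a larger gamma penalises over-prediction more heavily.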
def run_to_eval(t, lossfn, give_lists=False, test_dataset=None, times_to_run_model=0, batch_size=1) :
loss_list = []
i = 0
tot_loss = 0
t.eval()
test_data_loader = DataLoader(test_dataset, batch_size = batch_size, num_workers=n_wrkrs, drop_last=True)
it = iter(test_data_loader)
if give_lists :
pred_lis = []
actual_lis = []
time_lis = []
for batch in it :
in_batch = batch['in'].to(device)
out = t(in_batch)
if give_lists :
pred_lis.append(out.tolist())
actual_lis.append(batch['out'].tolist())
time_lis.append(in_batch[0][-1][0:5].int().tolist())
else :
loss = lossfn(out,batch['out'].to(device))
tot_loss += loss.item()
i+=1
if i>times_to_run_model and give_lists :
print(pred_lis)
print(actual_lis)
print(time_lis)
break
print('Evaluation Loss:- ', tot_loss/i)
t.train()
return tot_loss/i
def mae_loss(x,y) :
return torch.abs(x-y)
def diff(x,y) :
return x-y
def evaluate(t, loss = 'mse', test_dataset=None, args_from_train=None) :
t.eval()
define_variables(args_from_train)
lossfn = interval_loss
global lossfn_i
if loss == 'mse' :
lossfn_i = nn.MSELoss(reduction='none')
elif loss == 'mape' :
lossfn_i = mape_loss
elif loss == 'mae' :
lossfn_i = abs_loss_fn
elif loss == 'mbe' :
lossfn_i = diff
elif loss == 'qr_loss' :
lossfn_i = 'qr_loss'
else :
lossfn_i = nn.MSELoss(reduction='none')
return run_to_eval(t, lossfn, test_dataset=test_dataset)
def predict_next(t, date_lis, test_dataset) :
batch = test_dataset.getitem_by_date(date_lis)
in_batch = batch['in'].to(device).unsqueeze(dim=0)
out = t(in_batch)
if 'out' in batch :
print('Real output :-', batch['out'].tolist())
print('Predicted Output :-', out)
if __name__=='__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('--mode', default='avg_loss', help='Choose from avg_loss, predict_list, predict_next')
parser.add_argument('--loss', default='mse', help='Choose from mse, mbe, mae, mape, qr_loss')
parser.add_argument('--model', default='ar_net', help='Choose from ar_net, trfrmr, cnn_lstm, lstm')
parser.add_argument('--ini_len', type=int, help='Number of columns of input data')
parser.add_argument('--param_file',help='Path to model\'s param file')
parser.add_argument('--batch_size', type=int, default=1, help='To be used in avg_loss mode only.')
parser.add_argument('--date_lis', nargs='*', type=int, help='List of form [Year, Month, Day, Hour, Minute]')
parser.add_argument('--steps', type=int, default=1, help='Number of steps-ahead model was trained to predict')
parser.add_argument('--final_len', type=int, default=1)
parser.add_argument('--seq_len', type=int, default=256)
parser.add_argument('--root_dir',help='Directory where Data*.csv files are located.')
parser.add_argument('--test_start_year', type=int, help='Starting test year. Use only when mode is avg_loss')
parser.add_argument('--test_final_year', type=int, help='Final test year. Use only when mode is avg_loss.')
parser.add_argument('--test_year', type=int, default=-1, help='test data year.')
parser.add_argument('--times_to_run' , type=int, default=200, help='Times to run the model when mode is predict_list')
parser.add_argument('--gamma_list', type=float, nargs='*', help='Gammas for calculating q-risk')
parser.add_argument('--mask_gamma_list', type=int, nargs='*', help='Masks for Gamma values, e.g. use :- to calculate only p50 or p90 risk')
args = parser.parse_args()
from DataSet import Dataset
if args.test_year != -1 :
csv_paths = [args.root_dir+'/Data'+str(args.test_year)+'.csv']
else :
csv_paths = [args.root_dir+'/Data'+str(i)+'.csv' for i in range(args.test_start_year, args.test_final_year+1)]
model_final_len = args.final_len*len(args.gamma_list) if args.gamma_list!=None else args.final_len
dataset_final_len = args.final_len #if not args.interval or args.final_len<=1 else int(args.final_len/2)
test_dataset = Dataset.SRdata(csv_paths, seq_len = args.seq_len, steps = args.steps, final_len=dataset_final_len)
if args.model=='ar_net' :
from Models import AR_Net
t = AR_Net.ar_nt(seq_len = args.seq_len, ini_len=args.ini_len, final_len=model_final_len).to(device)
elif args.model=='cnn_lstm' :
from Models import CNN_LSTM
t = CNN_LSTM.cnn_lstm(seq_len = args.seq_len, ini_len=args.ini_len, final_len=model_final_len).to(device)
elif args.model=='trfrmr' :
from Models import Transformer
t = Transformer.trnsfrmr_nt(seq_len = args.seq_len, ini_len=args.ini_len, final_len=model_final_len).to(device)
    elif args.model=='lstm' :
from Models import LSTM
t = LSTM.lstm(seq_len = args.seq_len, ini_len=args.ini_len, final_len=model_final_len).to(device)
t.load_state_dict(torch.load(args.param_file))
t = t.double()
if args.mode=='avg_loss' :
print(evaluate(t,args.loss, test_dataset, args))
elif args.mode=='predict_list' :
print(run_to_eval(t, None, True, test_dataset, args.times_to_run))
elif args.mode == 'predict_next' :
print(predict_next(t,args.date_lis,test_dataset))
if args.loss=='qr_loss' :
print('Q-risk = ', 2*pred_loss_sum/real_vals_sum)
```
#### File: GHI-prediction/Models/AR_Net.py
```python
import torch.nn as nn
import torch
class ar_nt(nn.Module) :
def __init__(self, seq_len=256, ini_len = 18, final_len=1) :
super().__init__()
self.d_model = 20
self.seq_len = seq_len
self.init_trnsfrm = nn.Sequential(nn.Linear(ini_len,32),nn.ReLU(),nn.Linear(32,32),nn.ReLU(),nn.Linear(32,self.d_model))
self.batch_norm = nn.BatchNorm1d(self.d_model)
self.final = nn.Sequential(nn.Linear(self.d_model*self.seq_len,512),nn.ReLU(), nn.Linear(512,256),nn.ReLU(),nn.Linear(256,final_len))
def forward(self,batch) :
batch = self.init_trnsfrm(batch)
batch = self.batch_norm(batch.transpose(1,2)).transpose(1,2)
batch = batch.transpose(0,1)
t_out = batch.reshape(-1,self.d_model*self.seq_len)
out = self.final(t_out)
return out
```
|
{
"source": "Jeevesh8/LongFormer_AM",
"score": 2
}
|
#### File: Jeevesh8/LongFormer_AM/train_eval_script.py
```python
import argparse
import glob
import sys
import tensorflow as tf
from transformers import create_optimizer
from seqeval.metrics import classification_report
from configs import config, tokenizer
from tokenize_components import get_model_inputs
from simp_utils import labels_to_tags
from my_utils import get_model
from models.TaskModel import TaskModel
from evaluate_relation_preds import single_sample_eval
"""## Loading Model"""
model = get_model(config['max_tokenizer_length'], config['attention_window'])
model.resize_token_embeddings(len(tokenizer))
task_model = TaskModel(model.layers[0],
max_trans=0.1, min_trans=-0.1,
is_padded=False)
task_optimizer, _ = create_optimizer(init_lr = 0.00005,
num_train_steps = config['n_iters'],
num_warmup_steps = 80)
task_ckpt = tf.train.Checkpoint(task_model=task_model, task_optimizer=task_optimizer)
task_ckpt_manager = tf.train.CheckpointManager(task_ckpt, '../SavedModels/LF_With_rel_preds', max_to_keep=40)
def generator(file_list):
file_list_with_lengths = []
for file in file_list:
with open(file) as f:
lines = f.read()
file_list_with_lengths.append((file, len(lines.split())))
file_list_with_lengths.sort(key=lambda x: x[1])
file_list = [elem[0] for elem in file_list_with_lengths]
for elem in get_model_inputs(file_list):
yield elem
def get_datasets(file_list):
def callable_gen():
nonlocal file_list
for elem in generator(file_list):
yield elem
return tf.data.Dataset.from_generator(callable_gen,
output_signature=(tf.TensorSpec(shape=(), dtype=tf.string, name='filename'),
tf.TensorSpec(shape=(None), dtype=tf.int32, name='tokenized_thread'),
tf.TensorSpec(shape=(None), dtype=tf.int32, name='comp_type_labels'),
tf.TensorSpec(shape=(None, None), dtype=tf.int32, name='refers_labels'),
tf.TensorSpec(shape=(None), dtype=tf.int32, name='relation_type_labels'),
tf.TensorSpec(shape=(None), dtype=tf.int32, name='attention_mask'),
tf.TensorSpec(shape=(None), dtype=tf.int32, name='global_attention_mask'))
).padded_batch(config['batch_size'],
padded_shapes=([],[None],[None],[None, None],[None],[None],[None]),
padding_values=(None, *tuple(config['pad_for'].values())),
                          ).cache() #.repeat(1 if not repeat else int((config['n_iters']*config['batch_size'])/len(file_list))+1).cache()
def get_train_test_data(train_sz, test_sz, op_wise_split):
with open(op_wise_split) as f:
lines = f.readlines()
test_files, train_files = [], []
for threads in lines:
files = [elem.strip("\"\'") for elem in threads.strip('\n').strip('[]').split(', ')]
if len(test_files)<test_sz:
test_files += files
elif len(train_files)<train_sz:
train_files += files
train_dataset = get_datasets(train_files)
test_dataset = get_datasets(test_files)
return train_dataset, test_dataset
"""## Train step"""
@tf.function(input_signature=[(tf.TensorSpec(shape=(None, None), dtype=tf.int32, name='tokenized_thread'),
tf.TensorSpec(shape=(None, None), dtype=tf.int32, name='comp_type_labels'),
tf.TensorSpec(shape=(None, None, None), dtype=tf.int32, name='refers_labels'),
tf.TensorSpec(shape=(None, None), dtype=tf.int32, name='relation_type_labels'),
tf.TensorSpec(shape=(None, None), dtype=tf.int32, name='attention_mask'),
tf.TensorSpec(shape=(None, None), dtype=tf.int32, name='global_attention_mask'))])
def batch_train_step(inp):
tokenized_thread, comp_type_labels, refers_labels, relation_type_labels, attention_mask, global_attention_mask = inp
inputs = {}
inputs['input_ids'] = tokenized_thread
inputs['attention_mask'] = attention_mask
inputs['global_attention_mask'] = global_attention_mask
with tf.GradientTape() as tape:
crf_loss, cc_loss, relation_type_cc_loss, refers_cc_loss = task_model.compute_loss(inputs, (comp_type_labels, relation_type_labels, refers_labels))
tf.print("Losses: ", crf_loss, cc_loss, relation_type_cc_loss, refers_cc_loss, output_stream=sys.stdout)
total_loss = crf_loss + cc_loss + relation_type_cc_loss + refers_cc_loss
gradients = tape.gradient(total_loss, task_model.trainable_variables)
task_optimizer.apply_gradients(zip(gradients, task_model.trainable_variables))
"""## Eval Step"""
@tf.function(input_signature=[(tf.TensorSpec(shape=(None, None), dtype=tf.int32, name='tokenized_thread'),
tf.TensorSpec(shape=(None, None), dtype=tf.int32, name='comp_type_labels'),
tf.TensorSpec(shape=(None, None, None), dtype=tf.int32, name='refers_labels'),
tf.TensorSpec(shape=(None, None), dtype=tf.int32, name='relation_type_labels'),
tf.TensorSpec(shape=(None, None), dtype=tf.int32, name='attention_mask'),
tf.TensorSpec(shape=(None, None), dtype=tf.int32, name='global_attention_mask'))])
def batch_eval_step(inp):
tokenized_thread, comp_type_labels, refers_labels, relation_type_labels, attention_mask, global_attention_mask = inp
inputs = {}
inputs['input_ids'] = tokenized_thread
inputs['attention_mask'] = attention_mask
inputs['global_attention_mask'] = global_attention_mask
viterbi_seqs, seq_lens, relation_type_preds, refers_preds = task_model.infer_step(inputs)
return viterbi_seqs, seq_lens, relation_type_preds, refers_preds
"""## Training Loop"""
def train(train_dataset, test_dataset):
print("Starting Training..")
steps = 0
while steps<config['n_iters']:
for elem in train_dataset:
steps+=1
if steps>config['n_iters']:
break
print("Step: ", steps)
## >>>Add if-else statement to handle long sequences in a batch.
batch_train_step(elem[1:])
if steps%50==0 or steps%799==0:
L, P = [], []
for inp in test_dataset:
viterbi_seqs, seq_lens, relation_type_preds, refers_preds = batch_eval_step(inp[1:])
single_sample_eval(inp[0], seq_lens, viterbi_seqs, refers_preds, relation_type_preds)
for p, l, length in zip(list(viterbi_seqs.numpy()), list(inp[2].numpy()), list(seq_lens.numpy())):
true_tag = labels_to_tags(l)
predicted_tag = labels_to_tags(p)
L.append(true_tag[:length])
P.append(predicted_tag[:length])
s = classification_report(L, P)
print("Classfication Report: ", s)
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--train_sz", required=True, type=int, nargs='+', help="Number of threads to use for train set.")
parser.add_argument("--test_sz", required=True, type=int, help="Number of threds in test set.")
parser.add_argument("--op_wise_split", default='./try_outs/op_wise_split.txt', help="path to file having op wise split of all threads.")
args = parser.parse_args()
for train_sz in args.train_sz:
train_dataset, test_dataset = get_train_test_data(train_sz, args.test_sz, args.op_wise_split)
train(train_dataset, test_dataset)
```
|
{
"source": "Jeevesh8/relational-pt",
"score": 2
}
|
#### File: src/cmv_modes/__init__.py
```python
import os, shlex, glob
import random
from typing import List, Dict, Optional
from collections import namedtuple
from functools import partial
import tensorflow as tf
from bs4 import BeautifulSoup
from .tokenize_components import get_model_inputs
from ..params import config
from ..globals import stable_config
from .configs import config as data_config
from ..arg_mining_ft.params import ft_config
cmv_modes_data = namedtuple(
"cmv_modes_data",
[
"filenames",
"tokenized_threads",
"masked_thread",
"comp_type_labels",
"refers_to_and_type",
],
)
if data_config["omit_filenames"]:
cmv_modes_data = namedtuple(
"cmv_modes_data",
[
"tokenized_threads",
"masked_thread",
"comp_type_labels",
"refers_to_and_type",
],
)
def convert_to_named_tuple(
filenames,
tokenized_threads,
masked_thread,
comp_type_labels,
refers_to_and_type,
omit_filenames,
):
if omit_filenames:
return cmv_modes_data(tokenized_threads, masked_thread,
comp_type_labels, refers_to_and_type)
return cmv_modes_data(
filenames,
tokenized_threads,
masked_thread,
comp_type_labels,
refers_to_and_type,
)
convert_to_named_tuple = partial(convert_to_named_tuple,
omit_filenames=data_config["omit_filenames"])
def data_generator(file_list: List[str],
mask_tokens: Optional[List[str]] = None):
for elem in get_model_inputs(file_list, mask_tokens):
yield elem
def get_dataset(file_list: List[str], mask_tokens: Optional[List[str]] = None):
def callable_gen():
nonlocal file_list
for elem in data_generator(file_list, mask_tokens):
yield elem
return (tf.data.Dataset.from_generator(
callable_gen,
output_signature=(
tf.TensorSpec(shape=(), dtype=tf.string, name="filenames"),
tf.TensorSpec(shape=(None),
dtype=tf.int32,
name="tokenized_threads"),
tf.TensorSpec(shape=(None), dtype=tf.int32, name="masked_threads"),
tf.TensorSpec(shape=(None),
dtype=tf.int32,
name="comp_type_labels"),
tf.TensorSpec(shape=(None, 3),
dtype=tf.int32,
name="refers_to_and_type"),
),
).padded_batch(
ft_config["batch_size"],
padded_shapes=(
[],
[data_config["max_len"]],
[data_config["max_len"]],
[data_config["max_len"]],
[data_config["max_len"], 3],
),
padding_values=(
None,
data_config["pad_for"]["tokenized_thread"],
data_config["pad_for"]["tokenized_thread"],
data_config["pad_for"]["comp_type_labels"],
data_config["pad_for"]["refers_to_and_type"],
),
).batch(stable_config["num_devices"],
drop_remainder=True).map(convert_to_named_tuple))
def get_op_wise_split(filelist: List[str]) -> Dict[str, List[str]]:
"""Splits up filelist into groups, each of which corresponds to threads with
the same original posts.
"""
splits = {}
for filepath in filelist:
if not filepath.endswith(".xml"):
continue
with open(filepath) as f:
xml_string = f.read()
parsed_xml = BeautifulSoup(xml_string, "lxml")
op_id = parsed_xml.thread["id"]
if op_id not in splits:
splits[op_id] = []
splits[op_id].append(filepath)
return splits
def load_dataset(
cmv_modes_dir: str = None,
mask_tokens: Optional[List[str]] = None,
train_sz: float = 100,
valid_sz: float = 0,
test_sz: float = 0,
shuffle: bool = False,
as_numpy_iter: bool = True,
):
"""Returns a tuple of train, valid, test datasets(the ones having non-zero size only)
Args:
cmv_modes_dir: The directory to the version of cmv modes data from which the dataset is to be loaded.
If None, the data is downloaded into current working directory and v2.0 is used from there.
mask_tokens: A list of strings to be masked from each thread. The masking is done in the de-tokenized string.
Any instance of any string in this list in the thread will be replaced by apt number of <mask> tokens.
train_sz: The % of total threads to include in train data. By default, all the threads are included in train_data.
valid_sz: The % of total threads to include in validation data.
test_sz: The % of total threads to include in testing data.
shuffle: If True, the data is shuffled before splitting into train, test and valid sets.
as_numpy_iter: Tensorflow dataset is converted to numpy iterator, before returning.
Returns:
Tuple of 3 tensorflow datasets, corresponding to train, valid and test data. None is returned for the datasets
for which size of 0 was specified.
"""
if train_sz + valid_sz + test_sz != 100:
raise ValueError("Train, test valid sizes must sum to 100")
if cmv_modes_dir is None:
        # shlex.quote would turn the whole command into a single (unrunnable) shell token,
        # so pass the command string to the shell directly.
        os.system("git clone https://github.com/chridey/change-my-view-modes")
cmv_modes_dir = os.path.join(os.getcwd(), "change-my-view-modes/v2.0/")
splits = get_op_wise_split(glob.glob(os.path.join(cmv_modes_dir, "*/*")))
op_wise_splits_lis = list(splits.values())
if shuffle:
random.shuffle(op_wise_splits_lis)
n_threads = sum([len(threads) for threads in splits.values()])
num_threads_added = 0
train_files, valid_files, test_files, i = [], [], [], 0
if train_sz != 0:
while (num_threads_added /
n_threads) * 100 < train_sz and i < len(op_wise_splits_lis):
train_files += op_wise_splits_lis[i]
num_threads_added += len(op_wise_splits_lis[i])
i += 1
num_threads_added = 0
if valid_sz != 0:
while (num_threads_added /
n_threads) * 100 < valid_sz and i < len(op_wise_splits_lis):
valid_files += op_wise_splits_lis[i]
num_threads_added += len(op_wise_splits_lis[i])
i += 1
num_threads_added = 0
if test_sz != 0:
while (num_threads_added /
n_threads) * 100 <= test_sz and i < len(op_wise_splits_lis):
test_files += op_wise_splits_lis[i]
num_threads_added += len(op_wise_splits_lis[i])
i += 1
train_dataset = (None if len(train_files) == 0 else get_dataset(
train_files, mask_tokens))
valid_dataset = (None if len(valid_files) == 0 else get_dataset(
valid_files, mask_tokens))
test_dataset = (None if len(test_files) == 0 else get_dataset(
test_files, mask_tokens))
if as_numpy_iter:
train_dataset = (None if train_dataset is None else
train_dataset.as_numpy_iterator())
valid_dataset = (None if valid_dataset is None else
valid_dataset.as_numpy_iterator())
test_dataset = (None if test_dataset is None else
test_dataset.as_numpy_iterator())
return train_dataset, valid_dataset, test_dataset
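# Usage sketch (assumes the cmv-modes xml files are available at the given path):
# train_ds, valid_ds, test_ds = load_dataset("change-my-view-modes/v2.0/",
#                                            train_sz=80, valid_sz=10, test_sz=10, shuffle=True)
# for batch in train_ds:
#     print(batch.tokenized_threads.shape)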
```
#### File: src/dataloaders/utils.py
```python
from typing import List, Tuple
from functools import reduce
from collections import namedtuple
from ..utils import get_tokenizer
from ..params import config
from ..globals import stable_config
def dict_to_inputs(
tree: List[dict],
tokenizer=get_tokenizer(),
get_mat: bool = False,
one_indexed: bool = True,
) -> Tuple[List[int], List[int], List[int], List[List[int]]]:
"""
Takes a list of posts in a subtree. Encodes individual posts, concatenates
the encodings, in the order they appear in tree. Assumes post_id and parent_id
of each are equal to location(in tree) of the post being referred to. If one_indexed
is True, posts are numbered from 1 and a relation to 0-th position means that post is root.
Returns:
input_ids: The input ids of all the posts in the tree concatenated together.
post_tags: The post tag of each input_id. Either int[B-post] or int[I-post].
user_tags: The user tag of each input_id. Either int[B-user<i>] or int[I-user<i>].
If get_mat is True:
relations: len(tree)Xlen(tree) sized matrix where the (i,j)-th entry is
1 if the parent of i is j, 0 otherwise.
else:
relations: A list of tuples of form (child_post_location, parent_post_location).
"""
encodings = [tokenizer.encode(post["body"])[1:-1] for post in tree]
encodings[0] = [tokenizer.bos_token_id] + encodings[0]
encodings[-1] = encodings[-1] + [tokenizer.eos_token_id]
idxing = 1 if one_indexed else 0
post_tags = []
user_tags = []
if get_mat:
        relations = [[0 for __ in range(len(tree) + idxing)]
                     for _ in range(len(tree) + idxing)]
else:
relations = []
for post, post_encoding in zip(tree, encodings):
post_tags += [config["post_tags"]["B"]
] + [config["post_tags"]["I"]] * (len(post_encoding) - 1)
# print(post['user_no'], len(config['user_tags']))
user_tags += [config["user_tags"][post["user_no"]]["B"]
] + [config["user_tags"][post["user_no"]]["I"]
] * (len(post_encoding) - 1)
if post["parent_id"] is not None:
if get_mat:
relations[post["post_id"] + idxing][post["parent_id"] +
idxing] = 1
else:
relations.append(
(post["post_id"] + idxing, post["parent_id"] + idxing))
elif one_indexed:
if get_mat:
relations[post["post_id"] + 1][0] = 1
else:
relations.append((post["post_id"] + 1, 0))
input_ids = reduce(lambda x, y: x + y, encodings, [])
    # It is valid to truncate like this because the only subtrees whose encodings are greater than config['max_len'] are ones with just single posts,
    # (see preprocess.py for how subtrees are generated). One may worry that some component present in relations may get truncated from the input text,
    # but this is not the case, as relations then only contains (1, 0, 0).
return (
input_ids[:stable_config["max_len"]],
post_tags[:stable_config["max_len"]],
user_tags[:stable_config["max_len"]],
relations,
)
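# Usage sketch (hypothetical two-post thread; uses the default tokenizer):
# tree = [{'post_id': 0, 'parent_id': None, 'user_no': 0, 'body': 'original post'},
#         {'post_id': 1, 'parent_id': 0, 'user_no': 1, 'body': 'a reply'}]
# ids, post_tags, user_tags, rels = dict_to_inputs(tree)
# rels == [(1, 0), (2, 1)] with one_indexed=True: post 1 is the root, post 2 replies to post 1.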
def only_inside_links(tree: List[dict]):
"""
Takes a list of dictionaries corresponding to posts in a subtree of the form:
{
'post_id' : int,
'parent_id' : int,
'user_no' : int,
'body': str,
}
and removes the link from the root post to the remaining tree.
"""
post_ids = [post["post_id"] for post in tree]
outside_links = 0
for post in tree:
if post["parent_id"] not in post_ids:
outside_links += 1
if outside_links > 1:
raise AssertionError(
"More than one link to posts outside the subtree exists, in the subtree: "
+ str(tree))
post["parent_id"] = None
return tree
import numpy as np
def tree_ids_to_nos(tree: List[dict], convert_users: bool = True):
"""
Converts post_id to to the location of the post in the tree list.
Correspondingly changes parent_id of each post to match changes in post_id.
If parent_id not found in posts, the parent is marked as None.
user_nos are changed to be in [0, distinct users in tree-1] if convert_users is True
"""
post_ids = [post["post_id"] for post in tree]
users = np.unique(np.array([post["user_no"] for post in tree])).tolist()
for i, post in enumerate(tree):
post["post_id"] = i
try:
post["parent_id"] = post_ids.index(post["parent_id"])
except ValueError:
post["parent_id"] = None
if convert_users:
post["user_no"] = users.index(post["user_no"])
return tree
modelInp = namedtuple("modelInp",
["input_ids", "post_tags", "user_tags", "relations"])
def convert_to_named_tuple(input_ids, post_tags, user_tags, relations):
return modelInp(input_ids, post_tags, user_tags, relations)
```
#### File: src/models/__init__.py
```python
from typing import Optional, Dict, List, Tuple
from functools import partial
import jax
import jax.numpy as jnp
import haiku as hk
from .relational_model import relational_model
from .tree_crf import tree_crf
from .linear_crf import crf_layer
from src.globals import stable_config
def comp_prediction_loss(logits, lengths, label_tags):
return crf_layer(n_classes=2)(hk.Linear(2)(logits), lengths, label_tags)
def get_log_energies(embds, choice_mask, attention_mask, max_comps, embed_dim,
n_rels):
rel_model = relational_model(n_rels=n_rels,
max_comps=max_comps,
embed_dim=embed_dim)
log_energies = rel_model(embds, choice_mask)
return log_energies
def relation_prediction_loss(embds, choice_mask, attention_mask,
label_relations, max_comps, embed_dim):
log_energies = get_log_energies(embds, choice_mask, attention_mask,
max_comps, embed_dim, 1)
return tree_crf().disc_loss(log_energies, label_relations)
relation_prediction_loss = partial(
relation_prediction_loss,
max_comps=stable_config["max_comps"],
embed_dim=stable_config["embed_dim"],
)
def predict_components(logits, lengths):
return crf_layer(n_classes=2).batch_viterbi_decode(
hk.Linear(2)(logits), lengths)[0]
def predict_relations(embds, choice_mask, attention_mask, max_comps,
embed_dim):
log_energies = get_log_energies(embds, choice_mask, attention_mask,
max_comps, embed_dim, 1)
return tree_crf().mst(log_energies)[1]
predict_relations = partial(
predict_relations,
max_comps=stable_config["max_comps"],
embed_dim=stable_config["embed_dim"],
)
pure_cpl = hk.transform(comp_prediction_loss)
pure_rpl = hk.transform(relation_prediction_loss)
pure_pc = hk.transform(predict_components)
pure_pr = hk.transform(predict_relations)
################################## FINETUNING #####################################
from ..cmv_modes.configs import config as ft_config
_n_classes = len(ft_config["arg_components"])
_n_rels = len(ft_config["relations_map"])
def ft_comp_prediction_loss(logits, lengths, label_tags):
return crf_layer(n_classes=_n_classes)(hk.Linear(_n_classes)(logits),
lengths, label_tags)
def ft_relation_prediction_loss(embds, choice_mask, attention_mask,
label_relations, max_comps, embed_dim):
log_energies = get_log_energies(embds, choice_mask, attention_mask,
max_comps, embed_dim, _n_rels)
return tree_crf().disc_loss(log_energies, label_relations)
ft_relation_prediction_loss = partial(
ft_relation_prediction_loss,
max_comps=stable_config["max_comps"],
embed_dim=stable_config["embed_dim"],
)
def ft_predict_components(logits, lengths):
return crf_layer(n_classes=_n_classes).batch_viterbi_decode(
hk.Linear(_n_classes)(logits), lengths)[0]
def ft_predict_relations(embds, choice_mask, attention_mask, max_comps,
embed_dim):
log_energies = get_log_energies(embds, choice_mask, attention_mask,
max_comps, embed_dim, _n_rels)
return tree_crf().mst(log_energies)[1]
ft_predict_relations = partial(
ft_predict_relations,
max_comps=stable_config["max_comps"],
embed_dim=stable_config["embed_dim"],
)
ft_pure_cpl = hk.transform(ft_comp_prediction_loss)
ft_pure_rpl = hk.transform(ft_relation_prediction_loss)
ft_pure_pc = hk.transform(ft_predict_components)
ft_pure_pr = hk.transform(ft_predict_relations)
def copy_weights(
old_mat: jnp.ndarray,
new_mat: jnp.ndarray,
mapping: Optional[Dict[int, List[int]]] = None,
) -> jnp.ndarray:
"""
Args:
old_mat: A matrix of shape [old_input_dim, old_output_dim] extracted from some pretrained model.
new_mat: A matrix of shape [old_input_dim, new_output_dim] extracted from randomly initialized weights of some model.
mapping: A dict mapping i to the list of all j such that new_mat[:, i] is to be assigned the mean{old_mat[:, j] over all j}.
By default, for i not specified in the mapping, mean will be taken over all the j.
Returns:
A matrix of same shape as new_mat, with weights copied from old_mat as specified in mapping.
    NOTE: This function handles 1-D vectors too, by using None indexing internally and jnp.squeeze at the end.
"""
if mapping is None:
mapping = {}
one_dimensional = False
if jnp.size(old_mat.shape) == jnp.size(new_mat.shape) == 1:
one_dimensional = True
old_mat, new_mat = old_mat[None, :], new_mat[None, :]
old_input_dim, old_output_dim = old_mat.shape
new_input_dim, new_output_dim = new_mat.shape
if old_input_dim != new_input_dim:
raise ValueError(
"The layer's between which weights are being copied are expected to have same input dimensions. Received shapes: "
+ str(old_mat.shape) + " and " + str(new_mat.shape))
_mapping = {i: list(range(old_output_dim)) for i in range(new_output_dim)}
_mapping.update(mapping)
for i, mean_over in _mapping.items():
new_mat = jax.ops.index_update(
new_mat,
(tuple(range(new_input_dim)), i),
jnp.mean(jnp.take_along_axis(old_mat,
jnp.array([mean_over]),
axis=-1),
axis=-1),
)
if one_dimensional:
new_mat = jnp.squeeze(new_mat)
return new_mat
################################----------------------------- GENERAL --------------------------##########################
def get_samples(
batch_size: int,
max_len: int,
embed_dim: int,
max_comps: int,
n_token_types: int = 3,
n_rel_types: int = 1,
) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]:
"""
Args:
batch_size: The number of samples in a batch that would be passed to the model.
max_len: The maximum length of any tokenized sequence in batch
embed_dim: The size of embedding of any single token in the sequence.
max_comps: The maximum number of components(posts) in any sequence.
        n_token_types: Number of types of token labels, e.g. ("B-P", "I-P") -> 2
        n_rel_types: Number of types of relations between any two components.
Returns:
A tuple having:
sample_logits: of shape [batch_size, max_len, embed_dim]
sample_length: of shape [batch_size]; each element < max_len
sample_comp_labels: of shape [batch_size, max_len]; each element < n_token_types
sample_relations: of shape [batch_size, max_comps, 3]; (i,j,k)-th element denotes link
from component i to component j of type k. Not guaranteed to be a tree.
"""
key = jax.random.PRNGKey(32)
sample_logits = jnp.zeros(
(batch_size, max_len, embed_dim),
dtype=jnp.float32,
)
sample_lengths = jnp.full((batch_size), max_len, dtype=jnp.int32)
sample_comp_labels = jax.random.randint(key, (batch_size, max_len), 0,
n_token_types)
sample_relation_links = jax.random.randint(
key,
(batch_size, max_comps, 3),
0,
max_comps,
)
sample_relation_types = jax.random.randint(key, (batch_size, max_comps, 3),
0, n_rel_types)
sample_relations = jnp.where(jnp.array([True, True, False]),
sample_relation_links, sample_relation_types)
return sample_logits, sample_lengths, sample_comp_labels, sample_relations
```
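The transformed functions above follow the usual Haiku init/apply pattern. A hedged sketch for the component-prediction loss, reusing `get_samples` for dummy inputs (this assumes the package's config imports resolve when `src.models` is imported):
```python
import jax
from src.models import pure_cpl, get_samples
from src.globals import stable_config

logits, lengths, comp_labels, _ = get_samples(
    batch_size=2,
    max_len=stable_config["max_len"],
    embed_dim=stable_config["embed_dim"],
    max_comps=stable_config["max_comps"],
)
rng = jax.random.PRNGKey(0)
# init() creates the hk.Linear + CRF parameters used on top of the encoder logits;
# apply() evaluates the CRF loss for the batch.
params = pure_cpl.init(rng, logits, lengths, comp_labels)
loss = pure_cpl.apply(params, rng, logits, lengths, comp_labels)
```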
#### File: src/models/tree_crf.py
```python
from typing import Optional, Union, List, Tuple
import jax
import jax.numpy as jnp
import haiku as hk
from .utils import add_garbage_dims, remove_garbage_dims
class tree_crf(hk.Module):
def __init__(
self,
prior: Optional[Union[List[List[List[float]]], jnp.ndarray]] = None,
name: Optional[str] = None,
):
"""Constructs a CRF layer that assigns potentials to all different possible trees
on some sequence of components.
prior: An array of shape [n_comps+1, n_comps+1, n_rel_types+1] where (i,j,k)-th entry
corresponds to the energy predicted by some model for a link from component i
            to component j of type k. The links and the components are 1-indexed, and
            a link from component i to the 0-th component corresponds to i being the root node.
            A link of type 0 is a None-type link. Usually, the only None-type link
            will be the link to the 0-th component. The prior specifies scaling factors for
            the corresponding energies (of the same shape as prior) predicted by the model.
"""
super(tree_crf, self).__init__(name=name)
self.prior = prior
if self.prior is not None:
raise NotImplementedError(
"The prior functionality will be implemented in future.")
@staticmethod
def _mst(log_energies: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Finds maximal spanning tree for a single sample. See self.mst() for
detailed documentation.
"""
M, n_rel_types = log_energies.shape[1], log_energies.shape[2]
partitions = jnp.eye(M, dtype=jnp.bool_)
mst_energy = jnp.array([0.0])
edges = []
"""
def scan_fn(carry, x):
_ = x
mst_energy, log_energies, partitions = carry
max_index = jnp.unravel_index(jnp.argmax(log_energies),
jnp.shape(log_energies))
max_energy = log_energies[max_index]
updatable_sample = jnp.logical_not(jnp.isneginf(max_energy))
mst_energy += jnp.where(updatable_sample, max_energy, 0.0)
max_link = jnp.where(
jnp.stack([updatable_sample] * n_rel_types, axis=-1),
jnp.squeeze(jnp.transpose(jnp.array(max_index))),
0.0,
)
link_from, link_to, unused_rel_type = max_index
from_partition = jnp.where(partitions[link_from, :] == 1,
jnp.arange(M), -1)
to_partition = jnp.where(partitions[link_to, :] == 1,
jnp.arange(M), -1)
log_energies = add_garbage_dims(log_energies)
log_energies = jax.ops.index_update(
log_energies,
(
from_partition,
jnp.expand_dims(to_partition, axis=-1),
jnp.arange(n_rel_types),
),
-jnp.inf,
)
log_energies = jax.ops.index_update(
log_energies,
(
to_partition,
jnp.expand_dims(from_partition, axis=-1),
jnp.arange(n_rel_types),
),
-jnp.inf,
)
log_energies = remove_garbage_dims(log_energies)
temp = jnp.logical_or(partitions[link_from, :],
partitions[link_to, :])
temp_idx = jnp.where(temp, jnp.arange(M), -1)
partitions = add_garbage_dims(partitions)
partitions = jax.ops.index_update(
partitions,
(jnp.expand_dims(temp_idx, axis=-1), jnp.arange(M)), temp)
partitions = remove_garbage_dims(partitions)
return (mst_energy, log_energies, partitions), max_link
(mst_energy, _,
_), edges = jax.lax.scan(scan_fn,
init=(mst_energy, log_energies, partitions),
xs=jnp.arange(M),
unroll=M)
return mst_energy, edges
""" # FOR-LOOP equivalent
for _ in range(M):
max_index = jnp.unravel_index(jnp.argmax(log_energies),
jnp.shape(log_energies))
max_energy = log_energies[max_index]
updatable_sample = jnp.logical_not(jnp.isneginf(max_energy))
mst_energy += jnp.where(updatable_sample, max_energy, 0.0)
max_link = jnp.where(
jnp.stack([updatable_sample] * n_rel_types, axis=-1),
jnp.squeeze(jnp.transpose(jnp.array(max_index))),
0,
)
edges.append(max_link)
link_from, link_to, unused_rel_type = max_index
from_partition = jnp.where(partitions[link_from, :] == 1,
jnp.arange(M), -1)
to_partition = jnp.where(partitions[link_to, :] == 1,
jnp.arange(M), -1)
log_energies = add_garbage_dims(log_energies)
log_energies = jax.ops.index_update(
log_energies,
(
from_partition,
jnp.expand_dims(to_partition, axis=-1),
),
-jnp.inf,
)
log_energies = jax.ops.index_update(
log_energies,
(
to_partition,
jnp.expand_dims(from_partition, axis=-1),
),
-jnp.inf,
)
log_energies = remove_garbage_dims(log_energies)
temp = jnp.logical_or(partitions[link_from, :],
partitions[link_to, :])
temp_idx = jnp.where(temp, jnp.arange(M), -1)
partitions = add_garbage_dims(partitions)
partitions = jax.ops.index_update(
partitions,
(jnp.expand_dims(temp_idx, axis=-1), jnp.arange(M)), temp)
partitions = remove_garbage_dims(partitions)
return mst_energy, jnp.stack(edges)
def mst(self,
log_energies: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""Finds the maximal spanning tree and its score.
Args:
log_energies: A tensor of size [batch_size, M, M, n_rel_types+1] where M = max{1+ (n_comps in i-th sample of batch)}, of the energies of various
                          links predicted by some model.
Returns:
A tuple having:
1. Tensor of size [batch_size] where the i-th entry denotes cost of MST for i-th sample and
2. A tensor of size [batch_size, M, 3] where the (i,j)-th 3-sized vector corresponds to the
j-th edge (link_from, link_to, relation_type) added to the MST of i-th sample. The tensor
is padded with (0,0,0) jnp.ndarrays.
"""
return jax.vmap(self._mst)(log_energies)
@staticmethod
def _score_tree(log_energies: jnp.ndarray,
tree: jnp.ndarray) -> jnp.ndarray:
"""Finds the score of a label tree under the log_energies predicted by some model.
See self.score_tree() for detailed documentation.
"""
new_log_energies = jax.ops.index_update(log_energies, (0, 0, 0), 0.0)
def scan_fn(carry, x):
edge = x
score = carry
return score + new_log_energies[edge[0], edge[1], edge[2]], None
score, _ = jax.lax.scan(scan_fn, init=jnp.array([0.0]), xs=tree)
"""FOR-LOOP equivalent
score = jnp.array([0.0])
for edge in tree:
score += new_log_energies[edge[0], edge[1], edge[2]]
"""
return score
def score_tree(self, log_energies: jnp.ndarray,
tree: jnp.ndarray) -> jnp.ndarray:
"""Calculates the log energies of a given batch of trees.
Args:
log_energies: same, as in self.mst()
tree: A tensor in the format of second tensor output by self.mst().
Returns:
A tensor of size [batch_size] having the score of each tree corresponding to each sample of the batch.
"""
return jax.vmap(self._score_tree)(log_energies, tree)
def disc_loss(self, log_energies: jnp.ndarray,
label_tree: jnp.ndarray) -> jnp.ndarray:
"""Calculates average loss of a batch of samples.
Args:
log_energies: same, as in self.mst()
label_tree: same, as in self.score_tree() [Labels for the actual thread of the tree]
Returns:
            Average discrimination loss (loss with the partition function estimated by the maximum-energy tree).
"""
mst_energies, _ = self.mst(log_energies)
label_tree_scores = self.score_tree(log_energies, label_tree)
return jnp.mean(mst_energies - label_tree_scores)
```
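The scoring step in `_score_tree` is just a sum of the log-energies of the selected edges, with the padding entry at (0, 0, 0) forced to zero so padded edges contribute nothing. A hand-computed sketch (using the newer `jnp.ndarray.at` API in place of the `jax.ops.index_update` calls above):
```python
import jax.numpy as jnp

# Toy graph with M = 3 nodes (node 0 is the dummy root) and one relation type.
log_energies = jnp.zeros((3, 3, 1))
log_energies = log_energies.at[1, 0, 0].set(2.0)   # node 1 attaches to the dummy root
log_energies = log_energies.at[2, 1, 0].set(1.5)   # node 2 attaches to node 1
tree = jnp.array([[1, 0, 0], [2, 1, 0], [0, 0, 0]])  # padded with a (0, 0, 0) edge
score = sum(float(log_energies[a, b, c]) for a, b, c in tree.tolist())
assert score == 3.5   # 2.0 + 1.5 + 0.0 from the padding edge
```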
#### File: src/training/optimizer.py
```python
import jax
import optax
import numpy as np
from flax import traverse_util
from ..params import config
if "opt" not in config:
from ..arg_mining_ft.params import ft_config as config
def make_lr_schedule(warmup_percentage, total_steps, restart_from=0):
def lr_schedule(step):
percent_complete = (step + restart_from) / total_steps
# 0 or 1 based on whether we are before peak
before_peak = jax.lax.convert_element_type(
(percent_complete <= warmup_percentage), np.float32)
# Factor for scaling learning rate
scale = (before_peak * (percent_complete / warmup_percentage) +
(1 - before_peak)) * (1 - percent_complete)
return scale
return lr_schedule
def decay_mask_fn(params):
"""To prevent decay of weights when using adamw, for the bias parameters;
FROM: https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification_flax.ipynb"""
flat_params = traverse_util.flatten_dict(params)
flat_mask = {
path: (path[-1] != "bias" and path[-2:] != ("LayerNorm", "scale"))
for path in flat_params
}
return traverse_util.unflatten_dict(flat_mask)
def get_adam_opt():
total_steps = config["opt"]["total_steps"] * config["n_epochs"]
lr_schedule = make_lr_schedule(
warmup_percentage=0.1,
total_steps=total_steps,
restart_from=config["opt"]["restart_from"],
)
opt = optax.chain(
optax.identity() if config["opt"]["max_grad_norm"] is None else
optax.clip_by_global_norm(config["opt"]["max_grad_norm"]),
optax.adam(learning_rate=config["opt"]["lr"])
if config["opt"]["weight_decay"] is None else optax.adamw(
learning_rate=config["opt"]["lr"],
weight_decay=config["opt"]["weight_decay"],
mask=decay_mask_fn,
),
optax.scale_by_schedule(lr_schedule)
if config["opt"]["use_schedule"] else optax.identity(),
)
return opt
```
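`make_lr_schedule` produces a multiplicative factor with a linear warm-up followed by a linear decay to zero. A small sketch of the shape it takes (assuming the module's config imports resolve so it can be imported; the numbers are illustrative):
```python
schedule = make_lr_schedule(warmup_percentage=0.1, total_steps=1000)
schedule(0)      # 0.0    - start of warm-up
schedule(50)     # 0.475  - 0.5 warm-up factor * 0.95 decay factor
schedule(100)    # 0.9    - peak, at 10% of training
schedule(500)    # 0.5    - linear decay after the peak
schedule(1000)   # 0.0    - end of training
```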
|
{
"source": "JeeveshGarg/oppia",
"score": 2
}
|
#### File: core/domain/rating_services.py
```python
from __future__ import annotations
import datetime
from core import feconf
from core.domain import event_services
from core.domain import exp_fetchers
from core.domain import exp_services
from core.platform import models
from typing import Dict, Optional
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import transaction_services
from mypy_imports import user_models
(exp_models, user_models,) = models.Registry.import_models([
models.NAMES.exploration, models.NAMES.user])
transaction_services = models.Registry.import_transaction_services()
ALLOWED_RATINGS = [1, 2, 3, 4, 5]
def assign_rating_to_exploration(
user_id: str,
exploration_id: str,
new_rating: int
) -> None:
"""Records the rating awarded by the user to the exploration in both the
user-specific data and exploration summary.
This function validates the exploration id but not the user id.
Args:
user_id: str. The id of the user assigning the rating.
exploration_id: str. The id of the exploration that is
assigned a rating.
new_rating: int. Value of assigned rating, should be between
1 and 5 inclusive.
Raises:
ValueError. The assigned rating is not of type int.
ValueError. The assigned rating is lower than 1 or higher than 5.
ValueError. The exploration does not exist.
"""
if not isinstance(new_rating, int):
raise ValueError(
'Expected the rating to be an integer, received %s' % new_rating)
if new_rating not in ALLOWED_RATINGS:
raise ValueError('Expected a rating 1-5, received %s.' % new_rating)
exploration = exp_fetchers.get_exploration_by_id( # type: ignore[no-untyped-call]
exploration_id, strict=False)
if exploration is None:
raise ValueError('Invalid exploration id %s' % exploration_id)
@transaction_services.run_in_transaction_wrapper
def _update_user_rating_transactional() -> Optional[int]:
"""Updates the user rating of the exploration. Returns the old rating
before updation.
"""
exp_user_data_model = user_models.ExplorationUserDataModel.get(
user_id, exploration_id)
if exp_user_data_model:
old_rating: Optional[int] = exp_user_data_model.rating
else:
old_rating = None
exp_user_data_model = user_models.ExplorationUserDataModel.create(
user_id, exploration_id)
exp_user_data_model.rating = new_rating
exp_user_data_model.rated_on = datetime.datetime.utcnow()
exp_user_data_model.update_timestamps()
exp_user_data_model.put()
return old_rating
old_rating = _update_user_rating_transactional()
exploration_summary = exp_fetchers.get_exploration_summary_by_id(
exploration_id)
if not exploration_summary.ratings:
exploration_summary.ratings = feconf.get_empty_ratings()
exploration_summary.ratings[str(new_rating)] += 1
if old_rating:
exploration_summary.ratings[str(old_rating)] -= 1
event_services.RateExplorationEventHandler.record( # type: ignore[no-untyped-call]
exploration_id, user_id, new_rating, old_rating)
exploration_summary.scaled_average_rating = (
exp_services.get_scaled_average_rating( # type: ignore[no-untyped-call]
exploration_summary.ratings))
exp_services.save_exploration_summary(exploration_summary) # type: ignore[no-untyped-call]
def get_user_specific_rating_for_exploration(
user_id: str, exploration_id: str
) -> Optional[int]:
"""Fetches a rating for the specified exploration from the specified user
if one exists.
Args:
user_id: str. The id of the user.
exploration_id: str. The id of the exploration.
Returns:
int or None. An integer between 1 and 5 inclusive, or None if the user
has not previously rated the exploration.
"""
exp_user_data_model = user_models.ExplorationUserDataModel.get(
user_id, exploration_id)
return exp_user_data_model.rating if exp_user_data_model else None
def get_when_exploration_rated(
user_id: str, exploration_id: str
) -> Optional[datetime.datetime]:
"""Fetches the datetime the exploration was last rated by this user, or
None if no rating has been awarded.
Currently this function is only used for testing purposes.
Args:
user_id: str. The id of the user.
exploration_id: str. The id of the exploration.
Returns:
datetime.datetime or None. When the exploration was last
rated by the user, or None if the user has not previously
rated the exploration.
"""
exp_user_data_model = user_models.ExplorationUserDataModel.get(
user_id, exploration_id)
return exp_user_data_model.rated_on if exp_user_data_model else None
def get_overall_ratings_for_exploration(exploration_id: str) -> Dict[str, int]:
"""Fetches all ratings for an exploration.
Args:
exploration_id: str. The id of the exploration.
Returns:
dict. A dict whose keys are '1', '2', '3', '4', '5' and whose
values are nonnegative integers representing the frequency counts
of each rating.
"""
exp_summary = exp_fetchers.get_exploration_summary_by_id(exploration_id)
return exp_summary.ratings
```
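A storage-free sketch of the summary bookkeeping done in `assign_rating_to_exploration`: when a user changes their rating, the new bucket is incremented and the old one decremented (the counts below are made up):
```python
ratings = {'1': 0, '2': 0, '3': 4, '4': 2, '5': 1}
old_rating, new_rating = 3, 5
ratings[str(new_rating)] += 1
if old_rating:
    ratings[str(old_rating)] -= 1
assert ratings == {'1': 0, '2': 0, '3': 3, '4': 2, '5': 2}
```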
#### File: storage/translation/gae_models_test.py
```python
from __future__ import annotations
from core import feconf
from core.platform import models
from core.tests import test_utils
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import base_models
from mypy_imports import translation_models
(base_models, translation_models) = models.Registry.import_models(
[models.NAMES.base_model, models.NAMES.translation])
class EntityTranslationsModelTest(test_utils.GenericTestBase):
"""Unit tests for EntityTranslationsModel class."""
def test_create_new_model(self) -> None:
enitity_translation_model = (
translation_models.EntityTranslationsModel.create_new(
feconf.TranslatableEntityType.EXPLORATION, 'exp_id', 1, 'hi', {
'123': {
'content_value': 'Hello world!',
'needs_update': False
}
})
)
self.assertEqual(enitity_translation_model.entity_type, 'exploration')
self.assertEqual(enitity_translation_model.entity_id, 'exp_id')
self.assertEqual(enitity_translation_model.entity_version, 1)
self.assertEqual(enitity_translation_model.language_code, 'hi')
self.assertEqual(
enitity_translation_model.translations['123']['content_value'],
'Hello world!')
self.assertEqual(
enitity_translation_model.translations['123']['needs_update'],
False)
def test_get_model_method_returns_correctly(self) -> None:
translation_models.EntityTranslationsModel.create_new(
feconf.TranslatableEntityType.EXPLORATION, 'exp_id', 1, 'hi', {
'123': {
'content_value': 'Hello world!',
'needs_update': False
}
}
).put()
enitity_translation_model = (
translation_models.EntityTranslationsModel.get_model(
feconf.TranslatableEntityType.EXPLORATION, 'exp_id', 1, 'hi'))
self.assertEqual(enitity_translation_model.entity_type, 'exploration')
self.assertEqual(enitity_translation_model.entity_id, 'exp_id')
self.assertEqual(enitity_translation_model.entity_version, 1)
self.assertEqual(enitity_translation_model.language_code, 'hi')
self.assertEqual(
enitity_translation_model.translations['123']['content_value'],
'Hello world!')
self.assertEqual(
enitity_translation_model.translations['123']['needs_update'],
False)
def test_get_all_for_entity_returns_correctly(self) -> None:
translation_models.EntityTranslationsModel.create_new(
feconf.TranslatableEntityType.EXPLORATION, 'exp_id', 1, 'en', {
'123': {
'content_value': 'Hey I am Jhon.',
'needs_update': False
}
}
).put()
translation_models.EntityTranslationsModel.create_new(
feconf.TranslatableEntityType.EXPLORATION, 'exp_id2', 2, 'hi', {
'123': {
'content_value': 'Hello world!',
'needs_update': False
}
}
).put()
translation_models.EntityTranslationsModel.create_new(
feconf.TranslatableEntityType.EXPLORATION, 'exp_id', 1, 'hi', {
'123': {
'content_value': 'Hey I am Nikhil.',
'needs_update': False
}
}
).put()
enitity_translation_models = (
translation_models.EntityTranslationsModel.get_all_for_entity(
feconf.TranslatableEntityType.EXPLORATION, 'exp_id', 1))
self.assertEqual(len(enitity_translation_models), 2)
enitity_translation_models = (
translation_models.EntityTranslationsModel.get_all_for_entity(
feconf.TranslatableEntityType.EXPLORATION, 'exp_id2', 2))
self.assertEqual(len(enitity_translation_models), 1)
def test_get_export_policy_not_applicable(self) -> None:
self.assertEqual(
translation_models.EntityTranslationsModel.get_export_policy(),
{
'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'entity_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'entity_type': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'entity_version': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'translations': base_models.EXPORT_POLICY.NOT_APPLICABLE
}
)
def test_get_deletion_policy_not_applicable(self) -> None:
self.assertEqual(
translation_models.EntityTranslationsModel.get_deletion_policy(),
base_models.DELETION_POLICY.NOT_APPLICABLE)
def test_get_model_association_to_user_not_corresponding_to_user(
self
) -> None:
self.assertEqual(
(
translation_models.EntityTranslationsModel
.get_model_association_to_user()
),
base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER)
class MachineTranslationModelTests(test_utils.GenericTestBase):
def test_create_model(self) -> None:
model_id = translation_models.MachineTranslationModel.create(
source_language_code='en',
target_language_code='es',
source_text='hello world',
translated_text='hola mundo'
)
# Ruling out the possibility of None for mypy type checking.
assert model_id is not None
translation_model = (
translation_models.MachineTranslationModel.get(model_id))
self.assertEqual(translation_model.translated_text, 'hola mundo')
def test_create_model_with_same_source_target_language_codes_returns_none(
self
) -> None:
model_id = translation_models.MachineTranslationModel.create(
source_language_code='en',
target_language_code='en',
source_text='hello world',
translated_text='hello world'
)
self.assertIsNone(model_id)
def test_get_machine_translation_with_existing_translation(self) -> None:
translation_models.MachineTranslationModel.create(
source_language_code='en',
target_language_code='es',
source_text='hello world',
translated_text='hola mundo'
)
translation = (
translation_models.MachineTranslationModel
.get_machine_translation(
source_language_code='en',
target_language_code='es',
source_text='hello world',
)
)
self.assertIsNotNone(translation)
# Ruling out the possibility of None for mypy type checking.
assert translation is not None
self.assertEqual(translation.translated_text, 'hola mundo')
def test_get_machine_translation_with_no_existing_translation_returns_none(
self
) -> None:
translation = (
translation_models.MachineTranslationModel
.get_machine_translation(
source_language_code='en',
target_language_code='fr',
source_text='hello world',
)
)
self.assertIsNone(translation)
def test_get_deletion_policy_not_applicable(self) -> None:
self.assertEqual(
translation_models.MachineTranslationModel.get_deletion_policy(),
base_models.DELETION_POLICY.NOT_APPLICABLE)
def test_get_model_association_to_user_not_corresponding_to_user(
self
) -> None:
self.assertEqual(
(
translation_models.MachineTranslationModel
.get_model_association_to_user()
),
base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER)
def test_get_export_policy_not_applicable(self) -> None:
self.assertEqual(
translation_models.MachineTranslationModel.get_export_policy(),
{
'created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'deleted': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'hashed_source_text': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'last_updated': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'source_text': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'source_language_code':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'target_language_code':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'translated_text': base_models.EXPORT_POLICY.NOT_APPLICABLE
}
)
```
|
{
"source": "jeeveshlodhi/virtual-tutor",
"score": 3
}
|
#### File: virtual-tutor/src/extract_key.py
```python
import cv2
import numpy as np
element_big = cv2.getStructuringElement(cv2.MORPH_RECT,( 10,10 ),( 0, 0))
element_small = cv2.getStructuringElement(cv2.MORPH_RECT,( 5,5 ),( 0, 0))
def otsuMulti(im):
N = float(im.shape[0]*im.shape[1])
histogram = np.histogram(im,bins=range(0,256),range=(0,255),density=False)
histogram = histogram[0]
optimalThresh1 = 0
optimalThresh2 = 0
W0K = 0
W1K = 0
M0K = 0
M1K = 0
MT = 0
maxBetweenVar = 0
    for k in range(0,255):
MT += k * (histogram[k] / N)
    for t1 in range(0,255):
W0K += histogram[t1] / N
M0K += t1 * (histogram[t1] / N)
M0 = M0K / (W0K + 0.00001)
W1K = 0
M1K = 0
        for t2 in range(t1 + 1,255):
W1K += histogram[t2] / N
M1K += t2 * (histogram[t2] / N)
M1 = M1K / (W1K + 0.00001)
W2K = 1 - (W0K + W1K)
M2K = MT - (M0K + M1K)
if (W2K <= 0):
break
M2 = M2K / W2K
currVarB = W0K * (M0 - MT) * (M0 - MT) + W1K * (M1 - MT) * (M1 - MT) + W2K * (M2 - MT) * (M2 - MT)
if (maxBetweenVar < currVarB):
maxBetweenVar = currVarB
optimalThresh1 = t1
optimalThresh2 = t2
return (optimalThresh1, optimalThresh2)
def get_black_keymap(frame,idx):
lower_black = np.array([0,0,0])
upper_black = np.array([180,255,idx])
mask_black = cv2.inRange(frame,lower_black,upper_black)
mask_black = cv2.dilate(mask_black,element_big)
retval, labels = cv2.connectedComponents(mask_black)
output = np.zeros_like(labels, dtype=np.uint8)
cv2.normalize(labels, output, 0 , 100, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
mask = output.copy()
return output
def get_white_keymap(frame,idx):
con = np.zeros((frame.shape[0],frame.shape[1]),np.uint8)
lower_white = np.array([0,0,idx])
upper_white = np.array([180,255,255])
mask = cv2.inRange(frame,lower_white,upper_white)
_,contours,_ = cv2.findContours(mask.copy(), 1, 2)
# cv2.imshow("Mask",mask)
for cnt in contours:
hull = cv2.convexHull(cnt)
cv2.drawContours(con,[hull],0,255,-1)
mask = cv2.erode(mask,element_big)
mask = cv2.dilate(mask,element_small)
mask = cv2.erode(mask,element_big)
mask = cv2.dilate(mask,element_small)
mask = cv2.erode(mask,element_big)
mask = cv2.dilate(mask,element_small)
mask = cv2.dilate(mask,element_big)
mask = cv2.bitwise_and(mask,con)
retval, labels = cv2.connectedComponents(mask)
output = np.zeros_like(labels, dtype=np.uint8)
cv2.normalize(labels, output, 100 , 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
counts = np.bincount(output.flatten())
ind = np.argpartition(counts,-3)[-3:]
output[ output == ind[0]] = 0
output[ output == ind[1]] = 0
output[ output == ind[2]] = 0
# max_label = np.argmax(counts)
# output[output == max_label] = 0
return output
def get_keymaps(calibration_im):
hsv = cv2.cvtColor(calibration_im,cv2.COLOR_BGR2HSV)
idx = otsuMulti(hsv[:,:,2])
white_keymap = get_white_keymap(hsv,idx[1])
black_keymap = get_black_keymap(hsv,idx[0])
keymap = cv2.bitwise_or(white_keymap,black_keymap)
return keymap
```
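A hedged usage sketch for the calibration step (the file names are placeholders): black-key labels are normalized into [0, 100] and white-key labels into [100, 255] before the two maps are OR-ed together, so every key region ends up with its own grey level.
```python
import cv2

calibration_im = cv2.imread("calibration_frame.jpg")   # a frame of the empty keyboard
keymap = get_keymaps(calibration_im)
cv2.imwrite("keymap.png", keymap)                       # one grey level per key
```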
#### File: src/motion_stabilization/basic_motion.py
```python
import sys
import cv2
# import numpy as np
IMAGE_WIDTH = 500
def resized_frame(frame):
height, width = frame.shape[0: 2]
desired_width = IMAGE_WIDTH
desired_to_actual = float(desired_width) / width
new_width = int(width * desired_to_actual)
new_height = int(height * desired_to_actual)
return cv2.resize(frame, (new_width, new_height))
class BasicMotionDetector(object):
def __init__(self, file_to_read):
self.file_to_read = file_to_read
self.capture = cv2.VideoCapture(self.file_to_read)
self.video_writer = None
self.frames_per_sec = 25
self.codec = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
self.frame_number = 0
def _generate_working_frames(self):
while True:
success, frame_from_video = self.capture.read()
if not success:
break
frame_from_video = resized_frame(frame_from_video)
yield frame_from_video
def _generate_motion_detection_frames(self):
previous_frame = None
previous_previous_frame = None
for frame in self._generate_working_frames():
motion_detection_frame = None
if previous_previous_frame is not None:
motion_detection_frame = self._get_motion_detection_frame(previous_previous_frame, previous_frame, frame)
previous_previous_frame = previous_frame
previous_frame = frame
if motion_detection_frame is not None:
yield motion_detection_frame
def _get_motion_detection_frame(self, previous_previous_frame, previous_frame, frame):
d1 = cv2.absdiff(frame, previous_frame)
d2 = cv2.absdiff(previous_frame, previous_previous_frame)
motion_detection_frame = cv2.bitwise_xor(d1, d2)
return motion_detection_frame
def create(self, output_filename):
for motion_detection_frame in self._generate_motion_detection_frames():
height, width = motion_detection_frame.shape[0: 2]
self.video_writer = self.video_writer or cv2.VideoWriter(output_filename, self.codec, self.frames_per_sec, (width, height))
self.video_writer.write(motion_detection_frame)
self.frame_number += 1
print "Writing %s" % self.frame_number
if self.video_writer is not None:
self.video_writer.release()
if __name__ == "__main__":
file_to_read = "../sample_videos/Piano/VID_20161102_204909.mp4"
BasicMotionDetector(file_to_read).create("basic_motion.avi")
```
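The double-difference in `_get_motion_detection_frame` combines two consecutive frame differences with a bitwise XOR; for the saturated toy values below this behaves like a logical XOR, keeping pixels that changed in exactly one of the two steps:
```python
import numpy as np
import cv2

f0 = np.array([[0, 255, 0, 0]], dtype=np.uint8)   # a bright blob moving right
f1 = np.array([[0, 0, 255, 0]], dtype=np.uint8)
f2 = np.array([[0, 0, 0, 255]], dtype=np.uint8)
d1 = cv2.absdiff(f2, f1)            # [0,   0, 255, 255]
d2 = cv2.absdiff(f1, f0)            # [0, 255, 255,   0]
motion = cv2.bitwise_xor(d1, d2)    # [0, 255,   0, 255]
```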
|
{
"source": "JeeveshN/Face-Detect",
"score": 3
}
|
#### File: JeeveshN/Face-Detect/detect_face.py
```python
from random import randint
import cv2
import sys
import os
import traceback
CASCADE="Face_cascade.xml"
FACE_CASCADE=cv2.CascadeClassifier(CASCADE)
def detect_faces(image_path,display=True):
image=cv2.imread(image_path)
image_grey=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
faces = FACE_CASCADE.detectMultiScale(image_grey,scaleFactor=1.16,minNeighbors=5,minSize=(25,25),flags=0)
for x,y,w,h in faces:
sub_img=image[y-10:y+h+10,x-10:x+w+10]
os.chdir("Extracted")
cv2.imwrite(str(randint(0,10000))+".jpg",sub_img)
os.chdir("../")
cv2.rectangle(image,(x,y),(x+w,y+h),(255, 255,0),2)
if display:
cv2.imshow("Faces Found",image)
# if (cv2.waitKey(0) & 0xFF == ord('q')) or (cv2.waitKey(0) & 0xFF == ord('Q')):
# cv2.destroyAllWindows()
if __name__ == "__main__":
if not "Extracted" in os.listdir("."):
os.mkdir("Extracted")
if len(sys.argv) < 2:
print("Usage: python Detect_face.py 'image path'")
sys.exit()
if os.path.isdir(sys.argv[1]):
for image in os.listdir(sys.argv[1]):
try:
print ("Processing.....",os.path.abspath(os.path.join(sys.argv[1],image)))
detect_faces(os.path.abspath(os.path.join(sys.argv[1],image)),False)
except Exception:
print ("Could not process ",os.path.abspath(os.path.join(sys.argv[1],image)))
else:
detect_faces(sys.argv[1])
```
|
{
"source": "jeeves-sh/jeeves-jira",
"score": 3
}
|
#### File: jeeves-jira/jirajumper/client.py
```python
import os
from typing import Optional
from jira import JIRA
from jirajumper.errors import MissingJiraCredentials
def env_server() -> Optional[str]:
"""Retrieve JIRA server address."""
return os.getenv('JIRA_SERVER')
def env_username() -> Optional[str]:
"""Retrieve JIRA server username."""
return os.getenv('JIRA_USERNAME')
def env_token() -> Optional[str]:
"""Retrieve JIRA server token."""
return os.getenv('JIRA_TOKEN')
def jira() -> JIRA:
"""Construct an instance of Jira client and establish an HTTP connection."""
server = env_server()
username = env_username()
token = env_token()
if not all([server, username, token]):
raise MissingJiraCredentials(
server=server,
username=username,
token=token,
)
return JIRA(
server=server,
basic_auth=(
username,
token,
),
)
def issue_url(server: str, key: str) -> str:
"""Generate URL for a given issue."""
return f'{server}/browse/{key}'
def custom_fields(client: JIRA):
fields = client.fields()
for field in fields:
print(field)
```
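A hedged usage sketch (the credential values are placeholders, not real ones): all three environment variables must be set before `jira()` is called, otherwise `MissingJiraCredentials` is raised.
```python
import os

os.environ["JIRA_SERVER"] = "https://your-company.atlassian.net"
os.environ["JIRA_USERNAME"] = "you@example.com"
os.environ["JIRA_TOKEN"] = "<api-token>"

client = jira()
print(issue_url(env_server(), "PROJ-123"))   # https://your-company.atlassian.net/browse/PROJ-123
```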
#### File: jeeves-jira/jirajumper/cli.py
```python
import logging
import sys
from enum import Enum
from functools import partial
from itertools import filterfalse
from pathlib import Path
import click
import rich
from rich.traceback import install
from typer import Context, Option, Typer
from typer.core import TyperArgument, TyperCommand
from jirajumper.cache.cache import GlobalOptions, field_key_by_name
from jirajumper.client import jira
from jirajumper.commands.clone import clone
from jirajumper.commands.fork import fork
from jirajumper.commands.graph import graph
from jirajumper.commands.link import link
from jirajumper.commands.list_issues import list_issues
from jirajumper.commands.select import jump
from jirajumper.commands.status import status
from jirajumper.commands.update import update
from jirajumper.fields import FIELDS, JiraField, JiraFieldsRepository
from jirajumper.models import OutputFormat
app = Typer(
help='Manage JIRA issues.',
no_args_is_help=True,
)
class LogLevel(str, Enum): # noqa: WPS600
"""Available logging levels."""
DEBUG = 'debug'
ERROR = 'error'
def exception_handler(exception_type, exception, traceback):
"""Custom exception handler to look more civilized than we are."""
exception_name = exception_type.__name__
exception_text = str(exception)
rich.print(
f'[red][bold]{exception_name}:[/bold][/red] {exception_text}',
)
@app.callback()
def global_options(
context: Context,
format: OutputFormat = Option(
OutputFormat.PRETTY,
help='Format to print the data in',
),
cache_path: Path = Option(
default=Path.home() / '.cache/jirajumper/jirajumper.json',
envvar='JIRAJUMPER_CACHE_PATH',
help='Path to the JSON file where jirajumper will store its cache.',
),
log_level: LogLevel = Option( # noqa: WPS404, B008
LogLevel.ERROR,
help=(
'Log message level: `debug` (to print all debug messages and '
'exception tracebacks), or `error` (to only log critical errors).'
),
),
):
"""Configure global options valid for most of jeeves-jira commands."""
logger = logging.getLogger('jj')
if log_level == LogLevel.DEBUG:
install(show_locals=False)
logger.setLevel(logging.DEBUG)
else:
sys.excepthook = exception_handler
logger.setLevel(logging.ERROR)
client = jira()
key_by_name = field_key_by_name(
jira=client,
)
resolved_fields = map(
partial(
JiraField.resolve,
field_key_by_name=key_by_name,
),
FIELDS,
)
context.obj = GlobalOptions(
logger=logger,
output_format=format,
jira=jira(),
fields=JiraFieldsRepository(resolved_fields),
cache_path=cache_path,
)
class AutoOptionsCommand(TyperCommand):
writable_only = False
mutable_only = False
def __init__(self, **kwargs):
fields = FIELDS
if self.mutable_only:
fields = fields.mutable()
if self.writable_only:
fields = fields.writable()
custom_options = [
click.Option(
['--{option_name}'.format(
option_name=field.human_name.replace("_", "-"),
)],
help=field.description,
)
for field in fields
]
existing_params = list(filterfalse(
lambda existing_param: (
isinstance(existing_param, TyperArgument) and
existing_param.name == 'options'
),
kwargs.get('params', []),
))
kwargs.update(
params=existing_params + custom_options,
)
super().__init__(**kwargs)
class CloneCommand(AutoOptionsCommand):
writable_only = True
class UpdateCommand(AutoOptionsCommand):
mutable_only = True
app.command()(jump)
app.command(
cls=CloneCommand,
context_settings={
'ignore_unknown_options': True,
},
)(clone)
app.command(
cls=CloneCommand,
context_settings={
'ignore_unknown_options': True,
},
)(fork)
app.command(
cls=UpdateCommand,
context_settings={
'ignore_unknown_options': True,
},
)(update)
app.command(
cls=AutoOptionsCommand,
context_settings={
'ignore_unknown_options': True,
},
name='list',
)(list_issues)
app.command(
cls=AutoOptionsCommand,
context_settings={
'ignore_unknown_options': True,
},
name='graph',
)(graph)
app.command()(link)
app.command()(status)
```
#### File: jirajumper/commands/fork.py
```python
from typing import Optional
import rich
from jira import JIRAError
from typer import Option
from jirajumper import default_options
from jirajumper.cache.cache import JeevesJiraContext
from jirajumper.commands.clone import clone
from jirajumper.commands.link import LinkType, link
from jirajumper.commands.select import jump
from jirajumper.commands.update import JIRAUpdateFailed
def fork(
context: JeevesJiraContext,
link_type: LinkType = Option(LinkType.BLOCKS, '--link'),
stay: bool = False,
assignee: Optional[str] = default_options.ASSIGNEE,
summary: str = default_options.SUMMARY,
**options: str,
):
"""Fork a JIRA issue."""
child_issue = clone(
context=context,
stay=True,
assignee=assignee,
summary=summary,
**options,
)
link(
context=context,
link_type=link_type,
specifiers=[child_issue.key],
)
if not stay:
jump(
context=context,
specifier=child_issue.key,
)
```
#### File: jirajumper/commands/list_issues.py
```python
from typing import Dict
import rich
from jirajumper.cache.cache import JeevesJiraContext
from jirajumper.fields import JiraFieldsRepository
from jirajumper.fields.field import ResolvedField
def generate_jql(
fields: JiraFieldsRepository,
options: Dict[str, str],
):
"""Generate JQL string."""
fields_and_values = fields.match_options(options)
field: ResolvedField
expressions = [
field.to_jql(expression)
for field, expression in fields_and_values
]
return ' AND '.join(expressions)
def list_issues(
context: JeevesJiraContext,
**options,
):
"""List JIRA issues by criteria."""
jql = generate_jql(
fields=context.obj.fields,
options=options,
)
context.obj.logger.info('JQL: `%s`', jql)
issues = context.obj.jira.search_issues(jql, maxResults=None)
for issue in issues:
rich.print(
'* {key} [i]({status} / {assignee})[/i] {summary}'.format(
key=issue.key,
status=issue.fields.status,
assignee=issue.fields.assignee,
summary=issue.fields.summary,
),
)
```
#### File: jirajumper/commands/select.py
```python
import json
from typing import Optional
import backoff
import rich
from documented import DocumentedError
from jira import JIRA, JIRAError
from typer import Argument, echo
from jirajumper.cache.cache import JeevesJiraContext, JiraCache
from jirajumper.client import issue_url
from jirajumper.models import OutputFormat
def normalize_issue_specifier(
client: JIRA,
specifier: str,
current_issue_key: Optional[str],
):
"""Normalize issue specifier."""
if specifier.isnumeric() and current_issue_key:
project_key, _current_issue_number = current_issue_key.split('-')
return f'{project_key}-{specifier}'
if specifier.lower() == 'next':
current_issue = client.issue(current_issue_key)
links = current_issue.fields.issuelinks
if not links:
raise ValueError(
f'Issue {current_issue_key} does not have any issues it blocks.',
)
for link in links:
try:
outward_issue = link.outwardIssue
except AttributeError:
continue
if (
link.type.name == 'Blocks' and
outward_issue.fields.status.statusCategory.name != 'Done'
):
return outward_issue.key
raise ValueError(
f'Cannot find an issue that follows {current_issue_key} :(',
)
return specifier
class NoIssueSelected(DocumentedError):
"""
No issue has been selected.
To select an issue PROJ-123, please run:
jj jump PROJ-123
"""
@backoff.on_exception(backoff.expo, JIRAError, max_time=5)
def jump(
context: JeevesJiraContext,
specifier: Optional[str] = Argument(None), # noqa: WPS404, B008
):
"""Select a Jira issue to work with."""
client = context.obj.jira
cache = context.obj.cache
if specifier:
specifier = normalize_issue_specifier(
client=client,
specifier=specifier,
current_issue_key=cache.selected_issue_key,
)
issue = client.issue(specifier)
cache.selected_issue_key = issue.key
context.obj.store_cache(
JiraCache(
selected_issue_key=issue.key,
),
)
else:
key = cache.selected_issue_key
if not key:
raise NoIssueSelected()
issue = client.issue(cache.selected_issue_key)
if context.obj.output_format == OutputFormat.PRETTY:
rich.print(f'[bold]{issue.key}[/bold] {issue.fields.summary}')
rich.print(issue_url(client.server_url, issue.key))
for print_field in context.obj.fields:
field_value = print_field.retrieve(issue=issue)
rich.print(f' - {print_field.human_name}: {field_value}')
else:
echo(
json.dumps(
{
json_field.human_name: json_field.retrieve(issue=issue)
for json_field in context.obj.fields
},
indent=2,
),
)
return issue
```
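The numeric shortcut in `normalize_issue_specifier` never touches the JIRA client, so it can be sketched without one (the issue keys are hypothetical):
```python
assert normalize_issue_specifier(
    client=None, specifier="456", current_issue_key="PROJ-123",
) == "PROJ-456"
# Full keys pass through unchanged; only "next" needs a live client.
assert normalize_issue_specifier(
    client=None, specifier="OTHER-7", current_issue_key="PROJ-123",
) == "OTHER-7"
```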
#### File: jirajumper/fields/field.py
```python
import operator
import re
from dataclasses import asdict, dataclass
from typing import Optional, Protocol, Tuple, TypeVar, Union
from jira import Issue
from jirajumper.models import (
FieldByName,
FieldKeyByName,
HumanValue,
JiraValue,
NotImplementedType,
)
AnyType = TypeVar('AnyType')
class ToJira(Protocol):
"""Convert a human-readable value to JIRA field."""
def __call__(self, human_value: HumanValue) -> JiraValue:
"""Convert a human-readable value to JIRA field."""
raise NotImplementedError()
class FromJira(Protocol):
"""Convert a native JIRA field value to a human readable one."""
def __call__(self, jira_value: JiraValue) -> HumanValue:
"""Convert a native JIRA field value to a human readable one."""
raise NotImplementedError()
class ToJQL(Protocol):
"""Construct a JQL expression from a raw argument value."""
def __call__(self, expression: str) -> str:
"""Construct a JQL expression from a raw argument value."""
raise NotImplementedError()
def identity(any_value: AnyType) -> AnyType:
"""Identity function."""
return any_value
@dataclass(frozen=True)
class JiraField:
"""Transformations related to a particular JIRA field."""
jira_name: Union[str, FieldByName]
human_name: str
description: str
is_mutable: bool = True
jql_name: Optional[str] = None
to_jira: Union[ToJira, NotImplementedType] = identity
from_jira: FromJira = identity
def retrieve(self, issue: Issue):
"""Retrieve the native field value from given issue."""
return self.from_jira(
operator.attrgetter(self.jira_name)(issue.fields),
)
def store(self, human_value: HumanValue) -> Tuple[str, JiraValue]:
"""Convert the readable value into JIRA native form."""
return self.jira_name, self.to_jira(human_value)
def is_writable(self):
"""Find out if this field is writable."""
return self.to_jira is not NotImplemented
def resolve(self, field_key_by_name: FieldKeyByName) -> 'ResolvedField':
"""Resolve jira_name."""
if isinstance(self.jira_name, str):
unresolved_name = self.jira_name
resolved_name = self.jira_name
elif isinstance(self.jira_name, FieldByName):
unresolved_name = self.jira_name.readable_name
resolved_name = field_key_by_name[unresolved_name]
else:
raise ValueError(
f'`{self.jira_name}` is not a valid JIRA field name.',
)
field_dict = {
**asdict(self),
**{
'unresolved_jira_name': unresolved_name,
'jira_name': resolved_name,
},
}
return ResolvedField(**field_dict)
def _jql_operator(is_multiple: bool, is_positive: bool) -> str:
return {
False: {
False: '!=',
True: '=',
},
True: {
False: 'NOT IN',
True: 'IN',
},
}[is_multiple][is_positive]
@dataclass(frozen=True)
class ResolvedField(JiraField):
"""JIRA field description with resolved field name."""
unresolved_jira_name: Optional[str] = None
def to_jql(self, expression: str) -> str: # noqa: WPS210
"""Convert human readable expression to JQL."""
minus, pattern = re.match('(-*)(.+)', expression).groups()
is_positive = not minus
search_values = list(map(
str.strip,
pattern.split(','),
))
is_multiple = len(search_values) > 1
jql_operator = _jql_operator(
is_multiple=is_multiple,
is_positive=is_positive,
)
jql_values = ', '.join(
f'"{search_value}"'
for search_value in search_values
)
if is_multiple:
jql_values = f'({jql_values})'
field_name = self.jql_name or self.unresolved_jira_name
if ' ' in field_name:
field_name = f'"{field_name}"'
return f'{field_name} {jql_operator} {jql_values}'
```
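A sketch of `to_jql` on a hypothetical `status` field, mirroring the construction used in the tests: a leading `-` negates the expression, and comma-separated values turn into an `IN`/`NOT IN` list.
```python
field = ResolvedField(
    human_name='status',
    jira_name='status',
    unresolved_jira_name='status',
    description='',
)
assert field.to_jql('Done') == 'status = "Done"'
assert field.to_jql('-To Do, Done') == 'status NOT IN ("To Do", "Done")'
```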
#### File: jeeves-jira/tests/test_generate_jql.py
```python
from jirajumper.commands.list_issues import generate_jql
from jirajumper.fields import JiraFieldsRepository
from jirajumper.fields.defaults import STATUS_CATEGORY
from jirajumper.fields.field import ResolvedField
def test_status_category():
status_category = ResolvedField(
human_name='status_category',
jira_name='statusCategory',
unresolved_jira_name='statusCategory',
description='',
)
fields = JiraFieldsRepository([status_category])
options = {'status_category': 'boo'}
assert generate_jql(
fields=fields,
options=options,
) == 'statusCategory = "boo"'
```
|
{
"source": "jeevesy83/twitter-python-ads-sdk",
"score": 3
}
|
#### File: twitter-python-ads-sdk/tests/support.py
```python
import sys
import string
import random
def with_resource(resource):
return 'https://ads-api.twitter.com{resource}'.format(resource=resource)
def with_fixture(name):
f = open('tests/fixtures/{name}.json'.format(name=name), 'r')
return f.read()
def characters(length):
chars = string.ascii_uppercase + string.ascii_lowercase + string.digits
return ''.join(random.choice(chars) for _ in range(length))
```
#### File: twitter-python-ads-sdk/tests/test_line_item.py
```python
import responses
import unittest
from tests.support import with_resource, with_fixture, characters
from twitter_ads.account import Account
from twitter_ads.campaign import LineItem
from twitter_ads.client import Client
from twitter_ads.cursor import Cursor
from twitter_ads import API_VERSION
@responses.activate
def test_line_items_all():
responses.add(responses.GET,
with_resource('/' + API_VERSION + '/accounts/2iqph'),
body=with_fixture('accounts_load'),
content_type='application/json')
responses.add(responses.GET,
with_resource('/' + API_VERSION + '/accounts/2iqph/line_items'),
body=with_fixture('line_items_all'),
content_type='application/json')
client = Client(
characters(40),
characters(40),
characters(40),
characters(40)
)
account = Account.load(client, '2iqph')
cursor = account.line_items()
assert cursor is not None
assert isinstance(cursor, Cursor)
assert cursor.count == 10
lineitem = cursor.next()
assert lineitem.id == 'bw2'
assert lineitem.entity_status == 'ACTIVE'
@responses.activate
def test_line_item_load():
responses.add(responses.GET,
with_resource('/' + API_VERSION + '/accounts/2iqph'),
body=with_fixture('accounts_load'),
content_type='application/json')
responses.add(responses.GET,
with_resource('/' + API_VERSION + '/accounts/2iqph/line_items/bw2'),
body=with_fixture('line_items_load'),
content_type='application/json')
client = Client(
characters(40),
characters(40),
characters(40),
characters(40)
)
account = Account.load(client, '2iqph')
line_item = LineItem.load(account, 'bw2')
assert line_item.id == 'bw2'
assert line_item.entity_status == 'ACTIVE'
```
|
{
"source": "Jeevi10/AICR",
"score": 3
}
|
#### File: Jeevi10/AICR/attack_test.py
```python
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from resnet_model import * # Imports the ResNet Model
from networks import *
"""
Adversarial Attack Options: fgsm, bim, mim, pgd
"""
num_classes=10
#torch.cuda.set_device(0)
#model = resnet(num_classes=num_classes,depth=110)
model = sixNet()
#model = model.cuda()
if True:
model = nn.DataParallel(model).cuda()
#Loading Trained Model
softmax_filename= './saved_model/model_pretrain_model_mnist'
#filename= 'Models_PCL/CIFAR10_PCL.pth.tar'
robust_model= './saved_model/model_posttrain_model_mnist_L'
checkpoint = torch.load(softmax_filename)
model.load_state_dict(checkpoint)
model.eval()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Loading Test Data (Un-normalized)
transform_test = transforms.Compose([transforms.ToTensor(),])
testset = torchvision.datasets.MNIST(root='./file', train=False,
download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=1000, pin_memory=True,
shuffle=False, num_workers=4)
# Mean and Standard Deiation of the Dataset
mean = [0.1307]
std = [0.3081]
def normalize(t):
t[:, 0, :, :] = (t[:, 0, :, :] - mean[0])/std[0]
return t
def un_normalize(t):
t[:, 0, :, :] = (t[:, 0, :, :] * std[0]) + mean[0]
return t
# Attacking Images batch-wise
def attack(model, criterion, img, label, eps, attack_type, iters):
adv = img.detach()
adv.requires_grad = True
if attack_type == 'fgsm':
iterations = 1
else:
iterations = iters
if attack_type == 'pgd':
        step = 20 / 255
else:
step = eps / iterations
noise = 0
for j in range(iterations):
out_adv = model(normalize(adv.clone()),False)
loss = criterion(out_adv, label)
loss.backward()
if attack_type == 'mim':
adv_mean= torch.mean(torch.abs(adv.grad), dim=1, keepdim=True)
adv_mean= torch.mean(torch.abs(adv_mean), dim=2, keepdim=True)
adv_mean= torch.mean(torch.abs(adv_mean), dim=3, keepdim=True)
adv.grad = adv.grad / adv_mean
noise = noise + adv.grad
else:
noise = adv.grad
# Optimization step
adv.data = adv.data + step * noise.sign()
# adv.data = adv.data + step * adv.grad.sign()
if attack_type == 'pgd':
adv.data = torch.where(adv.data > img.data + eps, img.data + eps, adv.data)
adv.data = torch.where(adv.data < img.data - eps, img.data - eps, adv.data)
adv.data.clamp_(0, 1)
adv.grad.data.zero_()
return adv.detach()
# Loss Criteria
criterion = nn.CrossEntropyLoss()
adv_acc = 0
clean_acc = 0
eps = 80/255 # Epsilon for Adversarial Attack
for i, (img, label) in enumerate(test_loader):
img, label = img.to(device), label.to(device)
clean_acc += torch.sum(model(normalize(img.clone().detach())).argmax(dim=-1) == label).item()
adv= attack(model, criterion, img, label, eps=eps, attack_type= 'fgsm', iters= 10 )
adv_acc += torch.sum(model(normalize(adv.clone().detach())).argmax(dim=-1) == label).item()
print('Batch: {0}'.format(i))
print('Clean accuracy:{0:.3%}\t Adversarial accuracy:{1:.3%}'.format(clean_acc / len(testset), adv_acc / len(testset)))
```
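The evaluation loop above only exercises FGSM; the other attack types listed at the top reuse the same `attack()` routine. A hedged sketch of how the call changes inside the loop (same `img`/`label` tensors):
```python
adv_bim = attack(model, criterion, img, label, eps=eps, attack_type='bim', iters=10)
adv_mim = attack(model, criterion, img, label, eps=eps, attack_type='mim', iters=10)
adv_pgd = attack(model, criterion, img, label, eps=eps, attack_type='pgd', iters=10)
pgd_acc = torch.sum(
    model(normalize(adv_pgd.clone().detach())).argmax(dim=-1) == label).item()
```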
#### File: Jeevi10/AICR/loss_fn_prob.py
```python
import torch
import torch.nn.functional as F
import torch.nn as nn
import utils
import parameter as p
class Proximity(nn.Module):
def __init__(self, num_classes=100, feat_dim=1024, use_gpu=True, margin = 0.0, sl= 0.01 , lamda=0.01):
super(Proximity, self).__init__()
self.num_classes = num_classes
self.feat_dim = feat_dim
self.use_gpu = use_gpu
self.margin = margin
self.sl = sl
self.lamda = lamda
if self.use_gpu:
self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).cuda())
else:
self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))
def forward(self, x , labels):
batch_size = x.size(0)
distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
distmat.addmm_(1, -2, x, self.centers.t())
classes = torch.arange(self.num_classes).long()
if self.use_gpu: classes = classes.cuda()
labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
mask = labels.eq(classes.expand(batch_size, self.num_classes))
#d_y_1 = distmat.clone()[mask.clone()]
#inv_mask = ~mask.clone()
d_y = distmat.clone()[mask]
#print("before",d_y)
d_y = d_y.clamp(min=1e-12, max=1e+12)
#print("after",d_y)
loss = d_y.mean()
return loss , torch.argmin(distmat,dim=1)
class Con_Proximity(nn.Module):
def __init__(self, num_classes=100, feat_dim=1024, use_gpu=True, margin = 0.0, sl= 0.01 , lamda=0.01):
super(Con_Proximity, self).__init__()
self.num_classes = num_classes
self.feat_dim = feat_dim
self.use_gpu = use_gpu
self.margin = margin
self.sl = sl
self.lamda = lamda
if self.use_gpu:
self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).cuda())
else:
self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))
def forward(self, x , labels):
batch_size = x.size(0)
distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
distmat.addmm_(1, -2, x, self.centers.t())
classes = torch.arange(self.num_classes).long()
if self.use_gpu: classes = classes.cuda()
labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
mask = labels.eq(classes.expand(batch_size, self.num_classes))
#d_y_1 = distmat.clone()[mask.clone()]
inv_mask = ~mask.clone()
d_c = distmat.clone()[inv_mask]
d_c = d_c.clamp(min=1e-12, max=1e+12)
loss = d_c.mean()
return loss , torch.argmin(distmat,dim=1)
def off_diagonal(x):
# return a flattened view of the off-diagonal elements of a square matrix
n, m = x.shape
assert n == m
return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()
class BarlowTwins(nn.Module):
def __init__(self, feat_dim):
super().__init__()
#self.backbone = torchvision.models.resnet50(zero_init_residual=True)
#self.backbone.fc = nn.Identity()
self.feat_dim = feat_dim
self.scale_loss = 1 / 32
self.lambd= 5e-3
# projector
sizes = [self.feat_dim] + [1024]
layers = []
for i in range(len(sizes) - 2):
layers.append(nn.Linear(sizes[i], sizes[i + 1], bias=False))
layers.append(nn.BatchNorm1d(sizes[i + 1]))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Linear(sizes[-2], sizes[-1], bias=False))
self.projector = nn.Sequential(*layers)
# normalization layer for the representations z1 and z2
self.bn = nn.BatchNorm1d(1024, affine=False)
def forward(self, y1, y2):
z1 = self.projector(y1)
z2 = self.projector(y2)
        Z = torch.cat((z1, z2), 0)
#mse_loss = F.mse_loss(z1,z2)
# empirical cross-correlation matrix
c = self.bn(z1).T @ self.bn(z2)
# sum the cross-correlation matrix between all gpus
c.div_(p.batch_size_train)
#torch.distributed.all_reduce(c)
# use --scale-loss to multiply the loss by a constant factor
# see the Issues section of the readme
on_diag = torch.diagonal(c).add_(-1).pow_(2).sum().mul(self.scale_loss)
off_diag = off_diagonal(c).pow_(2).sum().mul(self.scale_loss)
loss = on_diag + self.lambd * off_diag
return loss, Z #+ mse_loss*0.001,
```
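`off_diagonal` relies on a flatten-and-reshape trick to drop the main diagonal. A quick check on a 3x3 matrix (assuming the function above is in scope):
```python
import torch

m = torch.arange(9).reshape(3, 3)
# tensor([[0, 1, 2],
#         [3, 4, 5],
#         [6, 7, 8]])
assert off_diagonal(m).tolist() == [1, 2, 3, 5, 6, 7]
```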
#### File: Jeevi10/AICR/loss_fn.py
```python
import torch
import torch.nn.functional as F
import torch.nn as nn
import utils
import parameter as p
def center_corr(centers, indices, batch_size, feature_size,lamda):
c = centers.unsqueeze(dim=0).expand(batch_size,p.n_classes,feature_size)
indices_1 = indices[:,0].unsqueeze(-1)
indices_1 = indices_1.repeat(1,feature_size)
indices_1 = indices_1.unsqueeze(1)
indices_2 = indices[:,1].unsqueeze(-1)
indices_2 = indices_2.repeat(1,feature_size)
indices_2 = indices_2.unsqueeze(1)
mat = torch.gather(c, 1, indices_1 , sparse_grad=False, out=None)
mat_1 = torch.gather(c, 1, indices_2 , sparse_grad=False, out=None)
corr = torch.matmul( mat.transpose(2,1),mat_1)
on_diag = torch.diagonal(corr).pow_(2).sum().mul(1/32)
off_diag = torch.triu(corr, diagonal=1, out=None).pow(2).sum(-1).sum(-1).sum(-1)*1/32
loss = on_diag + lamda*off_diag
return loss
class Proximity(nn.Module):
def __init__(self, num_classes=100, feat_dim=1024, use_gpu=True, margin = 0.0, sl= 0.01 , lamda=0.01):
super(Proximity, self).__init__()
self.num_classes = num_classes
self.feat_dim = feat_dim
self.use_gpu = use_gpu
self.margin = margin
self.sl = sl
self.lamda = lamda
if self.use_gpu:
self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim).cuda())
else:
self.centers = nn.Parameter(torch.randn(self.num_classes, self.feat_dim))
def forward(self, x , labels):
batch_size = x.size(0)
distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(batch_size, self.num_classes) + \
torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.num_classes, batch_size).t()
distmat.addmm_(1, -2, x, self.centers.t())
classes = torch.arange(self.num_classes).long()
if self.use_gpu: classes = classes.cuda()
labels = labels.unsqueeze(1).expand(batch_size, self.num_classes)
mask = labels.eq(classes.expand(batch_size, self.num_classes))
d_y = distmat[mask.clone()]
inv_mask = ~mask.clone()
d_y = d_y.unsqueeze(-1)
#print((distmat*inv_mask).shape)
d_c = distmat[inv_mask]
values, indices = torch.topk(distmat,2, dim=1, largest=False, sorted=True, out=None)
#d_y = d_y.repeat(1,self.num_classes-1)
d_c = d_c.view(batch_size,self.num_classes-1)
d_c = torch.mean(d_c,dim=1)
#d_1 = values[:,0]
#d_2 = values[:,1]
#d_3 = values[:,2]
#d_4 = values[:,3]
#assert(d_y.shape==d_c.shape)
#indicators = utils.in_top_k(labels,distmat,1)[:,0]
#con_indicators = ~ indicators.clone()
#d_c = d_2*indicators + d_1*con_indicators
loss = F.relu((d_y-d_c)/(d_y+d_c) + self.margin)
#loss = loss.mean(dim=1)
loss_corr = center_corr(self.centers, indices, batch_size, self.feat_dim, self.lamda)
mean_loss = loss.mean()
final_loss = mean_loss #+ loss_corr*self.sl
return final_loss , torch.argmin(distmat,dim=1)
```
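The core term of the `Proximity` loss above is a normalized contrast between the distance to the true-class center (`d_y`) and the mean distance to the competing centers (`d_c`), clipped at zero. A hand-checked sketch with made-up distances:
```python
import torch
import torch.nn.functional as F

d_y = torch.tensor([1.0, 9.0])   # distances to the correct centers for two samples
d_c = torch.tensor([4.0, 3.0])   # mean distances to the other centers
loss = F.relu((d_y - d_c) / (d_y + d_c) + 0.0)   # margin = 0.0
assert loss.tolist() == [0.0, 0.5]               # only the mis-clustered sample is penalized
```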
#### File: Jeevi10/AICR/vgg19.py
```python
import torch
import torch.nn as tnn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import torchvision.models as models
import torchvision
import torch
import torch.nn as nn
BATCH_SIZE = 256
LEARNING_RATE = 0.01
EPOCH = 50
N_CLASSES = 25
def read_mnist(train):
return torchvision.datasets.MNIST( "./file",
train=train,
download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
torchvision.transforms.Normalize(
(0.1307,), (0.3081,)),
]))
trainData = read_mnist(True)
testData = read_mnist(False)
trainLoader = torch.utils.data.DataLoader(dataset=trainData, batch_size=BATCH_SIZE, shuffle=True)
testLoader = torch.utils.data.DataLoader(dataset=testData, batch_size=BATCH_SIZE, shuffle=False)
vgg16 = models.vgg16(pretrained=True)
#print(vgg16.classifier[6].out_features) # 1000
# Freeze training for all layers
for i,param in enumerate(vgg16.features.parameters()):
if i < 23:
param.require_grad = False
# Newly created modules have require_grad=True by default
num_features = vgg16.classifier[6].in_features
features = list(vgg16.classifier.children())[:-1] # Remove last layer
features.extend([nn.Linear(num_features, 10)]) # Add our layer with 10 outputs
vgg16.classifier = nn.Sequential(*features) # Replace the model classifier
#print(vgg16)
vgg16.cuda()
# Loss, Optimizer & Scheduler
cost = tnn.CrossEntropyLoss()
optimizer = torch.optim.Adam(vgg16.parameters(), lr=LEARNING_RATE)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
# Train the model
for epoch in range(EPOCH):
avg_loss = 0
cnt = 0
correct = 0
total = 0
for idx, (images, labels) in enumerate(trainLoader):
images=images.repeat(1,3,1,1)
images = images.cuda()
labels = labels.cuda()
# Forward + Backward + Optimize
optimizer.zero_grad()
outputs = vgg16(images)
loss = cost(outputs, labels)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum()
avg_loss += loss.data
cnt += 1
if idx% 100 ==0:
print("[E: %d] loss: %f, avg_loss: %f, Train_accu: %f" % (epoch, loss.data, avg_loss/cnt,100* correct.item()/total))
loss.backward()
optimizer.step()
scheduler.step(avg_loss)
# Test the model
vgg16.eval()
correct = 0
total = 0
for idx,(images, labels) in enumerate(testLoader):
images=images.repeat(1,3,1,1)
images = images.cuda()
outputs = vgg16(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted.cpu() == labels).sum()
print(predicted, labels, correct, total)
print("avg acc: %f" % (100* correct.item()/total))
# Save the Trained Model
torch.save(vgg16.state_dict(), 'cnn.pkl')
```
#### File: Jeevi10/AICR/vis_help.py
```python
import time
import pandas as pd
import matplotlib.patheffects as PathEffects
#matplotlib inline
import seaborn as sns
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import torch
import networks
colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
'#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
'#bcbd22', '#17becf']
mnist_classes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
def plot_embeddings(embeddings, targets, xlim=None, ylim=None):
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
for i in range(10):
#ax = fig.add_subplot(111, projection='3d')
inds = np.where(targets==i)[0]
ax.scatter(embeddings[inds,0], embeddings[inds,1], embeddings[inds,2], alpha=0.5, color=colors[i])
if xlim:
plt.xlim(xlim[0], xlim[1])
if ylim:
plt.ylim(ylim[0], ylim[1])
plt.legend(mnist_classes)
def extract_embeddings(dataloader, model, pretrain):
with torch.no_grad():
model.eval()
embeddings_1 = np.zeros((len(dataloader.dataset), networks.vis_size))
embeddings_2 = np.zeros((len(dataloader.dataset), networks.vis_size))
labels = np.zeros(len(dataloader.dataset))
k = 0
for images, target in dataloader:
images = images.cuda()
emb_1, emb_2= model.get_embedding(images, pretrain)
emb_1, emb_2 = emb_1.cpu(), emb_2.cpu()
embeddings_1[k:k+len(images)] = emb_1
embeddings_2[k:k+len(images)] = emb_2
labels[k:k+len(images)] = target.numpy()
k += len(images)
return embeddings_1, embeddings_2, labels
```
|
{
"source": "jeevika2307/the-true-badgerss",
"score": 4
}
|
#### File: the-true-badgerss/feature-addition/add.py
```python
def addition(a,b):
return a+b
a=int(input("Enter the first input"))
b=int(input("Enter the second input"))
print(addition(a,b))
```
|
{
"source": "jeevy222/Tesla_Stock_Sentiment_Analyzer",
"score": 3
}
|
#### File: jeevy222/Tesla_Stock_Sentiment_Analyzer/sentimentalanalysis.py
```python
import nltk
nltk.downloader.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA
import pandas as pd
sia = SIA()
def perform_sentimental_analysis():
    """Perform sentiment analysis on processed/clean data using SentimentIntensityAnalyzer."""
# Initialize variables
results = []
positive_sentiments_count = 0
negative_sentiments_count = 0
neutral_sentiments_count = 0
tesla_processed_content_df = pd.read_csv('tesla_processed_content.csv')
print(tesla_processed_content_df)
# Compute compound polarity scores
for line in tesla_processed_content_df.processed_content:
pol_score = sia.polarity_scores(line)
pol_score['text'] = line
results.append(pol_score)
# Read dataframe and setup compound score based on polarity scores
results_df = pd.DataFrame.from_records(results)
results_df.head()
results_df['score'] = 0
results_df.loc[results_df['compound'] > 0, 'score'] = 1
results_df.loc[results_df['compound'] < -0.2, 'score'] = -1
results_df.head()
df2 = results_df[['text', 'score', 'compound']]
print(df2)
df2.to_csv('tesla_sentiment_analysis.csv', mode='a', encoding='utf-8', index=False)
# Compute count of positive, negative and neutral sentiments
df_positive = results_df[results_df.score == 1]
positive_sentiments_count = positive_sentiments_count + df_positive.score.count()
df_neutral = results_df[results_df.score == 0]
neutral_sentiments_count = neutral_sentiments_count + df_neutral.score.count()
df_negative = results_df[results_df.score == -1]
negative_sentiments_count = negative_sentiments_count + df_negative.score.count()
print("Positive Sentiments Count: ", positive_sentiments_count)
print("Neutral Sentiments Count: ", neutral_sentiments_count)
print("Negative Sentiments Count: ", negative_sentiments_count)
input_content_count = positive_sentiments_count + negative_sentiments_count + neutral_sentiments_count
# Compute percentage of positive, negative and neutral sentiments
positive_sentiments_percentage = (positive_sentiments_count / input_content_count) * 100
negative_sentiments_percentage = (negative_sentiments_count / input_content_count) * 100
neutral_sentiments_percentage = (neutral_sentiments_count / input_content_count) * 100
print("Positive Sentiments Percentage: ", round(positive_sentiments_percentage, 2))
print("Neutral Sentiments Percentage: ", round(neutral_sentiments_percentage, 2))
print("Negative Sentiments Percentage: ", round(negative_sentiments_percentage, 2))
# Conclude Results
if positive_sentiments_percentage > negative_sentiments_percentage:
print(
'Positive sentiments percentage is more than Negative sentiments percentage based on the content analysed '
'on cnbc, so one should buy (i.e. invest) stocks of Tesla')
else:
print(
'Negative sentiments percentage is more than Positive sentiments percentage based on the content analysed '
'on cnbc, so one should sell (i.e. not invest) stocks of Tesla')
if __name__ == '__main__':
print('---Perform sentiment analysis---')
perform_sentimental_analysis()
```
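For context on the thresholds above: VADER's `polarity_scores` returns a dict with `neg`, `neu`, `pos`, and a normalized `compound` score in [-1, 1], and the script labels rows with `compound > 0` as positive and `compound < -0.2` as negative. A minimal sketch (the example sentence is illustrative, not taken from the scraped data):
```python
from nltk.sentiment.vader import SentimentIntensityAnalyzer

# assumes nltk.download('vader_lexicon') has already been run, as in the script above
sia = SentimentIntensityAnalyzer()
scores = sia.polarity_scores("Tesla deliveries beat expectations this quarter")
# scores is a dict like {'neg': ..., 'neu': ..., 'pos': ..., 'compound': ...};
# only the sign/magnitude of 'compound' is used by the labeling rule above.
label = 1 if scores["compound"] > 0 else (-1 if scores["compound"] < -0.2 else 0)
print(scores["compound"], label)
```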
|
{
"source": "JeewhanR/dailyparser",
"score": 3
}
|
#### File: dailyparser/ridibooks/booklist_parser.py
```python
import requests
from bs4 import BeautifulSoup
def get_book(booknumber):
html = requests.get('http://ridibooks.com/v2/Detail?id={}'.format(booknumber)) #input your books origin number
soup = BeautifulSoup(html.text, 'html.parser')
urls = soup.select(
'div.book_list_wrapper'
)
lists = []
for url in urls:
real_url = url['data-book-id'].strip()
lists.append(real_url)
return lists
if __name__ == '__main__':
    booknumber = input("Please enter the book's unique number: ")
f = open('book_list_{}.txt'.format(booknumber),'w+')
booklist = get_book(booknumber)
for i in booklist:
f.write(
'https://view.ridibooks.com/books/'+i+'\n'
)
f.close()
```
|
{
"source": "jeeyi/cds-demo",
"score": 3
}
|
#### File: jeeyi/cds-demo/utils.py
```python
from io import BytesIO
import zipfile
from flask import jsonify, Response
from werkzeug.utils import secure_filename
import cds as cds
def create_folder(data):
is_demo = True if data["is_demo"] == "true" else False
folder_name = data["folder_name"]
parent_folder_id = data["parent_folder_id"]
if is_demo or parent_folder_id.isdigit():
return jsonify({"name": folder_name})
cds_folder = cds.create_folder(folder_name, parent_folder_id)
return jsonify({"id": cds_folder["id"], "name": cds_folder["label"]})
def get_folder(folder_id):
if folder_id and folder_id.isdigit():
return get_demo(folder_id)
cds_folder = cds.get_folder(folder_id)
folder_name = cds_folder["label"]
ancestors = [
{"id": ancestor["uuid"], "name": ancestor["label"]}
for ancestor in cds_folder["ancestors"]
]
folders = [
{"id": folder["id"], "name": folder["label"]}
for folder in cds_folder["children"]
]
files = [
get_file(file["id"], file["label"]) for file in cds_folder["documents"]
]
folder_data = {
"ancestors": ancestors,
"files": files,
"folder_id": folder_id,
"folder_name": folder_name,
"folders": folders,
}
return folder_data
def upload_file(file, data):
is_demo = True if data["is_demo"] == "true" else False
parent_folder_id = data["parent_folder_id"]
file_name = secure_filename(file.filename)
if is_demo or parent_folder_id.isdigit():
return jsonify({"name": file_name, "icon": get_file_icon(file_name)})
cds_file = cds.upload_file(file, file_name, parent_folder_id)
file_name = cds_file["label"]
return jsonify({"name": file_name, "icon": get_file_icon(file_name)})
def download(data):
is_demo = True if data["is_demo"] == "true" else False
parent_folder_id = data["parent_folder_id"]
files = data["files"].split(",")
if is_demo or parent_folder_id.isdigit():
        zip_data = zip_demo_files([int(file_id) for file_id in files])
    else:
        zip_data = cds.download(files, parent_folder_id)
    return Response(zip_data, mimetype="application/octet-stream")
def get_file_icon(file_name):
ext = file_name.split(".")[-1]
ext_to_icon = {
"docx": "fa fa-file-word-o",
"jpg": "fa fa-file-image-o",
"mov": "fa fa-file-movie-o",
"pdf": "fa fa-file-pdf-o",
"pptx": "fa fa-file-powerpoint-o",
"txt": "fa fa-file-text-o",
"xlsx": "fa fa-file-excel-o",
"zip": "fa fa-file-zip-o",
}
return ext_to_icon.get(ext, "fa fa-file-o")
def get_file(id, file_name):
return {"id": id, "name": file_name, "icon": get_file_icon(file_name)}
def get_demo(folder_id="0"):
folder_name = "Demo Folder" if folder_id == "0" else f"Folder{folder_id}"
ancestors = [
{"id": "1", "name": "Weekly"},
{"id": "1", "name": "Meetings"},
]
folders = [
{"id": "1", "name": "Folder1"},
{"id": "2", "name": "Folder2"},
{"id": "3", "name": "Folder3"},
{"id": "4", "name": "Folder4"},
{"id": "5", "name": "Folder5"},
{"id": "6", "name": "Folder6"},
{"id": "7", "name": "Folder7"},
]
files = [get_file(i, get_demo_file(i)) for i in range(1, 9)]
demo_data = {
"ancestors": ancestors,
"files": files,
"folder_id": folder_id,
"folder_name": folder_name,
"folders": folders,
}
return demo_data
def get_demo_file(id):
demo_files = {
"1": "File1.txt",
"2": "File2.docx",
"3": "File3.pptx",
"4": "File4.pdf",
"5": "File5.jpg",
"6": "File6.xlsx",
"7": "File7.zip",
"8": "File8.mov",
}
return demo_files.get(str(id), f"File{id}.txt")
def zip_demo_files(demo_file_ids):
in_memory = BytesIO()
with zipfile.ZipFile(in_memory, "w", compression=zipfile.ZIP_STORED) as zf:
for id in demo_file_ids:
file_name = get_demo_file(id)
zf.write("demo/" + file_name)
in_memory.seek(0)
return in_memory
```
|
{
"source": "jeeyoungk/exercise",
"score": 4
}
|
#### File: exercise/python/circular-palindromes.py
```python
import heapq
def s(cell):
if cell: return 'O'
else: return '.'
def valid(i, j, wrap_start):
return (
(wrap_start <= i <= j) or
(i <= j < wrap_start) or
(wrap_start <= i and j < wrap_start)
)
assert valid(0, 3, 0)
assert not valid(2, 1, 0)
assert valid(1, 3, 1)
assert not valid(0, 3, 1)
assert valid(1, 2, 1)
assert valid(1, 0, 1)
assert not valid(1, 0, 10)
assert valid(20, 0, 10)
assert valid(20, 40, 10)
assert valid(20, 9, 10)
assert not valid(20, 10, 10)
assert not valid(20, 11, 10)
def find_palindromes(string):
N = len(string)
table = [[False for i in xrange(N)] for j in xrange(N)]
def wrap(i): return i % N
def wrap_len(i, j):
if j < i: return N + j - i + 1
else: return j - i + 1 # TODO
for i in xrange(N):
table[i][i] = True
for i in xrange(N):
j = wrap(i+1)
table[i][j] = string[i] == string[j]
for length in xrange(2, len(string)):
print length
for i in xrange(N):
i_inner = wrap(i + 1)
j_inner = wrap(i + length - 1)
j = wrap(i + length)
table[i][j] = string[i] == string[j] and table[i_inner][j_inner]
h = []
for i in xrange(N):
for j in xrange(i, N):
length = wrap_len(i, j)
# print i, j, table[i][j]
if table[i][j]:
heapq.heappush(h, (-length, (i, j)))
return
# print string
# print "\n".join(["".join([s(cell) for cell in row]) for row in table])
# initial heap.
# print h
tpl = h[0]
length, (i, j) = tpl
print -length
# 0 1 2 3 4
for wrap_start in xrange(1, N):
# print "wrap-start %d" % wrap_start
        # i = starting point.
j = wrap(wrap_start - 1)
for i in xrange(N):
if i == j:
continue
assert valid(i, j, wrap_start)
assert not valid(i, j, wrap_start - 1)
if table[i][j]:
# print 'inserting', (i, j), wrap_start
length = wrap_len(i ,j)
heapq.heappush(h, (-length, (i, j)))
else:
pass
# print '__serting', (i, j), wrap_start
while True:
length, (i, j) = h[0]
# print 'validity', i, j, wrap_start, valid(i, j, wrap_start)
if not valid(i, j, wrap_start):
if len(h) == 0:
assert False, "popped all the stack"
heapq.heappop(h)
else:
print -length
break
def main():
q = int(raw_input().strip())
s = raw_input().strip()
result = find_palindromes(s)
if __name__ == '__main__':
# main()
pass
```
#### File: exercise/python/mro.py
```python
class A(object):
def __init__(self, a, *args, **kwargs):
print "a=%s" % a
class B(object):
def __init__(self, b1, b2, *args, **kwargs):
print "b1=%s;b2=%s" % (b1, b2)
class AB(A, B):
def __init__(self, *args, **kwargs):
A.__init__(self, *args, **kwargs)
B.__init__(self, *args, **kwargs)
class C(AB):
def __init__(self, *args, **kwargs):
super(C, self).__init__(*args, **kwargs)
ab = AB('x', 'z')
c = C('y', 'z')
```
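The snippet above wires the diamond by calling `A.__init__` and `B.__init__` explicitly from `AB`, so `super(C, self).__init__` in `C` only reaches `AB`. A hedged sketch of the fully cooperative alternative (a general Python pattern, not code from this repository):
```python
class A2(object):
    def __init__(self, a, **kwargs):
        print("a=%s" % a)
        super(A2, self).__init__(**kwargs)

class B2(object):
    def __init__(self, b1, b2, **kwargs):
        print("b1=%s;b2=%s" % (b1, b2))
        super(B2, self).__init__(**kwargs)

class AB2(A2, B2):
    pass

# A single super() chain walks the MRO (AB2 -> A2 -> B2 -> object),
# so each parent initializer runs exactly once:
AB2(a="x", b1="y", b2="z")  # prints "a=x" then "b1=y;b2=z"
```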
#### File: python/mypy/test.py
```python
from typing import Iterator
def fib(n: int) -> Iterator[int]:
a, b = 0, 1
while a < n:
yield a
a, b = b, a + b
for i in fib(10):
print(i)
for i in fib('foo'):
print(i)
```
#### File: exercise/python/pi.py
```python
from __future__ import division
def iterate(x, y, iteration, N):
'''
Determines whether a square
size = (1 / 2 ^ iteration)
at coord (x, y)
is inside the unit circle or not.
returns 2-tuple of
(volume_inside, volume_outside)
where volume_insdie + volume_outside <= 1 / 4 ^ iteration
'''
length = 2 ** iteration
double_length = length * length
low_x = x
low_y = y
high_x = (x + 1)
high_y = (y + 1)
low_length = low_x ** 2 + low_y ** 2
high_length = high_x ** 2 + high_y ** 2
if low_length >= double_length: return (0, 1) # NOT part of the circle
elif high_length <= double_length: return (1, 0) # part of the circle
elif iteration == N: return (0, 0) # uncertain
# recursive call - subdivide the square to 4 chunks
# and collect their results.
ld1, ud1 = iterate(x * 2, y * 2, iteration + 1, N)
ld2, ud2 = iterate(x * 2, y * 2 + 1, iteration + 1, N)
ld3, ud3 = iterate(x * 2 + 1, y * 2, iteration + 1, N)
ld4, ud4 = iterate(x * 2 + 1, y * 2 + 1, iteration + 1, N)
return ((ld1 + ld2 + ld3 + ld4) / 4, (ud1 + ud2 + ud3 + ud4) / 4)
def around_border(x, y, N):
length = 2 ** N
double_length = length * length
low_x = x
low_y = y
high_x = (x + 1)
high_y = (y + 1)
low_length = low_x ** 2 + low_y ** 2
high_length = high_x ** 2 + high_y ** 2
if low_length > double_length: return False
elif high_length < double_length: return False
return True
def navigating(N):
'''
calculate the area of a quarter-circle via following its outlines.
'''
length = 2 ** N
x = 0
y = length - 1
inside = length - 1
outside = 0
border = 1
while not (x == 2 ** N - 1 and y == 0):
right = around_border(x + 1, y, N)
bottom = around_border(x, y - 1, N)
        assert not (right and bottom), "(%d, %d) Navigating both" % (x, y)
        assert right or bottom, "(%d, %d) Cannot navigate" % (x, y)
if right:
# move to the right pixel
inside += y
outside += (length - y - 1)
x += 1
elif bottom:
# move to the bottom pixel
# subtract 1 pixel from the circle.
# no need to add 1 to outside, because this area is taken by the border.
inside -= 1
y -= 1
return (
(inside / length / length),
(outside / length / length)
)
def calculate(N, algorithm):
lower, upper = algorithm(N)
pi_lower = lower * 4
pi_upper = (1 - upper) * 4
delta = ((1 - upper) - lower) * 4
print("%2d: %f < pi < %f (delta = %.10f)" % (N, pi_lower, pi_upper, delta))
for i in range(25, 30):
print("")
# calculate(i, lambda N: iterate(0, 0, 0, N))
calculate(i, navigating)
# sample out:
'''
0: 0.000000 < pi < 4.000000 (delta = 4.000000)
1: 1.000000 < pi < 4.000000 (delta = 3.000000)
2: 2.000000 < pi < 3.750000 (delta = 1.750000)
3: 2.562500 < pi < 3.500000 (delta = 0.937500)
4: 2.859375 < pi < 3.343750 (delta = 0.484375)
5: 3.007812 < pi < 3.253906 (delta = 0.246094)
6: 3.075195 < pi < 3.199219 (delta = 0.124023)
7: 3.107910 < pi < 3.170166 (delta = 0.062256)
8: 3.125549 < pi < 3.156738 (delta = 0.031189)
9: 3.133484 < pi < 3.149094 (delta = 0.015610)
10: 3.137589 < pi < 3.145397 (delta = 0.007809)
11: 3.139624 < pi < 3.143529 (delta = 0.003905)
12: 3.140601 < pi < 3.142554 (delta = 0.001953)
13: 3.141100 < pi < 3.142076 (delta = 0.000977)
14: 3.141347 < pi < 3.141835 (delta = 0.000488)
15: 3.141470 < pi < 3.141714 (delta = 0.000244)
16: 3.141531 < pi < 3.141653 (delta = 0.000122)
17: 3.141562 < pi < 3.141623 (delta = 0.000061)
18: 3.141577 < pi < 3.141608 (delta = 0.000031)
19: 3.141585 < pi < 3.141600 (delta = 0.000015)
'''
```
#### File: exercise/python/queen.py
```python
import itertools
import random
class Board(object):
def __init__(self, rows = 8, cols = 8):
self.placed = set()
self.rows = rows
self.cols = cols
def place(self, row, col):
"place a chess piece."
if not self.can_place(row, col): raise RuntimeError("Cannot place")
self.placed.add((row, col))
def unplace(self, row, col):
"un-place a chess piece."
if (row, col) not in self.placed: raise RuntimeError("Piece does not exist")
self.placed.remove((row, col))
def can_place(self, row, col):
if not (0 <= row < self.rows and 0 <= col < self.cols):
return False # outside the range
for placed_row, placed_col in self.placed:
if placed_col == col: return False
if placed_row == row: return False
if abs(placed_row - row) == abs(placed_col - col): return False
return True
def __str__(self):
output = ""
for row in xrange(self.rows):
for col in xrange(self.cols):
if (row, col) in self.placed:
output += "X"
else:
output += "."
output += "\n"
return output
def seq(board):
# sequential navigation strategy
return itertools.product(range(board.rows), range(board.cols))
def rand(board):
# random navigation strategy
x = list(itertools.product(range(board.rows), range(board.cols)))
random.shuffle(x)
return x
def rand_partial(num):
def generator(board):
# random navigation strategy
x = []
for row, col in itertools.product(range(board.rows), range(board.cols)):
if board.can_place(row, col):
x.append((row, col))
random.shuffle(x)
return x[:num]
return generator
def attempt(board, goal, strategy = seq, stat = None):
"""
attempt placing a queen to the board.
board - a chess board being worked on.
goal - # of queens we want to place on this board.
strategy - queen placing strategy.
stat - statistics related to the current run.
"""
if stat is not None:
stat['count'] += 1
for row, col in strategy(board):
if board.can_place(row, col):
board.place(row, col)
if len(board.placed) >= goal:
return board
found = attempt(board, goal, strategy, stat)
if found: return found
board.unplace(row, col)
return None
def main():
def test_run(board, goal, strategy):
stat = {'count': 0}
found = attempt(board, goal, strategy, stat)
print(stat['count'])
print(board)
def load_test(title, size, strategy, runs):
print('> %s' % title)
count = 0
found = 0
for run in range(runs):
stat = {'count': 0}
success = attempt(Board(size, size), size, strategy, stat)
count += stat['count']
if success:
found += 1
print(' count: %d' % (count / runs))
print(' failures: %d' % (runs - found))
# test_run(Board(8, 8), 8, seq)
# test_run(Board(8, 8), 8, rand)
# test_run(Board(10, 10), 10, rand)
# print 'rand'
# load_test(8, rand, 10)
print(">> Board size 8")
# load_test('rand', 8, rand, 10)
load_test('rand_partial(1)', 8, rand_partial(5), 10)
load_test('rand_partial(5)', 8, rand_partial(5), 10)
load_test('rand_partial(20)', 8, rand_partial(20), 10)
print(">> Board size 10")
    load_test('rand_partial(3)', 10, rand_partial(3), 10)
main()
```
#### File: exercise/thoughts/ricochet.py
```python
from enum import Enum
import heapq
import matplotlib.pyplot as plt
DIMENSION = 16 # size of the board
DIRX = [0, 0, -1, 1] # directional vectors
DIRY = [1, -1, 0, 0] # color vectors
COLORS = ['red','blue','green','purple']
MAX_DEPTH = 30
class Direction(Enum):
UP, DOWN, LEFT, RIGHT = range(4)
def reverse(self):
if self == Direction.UP:
return Direction.DOWN
elif self == Direction.DOWN:
return Direction.UP
elif self == Direction.LEFT:
return Direction.RIGHT
elif self == Direction.RIGHT:
return Direction.LEFT
return None
class Color(Enum):
RED, BLUE, GREEN, PURPLE = range(4)
class Board(object):
def __init__(self):
# note: bottom left of grid is 0, 0
self.walls = set() # list of walls - normalized to (x, y, (DOWN|LEFT))
def add_wall(self, x, y, direction):
"""Add a wall to the current position"""
self.walls.add(normalize_wall(x, y, direction))
def has_wall(self, x, y, direction):
"""Determine whether there's a wall in the given position."""
return normalize_wall(x, y, direction) in self.walls
class Stat(object):
def __init__(self):
self.iteration = 0
self.distance = -1
def __repr__(self):
return repr(self.__dict__)
def normalize_wall(x, y, direction):
'''walls are normalized to "down" or "left".'''
if direction == Direction.UP:
direction = Direction.DOWN
y += 1
elif direction == Direction.RIGHT:
direction = Direction.LEFT
x += 1
return (x, y, direction)
def compute_delta(robots1, robots2):
'''
computes delta between two positioning of robots. Assume that exactly one robot is moved.
return (color, (x, y), (x, y))
note: this logic is used to construct robot paths.
'''
for idx in range(len(COLORS)):
if robots1[idx] != robots2[idx]:
return (idx, robots1[idx], robots2[idx])
assert False, "same positions given"
def next_moves_single(board, robot_index, robots):
"""Generate list of next moves by moving a single robot given by the index."""
def generate(index, replaced_robot):
return tuple((replaced_robot if i == index else r) for (i, r) in enumerate(robots))
robot = robots[robot_index]
for direction in Direction:
moved = False
(x, y) = robot
while True:
newx = x + DIRX[direction.value]
newy = y + DIRY[direction.value]
# stops when a wall or another robot is encountered.
if board.has_wall(x, y, direction) or (newx, newy) in robots:
if moved: yield generate(robot_index, (x, y))
break
moved = True
x = newx
y = newy
def next_moves_all(board, robots):
"""Generate list of next moves by moving a single robot."""
for index in range(len(robots)):
for move in next_moves_single(board, index, robots):
assert move is not None
yield move
def prev_position(board, obstacles, start, magic_stop=False):
for direction in Direction:
(x, y) = start
reverse = direction.reverse()
prevx = x + DIRX[reverse.value]
prevy = y + DIRY[reverse.value]
if not magic_stop and not (board.has_wall(x, y, reverse) or (prevx, prevy) in obstacles):
continue # Cannot reach here.
moved = False
while True:
newx = x + DIRX[direction.value]
newy = y + DIRY[direction.value]
if board.has_wall(x, y, direction) or (newx, newy) in obstacles:
break
yield (newx, newy)
x = newx
y = newy
def astar(
start,
neighbour,
finish_condition,
heuristic=None,
stat=None):
"""
Perform an A* search.
finish_condition = (position) -> bool
neighbour - neibhbourhood generation function
heuristic = A* heuristic function. (new position, old position) -> distance
"""
queue = [] # contains (distance+heuristic, distance, position)
heapq.heappush(queue, (0, 0, start, None))
history = {start: (0, None)} # position -> (distance, previous)
visited = set()
if not stat: stat = Stat()
if not heuristic: heuristic = lambda new, old: 0
while queue:
stat.iteration += 1
_, distance, position, prev_position = heapq.heappop(queue)
if distance > MAX_DEPTH: return
if finish_condition(position):
# found a solution!
positions = [position, prev_position]
cur_position = prev_position
while cur_position in history:
cur_position = history[cur_position][1]
if cur_position is not None:
positions.append(cur_position)
stat.distance = distance
return positions
if position in visited: continue
visited.add(position)
new_distance = distance + 1
for new_position in neighbour(position):
if new_position in history and new_distance > history[new_position][0]: continue
history[new_position] = (new_distance, position)
heapq.heappush(queue, (new_distance + heuristic(position, new_position), new_distance, new_position, position))
def compute_all(start, neighbour):
"""
Compute shortest distance from "start" to all reachable node.
Note: This function should only be executed with relatively small graph.
"""
queue = []
# contains (distance, position, old_position)
heapq.heappush(queue, (0, start))
history = {start: (0, None)} # position -> (distance, previous)
visited = set()
while queue:
distance, position = heapq.heappop(queue)
if position in visited: continue
visited.add(position)
new_distance = distance + 1
for new_position in neighbour(position):
if new_position in history and new_distance > history[new_position][0]: continue
history[new_position] = (new_distance, position)
heapq.heappush(queue, (new_distance, new_position))
return history
def print_board(board,
robots=None,
paths=None,
additionals=None,
labels=None,
markers=None):
'''
Print the given board position.
robots - 4-tuple of pair (x, y), representing red, blue, green, and yellow robots.
paths - list of (color, (x, y), (x, y)) paths to draw.
additionals - list of (color, (x, y)) points to draw.
labels - list of labels to render.
'''
plt.figure(figsize=(5, 5))
axis = plt.gca()
MARGIN = 0.1
PADDING = 0.5
def plot_robot(index, coord, size):
(x, y) = coord
circle = plt.Circle((x + 0.5, y + 0.5), size, fc=COLORS[i])
axis.add_patch(circle)
def render_wall(wall):
(x1, y1, direction) = wall
if direction == Direction.DOWN:
x2 = x1 + 1
y2 = y1
else:
x2 = x1
y2 = y1 + 1
line = plt.Line2D((x1, x2), (y1, y2), lw=2.5, color='black')
axis.add_line(line)
def render_path(path):
(i, pos1, pos2) = path
line = plt.Line2D(
(pos1[0] + 0.5, pos2[0] + 0.5),
(pos1[1] + 0.5, pos2[1] + 0.5),
color=COLORS[i],
marker='x')
axis.add_line(line)
def render_marker(marker):
(color, coord) = marker
(x, y) = coord
rectangle = plt.Rectangle((x + MARGIN, y + MARGIN),
1 - MARGIN * 2,
1 - MARGIN * 2,
fc=COLORS[color])
axis.add_patch(rectangle)
for wall in board.walls: render_wall(wall)
for path in (paths or []): render_path(path)
for marker in (markers or []): render_marker(marker)
for additional in (additionals or []):
(i, robot) = additional
plot_robot(i, robot, 0.1)
if robots is not None:
for i in range(len(COLORS)):
plot_robot(i, robots[i], 0.4)
if labels is not None:
for row_idx, row in enumerate(labels):
for col_idx, cell in enumerate(row):
axis.text(col_idx + 0.5,
row_idx + 0.5,
cell,
verticalalignment='center',
horizontalalignment='center')
plt.xlim(0 - PADDING, DIMENSION + PADDING)
plt.ylim(0 - PADDING, DIMENSION + PADDING)
plt.show()
```
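The `astar` helper above is board-agnostic: it only needs a start state, a `neighbour` generator, a `finish_condition`, and an optional `heuristic`. A minimal hedged usage sketch on a toy 5x5 grid, independent of the Ricochet Robots board (the call is left commented because it depends on importing this module):
```python
# Toy problem: walk from (0, 0) to (4, 4) on a 5x5 grid, one step at a time.
def grid_neighbours(pos):
    x, y = pos
    for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        nx, ny = x + dx, y + dy
        if 0 <= nx < 5 and 0 <= ny < 5:
            yield (nx, ny)

def reached_goal(pos):
    return pos == (4, 4)

def manhattan_to_goal(old_pos, new_pos):
    # astar passes (current, candidate); only the candidate matters here
    return abs(new_pos[0] - 4) + abs(new_pos[1] - 4)

# path = astar((0, 0), grid_neighbours, reached_goal, heuristic=manhattan_to_goal)
# `path` comes back goal-first, following the stored back-pointers.
```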
|
{
"source": "jeeysie/site",
"score": 2
}
|
#### File: site/tasks/ops.py
```python
import logging
from django.db.models.query import F
from blog.models import Blog
from tasks import app
logger = logging.getLogger(__name__)
@app.task(name="ops.update_art_like")
def update_art_like(pk):
"""
更新文章点赞量
"""
Blog.objects.filter(pk=pk).update(like=F("like") + 1)
logger.info("更新文章点赞量成功: {}".format(pk))
```
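Because the increment uses an `F()` expression, the update is pushed down to the database as a single atomic statement instead of a read-modify-write in Python. A hedged sketch of how the task might be enqueued elsewhere in the project (the import path and pk value are illustrative assumptions based on this file living at tasks/ops.py):
```python
from tasks.ops import update_art_like  # assumed import path

update_art_like.delay(42)               # fire-and-forget via the Celery broker
update_art_like.apply_async(args=[42])  # equivalent call with more options
```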
|
{
"source": "jeezes/Straintracer",
"score": 2
}
|
#### File: jeezes/Straintracer/GetAlleles.py
```python
import os, sys
from shutil import copyfile
class GetAlleles:
def __init__(self, option, stFile, alleles):
workingdir = os.getcwd() + '/'
galaxydir = workingdir.split('/galaxy')[0]
print workingdir
print galaxydir
copyfile(galaxydir + '/galaxy/tools/straintracer/GetMlst.class', workingdir + "/GetMlst.class")
copyfile(galaxydir + '/galaxy/tools/straintracer/PsqlWriter.class', workingdir + "/PsqlWriter.class")
#copyfile(galaxydir + '/galaxy/tools/straintracer/postgresql-9.4.1208.jre6.jar', workingdir + "/postgresql-9.4.1208.jre6.jar") #Funker ikke
#postgresql-9.4.1208.jre6.jar
#os.system('cp -r %s/tools/straintracer/*.class %s/' % (galaxydir, workingdir))
os.system('java -cp /home/jonas/galaxy/tools/straintracer/postgresql-9.4.1208.jre6.jar:. GetMlst %s %s %s' % (option, stFile, alleles))
GetAlleles(sys.argv[1], sys.argv[2], sys.argv[3])
```
|
{
"source": "JEF1056/MetaLearning-Neural-Style",
"score": 2
}
|
#### File: MetaLearning-Neural-Style/python/demo.py
```python
import _pycaffe as caffe
import cv2
from cv2 import *
import pdb
import numpy as np
import time
from argparse import ArgumentParser
import os
from tqdm import tqdm
import shutil
import subprocess
MODEL_LOC = "/content/drive/My Drive/IMP_STYLE/train_8.caffemodel"
MODEL_PROTO = "prototxt/8/"
CONTENT_LOC = "/content/drive/My Drive/IMP_STYLE/image2.jpg"
STYLE_LOC = "/content/drive/My Drive/IMP_STYLE/1_zToNBcKp1777KbT4WtvnkA.jpeg"
OUT_LOC = "/content/drive/My Drive/IMP_STYLE/out.jpg"
def pycaffe_hidden(im_label):
prototxt_file = MODEL_PROTO+'hidden.prototxt'
weights_file = MODEL_LOC
if caffe.is_initialized() < 1:
caffe.init(prototxt_file, weights_file)
caffe.set_device(0)
im_label = im_label.astype('float32')
im_label[:,:,0] = im_label[:,:,0] - 104.008
im_label[:,:,1] = im_label[:,:,1] - 116.669
im_label[:,:,2] = im_label[:,:,2] - 122.675
im_label_ = np.expand_dims(im_label,3)
im_label = np.transpose(im_label_,(3,2,0,1)).copy()
input_data = [im_label]
score = caffe.forward(0,input_data)
hidden_feat = score[0].squeeze()
return hidden_feat
def pycaffe_param(hidden_feat):
prototxt_file = MODEL_PROTO+'param.prototxt'
weights_file = MODEL_LOC
if caffe.is_initialized() < 2:
caffe.init(prototxt_file, weights_file)
caffe.set_device(0)
hidden_feat = hidden_feat.reshape((1,hidden_feat.size,1,1))
input_data = [hidden_feat]
param = caffe.forward(1, input_data)
caffe.save_model(param,'layer_name.txt','base.caffemodel','predict.caffemodel')
def pycaffe_predict(im):
prototxt_file = MODEL_PROTO+'predict.prototxt'
weights_file = 'predict.caffemodel'
if caffe.is_initialized() < 3:
caffe.init(prototxt_file, weights_file)
caffe.set_device(0)
im = im.astype('float32')
im[:,:,0] = im[:,:,0] - 104.008
im[:,:,1] = im[:,:,1] - 116.669
im[:,:,2] = im[:,:,2] - 122.675
im = np.expand_dims(im,3)
im = np.transpose(im,(3,2,0,1)).copy()
input_data = [im]
#t1=time.time()
score = caffe.forward(2, input_data)
#t2=time.time()
#print(t2-t1)
raw_score = score[0]
raw_score = raw_score[0,:,:,:]
raw_score = np.transpose(raw_score,(1,2,0)).copy()
raw_score[:,:,0] = raw_score[:,:,0] + 104.008
raw_score[:,:,1] = raw_score[:,:,1] + 116.669
raw_score[:,:,2] = raw_score[:,:,2] + 122.675
raw_score = np.clip(raw_score,0,255)
return raw_score.astype('uint8')
def build_parser():
parser = ArgumentParser()
parser.add_argument('--model', type=str,
dest='model', help='dir to find the model',
metavar='MODEL_LOC', required=True)
parser.add_argument('--prototxt', type=str,
dest='prototxt', help='dir to find the model',
metavar='MODEL_PROTO', required=True)
parser.add_argument('--content', type=str,
dest='content', help='dir to find content image/video',
metavar='CONTENT_LOC')
parser.add_argument('--style', type=str,
dest='style', help='dir to find style image',
metavar='STYLE_LOC')
parser.add_argument('--out', type=str,
dest='out', help='dir to save output',
metavar='OUT_LOC')
parser.add_argument('--oc', dest='oc', help='original colors',
action='store_true')
parser.add_argument('--cct', type=str,
dest='cct', help='Convert color type, of options yuv, lab, luv, and ycrcb',
default="yuv")
parser.add_argument('--cr', type=float,
dest='cr', help='content ratio',
default=1)
parser.add_argument('--sr', type=float,
dest='sr', help='style ratio',
default=1)
parser.add_argument('--video', dest='video', help='uwu for those video fans',
action='store_true')
parser.add_argument('--realtime', dest='realtime', help='UWU IS THAT REALTIME?!?!',
action='store_true')
parser.add_argument('--camera', type=int, dest='camera', help='OMG A CAMERA OWO')
parser.set_defaults(gpu=False, video=False, oc=False, realtime=False, camera=0)
return parser
if __name__ == '__main__':
parser = build_parser()
options = parser.parse_args()
MODEL_LOC=options.model
MODEL_PROTO=options.prototxt
caffe.base_model(options.model, 'base.txt')
style_im = imread(options.style)
style_im = cv2.resize(style_im, (0,0), fx=options.sr, fy=options.sr)
print("Style size: " + str(style_im.shape))
hidden_feat = pycaffe_hidden(style_im)
pycaffe_param(hidden_feat)
    if options.video and options.realtime:
        print("Cannot have both video and realtime active at the same time")
        raise SystemExit(1)
if options.video:
try:
shutil.rmtree("recon")
except:
pass
os.mkdir("recon")
vidcap = cv2.VideoCapture(options.content)
(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
if int(major_ver) < 3 :
fps = vidcap.get(cv2.cv.CV_CAP_PROP_FPS)
else :
fps = vidcap.get(cv2.CAP_PROP_FPS)
success,image = vidcap.read()
video_length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
print("Found " + str(video_length) + " frames")
height, width, layers = image.shape
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video = cv2.VideoWriter("recon/NOAUD.avi", fourcc, fps, (width,height))
i = 0
pbar = tqdm(total=video_length)
while(vidcap.isOpened()) and success == True and i < video_length:
if success==True:
origin_im = cv2.resize(image, (0,0), fx=options.cr, fy=options.cr)
if i == 0:
print("Content size: " + str(origin_im.shape))
scoremap = pycaffe_predict(origin_im)
if options.oc:
if options.cct == 'yuv':
cvt_type = cv2.COLOR_BGR2YUV
inv_cvt_type = cv2.COLOR_YUV2BGR
elif options.cct == 'ycrcb':
cvt_type = cv2.COLOR_BGR2YCR_CB
inv_cvt_type = cv2.COLOR_YCR_CB2BGR
elif options.cct == 'luv':
cvt_type = cv2.COLOR_BGR2LUV
inv_cvt_type = cv2.COLOR_LUV2BGR
elif options.cct == 'lab':
cvt_type = cv2.COLOR_BGR2LAB
inv_cvt_type = cv2.COLOR_LAB2BGR
content_cvt = cv2.cvtColor(origin_im, cvt_type)
stylized_cvt = cv2.cvtColor(scoremap, cvt_type)
c1, _, _ = cv2.split(stylized_cvt)
_, c2, c3 = cv2.split(content_cvt)
merged = cv2.merge((c1, c2, c3))
scoremap = cv2.cvtColor(merged, inv_cvt_type).astype(np.float32)
video.write(np.uint8(scoremap))
pbar.update(1)
success,image = vidcap.read()
i += 1
vidcap.release()
pbar.close()
cv2.destroyAllWindows()
video.release()
#Extract audio
subprocess.call(['ffmpeg', '-i', options.content, '-f', 'mp3', '-ab', '192000', '-vn', 'recon/v_aud.mp3'])
subprocess.call(['ffmpeg', '-i', "recon/NOAUD.avi", '-i', 'recon/v_aud.mp3', '-vcodec', 'x265', '-crf', '24', '-map', '0:0', '-map', '1:0', '-c:v', 'copy', '-c:a', 'copy', options.out])
if options.realtime:
vidcap = cv2.VideoCapture(options.camera)
(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
fpscount=0
t1 = time.time()
while(True):
success,image = vidcap.read()
origin_im = cv2.resize(image, (0,0), fx=options.cr, fy=options.cr)
if fpscount == 0:
print("Content size: " + str(origin_im.shape))
scoremap = pycaffe_predict(origin_im)
if options.oc:
if options.cct == 'yuv':
cvt_type = cv2.COLOR_BGR2YUV
inv_cvt_type = cv2.COLOR_YUV2BGR
elif options.cct == 'ycrcb':
cvt_type = cv2.COLOR_BGR2YCR_CB
inv_cvt_type = cv2.COLOR_YCR_CB2BGR
elif options.cct == 'luv':
cvt_type = cv2.COLOR_BGR2LUV
inv_cvt_type = cv2.COLOR_LUV2BGR
elif options.cct == 'lab':
cvt_type = cv2.COLOR_BGR2LAB
inv_cvt_type = cv2.COLOR_LAB2BGR
content_cvt = cv2.cvtColor(origin_im, cvt_type)
stylized_cvt = cv2.cvtColor(scoremap, cvt_type)
c1, _, _ = cv2.split(stylized_cvt)
_, c2, c3 = cv2.split(content_cvt)
merged = cv2.merge((c1, c2, c3))
scoremap = cv2.cvtColor(merged, inv_cvt_type).astype(np.float32)
t2 = time.time()
fpscount=fpscount+1
fps = (fpscount/(t2-t1))
font = cv2.FONT_HERSHEY_SIMPLEX
withfps = cv2.putText(cv2.resize(np.uint8(scoremap), (0,0), fx=1.5, fy=1.5),str(round(fps,2))+" fps",(10,40), font, 1,(255,255,255),2,cv2.LINE_AA)
cv2.imshow('frame', withfps)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
vidcap.release()
cv2.destroyAllWindows()
else:
origin_im = cv2.resize(cv2.imread(options.content), (0,0), fx=options.cr, fy=options.cr)
print("Content size: " + str(origin_im.shape))
t1=time.time()
scoremap = pycaffe_predict(origin_im)
if options.oc:
if options.cct == 'yuv':
cvt_type = cv2.COLOR_BGR2YUV
inv_cvt_type = cv2.COLOR_YUV2BGR
elif options.cct == 'ycrcb':
cvt_type = cv2.COLOR_BGR2YCR_CB
inv_cvt_type = cv2.COLOR_YCR_CB2BGR
elif options.cct == 'luv':
cvt_type = cv2.COLOR_BGR2LUV
inv_cvt_type = cv2.COLOR_LUV2BGR
elif options.cct == 'lab':
cvt_type = cv2.COLOR_BGR2LAB
inv_cvt_type = cv2.COLOR_LAB2BGR
content_cvt = cv2.cvtColor(origin_im, cvt_type)
stylized_cvt = cv2.cvtColor(scoremap, cvt_type)
c1, _, _ = cv2.split(stylized_cvt)
_, c2, c3 = cv2.split(content_cvt)
merged = cv2.merge((c1, c2, c3))
dst = cv2.cvtColor(merged, inv_cvt_type).astype(np.float32)
cv2.imwrite(options.out, dst)
else:
imwrite(options.out,scoremap)
print("Took " + str(round((time.time()-t1),2)) + " seconds")
print("DONE")
```
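Based on the flags registered in `build_parser`, a hypothetical single-image invocation might be assembled as below; every path and value is a placeholder, not something taken from the repository:
```python
import subprocess

# Hypothetical command line built from the argparse options defined above.
subprocess.call([
    "python", "demo.py",
    "--model", "train_8.caffemodel",      # placeholder weights file
    "--prototxt", "prototxt/8/",          # placeholder prototxt directory
    "--content", "content.jpg",
    "--style", "style.jpg",
    "--out", "out.jpg",
    "--cr", "1.0",
    "--sr", "1.0",
    "--oc", "--cct", "yuv",               # keep original colors via YUV transfer
])
```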
|
{
"source": "jef5ez/jefNets",
"score": 2
}
|
#### File: jefNets/sentences_CNN/dcnn.py
```python
__author__ = 'jef5ez'
from collections import OrderedDict
import numpy as np
import numpy.random as npr
import sklearn.metrics as sklm
import sys
import theano
import theano.sandbox.neighbours as TSN
from theano.ifelse import ifelse
from theano import tensor as T
from theano.printing import Print as tPrint
from theano.tensor.shared_randomstreams import RandomStreams
import time
try:
import cPickle as pickle
except:
import pickle
def create_shared_matrix(row_dim, col_dim, name):
shape = (row_dim, col_dim)
return theano.shared(0.2 * np.random.uniform(-1.0, 1.0, shape).astype(theano.config.floatX),
name=name)
def create_shared_3d(i_dim, row_dim, col_dim, name, broadcast=(False, False, False)):
shape = (i_dim, row_dim, col_dim)
return theano.shared(0.2 * np.random.uniform(-1.0, 1.0, shape).astype(theano.config.floatX),
name=name,
broadcastable=broadcast)
def create_shared_4d(cur_feats, prev_feats, row_dim, col_dim, name):
shape = (cur_feats, prev_feats, row_dim, col_dim)
return theano.shared(0.2 * np.random.uniform(-1.0, 1.0, shape).astype(theano.config.floatX),
name=name)
def one_of_n(idx, size):
zeros = np.zeros([size])
zeros[idx] = 1
return zeros
def k_of_n(idxs, size):
zeros = np.zeros([size])
zeros[idxs] = 1
return zeros
def pad_sentence_batch(sents):
"""
:param sents: lists of lists of idxs
:return: matrix padded to largest sentence length with neg ones
"""
def pad_sentence(sentence, max_size):
return np.pad(sentence, (0, max_size - len(sentence)), 'constant',
constant_values=-1)
max_sent_len = max(map(len, sents))
return [pad_sentence(x, max_sent_len) for x in sents]
class DCNN(object):
def __init__(self, word_vecs, folding_layers, filters_per_layer, filter_widths, train_set,
valid_set, num_classes=None, word_dim=None, hidden_dim=None, k_top=3, decay=0.95,
epsilon=1e-6, dropout=1, rand_state=npr.RandomState(5), theano_rand=RandomStreams(seed=5),
activate=T.tanh, last_activate=T.nnet.softmax):
"""
:param word_vecs: rows of word vectors
:param word_dim: width of word vectors
:param hidden_dim: width of embedded space
:param train_set: list of lists of ints (i.e. words)
:param valid_set: list of lists of ints (i.e. words)
:param num_classes: number of softmax classes
:param folding_layers: list of 0s/1s for which layers are folded
:param filters_per_layer: list of ints, how many filters to learn at each layer
:param filter_widths: list of ints, width of each of the filters at each layer
(all filters at the same layer have the same width)
:param k_top: the k-max pooling size for the topmost layer
:param decay: ada delta decay rate
:param epsilon: ada delta epsilon
:return:
"""
# sentence (d x s) matrix, ie s word vectors with d dimensions
# weight_mat = (d x m), word_dim by m, the width of the convolution
# c = (d x s+m-1), the wide/full convolution matrix of sentence and weights
# after convolution can optionally fold word dimension
# k-max pooling given a sequence of length greater than k take the k highest
# values from the sequence without reordering the sequence
# This is done at the top most convolution layer
# dynamic k max pooling is using k that is a function of the sequence length
# paper recommends k = max(k_top, ceil((L-l)/L * len(s)))
# where L is the # of conv layers and l is the current layer
# after the pooling is done bias is added and then it is activated
self.rand_state = rand_state
self.theano_rand = theano_rand
self.word_vecs = theano.shared(word_vecs.astype(theano.config.floatX),
name="wordVectors")
self.train_set = train_set
self.valid_set = valid_set
if num_classes:
self.num_classes = num_classes
else:
self.num_classes = len(train_set[0][1])
if word_dim:
self.word_dim = word_dim
else:
self.word_dim = word_vecs.shape[-1]
self.k_top = k_top
self.num_layers = 3
dim_divisor = pow(2, sum(folding_layers))
if self.word_dim % dim_divisor == 0:
self.folding_layers = folding_layers
else:
raise Exception("Word dimension must be divisible by 2^(number of folded layers)")
self.filter_heights = [self.word_dim]
for i in self.folding_layers:
if i:
self.filter_heights += [self.filter_heights[-1]/2]
else:
self.filter_heights += [self.filter_heights[-1]]
self.filters_per_layer = filters_per_layer
self.filter_widths = filter_widths
if hidden_dim:
self.hidden_dim = hidden_dim
else:
self.hidden_dim = self.word_dim
self.theano_activate = activate
self.unravelled_size = self.filter_heights[-1] * self.k_top * self.filters_per_layer[-1]
self.filters = \
[create_shared_4d(self.filters_per_layer[0], 1, self.word_dim, self.filter_widths[0],
"layer0")] + \
[create_shared_4d(self.filters_per_layer[i], self.filters_per_layer[i - 1],
self.filter_heights[i], self.filter_widths[i], "layer"+str(i)) for i in
xrange(1, self.num_layers)]
# bias for each layer should be shape(num_filters, num_word_dim, 1)
self.biases = [create_shared_3d(self.filters_per_layer[i], self.filter_heights[i + 1], 1,
"layerbias" + str(i), (False, False, True)) for i in
xrange(self.num_layers)]
self.hidden_layer = create_shared_matrix(self.unravelled_size, self.hidden_dim, 'hiddenWeights')
self.theta = create_shared_matrix(self.hidden_dim, self.num_classes, 'classifyWeights')
self.b = theano.shared(np.zeros((1, self.num_classes), dtype=theano.config.floatX),
broadcastable=(True, False),
name='softmaxBias')
self.decay = decay
self.epsilon = epsilon
self.dropout = dropout
def wide_convolve(inpt, filters):
conv = T.nnet.conv2d(inpt, filters, border_mode='full')
take = conv[:, :, :inpt.shape[2], :]
t_shape = take.shape
# final_shape = (t_shape[0], t_shape[1], inpt.shape[2], t_shape[3])
extra_row = T.zeros((t_shape[0], t_shape[1], t_shape[2] + 1, t_shape[3]))
padded = T.set_subtensor(extra_row[:, :, :-1, :], take)
offset = T.set_subtensor(extra_row[:, :, 1:, :], take)
diff = padded - offset
final = diff[:, :, :-1, :]
return final
def fold(conv):
c_shape = conv.shape
pool_size = (1, conv.shape[-1])
neighbors_to_pool = TSN.images2neibs(ten4=conv,
neib_shape=pool_size,
mode='ignore_borders')
n_shape = neighbors_to_pool.shape
paired = T.reshape(neighbors_to_pool, (n_shape[0] / 2, 2, n_shape[-1]))
summed = T.sum(paired, axis=1)
folded_out = T.reshape(summed, (c_shape[0], c_shape[1], c_shape[2] / 2, c_shape[3]),
ndim=4)
return folded_out
def calculate_k(s_length, cur_layer):
            proportion = float(self.num_layers - cur_layer) / self.num_layers
return T.maximum(self.k_top, T.ceil(proportion * s_length))
def k_max_pool(conv, k):
c_shape = conv.shape
# c_shape = tPrint('conv_shape')(c_shape)
pool_size = (1, conv.shape[-1])
neighbors_to_pool = TSN.images2neibs(ten4=conv,
neib_shape=pool_size,
mode='ignore_borders')
arg_sorted = T.argsort(neighbors_to_pool, axis=1)
top_k = arg_sorted[:, -k:]
top_k_sorted = T.sort(top_k, axis=1)
ii = T.repeat(T.arange(neighbors_to_pool.shape[0], dtype='int32'), k)
jj = top_k_sorted.flatten()
values = neighbors_to_pool[ii, jj]
pooled_out = T.reshape(values, (c_shape[0], c_shape[1], c_shape[2], k), ndim=4)
return pooled_out
def bias_and_activate(pooled, bias):
"""
:param pooled: 4d tensor, shape(num_sent, num_filts, num_word_dim, num_words)
:param bias: 3d tensor, a bias vector for each filter with shape
(num_filters, num_word_dim, 1) last dimension broadcastable
:return: active(pooled + bias)
"""
return self.theano_activate(pooled + bias)
def create_convolution_layer(inpt, filters, fold_bool, k, bias):
conv = wide_convolve(inpt, filters)
folded = ifelse(fold_bool, fold(conv), conv)
pooled = k_max_pool(folded, k)
return bias_and_activate(pooled, bias)
def lookup_sentence(sentence):
row_vecs = self.word_vecs[sentence]
return T.transpose(row_vecs)
def lookup_all(sentences):
results, ups = theano.scan(lookup_sentence, sequences=[sentences])
shape = results.shape
return T.reshape(results, (shape[0], 1, shape[1], shape[2]), ndim=4)
def conv_forward_prop_train(sentences):
# layers = T.arange(self.num_layers)
k = calculate_k(sentences.shape[-1], 1)
# k = tPrint("first k")(k)
sentences = T.switch(self.dropout,
theano_rand.binomial(sentences.shape,
p=0.8,
dtype=theano.config.floatX) * sentences,
sentences)
first_layer = create_convolution_layer(sentences,
self.filters[0],
self.folding_layers[0],
k,
self.biases[0])
k = calculate_k(sentences.shape[-1], 2)
# k = tPrint("second k")(k)
first_layer = T.switch(self.dropout,
theano_rand.binomial(first_layer.shape,
dtype=theano.config.floatX) * first_layer,
first_layer)
second_layer = create_convolution_layer(first_layer,
self.filters[1],
self.folding_layers[1],
k,
self.biases[1])
k = T.as_tensor(self.k_top)
# k = tPrint("k_top")(k)
second_layer = T.switch(self.dropout,
theano_rand.binomial(second_layer.shape,
dtype=theano.config.floatX) * second_layer,
second_layer)
third_layer = create_convolution_layer(second_layer,
self.filters[2],
self.folding_layers[2],
k,
self.biases[2])
third_layer = T.switch(self.dropout,
theano_rand.binomial(third_layer.shape,
dtype=theano.config.floatX) * third_layer,
third_layer)
return third_layer
def conv_forward_prop_test(sentences):
# layers = T.arange(self.num_layers)
k = calculate_k(sentences.shape[-1], 1)
# k = tPrint("first k")(k)
first_layer = create_convolution_layer(sentences,
T.switch(self.dropout,
self.filters[0] * 0.8,
self.filters[0]),
self.folding_layers[0],
k,
self.biases[0])
k = calculate_k(sentences.shape[-1], 2)
# k = tPrint("second k")(k)
second_layer = create_convolution_layer(first_layer,
T.switch(self.dropout,
self.filters[1] * 0.5,
self.filters[1]),
self.folding_layers[1],
k,
self.biases[1])
k = T.as_tensor(self.k_top)
# k = tPrint("k_top")(k)
third_layer = create_convolution_layer(second_layer,
T.switch(self.dropout,
self.filters[2] * 0.5,
self.filters[2]),
self.folding_layers[2],
k,
self.biases[2])
return third_layer
def embed(sentences):
vectors = lookup_all(sentences)
convolved = conv_forward_prop_train(vectors)
flat = T.flatten(convolved, outdim=2)
hidden = self.theano_activate(T.dot(flat, self.hidden_layer))
return hidden
def embed_test(sentences):
vectors = lookup_all(sentences)
convolved = conv_forward_prop_test(vectors)
flat = T.flatten(convolved, outdim=2)
h_weights = T.switch(self.dropout,
self.hidden_layer * 0.5,
self.hidden_layer)
embedded= self.theano_activate(T.dot(flat, h_weights))
return embedded
def forward_prop(sentences):
hidden = embed(sentences)
classes = last_activate(T.dot(hidden, self.theta) + self.b)
return classes
def forward_prop_test(sentences):
hidden = embed_test(sentences)
classes = last_activate(T.dot(hidden, self.theta) + self.b)
return classes
self.params = self.filters + self.biases + [self.hidden_layer, self.theta, self.b,
self.word_vecs]
def make_zero_shared_vars_like(a_shared, name_mod):
zeros = np.zeros_like(a_shared.get_value())
return theano.shared(zeros,
name=a_shared.name + name_mod,
broadcastable=a_shared.broadcastable)
# holds expected values for ada delta
self.param_grad_accums = map(lambda x: make_zero_shared_vars_like(x, "-grad"), self.params)
self.param_up_accums = map(lambda x: make_zero_shared_vars_like(x, "-update"), self.params)
s = T.imatrix("sentences")
y = T.imatrix("response")
beta = T.scalar("beta")
def bootstrap_soft(beta, prediction, truth):
return -1 * T.sum((beta * truth + (1-beta)*prediction)*T.log(prediction), axis=1).mean()
def binary_bootstrap_soft(beta, prediction, truth):
t = (beta * truth + (1-beta)*prediction)*T.log(prediction)
f = (beta * (1-truth) + (1-beta)*(1-prediction)) * T.log(1 - prediction)
return -1 * T.mean(t + f)
def bootstrap_hard(beta, prediction, truth):
zeros = T.zeros_like(prediction)
am = T.argmax(prediction, axis=1)
maxed = T.set_subtensor(zeros[T.arange(am.shape[0]), am], 1)
return -1 * T.sum((beta * truth + (1-beta)*maxed)*T.log(prediction), axis=1).mean()
# def binary_bootstrap_hard(beta, prediction, truth):
# t = (beta * truth + (1-beta)*prediction)*T.log(prediction)
# f = (beta * (1-truth) + (1-beta)*(1-prediction)) * T.log(1 - prediction)
# return -1 * T.mean(t + f)
def multilabel_b_soft(beta, preds, ys):
res, ups = theano.scan(lambda pred, truth: binary_bootstrap_soft(beta, pred, truth),
sequences=[preds.T, ys.T])
return T.sum(res)
# def multilabel_b_hard(beta, preds, ys):
# zeros = T.zeros_like(preds)
# am = T.argmax(preds, axis=1)
# maxed = T.set_subtensor(zeros[T.arange(am.shape[0]), am], 1)
# res, ups = theano.scan(lambda pred, truth: bootstrap_hard(beta, pred, truth),
# sequences=[preds.T, ys.T])
# return T.sum(res)
def multilabel_cross_ent(preds, ys):
res, ups = theano.scan(lambda p, t: T.nnet.binary_crossentropy(p, t).mean(),
sequences=[preds.T, ys.T])
return T.sum(res)
self.individual_b_soft_sum = multilabel_b_soft(beta, forward_prop(s), y)
# self.individual_b_hard_sum = multilabel_b_hard(beta, forward_prop(s), y)
self.individual_cross_ent_sum = multilabel_cross_ent(forward_prop(s), y)
self.bootstrap_soft_cost = bootstrap_soft(beta, forward_prop(s), y)
self.bootstrap_hard_cost = bootstrap_hard(beta, forward_prop(s), y)
self.squared_error = T.sum(T.mean((forward_prop(s)-y)**2, axis=0))
self.cross_entropy = T.nnet.categorical_crossentropy(forward_prop(s), y).mean() #+ \
# T.sum(T.sqr(self.filters[0])) + T.sum(T.sqr(self.filters[1])) + \
# T.sum(T.sqr(self.filters[2])) +\
# T.sum(T.sqr(self.hidden_layer))
b_soft_grads = T.grad(self.bootstrap_soft_cost, self.params)
b_hard_grads = T.grad(self.bootstrap_hard_cost, self.params)
sq_err_grads = T.grad(self.squared_error, self.params)
cross_ent_grads = T.grad(self.cross_entropy, self.params)
ind_b_soft_grads = T.grad(self.individual_b_soft_sum, self.params)
# ind_b_hard_grads = T.grad(self.individual_b_hard_sum, self.params)
ind_cross_ent_grads = T.grad(self.individual_cross_ent_sum, self.params)
def ada_delta_step(next_gradient, decay_rate, eps, prev_grad, prev_up):
# from http://arxiv.org/pdf/1212.5701v1.pdf
grad_accum = (decay_rate * prev_grad) + (1-decay_rate) * (next_gradient ** 2)
rms_g = T.sqrt(grad_accum + eps)
rms_delta = T.sqrt(prev_up + eps)
update = (rms_delta / rms_g) * next_gradient
up_accum = (decay_rate * prev_up) + (1-decay_rate) * (update ** 2)
return update, grad_accum, up_accum
def create_update_tuple(param, grad, grad_acc, up_acc):
# grad = tPrint("gradient for" + param.name)(grad)
update, new_g_acc, new_up_acc = ada_delta_step(grad,
self.decay,
self.epsilon,
grad_acc,
up_acc)
# update = tPrint("update for" + param.name)(update)
return [(param, param - update), (grad_acc, new_g_acc), (up_acc, new_up_acc)]
def create_gradient_updates(gradients):
params_to_up = zip(self.params, gradients, self.param_grad_accums,
self.param_up_accums)
return OrderedDict([tup for x in params_to_up for tup in create_update_tuple(*x)])
self.nn_train_b_soft = theano.function(inputs=[beta, s, y],
outputs=[self.bootstrap_soft_cost],
updates=create_gradient_updates(b_soft_grads),
name="nn_train_one",
allow_input_downcast=True)
self.nn_train_b_hard = theano.function(inputs=[beta, s, y],
outputs=[self.bootstrap_hard_cost],
updates=create_gradient_updates(b_hard_grads),
name="nn_train_one",
allow_input_downcast=True)
self.nn_train_sq_err = theano.function(inputs=[s, y],
outputs=[self.squared_error],
updates=create_gradient_updates(sq_err_grads),
name="nn_train_one",
allow_input_downcast=True)
self.nn_train_cross_ent = theano.function(inputs=[s, y],
outputs=[self.cross_entropy],
updates=create_gradient_updates(cross_ent_grads),
name="nn_train_one",
allow_input_downcast=True)
self.nn_train_ind_b_soft = theano.function(inputs=[beta, s, y],
outputs=[self.individual_b_soft_sum],
updates=create_gradient_updates(ind_b_soft_grads),
name="nn_train_one",
allow_input_downcast=True)
# self.nn_train_ind_b_hard = theano.function(inputs=[s, y],
# outputs=[self.individual_b_hard_sum],
# updates=create_gradient_updates(ind_b_hard_grads),
# name="nn_train_one",
# allow_input_downcast=True)
self.nn_train_ind_cross_ent = theano.function(inputs=[s, y],
outputs=[self.individual_cross_ent_sum],
updates=create_gradient_updates(ind_cross_ent_grads),
name="nn_train_one",
allow_input_downcast=True)
self.nn_embed = theano.function([s], embed_test(s), allow_input_downcast=True)
self.nn_predict = theano.function([s], forward_prop_test(s), allow_input_downcast=True)
def lookup_vectors(self, sent_idx):
row_vecs = np.array([self.word_vecs[i] for i in sent_idx])
return np.transpose(row_vecs)
def shuffle_training(self):
self.rand_state.shuffle(self.train_set)
def reshape_sentences(self, sentences):
padded = pad_sentence_batch(sentences)
# input_3d = np.array(map(self.lookup_vectors, padded))
# shape = input_3d.shape
# return np.reshape(input_3d, (shape[0], 1, shape[1], shape[2]))
return padded
def restructure_batch(self, lst_of_tups):
sentences, ys = zip(*lst_of_tups)
return self.reshape_sentences(sentences), ys
def __train_outline(self, method, epochs, batch_size, stop_crit, shuffle):
def make_batches(lst, bs):
return [self.restructure_batch(lst[i:i+bs]) for i in xrange(0, len(lst), bs)]
not_improving_count = 0
best_f1 = 0
for epoch in range(epochs):
print "Running epoch " + str(epoch)
sys.stdout.flush()
tic = time.time()
batches = make_batches(self.train_set, batch_size)
for x, y in batches:
method(x, y)
if self.valid_set is not None:
cur_f1 = self.cross_validate(self.valid_set)
print "F1 score for this epoch:"
print cur_f1
if cur_f1 > best_f1:
not_improving_count = 0
best_f1 = cur_f1
else:
not_improving_count += 1
print 'epoch %i' % epoch, 'completed in %.2f (sec)' % (time.time()-tic)
if not_improving_count > stop_crit:
break
if shuffle:
self.shuffle_training()
def train_b_soft(self, beta=0.95, epochs=5, batch_size=1, stop_crit=5, shuffle=True):
train = lambda x, y: self.nn_train_b_soft(beta, x, y)
self.__train_outline(train, epochs, batch_size, stop_crit, shuffle)
def train_b_hard(self,beta=0.8, epochs=5, stop_crit=5, batch_size=1, shuffle=True):
train = lambda x, y: self.nn_train_b_hard(beta, x, y)
self.__train_outline(train, epochs, batch_size, stop_crit, shuffle)
def train_sq_err(self, epochs=50, batch_size=1, stop_crit=5, shuffle=True):
self.__train_outline(self.nn_train_sq_err, epochs, batch_size, stop_crit, shuffle)
def train_cross_ent(self, epochs=50, batch_size=1, stop_crit=5, shuffle=True):
self.__train_outline(self.nn_train_cross_ent, epochs, batch_size, stop_crit, shuffle)
def train_multi_b_soft(self, beta=0.95, epochs=50, batch_size=1, stop_crit=5, shuffle=True):
train = lambda x, y: self.nn_train_ind_b_soft(beta, x, y)
self.__train_outline(train, epochs, batch_size, stop_crit, shuffle)
# def train_multi_b_hard(self, beta=0.95, epochs=50, batch_size=1, stop_crit=5, shuffle=True):
# train = lambda x, y: self.nn_train_ind_b_hard(beta, x, y)
# self.__train_outline(train, epochs, batch_size, stop_crit, shuffle)
def train_multi_cross_ent(self, epochs=50, batch_size=1, stop_crit=5, shuffle=True):
self.__train_outline(self.nn_train_ind_cross_ent, epochs, batch_size, stop_crit, shuffle)
def predict(self, exs, batch_size=1):
"""
:param exs: lists of tuples(words, word_order, 3d_dep_tensor, leaf_counts)
:return: matrix of probabilities for each class
"""
batches = [self.reshape_sentences(exs[i:i + batch_size]) for i in
xrange(0, len(exs), batch_size)]
preds = [self.nn_predict(batch) for batch in batches]
return [row for mat in preds for row in mat]
def cross_validate(self, pairs):
predictions = self.predict([x[0] for x in pairs])
return sklm.f1_score([np.argmax(x[1]) for x in pairs], map(np.argmax, predictions))
def embed(self, exs, batch_size=1):
"""
:param exs: lists of tuples(words, word_order, 3d_dep_tensor, leaf_counts)
:return: matrix of learned sentence embeddings, one row per example
"""
batches = [self.reshape_sentences(exs[i:i + batch_size]) for i in
xrange(0, len(exs), batch_size)]
preds = [self.nn_embed(batch) for batch in batches]
return [row for mat in preds for row in mat]
def save_weights(self, file_name):
to_save = [x.get_value() for x in self.params]
pickle.dump(to_save, open(file_name, 'wb'))
def load_weights(self, file_name):
"""
This will override any filter sizes you initialized to be those of the saved model
in the shared variables
:param file_name:
:return:
"""
to_load = pickle.load(open(file_name, 'rb'))
for s,l in zip(self.params, to_load):
s.set_value(l)
```
|
{
"source": "jef79m/django-usage",
"score": 3
}
|
#### File: django-usage/tests/test_commands.py
```python
import unittest
from datetime import datetime
from usage.management.commands.summarizeusage import _round_to_interval
class TestRoundToInterval(unittest.TestCase):
input = datetime(2015, 10, 15, 10, 43, 21, 11)
def test_simple_case(self):
expected = datetime(2015, 10, 15, 10, 40)
output = _round_to_interval(self.input, 5)
self.assertEqual(expected, output)
def test_60_case(self):
expected = datetime(2015, 10, 15, 10, 00)
output = _round_to_interval(self.input, 60)
self.assertEqual(expected, output)
if __name__ == '__main__':
unittest.main()
```
#### File: management/commands/summarizeusage.py
```python
from datetime import (timedelta,
datetime)
from django.conf import settings
from django.core.management.base import BaseCommand
from django.core.urlresolvers import resolve, Resolver404
from usage.models import (
PageHit,
Period,
UsageSummary)
def _round_to_interval(time, interval):
"""
Description:
Rounds a time to the previous <interval> number
of minutes.
eg. input: 10:43:21:12 -> 10:40:00:00
Args:
time (DateTime): The time to perform rounding on
interval (int): The rounding period in minutes.
"""
delta = timedelta(minutes=time.minute % interval,
seconds=time.second,
microseconds=time.microsecond)
return time - delta
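# Illustrative behaviour (added note, not part of the original module): with
# a 5 minute interval a hit at 10:43:21 is attributed to the 10:40 bucket,
# i.e. _round_to_interval(datetime(2015, 10, 15, 10, 43, 21, 11), 5) returns
# datetime(2015, 10, 15, 10, 40), matching tests/test_commands.py above.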
class Command(BaseCommand):
"""
Summarizes individual page hits and calculates time
spent on site.
"""
help = "Summarize user usage details"
def handle(self, *args, **options):
"""
Run the Command
"""
interval = settings.USAGE_INTERVAL
delta = timedelta(minutes=interval)
run_time = datetime.now()
pagehits = PageHit.objects.not_summarized().order_by('requested_time')
if pagehits.exists():
start = _round_to_interval(
pagehits.earliest('requested_time').requested_time, interval)
period, created = Period.objects.get_or_create(start=start, end=start + delta)
for hit in pagehits:
excluded = False
for exclude in settings.USAGE_EXCLUDE_URLS:
if exclude in hit.requested_page:
excluded = True
if excluded:
continue
if not (hit.requested_time >= start and
hit.requested_time < start + delta):
start = _round_to_interval(hit.requested_time, interval)
period, created = Period.objects.get_or_create(
start=start, end=start + delta)
try:
namespace = resolve(hit.requested_page).namespace
url_name = resolve(hit.requested_page).url_name
except Resolver404:
namespace = url_name = "Unknown"
summary, created = UsageSummary.objects.get_or_create(
time_period=period,
namespace=namespace,
url_name=url_name,
user=hit.user)
hit.summarized = run_time
hit.save()
summary.hits += 1
summary.save()
print "Processed %s hits" % pagehits.count()
```
#### File: django-usage/usage/views.py
```python
from datetime import datetime
from .models import UsageSummary, Period
from django.contrib.auth.models import User
from django.db.models import Count
from django.template.response import TemplateResponse
from django.conf import settings
def usage_display(request):
template_name = 'usage/usage.html'
context = {
'title': "User Usage Report"
}
usage_data = []
# Get date from request, otherwise assume Today.
date = request.GET.get('date', None)
if date:
date = datetime.strptime(date, '%Y%m%d').date()
else:
date = datetime.now().date()
# Users who have records for the given date,
# annotated with number of time periods they were
# active in.
users = (User.objects
.filter(usagesummary__time_period__start__contains=date)
.values('pk')
.annotate(total=Count('usagesummary__time_period', distinct=True))
)
# for each user, build a record with their entries
# as well as converting active periods to minutes
# period length can be set in settings.USAGE_INTERVAL
for user in users:
summaries = (UsageSummary.objects
.filter(
user_id=user['pk'], time_period__start__contains=date))
# Sparkline data is initialised with zero values
# I'm making the assumption that there will be more
# periods with 0 activity than actual activity
spark_data = [0] * (60 * 24 / settings.USAGE_INTERVAL)
periods = summaries.values('time_period').annotate(hit_total=Count('hits')).values_list('time_period','hit_total')
for period_id, hits in periods:
spark_data[Period.objects.get(pk=period_id).index] = hits
spark_data = ','.join([str(x) for x in spark_data])
print spark_data
usage_data.append({
'user': User.objects.get(pk=user['pk']),
'time_active': user['total'] * settings.USAGE_INTERVAL,
'spark_data': spark_data,
'summaries': summaries,
})
if users.exists():
earliest = (UsageSummary.objects
.filter(time_period__start__contains=date)
.earliest('time_period__start').time_period.start)
latest = (UsageSummary.objects
.filter(time_period__start__contains=date)
.latest('time_period__start').time_period.start)
else:
earliest = date
latest = date
try:
previous_day = (UsageSummary.objects
.filter(time_period__start__lt=earliest)
.latest('time_period__start').time_period.start.date()
)
except UsageSummary.DoesNotExist:
previous_day = None
try:
next_day = (UsageSummary.objects
.filter(time_period__start__gt=latest)
.earliest('time_period__start').time_period.start.date()
)
except UsageSummary.DoesNotExist:
next_day = None
context.update({
'usage_data': usage_data,
'previous_day': previous_day,
'next_day': next_day})
return TemplateResponse(request, template_name, context)
```
|
{
"source": "jefalexa/cloudpandas",
"score": 3
}
|
#### File: src/cloudpandas/__init__.py
```python
import pandas as pd
import numpy as np
import requests
import json
import time
def df_to_json(df):
for col in df.columns:
if df[col].dtype == 'datetime64[ns]':
df[col] = df[col].astype('str')
df.reset_index(drop=True, inplace=True)
return(df.to_json())
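# Usage sketch (illustrative, not part of the original module): df_to_json
# casts any datetime64 columns to strings and resets the index in place on
# the DataFrame passed in, then returns the result of DataFrame.to_json(),
# e.g. df_to_json(pd.DataFrame({'when': pd.to_datetime(['2021-01-01']), 'value': [1.5]}))
# returns a JSON string with the 'when' column serialised as text.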
class Client:
"""A Class used to create a client object for communicating with the API
Attributes
----------
api_token : str
Your API Authentication token
base_url : str
Base URL for the API
dataproviders : obj
An instance of the DataProviders Class initialized with the api_token and base_url from the Client class
sheets : obj
An instance of the Sheets Class initialized with the api_token and base_url from the Client class
Classes
-------
DataProviders
A Class used to get information about the Data Providers configured in your account
Sheets
A Class used to interact with sheets/files contained in your Data Providers
"""
def __init__(self, api_token, base_url='https://www.cloudpandas.com/api/'):
"""
Parameters
----------
api_token : str
Your API Authentication token
"""
self.api_token = api_token
self.base_url = base_url
self.dataproviders = self.DataProviders(self.api_token, self.base_url)
self.sheets = self.Sheets(self.api_token, self.base_url)
class DataProviders:
def __init__(self, api_token, base_url):
"""
Parameters
----------
api_token : str
Your API Authentication token
base_url : str
Base URL for the API
"""
self.api_token = api_token
self.base_url = base_url
def list(self):
"""A Method to list the Data Provider Instances configured in your account
Returns
-------
pandas.DataFrame
id: Unique ID used to reference that provider
name: The given name of the provider
provider: What service the provider connects to
status: a-Active, m-Maintenance, e-Expired
Raises
------
RuntimeError
If the API returns anything other than Status 200
"""
url = '{}providerlist'.format(self.base_url)
headers = {'Authorization': 'Token {}'.format(self.api_token)}
providers = requests.get(url, headers=headers)
if providers.status_code == 200:
data_providers = pd.DataFrame(providers.json())
data_providers['id'] = data_providers['id'].astype('int')
data_providers['status'] = data_providers['status'].map({'m':'Maintenance','a':'Active','e':'Expired'})
return(data_providers)
else:
raise RuntimeError("Error {} - {}: {}".format(providers.status_code, providers.reason, json.loads(providers.content)))
class Sheets:
def __init__(self, api_token, base_url):
"""
Parameters
----------
api_token : str
Your API Authentication token
base_url : str
Base URL for the API
"""
self.api_token = api_token
self.base_url = base_url
def status(self, job_id):
url = '{}getmyprocstatus/{}/'.format(self.base_url, job_id)
headers = {'Authorization': 'Token {}'.format(self.api_token)}
results = requests.get(url, headers=headers)
return(results.json())
def list(self, provider_id):
"""A Method to list the sheets/files of a given Data Provider
Parameters
----------
provider_id : str
The ID or Name of the Data Provider to use. ID is more deterministic and thus preferred, but Name will also work.
Returns
-------
pandas.DataFrame
id: Unique ID used to reference that sheet
name: The given name of the sheet
Raises
------
RuntimeError
If the API returns anything other than Status 200
"""
provider_id = str(provider_id)
url = '{}filelist/{}'.format(self.base_url, provider_id)
headers = {'Authorization': 'Token {}'.format(self.api_token)}
sheet = requests.get(url, headers=headers)
if sheet.status_code == 200:
return(pd.read_json(sheet.json()))
else:
raise RuntimeError("Error {} - {}: {}".format(sheet.status_code, sheet.reason, json.loads(sheet.content)))
def get(self, provider_id, sheet_id, sub_sheet=0, skip_rows=0):
"""A Method to get the contents of a sheet/file
Parameters
----------
provider_id : str
The ID or Name of the Data Provider to use. ID is more deterministic and thus preferred, but Name will also work.
sheet_id : str
The ID or Name of the sheet/file. ID is more deterministic and thus preferred, but Name will also work.
If more than one sheet share the same name, then the most recently modified will be chosen.
sub_sheet : int|str
For files that support sub sheets, such as the sheets within an Excel file, allows a specific sub sheet to be chosen.
Input options are the index number (defaults to 0, the first sheet in the workbook) or the name.
skip_rows : int
Allows header rows to be skipped. Defaults to 0.
Returns
-------
pandas.DataFrame
Contents of the file chosen, returned as a Pandas DataFrame.
Raises
------
RuntimeError
If the API returns anything other than Status 200
"""
provider_id = str(provider_id)
sheet_id = str(sheet_id)
url = '{}fileget/'.format(self.base_url)
data = {'provider_id':provider_id, 'sheet_name':sheet_id, 'sub_sheet':sub_sheet, 'skip_rows':skip_rows}
data = json.dumps(data)
headers = {'Authorization': 'Token {}'.format(self.api_token), 'Content-Type':'application/json'}
sheet = requests.post(url, headers=headers, data=data)
if sheet.status_code == 200:
results = sheet.json()
while True:
procstatus = self.status(results['id'])
if procstatus['status'] == 'finished':
try:
return(pd.read_json(procstatus['result']))
except:
return("Error loading data: {}".format(procstatus))
elif procstatus['status'] == 'failed':
return(procstatus)
time.sleep(1)
else:
raise RuntimeError("Error {} - {}: {}".format(sheet.status_code, sheet.reason, json.loads(sheet.content)))
def info(self, provider_id, sheet_id):
"""A Method to get information about a sheet/file
Parameters
----------
provider_id : str
The ID or Name of the Data Provider to use. ID is more deterministic and thus preferred, but Name will also work.
sheet_id : str
The ID or Name of the sheet/file. ID is more deterministic and thus preferred, but Name will also work.
If more than one sheet share the same name, then the most recently modified will be chosen.
Returns
-------
dict
name : str
Name of the sheet/file
id : str
Unique ID of the sheet/file
modified_at : str
Datetime string of time last modified
sub_sheets : list
Where supported, Sub Sheets contained in the file
path/folder : str
Where supported, the full path to or folder containing the file
Raises
------
RuntimeError
If the API returns anything other than Status 200
"""
provider_id = str(provider_id)
sheet_id = str(sheet_id)
url = '{}fileinfo/'.format(self.base_url)
data = {'provider_id':provider_id, 'sheet_name':sheet_id}
data = json.dumps(data)
headers = {'Authorization': 'Token {}'.format(self.api_token), 'Content-Type':'application/json'}
sheet = requests.post(url, headers=headers, data=data)
if sheet.status_code == 200:
return(sheet.json())
else:
raise RuntimeError("Error {} - {}: {}".format(sheet.status_code, sheet.reason, json.loads(sheet.content)))
def update(self, data, provider_id, sheet_id, sub_sheet=0):
"""A Method to update a sheet/file
Parameters
----------
data : pandas.DataFrame
The data that will be synced to the sheet/file
provider_id : str
The ID or Name of the Data Provider to use. ID is more deterministic and thus preferred, but Name will also work.
sheet_id : str
The ID or Name of the sheet/file. ID is more deterministic and thus preferred, but Name will also work.
If more than one sheet share the same name, then the most recently modified will be chosen.
sub_sheet : str
Where supported, the name or index of the sub sheet. Defaults to 0 (the first sub sheet).
Returns
-------
dict
str
OK or Error depending on the success of the operation
Raises
------
RuntimeError
If the API returns anything other than Status 200
"""
provider_id = str(provider_id)
sheet_id = str(sheet_id)
url = '{}fileupdate/'.format(self.base_url)
data = {'provider_id':provider_id, 'sheet_name':sheet_id, 'sub_sheet':sub_sheet, 'data':df_to_json(data.fillna(""))}
data = json.dumps(data)
headers = {'Authorization': 'Token {}'.format(self.api_token), 'Content-Type':'application/json'}
sheet = requests.post(url, headers=headers, data=data)
if sheet.status_code == 200:
results = sheet.json()
while True:
procstatus = self.status(results['id'])
if procstatus['status'] == 'finished':
try:
return(procstatus['result'])
except:
return("Error loading data: {}".format(procstatus))
elif procstatus['status'] == 'failed':
return(procstatus)
time.sleep(1)
else:
raise RuntimeError("Error {} - {}: {}".format(sheet.status_code, sheet.reason, json.loads(sheet.content)))
def create(self, data, provider_id, sheet_name, sub_sheet='Sheet1', sheet_type='sheet', folder_path='0'):
"""A Method to update a sheet/file
Parameters
----------
data : pandas.DataFrame
The data that will be synced to the sheet/file
provider_id : str
The ID or Name of the Data Provider to use. ID is more deterministic and thus preferred, but Name will also work.
sheet_name : str
The Name of the sheet/file to be created
sub_sheet : str
Where supported, the name of the sub sheet. Defaults to Sheet1
sheet_type : str
What type of object to create. sheet = GoogleSheets or SmartSheets. xlsx and csv = file.
folder_path : str
Currently only supported with files, not sheets. Path to the folder in which to create the file.
Returns
-------
dict
str
OK or Error depending on the success of the operation
Raises
------
RuntimeError
If attempting to set a folder_path for a sheet
RuntimeError
If the API returns anything other than Status 200
"""
provider_id = str(provider_id)
sheet_name = str(sheet_name)
if ((sheet_type.lower() == 'sheet') & (folder_path != '0')):
raise RuntimeError("folder_path cannot be set with sheet_type=sheet")
url = '{}filecreate/'.format(self.base_url)
data = {'provider_id':provider_id, 'sheet_name':sheet_name, 'sub_sheet':sub_sheet, 'sheet_type':sheet_type, 'folder_path':folder_path, 'data':df_to_json(data.fillna(""))}
data = json.dumps(data)
headers = {'Authorization': 'Token {}'.format(self.api_token), 'Content-Type':'application/json'}
sheet = requests.post(url, headers=headers, data=data)
if sheet.status_code == 200:
results = sheet.json()
while True:
procstatus = self.status(results['id'])
if procstatus['status'] == 'finished':
try:
return(procstatus['result'])
except:
return("Error loading data: {}".format(procstatus))
elif procstatus['status'] == 'failed':
return(procstatus)
time.sleep(1)
else:
raise RuntimeError("Error {} - {}: {}".format(sheet.status_code, sheet.reason, json.loads(sheet.content)))
def delete(self, provider_id, sheet_id):
"""A Method to remove a sheet/file
Parameters
----------
provider_id : str
The ID or Name of the Data Provider to use. ID is more deterministic and thus preferred, but Name will also work.
sheet_id : str
The ID or Name of the sheet/file. ID is more deterministic and thus preferred, but Name will also work.
If more than one sheet share the same name, then the most recently modified will be chosen.
Returns
-------
: str
Status message, success or error
Raises
------
RuntimeError
If the API returns anything other than Status 200
"""
provider_id = str(provider_id)
sheet_id = str(sheet_id)
url = '{}filedelete/'.format(self.base_url)
data = {'provider_id':provider_id, 'sheet_name':sheet_id}
data = json.dumps(data)
headers = {'Authorization': 'Token {}'.format(self.api_token), 'Content-Type':'application/json'}
sheet = requests.post(url, headers=headers, data=data)
if sheet.status_code == 200:
return(sheet.json())
else:
raise RuntimeError("Error {} - {}: {}".format(sheet.status_code, sheet.reason, json.loads(sheet.content)))
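# Minimal end-to-end sketch (illustrative only, not part of the original
# module); the token and the provider/sheet identifiers below are
# placeholders and must be replaced with real values for the calls to work.
if __name__ == "__main__":
client = Client(api_token="YOUR_API_TOKEN")  # placeholder token
print(client.dataproviders.list())  # DataFrame of configured data providers
print(client.sheets.list(provider_id=1))  # sheets/files in provider 1 (example id)
print(client.sheets.get(provider_id=1, sheet_id="MySheet").head())  # one sheet as a DataFrame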
```
|
{
"source": "jefalexa/custom_modules",
"score": 3
}
|
#### File: jefalexaudf/file_mgt/__init__.py
```python
import pandas as pd
import numpy as np
import datetime as dt
import os, sys
import re
import logging
#global interact
from ipywidgets import interact, interactive, fixed, interact_manual
def check_match(x, y):
try:
pattern = re.compile(y)
return(bool(re.match(pattern=pattern, string=x)))
except:
return("N/A")
def local_find(working_dir, x=".*"):
pattern = re.compile(x)
file_list = []
try:
for file in os.listdir(working_dir):
if re.match(pattern=pattern, string=file):
file_list.append(file)
return(file_list)
except:
file_list = []
return(file_list)
def local_find_recent(working_dir, x=".*"):
pattern = re.compile(x)
file_list = []
fts_min = 0
try:
for file in os.listdir(working_dir):
if re.match(pattern=pattern, string=file):
f2 = os.path.join(working_dir, file)
fts = os.stat(f2).st_mtime
fdt = dt.datetime.fromtimestamp(fts)
if ((fts_min < fts) | (fts_min == 0)):
file_list = [file, f2, fdt]
fts_min = fts
return(file_list)
except:
print("Error")
file_list = []
return(file_list)
def local_find_to_df(working_dir, x=".*"):
pattern = re.compile(x)
file_list = []
try:
for file in os.listdir(working_dir):
if re.match(pattern=pattern, string=file):
f2 = os.path.join(working_dir, file)
fsize = os.stat(f2).st_size
fts = os.stat(f2).st_mtime
fdt = dt.datetime.fromtimestamp(fts)
#print(file, fsize, fdt)
file_list.append([file, fsize, fdt])
return(pd.DataFrame(columns=['Filename', 'Size', 'Modified Time'], data=file_list))
except:
print("Error")
file_list = []
return(pd.DataFrame(columns=['Filename', 'Size', 'Modified Time'], data=file_list))
def interactive_file_saveloc(dir_list, search_pattern):
output_file = ""
def test01(dir_input=dir_list, search_pattern=fixed(search_pattern)):
file_df = local_find_to_df(dir_input, search_pattern).sort_values(by='Modified Time', ascending=False)
file_list = file_df['Filename'].tolist()
file_list.insert(0, "")
interact(test02, file_picker="{}".format(dt.datetime.strftime(dt.datetime.now(), '%m%d%Y_%H%M')), dir_input=fixed(dir_input), file_df=fixed(file_df))
def test02(file_picker, dir_input, file_df):
global output_file
output_file = os.path.join(dir_input, file_picker)
if len(file_picker) > 0:
print(output_file)
return(file_df.loc[file_df['Filename'].apply(lambda x: check_match(x, file_picker)) == True ] )
else:
return(file_df)
interact(test01, dir_input=dir_list, search_pattern=fixed(search_pattern))
def interactive_file_picker(dir_list, search_pattern):
file = ""
def ifp_sub01(dir_input, search_pattern):
file_list = local_find_to_df(dir_input, search_pattern).sort_values(by='Modified Time', ascending=False)['Filename'].tolist()
file_list.insert(0, "")
interact(ifp_sub02, dir_input=fixed(dir_input), file_picker=file_list, search_pattern=fixed(search_pattern))
def ifp_sub02(dir_input, file_picker, search_pattern):
file = os.path.join(dir_input, file_picker)
if len(file_picker) > 0:
print(" File: {}\n Path: {}\n Size: {}\n Modified: {}".format(file_picker, file, os.stat(file).st_size, dt.datetime.strftime(dt.datetime.fromtimestamp(os.stat(file).st_mtime), '%m-%d-%y %H:%M')))
else:
return(local_find_to_df(dir_input, search_pattern).sort_values(by='Modified Time', ascending=False))
interact(ifp_sub01, dir_input=dir_list, search_pattern=fixed(search_pattern))
def interactive_table_frame(df):
col_list = df.select_dtypes('object').columns
val_list = df.select_dtypes('float').columns
def itf01(Filter1_Name, Filter2_Name, col_list, val_list):
l1 = df[Filter1_Name].sort_values().unique().tolist()
l1.insert(0, 'ANY')
l1.insert(1, '')
l2 = df[Filter2_Name].sort_values().unique().tolist()
l2.insert(0, 'ANY')
interact(test02, Filter1_Value='ANY', Filter2_Value='ANY', SortBy=df.columns, Ascending=[True, False], Clipboard=[False, True])
def test02(Filter1_Value, Filter2_Value, SortBy, Ascending, Clipboard):
try:
if Filter1_Value == 'ANY':
pdata1 = df
else:
#pattern = re.compile(r"{}".format(Filter1_Value))
pdata1 = df.loc[df[Filter1_Name].apply(lambda x: check_match(x, Filter1_Value)) == True]
if Filter2_Value == 'ANY':
pdata2 = pdata1
else:
#pattern = re.compile(r"{}".format(Filter2_Value))
pdata2 = pdata1.loc[pdata1[Filter2_Name].apply(lambda x: check_match(x, Filter2_Value)) == True]
pdata3 = pdata2.sort_values(SortBy, ascending=Ascending)
if Clipboard:
pdata3.to_clipboard(index=False)
global interactive_table_frame_output
interactive_table_frame_output = pdata3
return(pdata3)
except:
print("Make a selection")
interact(itf01, Filter1_Name=col_list, Filter2_Name=col_list, col_list=fixed(col_list), val_list=fixed(val_list))
```
#### File: src/jefalexaudf/__init__.py
```python
import pandas as pd
import numpy as np
import datetime as dt
import os, sys
import re
import logging
import ipywidgets as widgets
from ipywidgets import interact, interact_manual, Button, Box, Layout, interactive, fixed, interact_manual
from IPython.display import clear_output
dir_home_options = ["/home/jovyan/work/", "/Users/jefalexa/"]
for dir_home in dir_home_options:
if bool(re.match(dir_home, os.getcwd())):
break
else:
continue
dir_clipboard = os.path.join(dir_home, "Box Sync/data_repo/interim/clipboard")
def check_match(x, y, Match_Case=True):
'''Check if variable (x) matches regex pattern (y). Return True, False or N/A'''
try:
if Match_Case:
pattern = re.compile(y)
else:
pattern = re.compile(y, flags=re.IGNORECASE)
return(bool(re.search(pattern=pattern, string=x)))
except:
return("N/A")
def usd_to_float(test_string):
'''Turn a string representing a dollar amount into a float. '''
pattern = re.compile("(\$)|(USD \$)|(USD)")
try:
split = re.split(pattern, test_string)
return(float(split[-1]))
except:
return(0)
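# Illustrative calls (added note, not part of the original module):
# usd_to_float("USD $1500.50") -> 1500.5
# usd_to_float("$20") -> 20.0
# usd_to_float("n/a") -> 0 (anything that cannot be parsed falls back to 0)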
def get_fy_info(date, calendar_fy, field=''):
'''Returns the fiscal calendar information for a given date
INPUTS:
date='%Y-%m-%d'
calendar_fy=DataFrame with Fiscal Information, generally saved in Interim folder
field=If a valid field from the DF is listed, return just the value, if not, return the entire DF
'''
f1 = calendar_fy['Fiscal Week Start Date'] <= date
f2 = calendar_fy['Fiscal Week End Date'] >= date
if field in calendar_fy.columns:
return(calendar_fy.loc[f1&f2, field].to_list()[0])
else:
return(calendar_fy.loc[f1&f2, :])
def local_find(working_dir, x=".*"):
pattern = re.compile(x)
file_list = []
try:
for file in os.listdir(working_dir):
if re.match(pattern=pattern, string=file):
file_list.append(file)
return(file_list)
except:
file_list = []
return(file_list)
def local_find_recent(working_dir, x=".*"):
pattern = re.compile(x)
file_list = []
fts_min = 0
try:
for file in os.listdir(working_dir):
if re.match(pattern=pattern, string=file):
f2 = os.path.join(working_dir, file)
fts = os.stat(f2).st_mtime
fdt = dt.datetime.fromtimestamp(fts)
if ((fts_min < fts) | (fts_min == 0)):
file_list = [file, f2, fdt]
fts_min = fts
return(file_list)
except:
print("Error")
file_list = []
return(file_list)
def local_find_to_df(working_dir, x=".*"):
pattern = re.compile(x)
file_list = []
try:
for file in os.listdir(working_dir):
if re.match(pattern=pattern, string=file):
f2 = os.path.join(working_dir, file)
fsize = os.stat(f2).st_size
fts = os.stat(f2).st_mtime
fdt = dt.datetime.fromtimestamp(fts)
#print(file, fsize, fdt)
file_list.append([file, fsize, fdt])
return(pd.DataFrame(columns=['Filename', 'Size', 'Modified Time'], data=file_list))
except:
print("Error")
file_list = []
return(pd.DataFrame(columns=['Filename', 'Size', 'Modified Time'], data=file_list))
def local_find_dir(working_dir):
'''Returns a list of root directories in a given directory'''
directory_list = []
for name in os.listdir(working_dir):
if os.path.isdir(os.path.join(working_dir, name)):
directory_list.append(os.path.join(working_dir, name))
return(directory_list)
def interactive_file_saveloc(dir_list, search_pattern):
output_file = ""
def test01(dir_input=dir_list, search_pattern=fixed(search_pattern)):
file_df = local_find_to_df(dir_input, search_pattern).sort_values(by='Modified Time', ascending=False)
file_list = file_df['Filename'].tolist()
file_list.insert(0, "")
interact(test02, file_picker="{}".format(dt.datetime.strftime(dt.datetime.now(), '%m%d%Y_%H%M')), dir_input=fixed(dir_input), file_df=fixed(file_df))
def test02(file_picker, dir_input, file_df):
global interactive_file_saveloc_output
interactive_file_saveloc_output = [file_picker, os.path.join(dir_input, file_picker), dir_input]
if len(file_picker) > 0:
print(interactive_file_saveloc_output[1])
return(file_df.loc[file_df['Filename'].apply(lambda x: check_match(x, file_picker)) == True ] )
else:
return(file_df)
interact(test01, dir_input=dir_list, search_pattern=fixed(search_pattern))
def interactive_table_frame(df):
col_list = df.select_dtypes('object').columns
val_list = df.select_dtypes('float').columns
def itf01(Filter1_Name, Filter2_Name, col_list, val_list):
l1 = df[Filter1_Name].sort_values().unique().tolist()
l1.insert(0, 'ANY')
l1.insert(1, '')
l2 = df[Filter2_Name].sort_values().unique().tolist()
l2.insert(0, 'ANY')
interact(test02, Filter1_Value='ANY', Filter2_Value='ANY', SortBy=df.columns, Ascending=[True, False], Clipboard=[False, True], Filter1_Name=fixed(Filter1_Name), Filter2_Name=fixed(Filter2_Name))
def test02(Filter1_Value, Filter2_Value, SortBy, Ascending, Clipboard, Filter1_Name, Filter2_Name):
try:
if Filter1_Value == 'ANY':
pdata1 = df
else:
#pattern = re.compile(r"{}".format(Filter1_Value))
pdata1 = df.loc[df[Filter1_Name].apply(lambda x: check_match(x, Filter1_Value)) == True]
if Filter2_Value == 'ANY':
pdata2 = pdata1
else:
#pattern = re.compile(r"{}".format(Filter2_Value))
pdata2 = pdata1.loc[pdata1[Filter2_Name].apply(lambda x: check_match(x, Filter2_Value)) == True]
pdata3 = pdata2.sort_values(SortBy, ascending=Ascending)
if Clipboard:
pdata3.to_clipboard(index=False)
global interactive_table_frame_output
interactive_table_frame_output = pdata3
return(pdata3)
except:
print("Make a selection")
interact(itf01, Filter1_Name=col_list, Filter2_Name=col_list, col_list=fixed(col_list), val_list=fixed(val_list))
def interactive_tabs(df):
global tab_contents
global tab
#tab_contents = df.columns.sort_values()
tab_contents = df.columns
children = []
for name in tab_contents:
try:
l1 = df[name].dropna().sort_values().unique().tolist()
l1.insert(0, '')
if df[name].dtype == (float or int):
f1 = widgets.HBox([widgets.Label(name), widgets.FloatRangeSlider(value=[df[name].min(), df[name].max()], min=df[name].min(), max=df[name].max(), step=1, disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.0f', ) ])
else:
if len(l1) <= 20:
f1 = widgets.HBox([widgets.Label(name), widgets.SelectMultiple(options=l1, disabled=False) ])
else:
#f1 = widgets.Text(value='.*',placeholder='.*',description='Filter: ',disabled=False)
f1 = widgets.HBox([widgets.Label(name), widgets.Text(value='.*',placeholder='.*',disabled=False) ])
children.append(f1)
except:
print("Error on {}".format(name))
tab = widgets.Tab()
tab.children = children
for i in range(len(children)):
tab.set_title(i, tab_contents[i])
return(tab)
def interactive_tabs_display(df1):
index_num = 0
total_len = len(df1)
for index_num in range(0, len(tab_contents)):
tname = tab_contents[index_num]
tval = tab.children[index_num].children[1].value
if tval:
vt = type(tval)
if vt == type(tuple()):
if df1[tname].dtype == (float or int):
if ((tab.children[index_num].children[1].min == tval[0]) & (tab.children[index_num].children[1].max == tval[1])):
continue
else:
f1 = df1[tname] >= tval[0]
f2 = df1[tname] <= tval[1]
df1 = df1.loc[f1&f2, :]
print("____________\n{} Min: {} - Max: {}".format(tname, tval[0], tval[1]))
print("Matched {} entries".format(len(df1)))
else:
if tval == ('',):
continue
else:
f1 = df1[tname].isin(tval)
df1 = df1.loc[f1, :]
print("____________\n{} {}".format(tname, tval))
print("Matched {} entries".format(len(df1)))
else:
if tval == '.*':
continue
else:
Match_Case = True
df1 = df1.loc[df1[tname].apply(lambda x: check_match(x, tval, Match_Case)) == True]
print("____________\n{}: '{}' Matched:\n".format(tname, tval), df1[tname].value_counts())
print("____________\n", "Matched {} of {} entries".format(len(df1), total_len))
return(df1)
def to_myclip(df):
date_str = dt.datetime.strftime(dt.datetime.now(), '%m-%d-%y_%H%M%S')
file_name = "clipboard_{}.csv".format(date_str)
file = os.path.join(dir_clipboard, file_name)
df.to_csv(file)
print("Saved: {}".format(file))
def read_myclip():
file = local_find_recent(dir_clipboard, x=".*.csv")[1]
df = pd.read_csv(file, index_col=0)
return(df)
class file_picker():
'''
Create a file_picker object, e.g. mypicker01 = file_picker(dir_list=['./', '../'], search_pattern=".*"), then call mypicker01.select() to pick files from a set of directories.
Then reference file name, full file path and the directory as mypicker01.file_name, mypicker01.file_path, mypicker01.file_dir. Or all three as mypicker01.interactive_file_picker_output
'''
def __init__(self, dir_list=['./', '../'], search_pattern=".*"):
self.file = ""
self.interactive_file_picker_output = []
self.file_name = ""
self.file_path = ""
self.file_dir = ""
self.dir_list = []
self.search_pattern = ""
self.dir_list = dir_list
self.search_pattern = search_pattern
def select(self):
dir_list = self.dir_list
search_pattern = self.search_pattern
def ifp_sub01(dir_input, search_pattern):
file_list = self.__local_find_to_df(dir_input, search_pattern).sort_values(by='Modified Time', ascending=False)['Filename'].tolist()
file_list.insert(0, "")
interact(ifp_sub02, dir_input=fixed(dir_input), file_picker=file_list, search_pattern=fixed(search_pattern))
def ifp_sub02(dir_input, file_picker, search_pattern):
self.file = os.path.join(dir_input, file_picker)
if len(file_picker) > 0:
file_path = os.path.join(dir_input, self.file)
if os.path.isdir(file_path):
print("'{}' added to directory list. Reload select function.".format(file_path))
self.dir_list.append(file_path)
else:
self.interactive_file_picker_output = [file_picker, self.file, dir_input]
self.file_name, self.file_path, self.file_dir = [file_picker, self.file, dir_input]
print(" File: {}\n Path: {}\n Size: {}\n Modified: {}".format(file_picker, self.file, os.stat(self.file).st_size, dt.datetime.strftime(dt.datetime.fromtimestamp(os.stat(self.file).st_mtime), '%m-%d-%y %H:%M')))
else:
return(self.__local_find_to_df(dir_input, search_pattern).sort_values(by='Modified Time', ascending=False))
interact(ifp_sub01, dir_input=dir_list, search_pattern=fixed(search_pattern))
def __local_find_to_df(self, working_dir, x=".*"):
pattern = re.compile(x)
file_list = []
try:
for file in os.listdir(working_dir):
if re.match(pattern=pattern, string=file):
f2 = os.path.join(working_dir, file)
fsize = os.stat(f2).st_size
fts = os.stat(f2).st_mtime
fdt = dt.datetime.fromtimestamp(fts)
#print(file, fsize, fdt)
file_list.append([file, fsize, fdt])
return(pd.DataFrame(columns=['Filename', 'Size', 'Modified Time'], data=file_list))
except:
print("Error")
file_list = []
return(pd.DataFrame(columns=['Filename', 'Size', 'Modified Time'], data=file_list))
class interactive_tabs():
def __init__(self, df):
self.tab_contents = []
self.tab = widgets.Tab()
self.df = df
self.cols = df.columns
def select_columns(self):
f1 = widgets.HBox([widgets.Label("Columns"), widgets.SelectMultiple(options=self.df.columns, value=tuple(self.df.columns), disabled=False) ])
def handle_col_change(change):
self.cols = list(f1.children[1].value)
button = widgets.Button(description="Apply")
output = widgets.Output()
with output:
display(self.select())
def on_button_clicked(b):
with output:
self.cols = list(f1.children[1].value)
clear_output(wait=True)
display(self.select())
f1.children[1].observe(on_button_clicked, names='value')
display(f1, output)
def select(self):
self.tab_contents = self.cols
children = []
for name in self.tab_contents:
try:
l1 = self.df[name].dropna().sort_values().unique().tolist()
l1.insert(0, '')
if self.df[name].dtype == (float or int):
f1 = widgets.HBox([widgets.Label(name), widgets.FloatRangeSlider(value=[self.df[name].min(), self.df[name].max()], min=self.df[name].min(), max=self.df[name].max(), step=1, disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.0f', ) ])
else:
if len(l1) <= 30:
f1 = widgets.HBox([widgets.Label(name), widgets.SelectMultiple(options=l1, disabled=False) ])
else:
f1 = widgets.HBox([widgets.Label(name), widgets.Text(value='.*',placeholder='.*',disabled=False) ])
children.append(f1)
except:
print("Error on {}".format(name))
self.tab.children = children
for i in range(len(children)):
self.tab.set_title(i, self.tab_contents[i])
display(self.tab)
def display(self):
index_num = 0
df1 = self.df[self.cols]
total_len = len(df1)
for index_num in range(0, len(self.tab_contents)):
tname = self.tab_contents[index_num]
tval = self.tab.children[index_num].children[1].value
if tval:
vt = type(tval)
if vt == type(tuple()):
if df1[tname].dtype == (float or int):
if ((self.tab.children[index_num].children[1].min == tval[0]) & (self.tab.children[index_num].children[1].max == tval[1])):
df1 = df1
else:
f1 = df1[tname] >= tval[0]
f2 = df1[tname] <= tval[1]
df1 = df1.loc[f1&f2, :]
print("____________\n{} Min: {} - Max: {}".format(tname, tval[0], tval[1]))
print("Matched {} entries".format(len(df1)))
else:
if tval == ('',):
continue
else:
f1 = df1[tname].isin(tval)
df1 = df1.loc[f1, :]
print("____________\n{} {}".format(tname, tval))
print("Matched {} entries".format(len(df1)))
else:
if tval == '.*':
df1 = df1
else:
Match_Case = True
df1 = df1.loc[df1[tname].apply(lambda x: check_match(x, tval, Match_Case)) == True]
print("____________\n{}: '{}' Matched:\n".format(tname, tval), df1[tname].value_counts())
print("____________\n", "Matched {} of {} entries".format(len(df1), total_len))
return(df1)
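# Typical notebook flow (illustrative sketch, not part of the original module):
# tabs = interactive_tabs(df) # wrap a DataFrame
# tabs.select_columns() # optionally narrow the columns shown
# tabs.select() # render one filter tab per column
# filtered = tabs.display() # apply the current selections, returns the filtered frame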
def datetime_from_exceldate(excel_date):
if type(excel_date) == int:
excel_date = excel_date
elif type(excel_date) == float:
if excel_date > 0:
excel_date = int(excel_date)
else:
return("NA")
elif type(excel_date) == str:
if excel_date.isnumeric():
excel_date = int(excel_date)
else:
return("Error")
else:
return("NA")
return(dt.datetime.fromordinal(dt.datetime(1900, 1, 1).toordinal() + excel_date - 2))
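# Illustrative call (added note, not part of the original module):
# datetime_from_exceldate(44197) -> datetime.datetime(2021, 1, 1, 0, 0)
# (44197 is the Excel serial number for 1 Jan 2021); non-positive or
# non-numeric inputs return "NA" or "Error" instead of raising.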
def datetime_plus_years(date, years):
try:
return(date + dt.timedelta(days=years*365))
except:
return("NA")
def datetime_from_string(x):
try:
return(dt.datetime.strptime(x, "%m/%d/%y"))
except:
return("NA")
def datetime_to_year(date):
try:
return(date.year)
except:
return("NA")
def datetime_to_string(date):
try:
return(dt.datetime.strftime(date, "%m/%d/%y"))
except:
return("NA")
```
#### File: jefalexaudf/profile_product/__init__.py
```python
import re
def profile_ap_sku(x):
'''
Takes in "Enterprise SKU" and if it is for an AP, profiles the type of AP it is.
INPUT:
str : x : Enterprise SKU
OUTPUT:
dict :
"type_1": Access Point
"type_2": AX, AC, etc
"class_type": High/Med/Low
'''
if bool(re.search("C91..AX", x)):
type_1 = "Access Point"
type_2 = 'AX'
if bool(re.search("(.*DNA)|(.*ADJ)", x)):
type_1 = "N/A"
type_2 = 'N/A'
class_type = 'N/A'
elif bool(re.search("C91[01].AX[IEW]", x)):
class_type = "Low"
elif bool(re.search("C9120AX[IE]", x)):
class_type = "Medium"
elif bool(re.search("C913.AX[IE]", x)):
class_type = "High"
else:
type_1 = "N/A"
type_2 = 'N/A'
class_type = 'N/A'
elif bool(re.search("AIR-[C]?AP[1-4]8..", x)):
type_1 = "Access Point"
type_2 = 'AC2'
if bool(re.search("AIR-[C]?AP18..[IEWMT]", x)):
class_type = "Low"
elif bool(re.search("AIR-[C]?AP28..[IE]", x)):
class_type = "Medium"
elif bool(re.search("AIR-[C]?AP38..[IEP]", x)):
class_type = "High"
elif bool(re.search("AIR-[C]?AP48..", x)):
class_type = "High"
else:
class_type = "N/A"
elif bool(re.search("AIR-AP15..", x)):
type_1 = "Access Point"
if bool(re.search("AIR-AP157.", x)):
type_2 = "AC1"
class_type = "Outdoor"
elif bool(re.search("AIR-AP15[46].", x)):
type_2 = "AC2"
class_type = "Outdoor"
else:
type_2 = 'N/A'
class_type = 'N/A'
elif bool(re.search("AIR-[C]?AP[1-3]7..", x)):
type_1 = "Access Point"
type_2 = 'AC1'
if bool(re.search("AIR-[C]?AP17..[IE]", x)):
class_type = "Low"
elif bool(re.search("AIR-[C]?AP27..[IE]", x)):
class_type = "Medium"
elif bool(re.search("AIR-[C]?AP37..[IE]", x)):
class_type = "High"
else:
class_type = 'N/A'
elif bool(re.search("MR.*-HW", x)):
type_1 = "Access Point"
if bool(re.search("MR[2-3][0-9]", x)):
class_type = "Low"
elif bool(re.search("MR4[0-9]", x)):
class_type = "Medium"
elif bool(re.search("MR5[0-9]", x)):
class_type = "High"
elif bool(re.search("MR[7-8][0-9]", x)):
class_type = "Outdoor"
else:
class_type = 'N/A'
if bool(re.search("MR[2-8]0", x)):
type_2 = 'AC1'
elif bool(re.search("MR[2-8][2-3]", x)):
type_2 = 'AC2'
elif bool(re.search("MR[2-8][4-6]", x)):
type_2 = 'AX'
else:
type_2 = 'N/A'
else:
type_1 = "N/A"
type_2 = 'N/A'
class_type = 'N/A'
return({"type_1":type_1, "type_2":type_2, "class_type":class_type})
def profile_switch_sku(x):
'''
Takes in "Enterprise SKU" and if it is for an switch, profiles the type of switch it is.
INPUT:
str : x : Enterprise SKU
OUTPUT:
dict :
str : switch_type
str : port_count
str : port_type
bool : mgig
'''
port_count_exp = re.compile("([0-9]*)([A-Z])(.*)")
port_type_dict = {'T':'Data', 'S':'SFP-1G', 'P':"PoE+", 'U':'UPoE', 'H':'UPoE+', "C":"QSFP28-100G", "Q":"QSFP+-40G", "Y":"SFP28-1/10/25G", "X":"SFP/SPF+-1/10G"}
meraki_port_count_exp = re.compile("([0-9]*)([A-Z]?[A-Z]?)(.*)")
meraki_port_type_dict = {'P':"PoE+", 'LP':"PoE+", 'FP':"PoE+", 'U':'UPoE', 'X':'UPoE', 'UX':'UPoE'}
mgig = False
if bool(re.search("^C9[23]00[LX]?-[0-9]+[A-Z]+", x)):
switch_type = 'stackable'
port_config = x.split("-")[1]
port_count = re.match(port_count_exp, port_config)[1]
port_type = re.match(port_count_exp, port_config)[2]
if port_type in port_type_dict:
port_type = port_type_dict[port_type]
else:
port_type = "N/A"
port_remainder = re.match(port_count_exp, port_config)[3]
if bool(re.search('.*X.*', port_remainder)):
mgig = True
else:
mgig = False
elif bool(re.search("^C94[0-9]+[R-]", x)):
if bool(re.search("^C94[0-9]+R", x)):
switch_type = 'chassis'
port_count = 0
port_type = "N/A"
elif bool(re.search("^C94[0-9]+-SUP", x)):
switch_type = 'supervisor'
port_count = 0
port_type = "N/A"
elif bool(re.search("^C94[0-9]+-LC", x)):
switch_type = 'linecard'
port_config = x.split("-")[2]
port_count = re.match(port_count_exp, port_config)[1]
port_type = re.match(port_count_exp, port_config)[2]
if port_type in port_type_dict:
port_type = port_type_dict[port_type]
else:
port_type = "N/A"
port_remainder = re.match(port_count_exp, port_config)[3]
if bool(re.search('.*X.*', port_remainder)):
mgig = True
else:
mgig = False
else:
switch_type = "N/A"
port_count = 0
port_type = "N/A"
elif bool(re.search("^C9500-[0-9]+", x)):
switch_type = "stackable"
port_config = x.split("-")[1]
port_count = re.match(port_count_exp, port_config)[1]
port_type = re.match(port_count_exp, port_config)[2]
if port_type in port_type_dict:
port_type = port_type_dict[port_type]
else:
port_type = "N/A"
port_remainder = re.match(port_count_exp, port_config)[3]
if bool(re.search('.*X.*', port_remainder)):
mgig = True
else:
mgig = False
elif bool(re.search("^C96[0-9]+[R-]", x)):
if bool(re.search("^C96[0-9]+R", x)):
switch_type = 'chassis'
port_count = 0
port_type = "N/A"
elif bool(re.search("^C96[0-9]+-SUP", x)):
switch_type = 'supervisor'
port_count = 0
port_type = "N/A"
elif bool(re.search("^C96[0-9]+-LC", x)):
switch_type = 'linecard'
port_config = x.split("-")[2]
port_count = re.match(port_count_exp, port_config)[1]
port_type = re.match(port_count_exp, port_config)[2]
if port_type in port_type_dict:
port_type = port_type_dict[port_type]
else:
port_type = "N/A"
port_remainder = re.match(port_count_exp, port_config)[3]
if bool(re.search('.*X.*', port_remainder)):
mgig = True
else:
mgig = False
else:
switch_type = "N/A"
port_count = 0
port_type = "N/A"
elif bool(re.search("^MS.*-HW$", x)):
switch_type = 'stackable'
port_config = x.split("-")[1]
port_count = re.match(meraki_port_count_exp, port_config)[1]
port_type = re.match(meraki_port_count_exp, port_config)[2]
if bool(re.search('.*X.*', port_config)):
mgig = True
else:
mgig = False
if len(port_type) == 0:
if bool(re.search("^MS41.*-HW$", x)):
port_type = "SFP-1G"
elif bool(re.search("^MS42.*-HW$", x)):
port_type = "SFP+-10G"
elif bool(re.search("^MS45.*-HW$", x)):
port_type = "QSFP+-40G"
else:
port_type = "Data"
elif port_type in meraki_port_type_dict:
port_type = meraki_port_type_dict[port_type]
else:
port_type = "N/A"
elif bool(re.search("^GS[1-9].*-HW", x)):
switch_type = 'stackable'
port_config = x.split("-")[1]
port_count = re.match(meraki_port_count_exp, port_config)[1]
port_type = re.match(meraki_port_count_exp, port_config)[2]
if bool(re.search('.*X.*', port_config)):
mgig = True
else:
mgig = False
if len(port_type) == 0:
port_type = "Data"
elif port_type in meraki_port_type_dict:
port_type = meraki_port_type_dict[port_type]
else:
port_type = "N/A"
else:
switch_type = "N/A"
port_count = 0
port_type = "N/A"
return({'switch_type':switch_type, 'port_count':port_count , 'port_type':port_type, 'mgig':mgig})
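# Illustrative SKU string (added note, not taken from the original data):
# profile_switch_sku("C9300-48UXM")
# -> {'switch_type': 'stackable', 'port_count': '48', 'port_type': 'UPoE', 'mgig': True}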
def profile_dna_sku(x):
'''
Takes in "Enterprise SKU" and if it is for DNA, profiles it.
INPUT:
str : x : Enterprise SKU
OUTPUT:
dict :
'license_type': Switching, Wireless, Routing
'buying_type': ALC, EA
'device_type': AP or Switch Type
'license_tier': Ess, Adv, Prem
'years': 3, 5, 7
'''
sw_exp = re.compile("^C([1-9][A-Z,0-9]*)-DNA-.*([E,A,P])(.*)([1,3,5,7])[Y,R]")
air_exp = re.compile("^(AIR|EDU)-DNA-([E,A,P])-([1,3,5,7])[Y,R]")
spaces_exp = re.compile("^D-(CISCODNAS|DNAS)-(.*)-([1-9])[Y,R]")
ea_sw_exp = re.compile("^E2N-C([A-Z,0-9]*)-(.*)-([E,A,P])$")
ea_air_exp = re.compile("^E2N-AIRWLAN-(.*)-([E,A,P])$")
ea_spaces_exp = re.compile("^E2N-DNAS-([A-Z]*)")
if bool(re.search(sw_exp, x)):
m = re.match(sw_exp, x)
license_type = "Switching"
buying_type = "ALC"
device_type = m[1]
license_tier = m[2]
years = m[4]
elif bool(re.search(air_exp, x)):
m = re.match(air_exp, x)
license_type = "Wireless"
buying_type = "ALC"
device_type = "AP"
license_tier = m[2]
years = m[3]
elif bool(re.search(spaces_exp, x)):
m = re.match(spaces_exp, x)
license_type = "Wireless"
buying_type = "ALC"
device_type = "DNA Spaces"
license_tier = m[2]
years = m[3]
elif bool(re.search(ea_sw_exp, x)):
m = re.match(ea_sw_exp, x)
license_type = "Switching"
buying_type = "EA"
device_type = m[1]
license_tier = m[3]
years = 'N/A'
elif bool(re.search(ea_air_exp, x)):
m = re.match(ea_air_exp, x)
license_type = "Wireless"
buying_type = "EA"
device_type = "AP"
license_tier = m[2]
years = 'N/A'
elif bool(re.search(ea_spaces_exp, x)):
m = re.match(ea_spaces_exp, x)
license_type = "Wireless"
buying_type = "EA"
device_type = "DNA Spaces"
license_tier = m[1]
years = 'N/A'
else:
license_type = 'N/A'
buying_type = 'N/A'
device_type = 'N/A'
license_tier = 'N/A'
years = 'N/A'
return({'license_type': license_type, 'buying_type': buying_type, 'device_type': device_type, 'license_tier': license_tier, 'years': years})
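# Illustrative SKU string (added note, not taken from the original data):
# profile_dna_sku("AIR-DNA-A-3Y")
# -> {'license_type': 'Wireless', 'buying_type': 'ALC', 'device_type': 'AP', 'license_tier': 'A', 'years': '3'}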
```
|
{
"source": "JefAlmeida1/Python",
"score": 4
}
|
#### File: Python/CursoemVideo2/tescode.py
```python
from tkinter import *
window = Tk()
def from_kg():
gram = float(e2_value.get()) * 1000
pound = float(e2_value.get()) * 2.20462
ounce = float(e2_value.get()) * 35.274
t1.delete("1.0", END)
t1.insert(END, gram)
t2.delete("1.0", END)
t2.insert(END, pound)
t3.delete("1.0", END)
t3.insert(END, ounce)
e1 = Label(window, text="Enter the weight in Kg")
e2_value = StringVar()
e2 = Entry(window, textvariable=e2_value)
e3 = Label(window, text='Gram')
e4 = Label(window, text='Pounds')
e5 = Label(window, text='Ounce')
t1 = Text(window, height=1, width=20)
t2 = Text(window, height=1, width=20)
t3 = Text(window, height=1, width=20)
b1 = Button(window, text='Convert', command=from_kg)
e1.grid(row=0, column=0)
e2.grid(row=0, column=1)
e3.grid(row=1, column=0)
e4.grid(row=1, column=1)
e5.grid(row=1, column=2)
t1.grid(row=2, column=0)
t2.grid(row=2, column=1)
t3.grid(row=2, column=2)
b1.grid(row=0, column=2)
window.mainloop()
```
|
{
"source": "jefalon/watertap",
"score": 2
}
|
#### File: full_treatment_train/model_components/unit_separator.py
```python
from pyomo.environ import ConcreteModel, Constraint
from idaes.core import FlowsheetBlock
# from idaes.generic_models.unit_models import Separator # replaced separator
from idaes.generic_models.unit_models.separator import SplittingType, EnergySplittingType
from idaes.core.util.scaling import calculate_scaling_factors, set_scaling_factor, constraint_scaling_transform
from watertap.flowsheets.full_treatment_train.model_components import property_models
from watertap.flowsheets.full_treatment_train.util import solve_with_user_scaling, check_dof
from watertap.flowsheets.full_treatment_train.model_components import Separator
def build_SepRO(m, base='TDS'):
"""
Builds RO model based on the IDAES separator.
Requires prop_TDS property package.
"""
prop = property_models.get_prop(m, base=base)
m.fs.RO = Separator(default={
"property_package": prop,
"outlet_list": ['retentate', 'permeate'],
"split_basis": SplittingType.componentFlow,
"energy_split_basis": EnergySplittingType.equal_temperature})
# specify
if base == 'TDS':
m.fs.RO.split_fraction[0, 'permeate', 'H2O'].fix(0.5)
m.fs.RO.split_fraction[0, 'permeate', 'TDS'].fix(0.01)
else:
raise ValueError('Unexpected property base {base} provided to build_SepRO'
''.format(base=base))
# scale
set_scaling_factor(m.fs.RO.split_fraction, 1) # TODO: IDAES should set these scaling factors by default
constraint_scaling_transform(m.fs.RO.sum_split_frac[0.0, 'H2O'], 1)
constraint_scaling_transform(m.fs.RO.sum_split_frac[0.0, 'TDS'], 1)
def build_SepNF(m, base='ion'):
"""
Builds NF model based on the IDAES separator for a specified property base.
Requires prop_ion or prop_salt property package.
"""
prop = property_models.get_prop(m, base=base)
m.fs.NF = Separator(default={
"property_package": prop,
"outlet_list": ['retentate', 'permeate'],
"split_basis": SplittingType.componentFlow,
"energy_split_basis": EnergySplittingType.equal_temperature})
# specify
if base == 'ion':
m.fs.NF.split_fraction[0, 'permeate', 'H2O'].fix(0.9)
m.fs.NF.split_fraction[0, 'permeate', 'Na'].fix(0.9)
m.fs.NF.split_fraction[0, 'permeate', 'Ca'].fix(0.1)
m.fs.NF.split_fraction[0, 'permeate', 'Mg'].fix(0.1)
m.fs.NF.split_fraction[0, 'permeate', 'SO4'].fix(0.1)
# Cl split fraction determined through electro-neutrality for the retentate
charge_dict = {'Na': 1, 'Ca': 2, 'Mg': 2, 'SO4': -2, 'Cl': -1}
m.fs.NF.EN_out = Constraint(
expr=0 ==
sum(charge_dict[j] * m.fs.NF.retentate_state[0].flow_mol_phase_comp['Liq', j]
for j in charge_dict))
constraint_scaling_transform(m.fs.NF.EN_out, 1)
elif base == 'salt':
m.fs.NF.split_fraction[0, 'permeate', 'H2O'].fix(0.9)
m.fs.NF.split_fraction[0, 'permeate', 'NaCl'].fix(0.9)
m.fs.NF.split_fraction[0, 'permeate', 'CaSO4'].fix(0.1)
m.fs.NF.split_fraction[0, 'permeate', 'MgSO4'].fix(0.1)
m.fs.NF.split_fraction[0, 'permeate', 'MgCl2'].fix(0.2)
# scale
set_scaling_factor(m.fs.NF.split_fraction, 1) # TODO: IDAES should set these scaling factors by default
if base == 'ion':
constraint_scaling_transform(m.fs.NF.sum_split_frac[0.0, 'H2O'], 1)
constraint_scaling_transform(m.fs.NF.sum_split_frac[0.0, 'Na'], 1)
constraint_scaling_transform(m.fs.NF.sum_split_frac[0.0, 'Ca'], 1)
constraint_scaling_transform(m.fs.NF.sum_split_frac[0.0, 'Mg'], 1)
constraint_scaling_transform(m.fs.NF.sum_split_frac[0.0, 'SO4'], 1)
constraint_scaling_transform(m.fs.NF.sum_split_frac[0.0, 'Cl'], 1)
elif base == 'salt':
constraint_scaling_transform(m.fs.NF.sum_split_frac[0.0, 'H2O'], 1)
constraint_scaling_transform(m.fs.NF.sum_split_frac[0.0, 'NaCl'], 1)
constraint_scaling_transform(m.fs.NF.sum_split_frac[0.0, 'CaSO4'], 1)
constraint_scaling_transform(m.fs.NF.sum_split_frac[0.0, 'MgSO4'], 1)
constraint_scaling_transform(m.fs.NF.sum_split_frac[0.0, 'MgCl2'], 1)
def solve_SepRO(base='TDS'):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
property_models.build_prop(m, base=base)
build_SepRO(m, base=base)
property_models.specify_feed(m.fs.RO.mixed_state[0], base=base)
check_dof(m)
calculate_scaling_factors(m)
solve_with_user_scaling(m)
m.fs.RO.inlet.display()
m.fs.RO.permeate.display()
m.fs.RO.retentate.display()
return m
def solve_SepNF(base='ion'):
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
property_models.build_prop(m, base=base)
build_SepNF(m, base=base)
property_models.specify_feed(m.fs.NF.mixed_state[0], base=base)
m.fs.NF.mixed_state[0].mass_frac_phase_comp # touching for tests
check_dof(m)
calculate_scaling_factors(m)
solve_with_user_scaling(m)
m.fs.NF.inlet.display()
m.fs.NF.permeate.display()
m.fs.NF.retentate.display()
return m
if __name__ == "__main__":
solve_SepRO(base='TDS')
solve_SepNF(base='ion')
solve_SepNF(base='salt')
```
#### File: flowsheets/RO_with_energy_recovery/RO_with_energy_recovery.py
```python
from pyomo.environ import (ConcreteModel,
SolverFactory,
TerminationCondition,
value,
Constraint,
Expression,
Objective,
Param,
TransformationFactory,
units as pyunits,
assert_optimal_termination)
from pyomo.network import Arc
import pyomo.util.infeasible as infeas
from idaes.core import FlowsheetBlock
from idaes.core.util import get_solver
from idaes.core.util.model_statistics import degrees_of_freedom
from idaes.core.util.initialization import (solve_indexed_blocks,
propagate_state,
fix_state_vars,
revert_state_vars)
from idaes.generic_models.unit_models import Mixer, Separator, Product, Feed
from idaes.generic_models.unit_models.mixer import MomentumMixingType
import idaes.core.util.scaling as iscale
import idaes.logger as idaeslog
import watertap.property_models.NaCl_prop_pack as props
from watertap.unit_models.reverse_osmosis_0D import (ReverseOsmosis0D,
ConcentrationPolarizationType,
MassTransferCoefficient,
PressureChangeType)
from watertap.unit_models.pressure_exchanger import PressureExchanger
from watertap.unit_models.pump_isothermal import Pump
from watertap.util.initialization import assert_degrees_of_freedom
import watertap.flowsheets.RO_with_energy_recovery.financials as financials
def main():
# set up solver
solver = get_solver(options={'nlp_scaling_method': 'user-scaling'})
# build, set, and initialize
m = build()
set_operating_conditions(m, water_recovery=0.5, over_pressure=0.3, solver=solver)
initialize_system(m, solver=solver)
# simulate and display
solve(m, solver=solver)
print('\n***---Simulation results---***')
display_system(m)
display_design(m)
display_state(m)
# optimize and display
optimize_set_up(m)
optimize(m, solver=solver)
print('\n***---Optimization results---***')
display_system(m)
display_design(m)
display_state(m)
def build():
# flowsheet set up
m = ConcreteModel()
m.fs = FlowsheetBlock(default={'dynamic': False})
m.fs.properties = props.NaClParameterBlock()
financials.add_costing_param_block(m.fs)
# unit models
m.fs.feed = Feed(default={'property_package': m.fs.properties})
m.fs.S1 = Separator(default={
"property_package": m.fs.properties,
"outlet_list": ['P1', 'PXR']})
m.fs.P1 = Pump(default={'property_package': m.fs.properties})
m.fs.PXR = PressureExchanger(default={'property_package': m.fs.properties})
m.fs.P2 = Pump(default={'property_package': m.fs.properties})
m.fs.M1 = Mixer(default={
"property_package": m.fs.properties,
"momentum_mixing_type": MomentumMixingType.equality, # booster pump will match pressure
"inlet_list": ['P1', 'P2']})
m.fs.RO = ReverseOsmosis0D(default={
"property_package": m.fs.properties,
"has_pressure_change": True,
"pressure_change_type": PressureChangeType.calculated,
"mass_transfer_coefficient": MassTransferCoefficient.calculated,
"concentration_polarization_type": ConcentrationPolarizationType.calculated,
})
m.fs.product = Product(default={'property_package': m.fs.properties})
m.fs.disposal = Product(default={'property_package': m.fs.properties})
# additional variables or expressions
product_flow_vol_total = m.fs.product.properties[0].flow_vol
m.fs.annual_water_production = Expression(
expr=pyunits.convert(product_flow_vol_total, to_units=pyunits.m ** 3 / pyunits.year)
* m.fs.costing_param.load_factor)
pump_power_total = m.fs.P1.work_mechanical[0] + m.fs.P2.work_mechanical[0]
m.fs.specific_energy_consumption = Expression(
expr=pyunits.convert(pump_power_total, to_units=pyunits.kW)
/ pyunits.convert(product_flow_vol_total, to_units=pyunits.m**3 / pyunits.hr))
# costing
m.fs.P1.get_costing(module=financials, pump_type="High pressure")
m.fs.P2.get_costing(module=financials, pump_type="High pressure")
m.fs.RO.get_costing(module=financials)
m.fs.PXR.get_costing(module=financials)
financials.get_system_costing(m.fs)
# connections
m.fs.s01 = Arc(source=m.fs.feed.outlet, destination=m.fs.S1.inlet)
m.fs.s02 = Arc(source=m.fs.S1.P1, destination=m.fs.P1.inlet)
m.fs.s03 = Arc(source=m.fs.P1.outlet, destination=m.fs.M1.P1)
m.fs.s04 = Arc(source=m.fs.M1.outlet, destination=m.fs.RO.inlet)
m.fs.s05 = Arc(source=m.fs.RO.permeate, destination=m.fs.product.inlet)
m.fs.s06 = Arc(source=m.fs.RO.retentate, destination=m.fs.PXR.high_pressure_inlet)
m.fs.s07 = Arc(source=m.fs.PXR.high_pressure_outlet, destination=m.fs.disposal.inlet)
m.fs.s08 = Arc(source=m.fs.S1.PXR, destination=m.fs.PXR.low_pressure_inlet)
m.fs.s09 = Arc(source=m.fs.PXR.low_pressure_outlet, destination=m.fs.P2.inlet)
m.fs.s10 = Arc(source=m.fs.P2.outlet, destination=m.fs.M1.P2)
TransformationFactory("network.expand_arcs").apply_to(m)
# scaling
# set default property values
m.fs.properties.set_default_scaling('flow_mass_phase_comp', 1, index=('Liq', 'H2O'))
m.fs.properties.set_default_scaling('flow_mass_phase_comp', 1e2, index=('Liq', 'NaCl'))
# set unit model values
iscale.set_scaling_factor(m.fs.P1.control_volume.work, 1e-3)
iscale.set_scaling_factor(m.fs.P2.control_volume.work, 1e-3)
iscale.set_scaling_factor(m.fs.PXR.low_pressure_side.work, 1e-3)
iscale.set_scaling_factor(m.fs.PXR.high_pressure_side.work, 1e-3)
# touch properties used in specifying and initializing the model
m.fs.feed.properties[0].flow_vol_phase['Liq']
m.fs.feed.properties[0].mass_frac_phase_comp['Liq', 'NaCl']
m.fs.S1.mixed_state[0].mass_frac_phase_comp
m.fs.S1.PXR_state[0].flow_vol_phase['Liq']
# unused scaling factors needed by IDAES base costing module
# TODO: update IDAES so that scaling factors are calculated from financial package
iscale.set_scaling_factor(m.fs.P1.costing.purchase_cost, 1)
iscale.set_scaling_factor(m.fs.P2.costing.purchase_cost, 1)
# calculate and propagate scaling factors
iscale.calculate_scaling_factors(m)
return m
def set_operating_conditions(m, water_recovery=0.5, over_pressure=0.3, solver=None):
if solver is None:
solver = get_solver(options={'nlp_scaling_method': 'user-scaling'})
# ---specifications---
# feed
# state variables
m.fs.feed.properties[0].pressure.fix(101325) # feed pressure [Pa]
m.fs.feed.properties[0].temperature.fix(273.15 + 25) # feed temperature [K]
# properties (cannot be fixed for initialization routines, must calculate the state variables)
m.fs.feed.properties.calculate_state(
var_args={('flow_vol_phase', 'Liq'): 1e-3, # feed volumetric flow rate [m3/s]
('mass_frac_phase_comp', ('Liq', 'NaCl')): 0.035}, # feed NaCl mass fraction [-]
hold_state=True, # fixes the calculated component mass flow rates
)
# separator, no degrees of freedom (i.e. equal flow rates in PXR determines split fraction)
# pump 1, high pressure pump, 2 degrees of freedom (efficiency and outlet pressure)
m.fs.P1.efficiency_pump.fix(0.80) # pump efficiency [-]
operating_pressure = calculate_operating_pressure(
feed_state_block=m.fs.feed.properties[0],
over_pressure=over_pressure,
water_recovery=water_recovery,
NaCl_passage=0.01,
solver=solver)
m.fs.P1.control_volume.properties_out[0].pressure.fix(operating_pressure)
# pressure exchanger
m.fs.PXR.efficiency_pressure_exchanger.fix(0.95) # pressure exchanger efficiency [-]
# pump 2, booster pump, 1 degree of freedom (efficiency, pressure must match high pressure pump)
m.fs.P2.efficiency_pump.fix(0.80)
# mixer, no degrees of freedom
# RO unit
m.fs.RO.A_comp.fix(4.2e-12) # membrane water permeability coefficient [m/s-Pa]
m.fs.RO.B_comp.fix(3.5e-8) # membrane salt permeability coefficient [m/s]
m.fs.RO.channel_height.fix(1e-3) # channel height in membrane stage [m]
m.fs.RO.spacer_porosity.fix(0.97) # spacer porosity in membrane stage [-]
m.fs.RO.permeate.pressure[0].fix(101325) # atmospheric pressure [Pa]
m.fs.RO.width.fix(5) # stage width [m]
# initialize RO
m.fs.RO.feed_side.properties_in[0].flow_mass_phase_comp['Liq', 'H2O'] = \
value(m.fs.feed.properties[0].flow_mass_phase_comp['Liq', 'H2O'])
m.fs.RO.feed_side.properties_in[0].flow_mass_phase_comp['Liq', 'NaCl'] = \
value(m.fs.feed.properties[0].flow_mass_phase_comp['Liq', 'NaCl'])
m.fs.RO.feed_side.properties_in[0].temperature = \
value(m.fs.feed.properties[0].temperature)
m.fs.RO.feed_side.properties_in[0].pressure = \
value(m.fs.P1.control_volume.properties_out[0].pressure)
m.fs.RO.area.fix(50) # guess area for RO initialization
m.fs.RO.initialize(optarg=solver.options)
# unfix guessed area, and fix water recovery
m.fs.RO.area.unfix()
m.fs.RO.recovery_mass_phase_comp[0, 'Liq', 'H2O'].fix(water_recovery)
# check degrees of freedom
if degrees_of_freedom(m) != 0:
raise RuntimeError("The set_operating_conditions function resulted in {} "
"degrees of freedom rather than 0. This error suggests "
"that too many or not enough variables are fixed for a "
"simulation.".format(degrees_of_freedom(m)))
def calculate_operating_pressure(feed_state_block=None, over_pressure=0.15,
water_recovery=0.5, NaCl_passage=0.01, solver=None):
"""
    Estimate the operating pressure for the RO unit model given the following arguments:
Arguments:
feed_state_block: the state block of the RO feed that has the non-pressure state
variables initialized to their values (default=None)
over_pressure: the amount of operating pressure above the brine osmotic pressure
represented as a fraction (default=0.15)
water_recovery: the mass-based fraction of inlet H2O that becomes permeate
(default=0.5)
NaCl_passage: the mass-based fraction of inlet NaCl that becomes permeate
(default=0.01)
solver: solver object to be used (default=None)
"""
t = ConcreteModel() # create temporary model
prop = feed_state_block.config.parameters
t.brine = prop.build_state_block([0], default={})
# specify state block
t.brine[0].flow_mass_phase_comp['Liq', 'H2O'].fix(
value(feed_state_block.flow_mass_phase_comp['Liq', 'H2O']) * (1 - water_recovery))
t.brine[0].flow_mass_phase_comp['Liq', 'NaCl'].fix(
value(feed_state_block.flow_mass_phase_comp['Liq', 'NaCl']) * (1 - NaCl_passage))
t.brine[0].pressure.fix(101325) # valid when osmotic pressure is independent of hydraulic pressure
t.brine[0].temperature.fix(value(feed_state_block.temperature))
# calculate osmotic pressure
# since properties are created on demand, we must touch the property to create it
t.brine[0].pressure_osm
# solve state block
results = solve_indexed_blocks(solver, [t.brine])
assert_optimal_termination(results)
return value(t.brine[0].pressure_osm) * (1 + over_pressure)
def solve(blk, solver=None, tee=False):
if solver is None:
solver = get_solver(options={'nlp_scaling_method': 'user-scaling'})
results = solver.solve(blk, tee=tee)
assert_optimal_termination(results)
def initialize_system(m, solver=None):
if solver is None:
solver = get_solver(options={'nlp_scaling_method': 'user-scaling'})
optarg = solver.options
# ---initialize RO---
m.fs.RO.initialize(optarg=optarg)
# ---initialize feed block---
m.fs.feed.initialize(optarg=optarg)
# ---initialize splitter and pressure exchanger---
# pressure exchanger high pressure inlet
propagate_state(m.fs.s06) # propagate to PXR high pressure inlet from RO retentate
m.fs.PXR.high_pressure_side.properties_in.initialize(optarg=optarg)
# splitter inlet
propagate_state(m.fs.s01) # propagate to splitter inlet from feed
m.fs.S1.mixed_state.initialize(optarg=optarg) # initialize inlet state block to solve for mass fraction
# splitter outlet to PXR, enforce same volumetric flow as PXR high pressure inlet
m.fs.S1.PXR_state.calculate_state(
var_args={('flow_vol_phase', 'Liq'): # same volumetric flow rate as PXR high pressure inlet
value(m.fs.PXR.high_pressure_side.properties_in[0].flow_vol_phase['Liq']),
('mass_frac_phase_comp', ('Liq', 'NaCl')):
value(m.fs.S1.mixed_state[0].mass_frac_phase_comp['Liq', 'NaCl']), # same as splitter inlet
('pressure', None): value(m.fs.S1.mixed_state[0].pressure), # same as splitter inlet
('temperature', None): value(m.fs.S1.mixed_state[0].temperature)}, # same as splitter inlet
)
# splitter initialization
m.fs.S1.PXR_state[0].flow_mass_phase_comp['Liq', 'NaCl'].fix() # fix the single degree of freedom for unit
m.fs.S1.initialize(optarg=optarg)
m.fs.S1.PXR_state[0].flow_mass_phase_comp['Liq', 'NaCl'].unfix() # unfix for flowsheet simulation and optimization
# pressure exchanger low pressure inlet
propagate_state(m.fs.s08)
# pressure exchanger initialization
m.fs.PXR.initialize(optarg=optarg)
# ---initialize pump 1---
propagate_state(m.fs.s02)
m.fs.P1.initialize(optarg=optarg)
# ---initialize pump 2---
propagate_state(m.fs.s09)
m.fs.P2.control_volume.properties_out[0].pressure.fix(
value(m.fs.P2.control_volume.properties_out[0].pressure))
m.fs.P2.initialize(optarg=optarg)
m.fs.P2.control_volume.properties_out[0].pressure.unfix()
# ---initialize mixer---
propagate_state(m.fs.s03)
propagate_state(m.fs.s10)
m.fs.M1.initialize(optarg=optarg, outlvl=idaeslog.INFO)
def optimize_set_up(m):
# objective
m.fs.objective = Objective(expr=m.fs.costing.LCOW)
# unfix decision variables and add bounds
# pump 1 and pump 2
m.fs.P1.control_volume.properties_out[0].pressure.unfix()
m.fs.P1.control_volume.properties_out[0].pressure.setlb(10e5)
m.fs.P1.control_volume.properties_out[0].pressure.setub(80e5)
m.fs.P1.deltaP.setlb(0)
m.fs.P2.control_volume.properties_out[0].pressure.setlb(10e5)
m.fs.P2.control_volume.properties_out[0].pressure.setub(80e5)
m.fs.P2.deltaP.setlb(0)
# RO
m.fs.RO.area.setlb(1)
m.fs.RO.area.setub(150)
# additional specifications
m.fs.product_salinity = Param(initialize=500e-6, mutable=True) # product NaCl mass fraction [-]
m.fs.minimum_water_flux = Param(initialize=1./3600., mutable=True) # minimum water flux [kg/m2-s]
# additional constraints
m.fs.eq_product_quality = Constraint(
expr=m.fs.product.properties[0].mass_frac_phase_comp['Liq', 'NaCl'] <= m.fs.product_salinity)
iscale.constraint_scaling_transform(m.fs.eq_product_quality, 1e3) # scaling constraint
m.fs.eq_minimum_water_flux = Constraint(
expr=m.fs.RO.flux_mass_io_phase_comp[0, 'out', 'Liq', 'H2O'] >= m.fs.minimum_water_flux)
# ---checking model---
assert_degrees_of_freedom(m, 1)
def optimize(m, solver=None):
# --solve---
solve(m, solver=solver)
def display_system(m):
print('---system metrics---')
feed_flow_mass = sum(m.fs.feed.flow_mass_phase_comp[0, 'Liq', j].value for j in ['H2O', 'NaCl'])
feed_mass_frac_NaCl = m.fs.feed.flow_mass_phase_comp[0, 'Liq', 'NaCl'].value / feed_flow_mass
print('Feed: %.2f kg/s, %.0f ppm' % (feed_flow_mass, feed_mass_frac_NaCl * 1e6))
prod_flow_mass = sum(m.fs.product.flow_mass_phase_comp[0, 'Liq', j].value for j in ['H2O', 'NaCl'])
prod_mass_frac_NaCl = m.fs.product.flow_mass_phase_comp[0, 'Liq', 'NaCl'].value / prod_flow_mass
print('Product: %.3f kg/s, %.0f ppm' % (prod_flow_mass, prod_mass_frac_NaCl * 1e6))
print('Volumetric recovery: %.1f%%' % (value(m.fs.RO.recovery_vol_phase[0, 'Liq']) * 100))
print('Water recovery: %.1f%%' % (value(m.fs.RO.recovery_mass_phase_comp[0, 'Liq', 'H2O']) * 100))
print('Energy Consumption: %.1f kWh/m3' % value(m.fs.specific_energy_consumption))
print('Levelized cost of water: %.2f $/m3' % value(m.fs.costing.LCOW))
def display_design(m):
print('---decision variables---')
print('Operating pressure %.1f bar' % (m.fs.RO.inlet.pressure[0].value/1e5))
print('Membrane area %.1f m2' % (m.fs.RO.area.value))
print('---design variables---')
print('Separator')
print('Split fraction %.2f' % (m.fs.S1.split_fraction[0, 'PXR'].value*100))
print('Pump 1\noutlet pressure: %.1f bar\npower %.2f kW'
% (m.fs.P1.outlet.pressure[0].value / 1e5, m.fs.P1.work_mechanical[0].value / 1e3))
print('Pump 2\noutlet pressure: %.1f bar\npower %.2f kW'
% (m.fs.P2.outlet.pressure[0].value / 1e5, m.fs.P2.work_mechanical[0].value / 1e3))
def display_state(m):
print('---state---')
def print_state(s, b):
flow_mass = sum(b.flow_mass_phase_comp[0, 'Liq', j].value for j in ['H2O', 'NaCl'])
mass_frac_ppm = b.flow_mass_phase_comp[0, 'Liq', 'NaCl'].value / flow_mass * 1e6
pressure_bar = b.pressure[0].value / 1e5
print(s + ': %.3f kg/s, %.0f ppm, %.1f bar' % (flow_mass, mass_frac_ppm, pressure_bar))
print_state('Feed ', m.fs.feed.outlet)
print_state('Split 1 ', m.fs.S1.P1)
print_state('P1 out ', m.fs.P1.outlet)
print_state('Split 2 ', m.fs.S1.PXR)
print_state('PXR LP out', m.fs.PXR.low_pressure_outlet)
print_state('P2 out ', m.fs.P2.outlet)
print_state('Mix out ', m.fs.M1.outlet)
print_state('RO perm ', m.fs.RO.permeate)
print_state('RO reten ', m.fs.RO.retentate)
print_state('PXR HP out', m.fs.PXR.high_pressure_outlet)
if __name__ == "__main__":
main()
```
#### File: watertap/util/initialization.py
```python
__author__ = "<NAME>"
from pyomo.environ import check_optimal_termination
from idaes.core.util.model_statistics import degrees_of_freedom
import idaes.logger as idaeslog
_log = idaeslog.getLogger(__name__)
def check_solve(results, checkpoint=None, logger=_log, fail_flag=False):
"""
Check that solver termination is optimal and OK in an initialization routine.
If the check fails, proceed through initialization with only a logger warning by default,
or set fail_flag=True to raise an error. This should also work for checking a solve outside
of an initialization routine.
Keyword Arguments:
results : solver results
checkpoint : Optional string argument to specify the step of initialization being checked
(e.g., checkpoint="Initialization step 1: solve indexed blocks")
logger : Optional argument for loading idaes.getInitLogger object (e.g., logger=init_log)
fail_flag : Boolean argument to specify error or warning (Default: fail_flag=False produces logger warning.
set fail_flag=True to raise an error and stop the initialization routine.)
Returns:
None
"""
if check_optimal_termination(results):
if checkpoint is None:
logger.info(f'Solve successful.')
else:
logger.info(f'{checkpoint} successful.')
else:
if checkpoint is None:
msg = f"The solver failed to converge to an optimal solution. " \
f"This suggests that the user provided infeasible inputs or that the model is poorly scaled."
else:
msg = f"{checkpoint} failed. The solver failed to converge to an optimal solution. " \
f"This suggests that the user provided infeasible inputs or that the model is poorly scaled."
if fail_flag:
logger.error(msg)
raise ValueError(msg)
else:
logger.warning(msg)
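# Illustrative usage sketch (not part of the original module): `results` is assumed
# to come from a prior ``solver.solve(blk)`` call and ``init_log`` from
# ``idaeslog.getInitLogger``; raise on a failed initialization step:
#
#     check_solve(results, checkpoint="Initialization step 1", logger=init_log, fail_flag=True)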
def check_dof(blk, fail_flag=False, logger=_log, expected_dof=0):
"""
Check that degrees of freedom are 0, or the expected amount ``expected_dof``.
If not 0 or ``expected_dof``, either throw a warning and continue or throw an error and stop.
Keyword Arguments:
blk : block to check
fail_flag : Boolean argument to specify error or warning
(Default: fail_flag=False produces logger warning. Set fail_flag=True to raise an error and stop
the initialization routine.)
logger : Optional argument for loading idaes.getInitLogger object (e.g., logger=init_log)
expected_dof : Integer number of degrees of freedom ``blk`` should have
Returns:
None
"""
if degrees_of_freedom(blk) != expected_dof:
if expected_dof == 0:
msg = f"Non-zero degrees of freedom: Degrees of freedom on {blk} = {degrees_of_freedom(blk)}. " \
f"Fix {degrees_of_freedom(blk)} more variable(s)"
elif degrees_of_freedom(blk) < expected_dof:
msg = f"Unexpected degrees of freedom: Degrees of freedom on {blk} = {degrees_of_freedom(blk)}. " \
f"Expected {expected_dof}. Unfix {expected_dof - degrees_of_freedom(blk)} variable(s)"
elif degrees_of_freedom(blk) > expected_dof:
msg = f"Unexpected degrees of freedom: Degrees of freedom on {blk} = {degrees_of_freedom(blk)}. " \
f"Expected {expected_dof}. Fix {degrees_of_freedom(blk) - expected_dof} variable(s)"
if fail_flag:
logger.error(msg)
raise ValueError(msg)
else:
logger.warning(msg)
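# Illustrative usage sketch (not part of the original module): `m` is a hypothetical
# flowsheet model; warn (rather than raise) if the model is not fully specified:
#
#     check_dof(m, fail_flag=False, expected_dof=0)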
def assert_degrees_of_freedom(blk, expected_dof):
"""
Assert that degrees of freedom are ``expected_dof``.
If not ``expected_dof``, throw an error and stop.
Keyword Arguments:
blk : block to check
expected_dof : Integer number of degrees of freedom ``blk`` should have
Returns:
None
"""
check_dof(blk, True, expected_dof=expected_dof)
def assert_no_degrees_of_freedom(blk):
"""
Assert that degrees of freedom are 0.
If ``blk`` has non-zero degrees of freedom, throw an error and stop.
Keyword Arguments:
blk : block to check
Returns:
None
"""
check_dof(blk, True)
```
|
{
"source": "jefalon/WindSE",
"score": 2
}
|
#### File: undocumented/LayoutOptimization/LayoutOpt3Dterrain.py
```python
from dolfin import *
from dolfin_adjoint import *
import windse
import numpy as np
parameters['form_compiler']['quadrature_degree'] = 6
set_log_level(20)
### Create an Instance of the Options ###
options = windse.initialize("params3Dterrain.yaml")
### Generate Domain ###
dom = windse.ImportedDomain()
# dom = windse.BoxDomain()
### Generate Wind Farm ###
farm = windse.GridWindFarm(dom)
# farm = windse.RandomWindFarm(dom)
# farm.Plot(False)
### Warp the mesh and refine ###
# dom.Warp(200,0.75)
region = [[-1000,1500],[-1000,1000],[0,300]]
dom.Refine(1,region=region)
# dom.Save()
print(len(dom.mesh.coordinates()[:]))
print(len(farm.dom.mesh.coordinates()[:]))
print(farm.dom.mesh.hmin())
# exit()
### Function Space ###
fs = windse.LinearFunctionSpace(dom)
print(fs.Q.dim())
### Setup Boundary Conditions ###
bc = windse.PowerInflow(dom,fs)
### Generate the problem ###
problem = windse.StabilizedProblem(dom,farm,fs,bc)
### Solve ###
solver = windse.SteadySolver(problem)
solver.Solve()
### Output Results ###
# solver.Save()
######
# control = windse.CreateAxialControl(farm.ma,farm)
# bounds = windse.CreateAxialBounds(farm.ma,farm)
control = windse.CreateLayoutControl(farm.mx,farm.my,farm)
bounds = windse.CreateLayoutBounds(farm.mx,farm.my,farm)
J=windse.PowerFunctional(problem.tf,solver.u_next)
# rf=ReducedFunctional(J,control)
# def iter_cb(m):
# # if MPI.rank(mpi_comm_world()) == 0:
# print("m = ")
# for mm in m:
# print("Constant("+ str(mm)+ "),")
# m_opt=minimize(rf, method="L-BFGS-B", options = {"disp": True}, bounds = bounds, callback = iter_cb)
# # m_opt=minimize(rf, method="SLSQP", options = {"disp": True}, bounds = bounds, callback = iter_cb)
# print([float(mm) for mm in m_opt])
# # farm.mx,farm.my=splitSolution(m_opt,farm.numturbs)
# farm.ma = m_opt
# solver.Solve()
mtest=[]
for i in range(farm.numturbs):
mtest.append((farm.mx[i]))
mtest.append((farm.my[i]))
h = [Constant(0.001)]*(2*farm.numturbs) # the direction of the perturbation
Jhat = ReducedFunctional(J, control)
conv_rate = taylor_test(Jhat, mtest, h)
print(conv_rate)
# ### Output Results ###
solver.Save()
```
#### File: undocumented/Legacy/WindSE2D_Dyn.py
```python
from __future__ import print_function
from fenics import *
from mshr import *
import numpy as np
from scipy import integrate
set_log_level(LogLevel.INFO)
T = 500.0 # final time
num_steps = 1000 # number of time steps
dt = T / num_steps # time step size
mu = 16 # dynamic viscosity
rho = 1 # density
save_int = 5
inflowVel=8
# mesh parameters
degree = 2
Lx = 5000
Ly = 5000
nx = 72
ny = 72
RD=126
#WTG parameters
numturbs = 9
#number of inflow direction bins
bins = 1
WTGexp = 6.
radius = RD/2.
thickness = RD/10.
numRefine = 1
A=RD # weird for 2D
HH=80
initExtent=1.
mlDenom=5
restart = False
randStart = False
gridStart = True
optimize = False
loadnuT = False
mesh = RectangleMesh(Point(-Lx/2., -Ly/2.), Point(Lx/2., Ly/2.), nx, ny)
site_x = 1000
site_y = 1000
refine_x = 1100
refine_y = 1100
def refine_mesh(mesh, refine_x, refine_y):
#refines the mesh around the site boundaries
h = mesh.hmin()
cell_markers = MeshFunction('bool',mesh, mesh.topology().dim())
cell_markers.set_all(False)
for cell in cells(mesh):
if (cell.midpoint()[0] > -(refine_x)) and (abs(cell.midpoint()[1]) < refine_y ):
cell_markers[cell] = True
mesh = refine(mesh, cell_markers)
return mesh
def refine_mesh2(mesh, refine_x, refine_y):
#refines the mesh around the site boundaries
h = mesh.hmin()
cell_markers = MeshFunction('bool',mesh, mesh.topology().dim())
cell_markers.set_all(False)
for cell in cells(mesh):
if (cell.midpoint()[0]**2 + cell.midpoint()[1]**2 < refine_x**2+refine_y**2 ):
cell_markers[cell] = True
mesh = refine(mesh, cell_markers)
return mesh
for nums in range(numRefine):
print('refining mesh')
mesh=refine_mesh2(mesh, refine_x, refine_y)
h = mesh.hmin()
Re = Lx*8/mu
print(Re)
print(mesh.hmin())
print(inflowVel*dt/mesh.hmin())
alpha = 7*pi/64
# Define function spaces
V = VectorFunctionSpace(mesh, 'P', 2)
Q = FunctionSpace(mesh, 'P', 1)
print(V.dim())
def WTGdist(x,y):
return np.exp(-((x/thickness)**WTGexp + (y/radius)**WTGexp))
def createLayout(numturbs):
mx=[]
my=[]
mz=[]
if randStart == True:
for i in range(numturbs):
mx.append(Constant(np.random.uniform(low=-(site_x - radius),high=(site_x - radius))))
my.append(Constant(np.random.uniform(low=-(site_y - radius), high=(site_y - radius))))
mz.append(Constant(HH))
elif gridStart ==True:
if numturbs == 16:
rows = 4
cols = 4
xpos = np.linspace(-initExtent*(site_x - radius),initExtent*(site_x - radius),cols)
ypos = np.linspace(-initExtent*(site_y - radius),initExtent*(site_y - radius),rows)
for i in range(rows):
for j in range(cols):
mx.append(Constant(xpos[j]))
my.append(Constant(ypos[i]))
# # some starting noise sometimes helps
# mx.append(Constant(xpos[j]+5.*np.random.randn()))
# my.append(Constant(ypos[i]+5.*np.random.randn()))
mz.append(Constant(HH))
if numturbs == 9:
rows = 3
cols = 3
xpos = np.linspace(-site_x,site_x,cols)
ypos = np.linspace(-site_y,site_y,rows)
for i in range(rows):
for j in range(cols):
mx.append(Constant(xpos[j]))
my.append(Constant(ypos[i]))
# # some starting noise sometimes helps
# mx.append(Constant(xpos[j]+5.*np.random.randn()))
# my.append(Constant(ypos[i]+5.*np.random.randn()))
mz.append(Constant(HH))
if numturbs == 1:
mx.append(Constant(-1500))
my.append(Constant(0))
mz.append(Constant(HH))
if numturbs == 2:
mx.append(Constant(-1500))
mx.append(Constant(-1500 + 7*RD))
my.append(Constant(0))
my.append(Constant(0))
mz.append(Constant(HH))
mz.append(Constant(HH))
if numturbs == 3:
mx.append(Constant(-1000))
mx.append(Constant(0))
mx.append(Constant(1000))
my.append(Constant(0))
my.append(Constant(0))
my.append(Constant(0))
mz.append(Constant(HH))
mz.append(Constant(HH))
mz.append(Constant(HH))
if numturbs == 4:
mx.append(Constant(-1200))
mx.append(Constant(-400))
mx.append(Constant(400))
mx.append(Constant(1200))
my.append(Constant(0))
my.append(Constant(0))
my.append(Constant(0))
my.append(Constant(0))
mz.append(Constant(HH))
mz.append(Constant(HH))
mz.append(Constant(HH))
mz.append(Constant(HH))
return mx, my, mz
def createRotatedTurbineForce(mx,my,ma,A,beta,numturbs,alpha,V):
x=SpatialCoordinate(mesh)
tf = Function(V)
for i in range(numturbs):
WTGbase = project(Expression(("cos(yaw)","-sin(yaw)"),yaw=myaw[i],degree=2),V)
# WTGbase = project(Expression(("0","1"),yaw=myaw[i],degree=2),V)
#rotation
mxrot = cos(alpha)*mx[i] - sin(alpha)*my[i]
myrot = sin(alpha)*mx[i] + cos(alpha)*my[i]
# mxrot=mx[i]
# myrot=my[i]
x_centered=x[0]-mxrot
y_centered=x[1]-myrot
x_centered_rotated = x_centered*cos(myaw[i]) + y_centered*sin(myaw[i])
y_centered_rotated = -x_centered*sin(myaw[i]) + y_centered*cos(myaw[i])
# tf = tf+ 0.0001*exp(-(((x[0] - mx[i])/thickness)**WTGexp +(((x[1] - my[i])**2)/radius**2)**WTGexp))*WTGbase
# tf = tf + 0.5*4.*A*ma[i]/(1.-ma[i])/beta*exp(-((x_centered/thickness)**WTGexp + ((y_centered-radius/2.)/(radius/2.))**WTGexp))*WTGbase
# tf = tf + 0.5*4.*A*ma[i]/(1.-ma[i])/beta*exp(-((x_centered/thickness)**WTGexp + ((y_centered+radius/2.)/(radius/2.))**WTGexp))*WTGbase
tf = tf + 0.5*4.*A*ma[i]/(1.-ma[i])/beta*exp(-((x_centered_rotated/thickness)**WTGexp + ((y_centered_rotated-radius/2.)/(radius/2.))**WTGexp))*WTGbase
tf = tf + 0.5*4.*A*ma[i]/(1.-ma[i])/beta*exp(-((x_centered_rotated/thickness)**WTGexp + ((y_centered_rotated+radius/2.)/(radius/2.))**WTGexp))*WTGbase
# tf = tf + 0.5*4.*A*ma[i]/(1.-ma[i])/beta*exp(-(((x[0]*cos(myaw[i]) - x - mxrot)/thickness)**WTGexp + ((x[1] - myrot-radius/2.)/(radius/2.))**WTGexp))*WTGbase
# tf = tf + 0.5*4.*A*ma[i]/(1.-ma[i])/beta*exp(-(((x[0]*cos(myaw[i]) - mxrot)/thickness)**WTGexp + ((x[1] - myrot+radius/2.)/(radius/2.))**WTGexp))*WTGbase
return tf
#boundary conditions
class walls(SubDomain):
def inside(self, x, on_boundary):
return near(x[1]**2 - (Ly/2.)**2, 0.) and on_boundary
class inflow(SubDomain):
def inside(self, x, on_boundary):
return near(x[0],-(Lx/2.)) and on_boundary
class outflow(SubDomain):
def inside(self, x, on_boundary):
return near(x[0],Lx/2.) and on_boundary
wavenum=2*pi/(Ly/4.)
wavenum2=2*pi/(Ly/4.)
freq=2*pi/200.
wavenummod=wavenum
wavenum2mod=wavenum2
freqmod=freq
inflowExpr=Expression(("inflowVel + 0.1*sin(freq*t + wavenum*x[1])","0. + 0.1*sin(freq*t + wavenum2*x[1]) "), inflowVel=inflowVel,t=0,wavenum=wavenum,wavenum2=wavenum2,freq=freq,degree=2)
# inflowExpr=Expression(("inflowVel + 0.05*sin(2*pi*t/100. + wavenum*x[1]) + perturbx*0.2*sin(2*pi*t/100. + wavenum2*x[1]+pi/2.)","0. + 0.01*sin(2*pi*t/100. + wavenum*x[1])+ perturby*0.2*sin(2*pi*t/100. + wavenum2*x[1])"), inflowVel=inflowVel,t=0,perturbx=0,perturby=0,wavenum=wavenum,wavenum2=wavenum2,degree=2)
# inflowExpr=Expression(("inflowVel + 0.5*sin(2*pi*t/100. + wavenum*x[1])","0. + 0.25*sin(2*pi*t/100.)"), inflowVel=inflowVel,t=0,wavenum=wavenum,degree=2)
# inflowExpr=Expression(("inflowVel","0."), inflowVel=inflowVel,degree=2)
# lateral BC
bcu_inflow = DirichletBC(V, inflowExpr, inflow())
# bcu_walls = DirichletBC(V, Expression(("0","0."), inflowVel=inflowVel,degree=2), walls())
bcp_outflow = DirichletBC(Q, Constant(0), outflow())
# bc1a = DirichletBC(V.sub(1), Constant(0.0), NoSlipBoundary())
# inflow BC
# bc2 = DirichletBC(V, Constant((inflowVel,0.0)), InflowBoundary())
# bc2a = DirichletBC(VQ.sub(0).sub(0), Constant(8.), InflowBoundary())
# bcp = [DirichletBC(Q, Constant(0), OutflowBoundary())]
bcp=[bcp_outflow]
# bcu = [bcu_inflow,bcu_walls]
bcu = [bcu_inflow]
# Define trial and test functions
u = TrialFunction(V)
v = TestFunction(V)
p = TrialFunction(Q)
q = TestFunction(Q)
# Define functions for solutions at previous and current time steps
u_n = Function(V)
u_ = Function(V)
p_n = Function(Q)
p_ = Function(Q)
# Define expressions used in variational forms
U = 0.5*(u_n + u)
n = FacetNormal(mesh)
f = Constant((0, 0))
k = Constant(dt)
mu = Constant(mu)
rho = Constant(rho)
mx,my,mz = createLayout(numturbs)
ma=[Constant(mm) for mm in 0.33*np.ones(numturbs)]
# right hand rule from above
# myaw=[Constant(pi/8.),Constant(0),Constant(0)]
yaw=0
myaw = [Constant(mm) for mm in (yaw*pi/180.)*np.ones(numturbs)]
beta = integrate.dblquad(WTGdist,-3*radius,3*radius,lambda x: -3*radius,lambda x: 3*radius)
B=beta[0]
f = createRotatedTurbineForce(mx,my,ma,A,B,numturbs,alpha,V)
# Define symmetric gradient
def epsilon(u):
return sym(nabla_grad(u))
# Define stress tensor
def sigma(u, p):
return 2*mu*epsilon(u) - p*Identity(len(u))
# Define variational problem for step 1
F1 = rho*dot((u - u_n) / k, v)*dx \
+ rho*dot(dot(u_n, nabla_grad(u_n)), v)*dx \
+ inner(sigma(U, p_n), epsilon(v))*dx \
+ dot(p_n*n, v)*ds - dot(mu*nabla_grad(U)*n, v)*ds \
+ dot(f*(cos(myaw[0])**2*u_n[0]*u_n[0]+sin(myaw[0])**2*u_n[1]*u_n[1]), v)*dx # inner? other form of vel?
a1 = lhs(F1)
L1 = rhs(F1)
# Define variational problem for step 2
a2 = dot(nabla_grad(p), nabla_grad(q))*dx
L2 = dot(nabla_grad(p_n), nabla_grad(q))*dx - (1/k)*div(u_)*q*dx
# Define variational problem for step 3
a3 = dot(u, v)*dx
L3 = dot(u_, v)*dx - k*dot(nabla_grad(p_ - p_n), v)*dx
# Assemble matrices
A1 = assemble(a1)
A2 = assemble(a2)
A3 = assemble(a3)
# Apply boundary conditions to matrices
[bc.apply(A1) for bc in bcu]
[bc.apply(A2) for bc in bcp]
# Create XDMF files for visualization output
# ufile = File('output/fields/velocity_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.pvd')
# pfile = File('output/fields/pressure_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.pvd')
xdmffile_u = XDMFFile('output/velocity_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.xdmf')
xdmffile_p = XDMFFile('output/pressure_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.xdmf')
# # xdmffile_tf = XDMFFile('2DDynamic/turbine_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.xdmf')
# # Create time series (for use in reaction_system.py)
# timeseries_u = TimeSeries('output/velocity_series_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.xdmf')
# timeseries_p = TimeSeries('output/pressure_series_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.xdmf')
# # Save mesh to file (for use in reaction_system.py)
# File('navier_stokes_cylinder/cylinder.xml.gz') << mesh
# Create progress bar
# progress = Progress('Time-stepping')
# set_log_level(PROGRESS)
# ufile = File('output/u_'+str(float(mu))+'.pvd')
# pfile = File('output/p_'+str(float(mu))+'.pvd')
# DoF=len(u_.vector()[:])
# snapshots = np.zeros((DoF,int(num_steps/save_int)))
# uInterp = Function(V)
# uInterp=project(Expression(("x[0]","x[1]"),degree=2),V)
# basePositions=uInterp.vector()[:]
# np.save('output/basePositions_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha),basePositions)
# Time-stepping
t = 0
count=0
for n in range(num_steps):
# Update current time
t += dt
# bcu_inflow.perturbx=.1*np.random.rand()
# bcu_inflow.perturby=.1*np.random.rand()
inflowExpr.t=t
# wavenummod = wavenummod + .01*np.random.randn()*wavenum
# wavenum2mod = wavenum2mod+ .01*np.random.randn()*wavenum2
# freqmod = freqmod+ .01*np.random.randn()*wavenum2
# inflowExpr.wavenum=wavenummod
# inflowExpr.wavenum2=wavenum2mod
# inflowExpr.freq=freqmod
bcu_inflow = DirichletBC(V, inflowExpr, inflow())
bcu=[bcu_inflow]
# Step 1: Tentative velocity step
b1 = assemble(L1)
[bc.apply(b1) for bc in bcu]
solve(A1, u_.vector(), b1, 'bicgstab', 'hypre_amg')
# Step 2: Pressure correction step
b2 = assemble(L2)
[bc.apply(b2) for bc in bcp]
solve(A2, p_.vector(), b2, 'bicgstab', 'hypre_amg')
# Step 3: Velocity correction step
b3 = assemble(L3)
solve(A3, u_.vector(), b3, 'cg', 'sor')
# Update previous solution
u_n.assign(u_)
p_n.assign(p_)
if n % save_int ==0:
# Save solution to file (XDMF/HDF5)
# ufile << u_
# pfile << p_
xdmffile_u.write(u_, t)
xdmffile_p.write(p_, t)
# xdmffile_tf.write(project(f,V),t)
# # Save nodal values to file
# timeseries_u.store(u_.vector(), t)
# timeseries_p.store(p_.vector(), t)
# snapshots[:,count]=u_.vector()[:]
print(t)
# print(wavenummod/wavenum)
# print(wavenum2mod/wavenum2)
# print(freqmod/freq)
count+=1
# # Update progress bar
# progress.update(t / T)
# print('u max:', u_.vector().array().max())
# Hold plot
# interactive()
# np.save('output/snapshots'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha),snapshots)
```
#### File: undocumented/YawOptimization/YawOpt.py
```python
from dolfin import *
from dolfin_adjoint import *
import windse
import numpy as np
parameters['form_compiler']['quadrature_degree'] = 6
set_log_level(20)
### Create an Instance of the Options ###
windse.initialize("params.yaml")
### Generate Domain ###
dom = windse.BoxDomain()
### Generate Wind Farm ###
farm = windse.ImportedWindFarm(dom)
# farm.Plot()
### Warp the mesh and refine ###
dom.Warp(200,0.75)
# dom.Save()
print(len(dom.mesh.coordinates()[:]))
print(len(farm.dom.mesh.coordinates()[:]))
### Function Space ###
fs = windse.LinearFunctionSpace(dom)
print(fs.W.dim())
### Setup Boundary Conditions ###
bc = windse.PowerInflow(dom,fs)
### Generate the problem ###
problem = windse.StabilizedProblem(dom,farm,fs,bc)
### Solve ###
solver = windse.SteadySolver(problem)
solver.Solve()
control = windse.CreateYawControl(farm.myaw, farm)
bounds = windse.CreateYawBounds(farm.ma, farm)
J=windse.PowerFunctional(problem.tf,solver.u_next)
rf=ReducedFunctional(J,control)
print(J)
print(float(J))
dJdma= compute_gradient(J, control, options={"newton_solver":{"linear_solver": "mumps"}})
print([float(dd) for dd in dJdma])
# def iter_cb(m):
# # if MPI.rank(mpi_comm_world()) == 0:
# print("m = ")
# for mm in m:
# print("Constant("+ str(mm)+ "),")
# m_opt=minimize(rf, method="L-BFGS-B", options = {"disp": True}, bounds = bounds, callback = iter_cb)
# print([float(mm) for mm in m_opt])
# farm.ma = m_opt
# solver.Solve()
# h = [Constant(0.001),Constant(0.001)] # the direction of the perturbation
# Jhat = ReducedFunctional(J, control)
# conv_rate = taylor_test(Jhat, farm.ma, h)
# print(conv_rate)
### Output Results ###
solver.Save()
print("finished")
```
#### File: doc/source/pylit.py
```python
_version = "0.7.9"
__docformat__ = 'restructuredtext'
# Introduction
# ------------
#
# PyLit is a bidirectional converter between two formats of a computer
# program source:
#
# * a (reStructured) text document with program code embedded in
# *code blocks*, and
# * a compilable (or executable) code source with *documentation*
# embedded in comment blocks
#
#
# Requirements
# ------------
#
# ::
import os, sys
import re, optparse
# DefaultDict
# ~~~~~~~~~~~
# As `collections.defaultdict` is only introduced in Python 2.5, we
# define a simplified version of the dictionary with default from
# http://code.activestate.com/recipes/389639/
# ::
class DefaultDict(dict):
"""Minimalistic Dictionary with default value."""
def __init__(self, default=None, *args, **kwargs):
self.update(dict(*args, **kwargs))
self.default = default
def __getitem__(self, key):
return self.get(key, self.default)
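# An illustrative doctest-style example (not from the original source): positional
# and keyword arguments behave like the builtin ``dict``, while missing keys fall
# back to the first argument:
#
# >>> d = DefaultDict(0, a=1)
# >>> d['a'], d['b']
# (1, 0)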
# Defaults
# ========
#
# The `defaults` object provides a central repository for default
# values and their customisation. ::
defaults = optparse.Values()
# It is used for
#
# * the initialisation of data arguments in TextCodeConverter_ and
# PylitOptions_
#
# * completion of command line options in `PylitOptions.complete_values`_.
#
# This allows the easy creation of back-ends that customise the
# defaults and then call `main`_ e.g.:
#
# >>> import pylit
# >>> pylit.defaults.comment_string = "## "
# >>> pylit.defaults.codeindent = 4
# >>> pylit.main()
#
# The following default values are defined in pylit.py:
#
# languages
# ---------
#
# Mapping of code file extensions to code language::
defaults.languages = DefaultDict("python", # fallback language
{".c": "c",
".cc": "c++",
".css": "css",
".py": "python",
".sh": "shell",
".sl": "slang",
".sty": "latex",
".tex": "latex"
})
# Will be overridden by the ``--language`` command line option.
#
# The first argument is the fallback language, used if there is no
# matching extension (e.g. if pylit is used as filter) and no
# ``--language`` is specified. It can be changed programmatically by
# assignment to the ``.default`` attribute, e.g.
#
# >>> defaults.languages.default='c++'
#
#
# .. _text_extension:
#
# text_extensions
# ---------------
#
# List of known extensions of (reStructured) text files. The first
# extension in this list is used by the `_get_outfile_name`_ method to
# generate a text output filename::
defaults.text_extensions = [".txt", ".rst"]
# comment_strings
# ---------------
#
# Comment strings for known languages. Used in Code2Text_ to recognise
# text blocks and in Text2Code_ to format text blocks as comments.
# Defaults to ``'# '``.
#
# **Comment strings include trailing whitespace.** ::
defaults.comment_strings = DefaultDict('# ',
{"css": '// ',
"c": '// ',
"c++": '// ',
"latex": '% ',
"python": '# ',
"shell": '# ',
"slang": '% '
})
# header_string
# -------------
#
# Marker string for a header code block in the text source. No trailing
# whitespace needed as indented code follows.
# Must be a valid rst directive that accepts code on the same line, e.g.
# ``'..admonition::'``.
#
# Default is a comment marker::
defaults.header_string = '..'
# .. _code_block_marker:
#
# code_block_markers
# ------------------
#
# Markup at the end of a documentation block.
# Default is Docutils' marker for a `literal block`_::
defaults.code_block_markers = DefaultDict('::')
# The `code_block_marker` string is `inserted into a regular expression`_.
# Language-specific markers can be defined programmatically, e.g. in a
# wrapper script.
#
# In a document where code examples are only one of several uses of
# literal blocks, it is more appropriate to single out the source code,
# e.g. with the double colon on a separate line ("expanded form")
#
# ``defaults.code_block_marker.default = ':: *'``
#
# or a dedicated ``.. code-block::`` directive [#]_
#
# ``defaults.code_block_marker['c++'] = '.. code-block:: *c++'``
#
# The latter form also allows code in different languages kept together
# in one literate source file.
#
# .. [#] The ``.. code-block::`` directive is not (yet) supported by
# standard Docutils. It is provided by several add-ons, including
# the `code-block directive`_ project in the Docutils Sandbox and
# Sphinx_.
#
#
# strip
# -----
#
# Export to the output format stripping documentation or code blocks::
defaults.strip = False
# strip_marker
# ------------
#
# Strip literal marker from the end of documentation blocks when
# converting to code format. Makes the code more concise but loses the
# synchronisation of line numbers in text and code formats. Can also be used
# (together with the auto-completion of the code-text conversion) to change
# the `code_block_marker`::
defaults.strip_marker = False
# add_missing_marker
# ------------------
#
# When converting from code format to text format, add a `code_block_marker`
# at the end of documentation blocks if it is missing::
defaults.add_missing_marker = True
# Keep this at ``True``, if you want to re-convert to code format later!
#
#
# .. _defaults.preprocessors:
#
# preprocessors
# -------------
#
# Preprocess the data with language-specific filters_
# Set below in Filters_::
defaults.preprocessors = {}
# .. _defaults.postprocessors:
#
# postprocessors
# --------------
#
# Postprocess the data with language-specific filters_::
defaults.postprocessors = {}
# .. _defaults.codeindent:
#
# codeindent
# ----------
#
# Number of spaces to indent code blocks in `Code2Text.code_block_handler`_::
defaults.codeindent = 2
# In `Text2Code.code_block_handler`_, the codeindent is determined by the
# first recognised code line (header or first indented literal block
# of the text source).
#
# overwrite
# ---------
#
# What to do if the outfile already exists? (ignored if `outfile` == '-')::
defaults.overwrite = 'update'
# Recognised values:
#
# :'yes': overwrite eventually existing `outfile`,
# :'update': fail if the `outfile` is newer than `infile`,
# :'no': fail if `outfile` exists.
#
#
# Extensions
# ==========
#
# Try to import optional extensions::
try:
import pylit_elisp
except ImportError:
pass
# Converter Classes
# =================
#
# The converter classes implement a simple state machine to separate and
# transform documentation and code blocks. For this task, only a very limited
# parsing is needed. PyLit's parser assumes:
#
# * `indented literal blocks`_ in a text source are code blocks.
#
# * comment blocks in a code source where every line starts with a matching
# comment string are documentation blocks.
#
# TextCodeConverter
# -----------------
# ::
class TextCodeConverter(object):
"""Parent class for the converters `Text2Code` and `Code2Text`.
"""
# The parent class defines data attributes and functions used in both
# `Text2Code`_ converting a text source to executable code source, and
# `Code2Text`_ converting commented code to a text source.
#
# Data attributes
# ~~~~~~~~~~~~~~~
#
# Class default values are fetched from the `defaults`_ object and can be
# overridden by matching keyword arguments during class instantiation. This
# also works with keyword arguments to `get_converter`_ and `main`_, as these
# functions pass on unused keyword args to the instantiation of a converter
# class. ::
language = defaults.languages.default
comment_strings = defaults.comment_strings
comment_string = "" # set in __init__ (if empty)
codeindent = defaults.codeindent
header_string = defaults.header_string
code_block_markers = defaults.code_block_markers
code_block_marker = "" # set in __init__ (if empty)
strip = defaults.strip
strip_marker = defaults.strip_marker
add_missing_marker = defaults.add_missing_marker
directive_option_regexp = re.compile(r' +:(\w|[-._+:])+:( |$)')
state = "" # type of current block, see `TextCodeConverter.convert`_
# Interface methods
# ~~~~~~~~~~~~~~~~~
#
# .. _TextCodeConverter.__init__:
#
# __init__
# """"""""
#
# Initialising sets the `data` attribute, an iterable object yielding lines of
# the source to convert. [#]_
#
# .. [#] The most common choice of data is a `file` object with the text
# or code source.
#
# To convert a string into a suitable object, use its splitlines method
# like ``"2 lines\nof source".splitlines(True)``.
#
#
# Additional keyword arguments are stored as instance variables,
# overwriting the class defaults::
def __init__(self, data, **keyw):
"""data -- iterable data object
(list, file, generator, string, ...)
**keyw -- remaining keyword arguments are
stored as data-attributes
"""
self.data = data
self.__dict__.update(keyw)
# If empty, `code_block_marker` and `comment_string` are set according
# to the `language`::
if not self.code_block_marker:
self.code_block_marker = self.code_block_markers[self.language]
if not self.comment_string:
self.comment_string = self.comment_strings[self.language]
self.stripped_comment_string = self.comment_string.rstrip()
# Pre- and postprocessing filters are set (with
# `TextCodeConverter.get_filter`_)::
self.preprocessor = self.get_filter("preprocessors", self.language)
self.postprocessor = self.get_filter("postprocessors", self.language)
# .. _inserted into a regular expression:
#
# Finally, a regular_expression for the `code_block_marker` is compiled
# to find valid cases of `code_block_marker` in a given line and return
# the groups: ``\1 prefix, \2 code_block_marker, \3 remainder`` ::
marker = self.code_block_marker
if marker == '::':
# the default marker may occur at the end of a text line
self.marker_regexp = re.compile('^( *(?!\.\.).*)(::)([ \n]*)$')
else:
# marker must be on a separate line
self.marker_regexp = re.compile('^( *)(%s)(.*\n?)$' % marker)
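# For illustration only (not part of the original source), the default marker
# regexp splits a line with a trailing double colon into prefix, marker, and
# remainder:
#
# >>> TextCodeConverter([]).marker_regexp.search("some text::\n").groups()
# ('some text', '::', '\n')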
# .. _TextCodeConverter.__iter__:
#
# __iter__
# """"""""
#
# Return an iterator for the instance. Iteration yields lines of converted
# data.
#
# The iterator is a chain of iterators acting on `self.data` that does
#
# * preprocessing
# * text<->code format conversion
# * postprocessing
#
# Pre- and postprocessing are only performed, if filters for the current
# language are registered in `defaults.preprocessors`_ and|or
# `defaults.postprocessors`_. The filters must accept an iterable as first
# argument and yield the processed input data line-wise.
# ::
def __iter__(self):
"""Iterate over input data source and yield converted lines
"""
return self.postprocessor(self.convert(self.preprocessor(self.data)))
# .. _TextCodeConverter.__call__:
#
# __call__
# """"""""
# The special `__call__` method allows the use of class instances as callable
# objects. It returns the converted data as list of lines::
def __call__(self):
"""Iterate over state-machine and return results as list of lines"""
return [line for line in self]
# .. _TextCodeConverter.__str__:
#
# __str__
# """""""
# Return converted data as string::
def __str__(self):
return "".join(self())
# Helpers and convenience methods
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# .. _TextCodeConverter.convert:
#
# convert
# """""""
#
# The `convert` method generates an iterator that does the actual code <-->
# text format conversion. The converted data is yielded line-wise and the
# instance's `status` argument indicates whether the current line is "header",
# "documentation", or "code_block"::
def convert(self, lines):
"""Iterate over lines of a program document and convert
between "text" and "code" format
"""
# Initialise internal data arguments. (Done here, so that every new iteration
# re-initialises them.)
#
# `state`
# the "type" of the currently processed block of lines. One of
#
# :"": initial state: check for header,
# :"header": leading code block: strip `header_string`,
# :"documentation": documentation part: comment out,
# :"code_block": literal blocks containing source code: unindent.
#
# ::
self.state = ""
# `_codeindent`
# * Do not confuse the internal attribute `_codeindent` with the configurable
# `codeindent` (without the leading underscore).
# * `_codeindent` is set in `Text2Code.code_block_handler`_ to the indent of
# first non-blank "code_block" line and stripped from all "code_block" lines
# in the text-to-code conversion,
# * `codeindent` is set in `__init__` to `defaults.codeindent`_ and added to
# "code_block" lines in the code-to-text conversion.
#
# ::
self._codeindent = 0
# `_textindent`
# * set by `Text2Code.documentation_handler`_ to the minimal indent of a
# documentation block,
# * used in `Text2Code.set_state`_ to find the end of a code block.
#
# ::
self._textindent = 0
# `_add_code_block_marker`
# If the last paragraph of a documentation block does not end with a
# code_block_marker_, it should be added (otherwise, the back-conversion
# fails.).
#
# `_add_code_block_marker` is set by `Code2Text.documentation_handler`_
# and evaluated by `Code2Text.code_block_handler`_, because the
# documentation_handler does not know whether the next block will be
# documentation (with no need for a code_block_marker) or a code block.
#
# ::
self._add_code_block_marker = False
# Determine the state of the block and convert with the matching "handler"::
for block in collect_blocks(expandtabs_filter(lines)):
self.set_state(block)
for line in getattr(self, self.state+"_handler")(block):
yield line
# .. _TextCodeConverter.get_filter:
#
# get_filter
# """"""""""
# ::
def get_filter(self, filter_set, language):
"""Return language specific filter"""
if self.__class__ == Text2Code:
key = "text2"+language
elif self.__class__ == Code2Text:
key = language+"2text"
else:
key = ""
try:
return getattr(defaults, filter_set)[key]
except (AttributeError, KeyError):
# print "there is no %r filter in %r"%(key, filter_set)
pass
return identity_filter
# get_indent
# """"""""""
# Return the number of leading spaces in `line`::
def get_indent(self, line):
"""Return the indentation of `string`.
"""
return len(line) - len(line.lstrip())
# Text2Code
# ---------
#
# The `Text2Code` converter separates *code-blocks* [#]_ from *documentation*.
# Code blocks are unindented, documentation is commented (or filtered, if the
# ``strip`` option is True).
#
# .. [#] Only `indented literal blocks`_ are considered code-blocks. `quoted
# literal blocks`_, `parsed-literal blocks`_, and `doctest blocks`_ are
# treated as part of the documentation. This allows the inclusion of
# examples:
#
# >>> 23 + 3
# 26
#
# Mark that there is no double colon before the doctest block in the
# text source.
#
# The class inherits the interface and helper functions from
# TextCodeConverter_ and adds functions specific to the text-to-code format
# conversion::
class Text2Code(TextCodeConverter):
"""Convert a (reStructured) text source to code source
"""
# .. _Text2Code.set_state:
#
# set_state
# ~~~~~~~~~
# ::
def set_state(self, block):
"""Determine state of `block`. Set `self.state`
"""
# `set_state` is used inside an iteration. Hence, if we are out of data, a
# StopIteration exception should be raised::
if not block:
raise StopIteration
# The new state depends on the active state (from the last block) and
# features of the current block. It is either "header", "documentation", or
# "code_block".
#
# If the current state is "" (first block), check for
# the `header_string` indicating a leading code block::
if self.state == "":
# print "set state for %r"%block
if block[0].startswith(self.header_string):
self.state = "header"
else:
self.state = "documentation"
# If the current state is "documentation", the next block is also
# documentation. The end of a documentation part is detected in the
# `Text2Code.documentation_handler`_::
# elif self.state == "documentation":
# self.state = "documentation"
# A "code_block" ends with the first less indented, non-blank line.
# `_textindent` is set by the documentation handler to the indent of the
# preceding documentation block::
elif self.state in ["code_block", "header"]:
indents = [self.get_indent(line) for line in block
if line.rstrip()]
# print "set_state:", indents, self._textindent
if indents and min(indents) <= self._textindent:
self.state = 'documentation'
else:
self.state = 'code_block'
# TODO: (or not to do?) insert blank line before the first line with too-small
# codeindent using self.ensure_trailing_blank_line(lines, line) (would need
# split and push-back of the documentation part)?
#
# .. _Text2Code.header_handler:
#
# header_handler
# ~~~~~~~~~~~~~~
#
# Sometimes code needs to remain on the first line(s) of the document to be
# valid. The most common example is the "shebang" line that tells a POSIX
# shell how to process an executable file::
#!/usr/bin/env python
# In Python, the special comment to indicate the encoding, e.g.
# ``# -*- coding: iso-8859-1 -*-``, must occur before any other comment
# or code too.
#
# If we want to keep the line numbers in sync for text and code source, the
# reStructured Text markup for these header lines must start at the same line
# as the first header line. Therefore, header lines could not be marked as
# literal block (this would require the ``::`` and an empty line above the
# code_block).
#
# OTOH, a comment may start at the same line as the comment marker and it
# includes subsequent indented lines. Comments are visible in the reStructured
# Text source but hidden in the pretty-printed output.
#
# With a header converted to comment in the text source, everything before
# the first documentation block (i.e. before the first paragraph using the
# matching comment string) will be hidden away (in HTML or PDF output).
#
# This seems a good compromise, the advantages
#
# * line numbers are kept
# * the "normal" code_block conversion rules (indent/unindent by `codeindent` apply
# * greater flexibility: you can hide a repeating header in a project
# consisting of many source files.
#
# set off the disadvantages
#
# - it may come as a surprise if a part of the file is not "printed",
# - one more syntax element to learn for rst newbies to start with pylit,
# (however, starting from the code source, this will be auto-generated)
#
# In the case that there is no matching comment at all, the complete code
# source will become a comment -- however, in this case it is not very likely
# the source is a literate document anyway.
#
# If needed for the documentation, it is possible to quote the header in (or
# after) the first documentation block, e.g. as `parsed literal`.
# ::
def header_handler(self, lines):
"""Format leading code block"""
# strip header string from first line
lines[0] = lines[0].replace(self.header_string, "", 1)
# yield remaining lines formatted as code-block
for line in self.code_block_handler(lines):
yield line
# .. _Text2Code.documentation_handler:
#
# documentation_handler
# ~~~~~~~~~~~~~~~~~~~~~
#
# The 'documentation' handler processes everything that is not recognised as
# "code_block". Documentation is quoted with `self.comment_string`
# (or filtered with `--strip=True`).
#
# If end-of-documentation marker is detected,
#
# * set state to 'code_block'
# * set `self._textindent` (needed by `Text2Code.set_state`_ to find the
# next "documentation" block)
#
# ::
def documentation_handler(self, lines):
"""Convert documentation blocks from text to code format
"""
for line in lines:
# test lines following the code-block marker for false positives
if (self.state == "code_block" and line.rstrip()
and not self.directive_option_regexp.search(line)):
self.state = "documentation"
# test for end of documentation block
if self.marker_regexp.search(line):
self.state = "code_block"
self._textindent = self.get_indent(line)
# yield lines
if self.strip:
continue
# do not comment blank lines preceding a code block
if self.state == "code_block" and not line.rstrip():
yield line
else:
yield self.comment_string + line
# .. _Text2Code.code_block_handler:
#
# code_block_handler
# ~~~~~~~~~~~~~~~~~~
#
# The "code_block" handler is called with an indented literal block. It
# removes leading whitespace up to the indentation of the first code line in
# the file (this deviation from Docutils behaviour allows indented blocks of
# Python code). ::
def code_block_handler(self, block):
"""Convert indented literal blocks to source code format
"""
# If still unset, determine the indentation of code blocks from first non-blank
# code line::
if self._codeindent == 0:
self._codeindent = self.get_indent(block[0])
# Yield unindented lines after checking whether we can safely unindent. If the
# line is less indented than `_codeindent`, something went wrong. ::
for line in block:
if line.lstrip() and self.get_indent(line) < self._codeindent:
raise ValueError("code block contains line less indented " \
"than %d spaces \n%r"%(self._codeindent, block))
yield line.replace(" "*self._codeindent, "", 1)
# Code2Text
# ---------
#
# The `Code2Text` converter does the opposite of `Text2Code`_ -- it processes
# a source in "code format" (i.e. in a programming language), extracts
# documentation from comment blocks, and puts program code in literal blocks.
#
# The class inherits the interface and helper functions from
# TextCodeConverter_ and adds functions specific to the text-to-code format
# conversion::
class Code2Text(TextCodeConverter):
"""Convert code source to text source
"""
# set_state
# ~~~~~~~~~
#
# Check if block is "header", "documentation", or "code_block":
#
# A paragraph is "documentation", if every non-blank line starts with a
# matching comment string (including whitespace except for commented blank
# lines) ::
def set_state(self, block):
"""Determine state of `block`."""
for line in block:
# skip documentation lines (commented, blank or blank comment)
if (line.startswith(self.comment_string)
or not line.rstrip()
or line.rstrip() == self.comment_string.rstrip()
):
continue
# non-commented line found:
if self.state == "":
self.state = "header"
else:
self.state = "code_block"
break
else:
# no code line found
# keep state if the block is just a blank line
# if len(block) == 1 and self._is_blank_codeline(line):
# return
self.state = "documentation"
# header_handler
# ~~~~~~~~~~~~~~
#
# Handle a leading code block. (See `Text2Code.header_handler`_ for a
# discussion of the "header" state.) ::
def header_handler(self, lines):
"""Format leading code block"""
if self.strip == True:
return
# get iterator over the lines that formats them as code-block
lines = iter(self.code_block_handler(lines))
# prepend header string to first line
yield self.header_string + lines.next()
# yield remaining lines
for line in lines:
yield line
# .. _Code2Text.documentation_handler:
#
# documentation_handler
# ~~~~~~~~~~~~~~~~~~~~~
#
# The *documentation state* handler converts a comment to a documentation
# block by stripping the leading `comment string` from every line::
def documentation_handler(self, block):
"""Uncomment documentation blocks in source code
"""
# Strip comment strings::
lines = [self.uncomment_line(line) for line in block]
# If the code block is stripped, the literal marker would lead to an
# error when the text is converted with Docutils. Strip it as well. ::
if self.strip or self.strip_marker:
self.strip_code_block_marker(lines)
# Otherwise, check for the `code_block_marker`_ at the end of the
# documentation block (skipping directive options that might follow it)::
elif self.add_missing_marker:
for line in lines[::-1]:
if self.marker_regexp.search(line):
self._add_code_block_marker = False
break
if (line.rstrip() and
not self.directive_option_regexp.search(line)):
self._add_code_block_marker = True
break
else:
self._add_code_block_marker = True
# Yield lines::
for line in lines:
yield line
# uncomment_line
# ~~~~~~~~~~~~~~
#
# Return documentation line after stripping comment string. Consider the
# case that a blank line has a comment string without trailing whitespace::
def uncomment_line(self, line):
"""Return uncommented documentation line"""
line = line.replace(self.comment_string, "", 1)
if line.rstrip() == self.stripped_comment_string:
line = line.replace(self.stripped_comment_string, "", 1)
return line
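# For illustration (not from the original source): with the default Python comment
# string ``'# '``, both commented text and blank comment lines are uncommented:
#
# >>> c2t = Code2Text([])
# >>> c2t.uncomment_line("# some documentation\n")
# 'some documentation\n'
# >>> c2t.uncomment_line("#\n")
# '\n'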
# .. _Code2Text.code_block_handler:
#
# code_block_handler
# ~~~~~~~~~~~~~~~~~~
#
# The `code_block` handler returns the code block as indented literal
# block (or filters it, if ``self.strip == True``). The amount of the code
# indentation is controlled by `self.codeindent` (default 2). ::
def code_block_handler(self, lines):
"""Covert code blocks to text format (indent or strip)
"""
if self.strip == True:
return
        # insert a transition marker if one is needed
if self._add_code_block_marker:
self.state = "documentation"
yield self.code_block_marker + "\n"
yield "\n"
self._add_code_block_marker = False
self.state = "code_block"
for line in lines:
yield " "*self.codeindent + line
# strip_code_block_marker
# ~~~~~~~~~~~~~~~~~~~~~~~
#
# Replace the literal marker with the equivalent of Docutils replace rules
#
# * strip ``::``-line (and preceding blank line) if on a line on its own
# * strip ``::`` if it is preceded by whitespace.
# * convert ``::`` to a single colon if preceded by text
#
# `lines` is a list of documentation lines (with a trailing blank line).
# It is modified in-place::
def strip_code_block_marker(self, lines):
try:
line = lines[-2]
except IndexError:
return # just one line (no trailing blank line)
# match with regexp: `match` is None or has groups
# \1 leading text, \2 code_block_marker, \3 remainder
match = self.marker_regexp.search(line)
if not match: # no code_block_marker present
return
if not match.group(1): # `code_block_marker` on an extra line
del(lines[-2])
# delete preceding line if it is blank
if len(lines) >= 2 and not lines[-2].lstrip():
del(lines[-2])
elif match.group(1).rstrip() < match.group(1):
# '::' follows whitespace
lines[-2] = match.group(1).rstrip() + match.group(3)
else: # '::' follows text
lines[-2] = match.group(1).rstrip() + ':' + match.group(3)
# Filters
# =======
#
# Filters allow pre- and post-processing of the data to bring it in a format
# suitable for the "normal" text<->code conversion. An example is conversion
# of `C` ``/*`` ``*/`` comments into C++ ``//`` comments (and back).
#
# Filters are generator functions that return an iterator acting on a
# `data` iterable and yielding processed `data` lines.
#
# identity_filter
# ---------------
#
# The most basic filter is the identity filter, that returns its argument as
# iterator::
def identity_filter(data):
"""Return data iterator without any processing"""
return iter(data)
# expandtabs_filter
# -----------------
#
# Expand hard-tabs in every line of `data` (cf. `str.expandtabs`).
#
# This filter is applied to the input data by `TextCodeConverter.convert`_ as
# hard tabs can lead to errors when the indentation is changed. ::
def expandtabs_filter(data):
"""Yield data tokens with hard-tabs expanded"""
for line in data:
yield line.expandtabs()
# collect_blocks
# --------------
#
# A filter to aggregate "paragraphs" (blocks separated by blank
# lines). Yields lists of lines::
def collect_blocks(lines):
"""collect lines in a list
yield list for each paragraph, i.e. block of lines separated by a
blank line (whitespace only).
Trailing blank lines are collected as well.
"""
blank_line_reached = False
block = []
for line in lines:
if blank_line_reached and line.rstrip():
yield block
blank_line_reached = False
block = [line]
continue
if not line.rstrip():
blank_line_reached = True
block.append(line)
yield block
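# A minimal sketch of the grouping behaviour::
#
#   list(collect_blocks(["a\n", "b\n", "\n", "c\n"]))
#   -->  [["a\n", "b\n", "\n"], ["c\n"]]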
# dumb_c_preprocessor
# -------------------
#
# This is a basic filter to convert `C` to `C++` comments. Works line-wise and
# only converts lines that
#
# * start with "/\* " and end with " \*/" (followed by whitespace only)
#
# A more sophisticated version would also
#
# * convert multi-line comments
#
# + Keep indentation or strip 3 leading spaces?
#
# * account for nested comments
#
# * only convert comments that are separated from code by a blank line
#
# ::
def dumb_c_preprocessor(data):
"""change `C` ``/* `` `` */`` comments into C++ ``// `` comments"""
comment_string = defaults.comment_strings["c++"]
boc_string = "/* "
eoc_string = " */"
for line in data:
if (line.startswith(boc_string)
and line.rstrip().endswith(eoc_string)
):
line = line.replace(boc_string, comment_string, 1)
line = "".join(line.rsplit(eoc_string, 1))
yield line
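# A usage sketch (assuming the C++ comment string ``"// "``)::
#
#   list(dumb_c_preprocessor(["/* documentation */\n", "int i;\n"]))
#   -->  ["// documentation\n", "int i;\n"]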
# Unfortunately, the `replace` method of strings does not support negative
# numbers for the `count` argument:
#
# >>> "foo */ baz */ bar".replace(" */", "", -1) == "foo */ baz bar"
# False
#
# However, there is the `rsplit` method, that can be used together with `join`:
#
# >>> "".join("foo */ baz */ bar".rsplit(" */", 1)) == "foo */ baz bar"
# True
#
# dumb_c_postprocessor
# --------------------
#
# Undo the preparations by the dumb_c_preprocessor and re-insert valid comment
# delimiters ::
def dumb_c_postprocessor(data):
"""change C++ ``// `` comments into `C` ``/* `` `` */`` comments"""
comment_string = defaults.comment_strings["c++"]
boc_string = "/* "
eoc_string = " */"
for line in data:
if line.rstrip() == comment_string.rstrip():
line = line.replace(comment_string, "", 1)
elif line.startswith(comment_string):
line = line.replace(comment_string, boc_string, 1)
line = line.rstrip() + eoc_string + "\n"
yield line
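# A round-trip sketch (assuming the C++ comment string ``"// "``)::
#
#   list(dumb_c_postprocessor(["// documentation\n", "int i;\n"]))
#   -->  ["/* documentation */\n", "int i;\n"]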
# register filters
# ----------------
#
# ::
defaults.preprocessors['c2text'] = dumb_c_preprocessor
defaults.preprocessors['css2text'] = dumb_c_preprocessor
defaults.postprocessors['text2c'] = dumb_c_postprocessor
defaults.postprocessors['text2css'] = dumb_c_postprocessor
# Command line use
# ================
#
# Using this script from the command line will convert a file according to its
# extension. This default can be overridden by a couple of options.
#
# Dual source handling
# --------------------
#
# How to determine which source is up-to-date?
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# - set modification date of `outfile` to the one of `infile`
#
# Points out that the source files are 'synchronised'.
#
# * Are there problems to expect from "backdating" a file? Which?
#
# Looking at http://www.unix.com/showthread.php?t=20526, it seems
# perfectly legal to set `mtime` (while leaving `ctime`) as `mtime` is a
# description of the "actuality" of the data in the file.
#
# * Should this become a default or an option?
#
# - alternatively move input file to a backup copy (with option: `--replace`)
#
# - check modification date before overwriting
# (with option: `--overwrite=update`)
#
# - check modification date before editing (implemented as `Jed editor`_
# function `pylit_check()` in `pylit.sl`_)
#
# .. _Jed editor: http://www.jedsoft.org/jed/
# .. _pylit.sl: http://jedmodes.sourceforge.net/mode/pylit/
#
# Recognised Filename Extensions
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Instead of defining a new extension for "pylit" literate programs,
# by default ``.txt`` will be appended for the text source and stripped by
# the conversion to the code source. I.e. for a Python program foo:
#
# * the code source is called ``foo.py``
# * the text source is called ``foo.py.txt``
# * the html rendering is called ``foo.py.html``
#
#
# OptionValues
# ------------
#
# The following class adds `as_dict`_, `complete`_ and `__getattr__`_
# methods to `optparse.Values`::
class OptionValues(optparse.Values):
# .. _OptionValues.as_dict:
#
# as_dict
# ~~~~~~~
#
# For use as keyword arguments, it is handy to have the options in a
# dictionary. `as_dict` returns a copy of the instances object dictionary::
def as_dict(self):
"""Return options as dictionary object"""
return self.__dict__.copy()
# .. _OptionValues.complete:
#
# complete
# ~~~~~~~~
#
# ::
def complete(self, **keyw):
"""
Complete the option values with keyword arguments.
Do not overwrite existing values. Only use arguments that do not
        have a corresponding attribute in `self`.
"""
for key in keyw:
if key not in self.__dict__:
setattr(self, key, keyw[key])
# .. _OptionValues.__getattr__:
#
# __getattr__
# ~~~~~~~~~~~
#
# To replace calls using ``options.ensure_value("OPTION", None)`` with the
# more concise ``options.OPTION``, we define `__getattr__` [#]_ ::
def __getattr__(self, name):
"""Return default value for non existing options"""
return None
# .. [#] The special method `__getattr__` is only called when an attribute
# look-up has not found the attribute in the usual places (i.e. it is
# not an instance attribute nor is it found in the class tree for
# self).
#
#
# PylitOptions
# ------------
#
# The `PylitOptions` class comprises an option parser and methods for parsing
# and completion of command line options::
class PylitOptions(object):
"""Storage and handling of command line options for pylit"""
# Instantiation
# ~~~~~~~~~~~~~
#
# ::
def __init__(self):
"""Set up an `OptionParser` instance for pylit command line options
"""
p = optparse.OptionParser(usage=main.__doc__, version=_version)
# Conversion settings
p.add_option("-c", "--code2txt", dest="txt2code", action="store_false",
help="convert code source to text source")
p.add_option("-t", "--txt2code", action="store_true",
help="convert text source to code source")
p.add_option("--language",
choices = list(defaults.languages.values()),
help="use LANGUAGE native comment style")
p.add_option("--comment-string", dest="comment_string",
help="documentation block marker in code source "
"(including trailing whitespace, "
"default: language dependent)")
p.add_option("-m", "--code-block-marker", dest="code_block_marker",
help="syntax token starting a code block. (default '::')")
p.add_option("--codeindent", type="int",
help="Number of spaces to indent code blocks with "
"text2code (default %d)" % defaults.codeindent)
# Output file handling
p.add_option("--overwrite", action="store",
choices = ["yes", "update", "no"],
help="overwrite output file (default 'update')")
p.add_option("--replace", action="store_true",
help="move infile to a backup copy (appending '~')")
p.add_option("-s", "--strip", action="store_true",
help='"export" by stripping documentation or code')
# Special actions
p.add_option("-d", "--diff", action="store_true",
help="test for differences to existing file")
p.add_option("--doctest", action="store_true",
help="run doctest.testfile() on the text version")
p.add_option("-e", "--execute", action="store_true",
help="execute code (Python only)")
self.parser = p
# .. _PylitOptions.parse_args:
#
# parse_args
# ~~~~~~~~~~
#
# The `parse_args` method calls the `optparse.OptionParser` on command
# line or provided args and returns the result as `PylitOptions.Values`
# instance. Defaults can be provided as keyword arguments::
def parse_args(self, args=sys.argv[1:], **keyw):
"""parse command line arguments using `optparse.OptionParser`
parse_args(args, **keyw) -> OptionValues instance
args -- list of command line arguments.
keyw -- keyword arguments or dictionary of option defaults
"""
# parse arguments
(values, args) = self.parser.parse_args(args, OptionValues(keyw))
# Convert FILE and OUTFILE positional args to option values
# (other positional arguments are ignored)
try:
values.infile = args[0]
values.outfile = args[1]
except IndexError:
pass
return values
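# A sketch of typical use (the file names are only illustrative)::
#
#   values = PylitOptions().parse_args(["--txt2code", "foo.py.txt", "foo.py"])
#   values.txt2code, values.infile, values.outfile
#   -->  True, "foo.py.txt", "foo.py"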
# .. _PylitOptions.complete_values:
#
# complete_values
# ~~~~~~~~~~~~~~~
#
# Complete an OptionValues instance `values`. Use module-level defaults and
# context information to set missing option values to sensible defaults (if
# possible) ::
def complete_values(self, values):
"""complete option values with module and context sensible defaults
x.complete_values(values) -> values
values -- OptionValues instance
"""
# Complete with module-level defaults_::
values.complete(**defaults.__dict__)
# Ensure infile is a string::
values.ensure_value("infile", "")
# Guess conversion direction from `infile` filename::
if values.txt2code is None:
in_extension = os.path.splitext(values.infile)[1]
if in_extension in values.text_extensions:
values.txt2code = True
elif in_extension in values.languages.keys():
values.txt2code = False
# Auto-determine the output file name::
values.ensure_value("outfile", self._get_outfile_name(values))
# Second try: Guess conversion direction from outfile filename::
if values.txt2code is None:
out_extension = os.path.splitext(values.outfile)[1]
values.txt2code = not (out_extension in values.text_extensions)
# Set the language of the code::
if values.txt2code is True:
code_extension = os.path.splitext(values.outfile)[1]
elif values.txt2code is False:
code_extension = os.path.splitext(values.infile)[1]
values.ensure_value("language", values.languages[code_extension])
return values
# _get_outfile_name
# ~~~~~~~~~~~~~~~~~
#
# Construct a matching filename for the output file. The output filename is
# constructed from `infile` by the following rules:
#
# * '-' (stdin) results in '-' (stdout)
# * strip the `text_extension`_ (txt2code) or
# * add the `text_extension`_ (code2txt)
# * fallback: if no guess can be made, add ".out"
#
# .. TODO: use values.outfile_extension if it exists?
#
# ::
def _get_outfile_name(self, values):
"""Return a matching output filename for `infile`
"""
# if input is stdin, default output is stdout
if values.infile == '-':
return '-'
# Derive from `infile` name: strip or add text extension
(base, ext) = os.path.splitext(values.infile)
if ext in values.text_extensions:
return base # strip
if ext in values.languages.keys() or values.txt2code == False:
return values.infile + values.text_extensions[0] # add
# give up
return values.infile + ".out"
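# For example (a sketch, assuming the default text extension ``.txt``)::
#
#   foo.py.txt  -->  foo.py       (strip the text extension)
#   foo.py      -->  foo.py.txt   (add the text extension)
#   -           -->  -            (stdin maps to stdout)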
# .. _PylitOptions.__call__:
#
# __call__
# ~~~~~~~~
#
# The special `__call__` method allows using PylitOptions instances as
# *callables*: Calling an instance parses the argument list to extract option
# values and completes them based on "context-sensitive defaults". Keyword
# arguments are passed to `PylitOptions.parse_args`_ as default values. ::
def __call__(self, args=sys.argv[1:], **keyw):
"""parse and complete command line args return option values
"""
values = self.parse_args(args, **keyw)
return self.complete_values(values)
# Helper functions
# ----------------
#
# open_streams
# ~~~~~~~~~~~~
#
# Return file objects for in- and output. If the input path is missing,
# write usage and abort. (An alternative would be to use stdin as default.
# However, this leaves the uninitiated user with a non-responding application
# if (s)he just tries the script without any arguments) ::
def open_streams(infile = '-', outfile = '-', overwrite='update', **keyw):
"""Open and return the input and output stream
open_streams(infile, outfile) -> (in_stream, out_stream)
in_stream -- file(infile) or sys.stdin
out_stream -- file(outfile) or sys.stdout
overwrite -- 'yes': overwrite eventually existing `outfile`,
'update': fail if the `outfile` is newer than `infile`,
'no': fail if `outfile` exists.
Irrelevant if `outfile` == '-'.
"""
if not infile:
strerror = "Missing input file name ('-' for stdin; -h for help)"
raise IOError(2, strerror, infile)
if infile == '-':
in_stream = sys.stdin
else:
in_stream = open(infile, 'r')
if outfile == '-':
out_stream = sys.stdout
elif overwrite == 'no' and os.path.exists(outfile):
raise IOError(1, "Output file exists!", outfile)
elif overwrite == 'update' and is_newer(outfile, infile):
raise IOError(1, "Output file is newer than input file!", outfile)
else:
out_stream = open(outfile, 'w')
return (in_stream, out_stream)
# is_newer
# ~~~~~~~~
#
# ::
def is_newer(path1, path2):
"""Check if `path1` is newer than `path2` (using mtime)
Compare modification time of files at path1 and path2.
Non-existing files are considered oldest: Return False if path1 does not
exist and True if path2 does not exist.
Return None for equal modification time. (This evaluates to False in a
Boolean context but allows a test for equality.)
"""
try:
mtime1 = os.path.getmtime(path1)
except OSError:
mtime1 = -1
try:
mtime2 = os.path.getmtime(path2)
except OSError:
mtime2 = -1
# print "mtime1", mtime1, path1, "\n", "mtime2", mtime2, path2
if mtime1 == mtime2:
return None
return mtime1 > mtime2
# get_converter
# ~~~~~~~~~~~~~
#
# Get an instance of the converter state machine::
def get_converter(data, txt2code=True, **keyw):
if txt2code:
return Text2Code(data, **keyw)
else:
return Code2Text(data, **keyw)
# Use cases
# ---------
#
# run_doctest
# ~~~~~~~~~~~
# ::
def run_doctest(infile="-", txt2code=True,
globs={}, verbose=False, optionflags=0, **keyw):
"""run doctest on the text source
"""
# Allow imports from the current working dir by prepending an empty string to
# sys.path (see doc of sys.path())::
sys.path.insert(0, '')
# Import classes from the doctest module::
from doctest import DocTestParser, DocTestRunner
# Read in source. Make sure it is in text format, as tests in comments are not
# found by doctest::
(data, out_stream) = open_streams(infile, "-")
if txt2code is False:
keyw.update({'add_missing_marker': False})
converter = Code2Text(data, **keyw)
docstring = str(converter)
else:
docstring = data.read()
# decode doc string if there is a "magic comment" in the first or second line
# (http://docs.python.org/reference/lexical_analysis.html#encoding-declarations)
# ::
firstlines = ' '.join(docstring.splitlines()[:2])
    match = re.search(r'coding[=:]\s*([-\w.]+)', firstlines)
if match:
docencoding = match.group(1)
docstring = docstring.decode(docencoding)
# Use the doctest Advanced API to run all doctests in the source text::
test = DocTestParser().get_doctest(docstring, globs, name="",
filename=infile, lineno=0)
runner = DocTestRunner(verbose, optionflags)
runner.run(test)
    runner.summarize()
# give feedback also if no failures occurred
if not runner.failures:
print("{:d} failures in {:d} tests".format(runner.failures, runner.tries))
return runner.failures, runner.tries
# diff
# ~~~~
#
# ::
def diff(infile='-', outfile='-', txt2code=True, **keyw):
"""Report differences between converted infile and existing outfile
If outfile does not exist or is '-', do a round-trip conversion and
report differences.
"""
import difflib
instream = open(infile)
# for diffing, we need a copy of the data as list::
data = instream.readlines()
# convert
converter = get_converter(data, txt2code, **keyw)
new = converter()
if outfile != '-' and os.path.exists(outfile):
outstream = open(outfile)
old = outstream.readlines()
oldname = outfile
newname = "<conversion of %s>"%infile
else:
old = data
oldname = infile
# back-convert the output data
converter = get_converter(new, not txt2code)
new = converter()
newname = "<round-conversion of %s>"%infile
# find and print the differences
is_different = False
# print type(old), old
# print type(new), new
delta = difflib.unified_diff(old, new,
# delta = difflib.unified_diff(["heute\n", "schon\n"], ["heute\n", "noch\n"],
fromfile=oldname, tofile=newname)
for line in delta:
is_different = True
# print(line, end=" ")
        print(line, end="")
if not is_different:
print(oldname)
print(newname)
print("no differences found")
return is_different
# execute
# ~~~~~~~
#
# Works only for python code.
#
# Does not work with `eval`, as code is not just one expression. ::
def execute(infile="-", txt2code=True, **keyw):
"""Execute the input file. Convert first, if it is a text source.
"""
    data = open(infile)
    if txt2code:
        data = str(Text2Code(data, **keyw))
    else:
        # `exec` needs a string (or code object) in Python 3, not a file object
        data = data.read()
    # print "executing " + options.infile
    exec(data)
# main
# ----
#
# If this script is called from the command line, the `main` function will
# convert the input (file or stdin) between text and code formats.
#
# Option default values for the conversion can be given as keyword arguments
# to `main`_. The option defaults will be updated by command line options and
# extended with "intelligent guesses" by `PylitOptions`_ and passed on to
# helper functions and the converter instantiation.
#
# This allows easy customisation for programmatic use -- just call `main`
# with the appropriate keyword options, e.g. ``pylit.main(comment_string="## ")``
#
# ::
def main(args=sys.argv[1:], **defaults):
"""%prog [options] INFILE [OUTFILE]
Convert between (reStructured) text source with embedded code,
and code source with embedded documentation (comment blocks)
The special filename '-' stands for standard in and output.
"""
# Parse and complete the options::
options = PylitOptions()(args, **defaults)
# print "infile", repr(options.infile)
# Special actions with early return::
if options.doctest:
return run_doctest(**options.as_dict())
if options.diff:
return diff(**options.as_dict())
if options.execute:
return execute(**options.as_dict())
# Open in- and output streams::
try:
(data, out_stream) = open_streams(**options.as_dict())
except IOError as ex:
print("IOError: {0} {1}".format(ex.filename, ex.strerror))
sys.exit(ex.errno)
# Get a converter instance::
converter = get_converter(data, **options.as_dict())
# Convert and write to out_stream::
out_stream.write(str(converter))
if out_stream is not sys.stdout:
print("extract written to "+ out_stream.name)
out_stream.close()
# If input and output are from files, set the modification time (`mtime`) of
# the output file to the one of the input file to indicate that the contained
# information is equal. [#]_ ::
try:
os.utime(options.outfile, (os.path.getatime(options.outfile),
os.path.getmtime(options.infile))
)
except OSError:
pass
## print "mtime", os.path.getmtime(options.infile), options.infile
## print "mtime", os.path.getmtime(options.outfile), options.outfile
# .. [#] Make sure the corresponding file object (here `out_stream`) is
# closed, as otherwise the change will be overwritten when `close` is
# called afterwards (either explicitly or at program exit).
#
#
# Rename the infile to a backup copy if ``--replace`` is set::
if options.replace:
os.rename(options.infile, options.infile + "~")
# Run main, if called from the command line::
if __name__ == '__main__':
main()
# Open questions
# ==============
#
# Open questions and ideas for further development
#
# Clean code
# ----------
#
# * can we gain from using "shutil" over "os.path" and "os"?
# * use pylint or pyChecker to enforce a consistent style?
#
# Options
# -------
#
# * Use templates for the "intelligent guesses" (with Python syntax for string
# replacement with dicts: ``"hello %(what)s" % {'what': 'world'}``)
#
# * Is it sensible to offer the `header_string` option also as command line
# option?
#
# treatment of blank lines
# ------------------------
#
# Alternatives: Keep blank lines blank
#
# - "never" (current setting) -> "visually merges" all documentation
# if there is no interjacent code
#
# - "always" -> disrupts documentation blocks,
#
# - "if empty" (no whitespace). Comment if there is whitespace.
#
# This would allow non-obstructing markup but unfortunately this is (in
# most editors) also non-visible markup.
#
# + "if double" (if there is more than one consecutive blank line)
#
# With this handling, the "visual gap" remains in both, text and code
# source.
#
#
# Parsing Problems
# ----------------
#
# * Ignore "matching comments" in literal strings?
#
# Too complicated: Would need a specific detection algorithm for every
# language that supports multi-line literal strings (C++, PHP, Python)
#
# * Warn if a comment in code will become documentation after round-trip?
#
#
# docstrings in code blocks
# -------------------------
#
# * How to handle docstrings in code blocks? (it would be nice to convert them
# to rst-text if ``__docformat__ == restructuredtext``)
#
# TODO: Ask at Docutils users|developers
#
# Plug-ins
# --------
#
# Specify a path for user additions and plug-ins. This would require
# converting Pylit from a pure module to a package...
#
# 6.4.3 Packages in Multiple Directories
#
# Packages support one more special attribute, __path__. This is initialized
# to be a list containing the name of the directory holding the package's
# __init__.py before the code in that file is executed. This
# variable can be modified; doing so affects future searches for modules and
# subpackages contained in the package.
#
# While this feature is not often needed, it can be used to extend the set
# of modules found in a package.
#
#
# .. References
#
# .. _Docutils: http://docutils.sourceforge.net/
# .. _Sphinx: http://sphinx.pocoo.org
# .. _Pygments: http://pygments.org/
# .. _code-block directive:
# http://docutils.sourceforge.net/sandbox/code-block-directive/
# .. _literal block:
# .. _literal blocks:
# http://docutils.sf.net/docs/ref/rst/restructuredtext.html#literal-blocks
# .. _indented literal block:
# .. _indented literal blocks:
# http://docutils.sf.net/docs/ref/rst/restructuredtext.html#indented-literal-blocks
# .. _quoted literal block:
# .. _quoted literal blocks:
# http://docutils.sf.net/docs/ref/rst/restructuredtext.html#quoted-literal-blocks
# .. _parsed-literal blocks:
# http://docutils.sf.net/docs/ref/rst/directives.html#parsed-literal-block
# .. _doctest block:
# .. _doctest blocks:
# http://docutils.sf.net/docs/ref/rst/restructuredtext.html#doctest-blocks
#
# .. _feature request and patch by jrioux:
# http://developer.berlios.de/feature/?func=detailfeature&feature_id=4890&group_id=7974
```
#### File: doc/source/rstprocess.py
```python
import sys
import os
import shutil
# sys.path.append('../../../utils/pylit/')
try:
import pylit
except ImportError:
raise ImportError("Unable to import pylit module")
def process():
"""Copy demo rst files (C++ and Python) from the DOLFIN source tree
into the demo source tree, and process file with pylit
"""
    # Check that we can find pylit.py for converting foo.py.rst to foo.py
    pylit_parser = "pylit.py"
    if not os.path.isfile(pylit_parser):
        raise RuntimeError("Cannot find pylit.py")
# Directories to scan
subdirs = ["../../demo/documented"]
# Iterate over subdirectories containing demos
for subdir in subdirs:
# Get list of demos (demo name , subdirectory)
demos = [(dI, os.path.join(subdir, dI)) for dI in os.listdir(subdir) if os.path.isdir(os.path.join(subdir, dI))]
# Iterate over demos
for demo, path in demos:
# Make demo doc directory
demo_dir = os.path.join('./demos/', demo)
if not os.path.exists(demo_dir):
os.makedirs(demo_dir)
#for f in rst_files_common:
# shutil.copy(os.path.join(path, f), demo_dir)
# Build list of rst and png files in demo source directory
rst_files = [f for f in os.listdir(path) if os.path.splitext(f)[1] == ".rst" ]
other_files = [f for f in os.listdir(path) if os.path.splitext(f)[1] in (".png", ".pdf", ".gif", ".py", ".gz", ".yaml", ".zip")]
# Copy .png and .py files into documentation demo directory
for f in other_files:
shutil.copy(os.path.join(path, f), demo_dir)
# # Copy input folders
# if "Input_Data" in os.listdir(path):
# input_path = os.path.join(path, "Input_Data")
# demo_input_dir = os.path.join(demo_dir, "Input_Data/")
# if not os.path.exists(demo_input_dir):
# os.makedirs(demo_input_dir)
# for f in os.listdir(input_path):
# shutil.copy(os.path.join(input_path, f), demo_input_dir)
            # Copy rst files into documentation demo directory and
            # process with Pylit
            for f in rst_files:
                shutil.copy(os.path.join(path, f), demo_dir)
# Run pylit on py.rst files (files with 'double
# extensions')
if os.path.splitext(os.path.splitext(f)[0])[1] == ".py":
rst_file = os.path.join(demo_dir, f)
pylit.main([rst_file])
if __name__ == "__main__":
process()
```
#### File: tests/1-Domains/est_circle_domain.py
```python
import windse
import pytest
import numpy as np
from dolfin import *
import windse_driver.driver_functions as df
###############################################################
######################## Setup Objects ########################
###############################################################
### Alias Parameters ###
params = df.BlankParameters()
### Set General Parameters ###
params["general"]["name"] = "Circle_Domain_Test"
### Set Box Parameters ###
radius = 1200
params["domain"]["type"] = "circle"
params["domain"]["mesh_type"] = "mshr"
params["domain"]["radius"] = radius
params["domain"]["center"] = [0.0, 0.0]
params["domain"]["nt"] = 100
params["domain"]["res"] = 100
### Initialize Parameters using those set above ###
windse.initialize(params)
### Create the Domain Object ###
dom = windse.CircleDomain()
### Check if the object is as expected ###
dom.Save()
### Create unit for integration ###
V = FiniteElement('Lagrange', dom.mesh.ufl_cell(), 1)
V = FunctionSpace(dom.mesh,V)
u = Function(V)
u.vector()[:] = 1.0
### Calculate inflow integral ###
def CalculateInflowBoundary(dom,u):
xy = Expression("(x[0]+pi)*(x[1]+pi)",degree=2,pi=pi)
ds = Measure('ds', subdomain_data=dom.boundary_markers)
val = 0
unique_id = np.unique(dom.boundary_markers.array())
for blabel in dom.boundary_types["inflow"]:
if dom.boundary_names[blabel] in unique_id:
val += assemble(xy*u*ds(dom.boundary_names[blabel]))/assemble(u*ds(dom.boundary_names[blabel]))
return val
###############################################################
######################## Define Tests #########################
###############################################################
def test_volume():
### Calculate Volume ###
Volume = assemble(u*dx)
True_Volume = (pi*radius**2.0)
### Check the mesh volume ###
if abs((Volume-True_Volume)/True_Volume) > 1e-3:
print("Expected Volume: " + repr(True_Volume))
print("Actual Volume: " + repr(Volume))
print("Percent Error: " + repr(abs((Volume-True_Volume)/True_Volume)))
raise ValueError("Box domain constructed with unexpected volume")
def test_boundary():
### Check the initial boundary integral
Boundary_Value = CalculateInflowBoundary(dom,u)
True_Boundary_Value = -2363.411257268333
if abs((Boundary_Value-True_Boundary_Value)/True_Boundary_Value) > 1e-3:
print("Expected inflow: " + repr(True_Boundary_Value))
print("Actual inflow: " + repr(Boundary_Value))
raise ValueError("Initial inflow integral returned unexpected value (test value is hard coded)")
def test_rotated_boundary():
### Rotate the boundary by 1.337 rads
dom.RecomputeBoundaryMarkers(1.337)
### Test Rotated Boundary integral ###
Boundary_Value = CalculateInflowBoundary(dom,u)
True_Boundary_Value = -2077.458137099408
if abs((Boundary_Value-True_Boundary_Value)/True_Boundary_Value) > 1e-3:
print("Expected inflow: " + repr(True_Boundary_Value))
print("Actual inflow: " + repr(Boundary_Value))
raise ValueError("Initial inflow integral returned unexpected value (test value is hard coded)")
```
#### File: WindSE/tests/test_regression_parallel.py
```python
import pathlib
import pytest
import os, sys
import yaml
import warnings
import subprocess
### Located Demos ###
home_path = os.getcwd()
reg_path = "../9-Regression/"
### Get Yaml Files ###
yaml_files = sorted(pathlib.Path(__file__, reg_path).resolve().glob('*_Unsteady.yaml'))
### Import the tolerances ###
tolerances = yaml.load(open("tests/9-Regression/Truth_Data/tolerances.yaml"),Loader=yaml.SafeLoader)
### Get current status of modules
default_modules = sys.modules.keys()
## Set the number of processors to test with
num_procs = 2
###############################################################
######################### Define Tests ########################
###############################################################
### Run Demo Yaml Files
@pytest.mark.parametrize('yaml_file', yaml_files, ids=lambda yaml_file: yaml_file.parts[-2]+"/"+yaml_file.parts[-1])
def test_yaml_execution(yaml_file):
### Filter out some benign numpy warnings ###
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
### Run The Windse Simulation
folder = os.path.split(yaml_file.as_posix())[0]
os.chdir(folder)
# from windse_driver import driver
### Grab the name of the run ###
_, yaml_name = os.path.split(yaml_file)
yaml_name = yaml_name.split(".")[0]
parallel_yaml_name = yaml_name + "_np_%d" % (num_procs)
### Set the number of processes and launch using mpirun as if from the command line
cl_command = "mpirun -n %d windse run %s -p general:name:%s" % (num_procs, yaml_file, parallel_yaml_name)
# print(cl_command)
cl_output = subprocess.run(cl_command.split())
# driver.run_action(params_loc=yaml_file.as_posix())
os.chdir(home_path)
### Import the Truth ###
truth_loc = folder + "/Truth_Data/" + yaml_name + "_truth.yaml"
sim_truth = yaml.load(open(truth_loc),Loader=yaml.SafeLoader)
### Import the Results ###
results_loc = folder + "/output/" + parallel_yaml_name + "/tagged_output.yaml"
sim_results = yaml.load(open(results_loc),Loader=yaml.SafeLoader)
errors = ""
### Iterate over the truth and check with the results
for module_name, truth_dict in sim_truth.items():
check_dict = sim_results.get(module_name, None)
tol_dict = tolerances.get(module_name, {})
### Send warning if a module was not checked ###
        if check_dict is None:
            errors += f"Missing Group - {module_name}\n"
            continue
### Get Wildcard Tolerances for module ###
wild_tol_keys = []
for key in tol_dict.keys():
if "*" in key:
wild_tol_keys.append(key)
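        # For example (hypothetical key name): a tolerance entry such as "power_*"
        # matches every result key containing "power_", because the "*" is stripped
        # and a substring test is used below.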
### Check each value in the module
for key, truth_value in truth_dict.items():
check_value = check_dict.get(key,None)
tol_value = None
### Check if there is a valid wildcard ###
use_wildcard = False
for wild_key in wild_tol_keys:
filtered_wild_key = wild_key.replace('*', '')
if filtered_wild_key in key:
wild_tol_value = tol_dict[wild_key]
use_wildcard = True
### Check if the exact key is available ###
if key in tol_dict.keys():
tol_value = tol_dict[key]
### Check if wildcard tolerance key is available ###
elif use_wildcard:
tol_value = wild_tol_value
### Set the default tolerances for a float ###
elif isinstance(check_value,float):
tol_value = [1e-4,"absolute"]
            ### Set the default tolerances for an int ###
elif isinstance(check_value,int):
tol_value = [0,"absolute"]
### Get tolerance parameters ###
tol = float(tol_value[0])
check_type = tol_value[1]
if check_value is None:
errors += f"Missing Key - {module_name}: {key} \n"
else:
### Calculate errors ###
abs_error = abs(check_value-truth_value)
if truth_value != 0:
rel_error = abs(check_value-truth_value)/truth_value
if check_type == "absolute" and abs_error > tol:
errors += f"Value Error - {module_name}: {key} (abs error: {abs_error}, tol: {tol} truth: {truth_value}, check: {check_value})\n"
elif check_type == "relative" and rel_error > tol:
errors += f"Value Error - {module_name}: {key} (rel error: {rel_error}, tol: {tol}, truth: {truth_value}, check: {check_value})\n"
if len(errors)>0:
errors = parallel_yaml_name + "\n" + errors
raise ValueError(errors)
```
#### File: WindSE/windse/BoundaryManager.py
```python
import __main__
import os
### Get the name of program importing this package ###
if hasattr(__main__,"__file__"):
main_file = os.path.basename(__main__.__file__)
else:
main_file = "ipython"
### This checks if we are just doing documentation ###
if not main_file in ["sphinx-build", "__main__.py"]:
from dolfin import *
import numpy as np
### Import the cumulative parameters ###
from windse import windse_parameters
### Check if we need dolfin_adjoint ###
if windse_parameters.dolfin_adjoint:
from dolfin_adjoint import *
import math
from scipy.interpolate import RegularGridInterpolator
class GenericBoundary(object):
def __init__(self,dom,fs,farm):
self.params = windse_parameters
self.dom = dom
self.fs = fs
self.farm = farm
self.ig_first_save = True
self.height_first_save = True
self.fprint = self.params.fprint
self.tag_output = self.params.tag_output
self.debug_mode = self.params.debug_mode
### Update attributes based on params file ###
for key, value in self.params["boundary_conditions"].items():
setattr(self,key,value)
### get the height to apply the HH_vel ###
if self.vel_height == "HH":
self.vel_height = np.mean(farm.HH)
if np.isnan(self.vel_height):
raise ValueError("Hub Height not defined, likely and EmptyFarm. Please set boundary_conditions:vel_height in config yaml")
### Get solver parameters ###
self.final_time = self.params["solver"]["final_time"]
### Define the zero function based on domain dimension ###
self.zeros = Constant(dom.mesh.topology().dim()*(0.0,))
self.zero = Constant(0.0)
### Use custom boundary tags if provided ###
if self.params.default_bc_names:
self.boundary_names = self.dom.boundary_names
if self.params.default_bc_types:
self.boundary_types = self.dom.boundary_types
def DebugOutput(self):
if self.debug_mode:
# Average of the x and y-velocities
self.tag_output("min_x", self.ux.vector().min())
self.tag_output("max_x", self.ux.vector().max())
self.tag_output("avg_x", self.ux.vector().sum()/self.ux.vector().size())
self.tag_output("min_y", self.uy.vector().min())
self.tag_output("max_y", self.uy.vector().max())
self.tag_output("avg_y", self.uy.vector().sum()/self.uy.vector().size())
# If applicable, average of z-velocities
if self.dom.dim == 3:
self.tag_output("min_z", self.uz.vector().min())
self.tag_output("max_z", self.uz.vector().max())
self.tag_output("avg_z", self.uz.vector().sum()/self.uz.vector().size())
# Average of the pressures
self.tag_output("min_p", self.bc_pressure.vector().min())
self.tag_output("max_p", self.bc_pressure.vector().max())
self.tag_output("avg_p", self.bc_pressure.vector().sum()/self.bc_pressure.vector().size())
# Average of all initialized fields (confirms function assignment) ### Depends on DOFS
self.tag_output("min_initial_values", self.u0.vector().min())
self.tag_output("max_initial_values", self.u0.vector().max())
self.tag_output("avg_initial_values", self.u0.vector().sum()/self.u0.vector().size())
# Get number of boundary conditions
num_bc = len(self.bcu) + len(self.bcp) + len(self.bcs)
self.tag_output("num_bc", num_bc)
def SetupBoundaries(self):
### Create the equations need for defining the boundary conditions ###
### this is sloppy and will be cleaned up.
### Inflow is always from the front
self.fprint("Applying Boundary Conditions",offset=1)
# If running in parallel, avoid using boundary markers
if self.params.num_procs > 1:
self.bcu = []
self.bcp = []
self.bcs = []
for bc_type, bc_loc_list in self.boundary_types.items():
for bc_loc in bc_loc_list:
# Translate the boundary name, a string, into an integer index:
# East = 0, North = 1, West = 2, South = 3, Bottom = 4, Top = 5
bc_loc_id = self.boundary_names[bc_loc] - 1
# Get the correct compiled subdomain based off the location id
bc_domain = self.dom.boundary_subdomains[bc_loc_id]
# Append the right type of Dirichlet BC to the list
if bc_type == 'inflow':
self.bcu.append(DirichletBC(self.fs.V, self.bc_velocity, bc_domain))
self.bcs.append(DirichletBC(self.fs.W.sub(0), self.bc_velocity, bc_domain))
elif bc_type == 'no_slip':
if self.dom.mesh.topology().dim() == 3:
zeros = Constant((0.0, 0.0, 0.0))
elif self.dom.mesh.topology().dim() == 2:
zeros = Constant((0.0, 0.0))
self.bcu.append(DirichletBC(self.fs.V, zeros, bc_domain))
self.bcs.append(DirichletBC(self.fs.W.sub(0), zeros, bc_domain))
elif bc_type == 'free_slip':
# Identify the component/direction normal to this wall
if bc_loc == 'east' or bc_loc == 'west':
norm_comp = 0
elif bc_loc == 'south' or bc_loc == 'north':
norm_comp = 1
elif bc_loc == 'bottom' or bc_loc == 'top':
norm_comp = 2
self.bcu.append(DirichletBC(self.fs.V.sub(norm_comp), Constant(0.0), bc_domain))
self.bcs.append(DirichletBC(self.fs.W.sub(0).sub(norm_comp), Constant(0.0), bc_domain))
elif bc_type == 'no_stress':
self.bcp.append(DirichletBC(self.fs.Q, Constant(0.0), bc_domain))
else:
unique_ids = np.unique(self.dom.boundary_markers.array())
### Assemble boundary conditions ###
bcu_eqns = []
bcp_eqns = []
for bc_type, bs in self.boundary_types.items():
if bs is not None:
if bc_type == "inflow":
for b in bs:
if self.boundary_names[b] in unique_ids:
bcu_eqns.append([self.fs.V, self.fs.W.sub(0), self.bc_velocity, self.boundary_names[b]])
elif bc_type == "no_slip":
for b in bs:
bcu_eqns.append([self.fs.V, self.fs.W.sub(0), self.zeros, self.boundary_names[b]])
elif bc_type == "free_slip":
temp_list = list(self.boundary_names.keys()) # get ordered list
for b in bs:
### get a facet on the relevant boundary ###
boundary_id = self.boundary_names[b]
### check to make sure the free slip boundary still exists ###
if boundary_id in unique_ids:
facet_ids = self.dom.boundary_markers.where_equal(boundary_id)
test_facet = Facet(self.dom.mesh,facet_ids[int(len(facet_ids)/2.0)])
### get the function space sub form the normal ###
facet_normal = test_facet.normal().array()
field_id = int(np.argmin(abs(abs(facet_normal)-1.0)))
bcu_eqns.append([self.fs.V.sub(field_id), self.fs.W.sub(0).sub(field_id), self.zero, boundary_id])
elif bc_type == "no_stress":
for b in bs:
bcu_eqns.append([None, None, None, self.boundary_names[b]])
bcp_eqns.append([self.fs.Q, self.fs.W.sub(1), self.zero, self.boundary_names[b]])
else:
raise ValueError(bc_type+" is not a recognized boundary type")
bcs_eqns = bcu_eqns#+bcp_eqns
### Set the boundary conditions ###
self.bcu = []
for i in range(len(bcu_eqns)):
if bcu_eqns[i][0] is not None:
self.bcu.append(DirichletBC(bcu_eqns[i][0], bcu_eqns[i][2], self.dom.boundary_markers, bcu_eqns[i][3]))
self.bcp = []
for i in range(len(bcp_eqns)):
if bcp_eqns[i][0] is not None:
self.bcp.append(DirichletBC(bcp_eqns[i][0], bcp_eqns[i][2], self.dom.boundary_markers, bcp_eqns[i][3]))
self.bcs = []
for i in range(len(bcs_eqns)):
if bcs_eqns[i][0] is not None:
self.bcs.append(DirichletBC(bcs_eqns[i][1], bcs_eqns[i][2], self.dom.boundary_markers, bcs_eqns[i][3]))
self.fprint("Boundary Conditions Applied",offset=1)
self.fprint("")
def PrepareVelocity(self,inflow_angle):
length = len(self.unit_reference_velocity)
ux_com = np.zeros(length)
uy_com = np.zeros(length)
uz_com = np.zeros(length)
for i in range(length):
v = self.HH_vel * self.unit_reference_velocity[i]
ux_com[i] = math.cos(inflow_angle)*v
uy_com[i] = math.sin(inflow_angle)*v
if self.dom.dim == 3:
uz_com[i] = 0.0
return [ux_com,uy_com,uz_com]
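    # A quick sanity sketch for PrepareVelocity: with inflow_angle = 0 the flow is
    # aligned with +x (uy = 0 everywhere); with inflow_angle = pi/2 it is aligned
    # with +y up to floating-point error. The z component is always zero here.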
def RecomputeVelocity(self,inflow_angle):
self.fprint("Recomputing Velocity")
ux_com, uy_com, uz_com = self.PrepareVelocity(inflow_angle)
self.ux = Function(self.fs.V0)
self.uy = Function(self.fs.V1)
if self.dom.dim == 3:
self.uz = Function(self.fs.V2)
self.ux.vector()[:] = ux_com
self.uy.vector()[:] = uy_com
if self.dom.dim == 3:
self.uz.vector()[:] = uz_com
### Assigning Velocity
self.bc_velocity = Function(self.fs.V)
self.bc_velocity.rename("bc_velocity","bc_velocity")
if self.dom.dim == 3:
self.fs.VelocityAssigner.assign(self.bc_velocity,[self.ux,self.uy,self.uz])
else:
self.fs.VelocityAssigner.assign(self.bc_velocity,[self.ux,self.uy])
### Create Pressure Boundary Function
self.bc_pressure = Function(self.fs.Q)
### Create Initial Guess
self.fprint("Assigning Initial Guess")
self.u0 = Function(self.fs.W)
self.fs.SolutionAssigner.assign(self.u0,[self.bc_velocity,self.bc_pressure])
self.SetupBoundaries()
def UpdateVelocity(self, simTime):
pass
def SaveInitialGuess(self,val=0):
"""
        This function saves the initial guess (velocity and pressure) to output/.../functions/
"""
self.bc_velocity.vector()[:]=self.bc_velocity.vector()[:]/self.dom.xscale
self.dom.mesh.coordinates()[:]=self.dom.mesh.coordinates()[:]/self.dom.xscale
if self.ig_first_save:
self.u0_file = self.params.Save(self.bc_velocity,"u0",subfolder="functions/",val=val)
self.p0_file = self.params.Save(self.bc_pressure,"p0",subfolder="functions/",val=val)
self.ig_first_save = False
else:
self.params.Save(self.bc_velocity,"u0",subfolder="functions/",val=val,file=self.u0_file)
self.params.Save(self.bc_pressure,"p0",subfolder="functions/",val=val,file=self.p0_file)
self.bc_velocity.vector()[:]=self.bc_velocity.vector()[:]*self.dom.xscale
self.dom.mesh.coordinates()[:]=self.dom.mesh.coordinates()[:]*self.dom.xscale
def SaveHeight(self,val=0):
"""
        This function saves the height and depth fields to output/.../functions/
"""
self.dom.mesh.coordinates()[:]=self.dom.mesh.coordinates()[:]/self.dom.xscale
self.height.vector()[:]=self.height.vector()[:]/self.dom.xscale
self.depth.vector()[:]=self.depth.vector()[:]/self.dom.xscale
if self.height_first_save:
self.height_file = self.params.Save(self.height,"height",subfolder="functions/",val=val)
self.depth_file = self.params.Save(self.depth,"depth",subfolder="functions/",val=val)
self.height_first_save = False
else:
self.params.Save(self.height,"height",subfolder="functions/",val=val,file=self.height_file)
self.params.Save(self.depth,"depth",subfolder="functions/",val=val,file=self.depth_file)
self.height.vector()[:]=self.height.vector()[:]*self.dom.xscale
self.depth.vector()[:]=self.depth.vector()[:]*self.dom.xscale
self.dom.mesh.coordinates()[:]=self.dom.mesh.coordinates()[:]*self.dom.xscale
def CalculateHeights(self):
### Calculate the distance to the ground for the Q function space ###
# self.z_dist_Q = Function(fs.Q)
self.height = Function(self.fs.Q)
self.depth = Function(self.fs.Q)
Q_coords = self.fs.Q.tabulate_dof_coordinates()
height_vals = self.height.vector()[:]
for i in range(len(Q_coords)):
height_vals[i] = self.dom.Ground(Q_coords[i,0],Q_coords[i,1])
z_dist_Q = Q_coords[:,2]-height_vals
self.height.vector()[:]=height_vals
self.depth.vector()[:]=z_dist_Q
### Calculate the distance to the ground for the V function space ###
self.depth_V = Function(self.fs.V)
V_coords = self.fs.V.tabulate_dof_coordinates()
z_dist_V_val = np.zeros(len(V_coords))
for i in range(len(V_coords)):
z_dist_V_val[i] = V_coords[i,2]-self.dom.Ground(V_coords[i,0],V_coords[i,1])
self.depth_V.vector()[:]=z_dist_V_val
self.V0_coords = self.fs.V0.tabulate_dof_coordinates()
class UniformInflow(GenericBoundary):
def __init__(self,dom,fs,farm):
super(UniformInflow, self).__init__(dom,fs,farm)
self.fprint("Setting Up Boundary Conditions",special="header")
self.fprint("Type: Uniform Inflow")
for key, values in self.boundary_types.items():
self.fprint("Boundary Type: {0}, Applied to:".format(key))
for value in values:
self.fprint(value,offset=1)
### Create the Velocity Function ###
self.ux = Function(fs.V0)
self.uy = Function(fs.V1)
if self.dom.dim == 3:
self.uz = Function(fs.V2)
self.unit_reference_velocity = np.full(len(self.ux.vector()[:]),1.0)
self.ux.vector()[:] = self.unit_reference_velocity
ux_com, uy_com, uz_com = self.PrepareVelocity(self.dom.inflow_angle)
self.ux.vector()[:] = ux_com
self.uy.vector()[:] = uy_com
if self.dom.dim == 3:
self.uz.vector()[:] = uz_com
### Compute distances ###
if self.dom.dim == 3:
self.fprint("Computing Distance to Ground")
self.CalculateHeights()
### Assigning Velocity
self.fprint("Computing Velocity Vector")
self.bc_velocity = Function(fs.V)
if self.dom.dim == 3:
self.fs.VelocityAssigner.assign(self.bc_velocity,[self.ux,self.uy,self.uz])
else:
self.fs.VelocityAssigner.assign(self.bc_velocity,[self.ux,self.uy])
### Create Pressure Boundary Function
self.bc_pressure = Function(fs.Q)
### Create Initial Guess
self.fprint("Assigning Initial Guess")
self.u0 = Function(fs.W)
self.fs.SolutionAssigner.assign(self.u0,[self.bc_velocity,self.bc_pressure])
### Setup the boundary Conditions ###
self.SetupBoundaries()
self.DebugOutput()
self.fprint("Boundary Condition Finished",special="footer")
class PowerInflow(GenericBoundary):
"""
PowerInflow creates a set of boundary conditions where the x-component
of velocity follows a power law. Currently the function is
.. math::
u_x=8.0 \\left( \\frac{z-z_0}{z_1-z_0} \\right)^{0.15}.
where :math:`z_0` is the ground and :math:`z_1` is the top of the domain.
Args:
dom (:class:`windse.DomainManager.GenericDomain`): A windse domain object.
fs (:class:`windse.FunctionSpaceManager.GenericFunctionSpace`):
A windse function space object
Todo:
* Make the max velocity an input
* Make the power an input
"""
def __init__(self,dom,fs,farm):
super(PowerInflow, self).__init__(dom,fs,farm)
if self.dom.dim != 3:
raise ValueError("PowerInflow can only be used with 3D domains.")
### Setup Boundary Conditions
self.fprint("Setting Up Boundary Conditions",special="header")
self.fprint("Type: Power Law Inflow")
for key, values in self.boundary_types.items():
self.fprint("Boundary Type: {0}, Applied to:".format(key))
for value in values:
self.fprint(value,offset=1)
self.fprint("")
### Compute distances ###
self.fprint("Computing Distance to Ground")
self.CalculateHeights()
depth_v0,depth_v1,depth_v2 = self.depth_V.split(deepcopy=True)
### Create the Velocity Function ###
self.fprint("Computing Velocity Vector")
self.ux = Function(fs.V0)
self.uy = Function(fs.V1)
self.uz = Function(fs.V2)
        scaled_depth = np.abs(np.divide(depth_v0.vector()[:],(np.mean(self.vel_height)-dom.ground_reference)))
        # scaled_depth = np.abs(np.divide(depth_v0.vector()[:],(np.mean(self.vel_height)-0.0)))
self.unit_reference_velocity = np.power(scaled_depth,self.power)
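        # Sanity sketch: wherever the scaled depth equals 1 (the reference velocity
        # height above the ground reference), the unit reference velocity is 1 and
        # PrepareVelocity scales it to exactly HH_vel.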
# self.reference_velocity = np.multiply(self.HH_vel,np.power(scaled_depth,self.power))
ux_com, uy_com, uz_com = self.PrepareVelocity(self.dom.inflow_angle)
self.ux.vector()[:] = ux_com
self.uy.vector()[:] = uy_com
self.uz.vector()[:] = uz_com
### Assigning Velocity
self.bc_velocity = Function(self.fs.V)
if self.dom.dim == 3:
self.fs.VelocityAssigner.assign(self.bc_velocity,[self.ux,self.uy,self.uz])
else:
self.fs.VelocityAssigner.assign(self.bc_velocity,[self.ux,self.uy])
### Create Pressure Boundary Function
self.bc_pressure = Function(self.fs.Q)
### Create Initial Guess
self.fprint("Assigning Initial Guess")
self.u0 = Function(self.fs.W)
self.fs.SolutionAssigner.assign(self.u0,[self.bc_velocity,self.bc_pressure])
### Setup the boundary Conditions ###
self.SetupBoundaries()
self.DebugOutput()
self.fprint("Boundary Condition Setup",special="footer")
class LogLayerInflow(GenericBoundary):
def __init__(self,dom,fs,farm):
super(LogLayerInflow, self).__init__(dom,fs,farm)
if self.dom.dim != 3:
raise ValueError("LogLayerInflow can only be used with 3D domains.")
### Setup Boundary Conditions
self.fprint("Setting Up Boundary Conditions",special="header")
self.fprint("Type: Power Law Inflow")
for key, values in self.boundary_types.items():
self.fprint("Boundary Type: {0}, Applied to:".format(key))
if values is not None:
for value in values:
self.fprint(value,offset=1)
self.fprint("")
### Compute distances ###
self.fprint("Computing Distance to Ground")
self.CalculateHeights()
depth_v0,depth_v1,depth_v2 = self.depth_V.split(deepcopy=True)
### Create the Velocity Function ###
self.fprint("Computing Velocity Vector")
self.ux = Function(fs.V0)
self.uy = Function(fs.V1)
self.uz = Function(fs.V2)
if dom.ground_reference == 0:
scaled_depth = np.abs(np.divide(depth_v0.vector()[:]+0.0001,0.0001))
ustar = self.k/np.log(np.mean(self.vel_height)/0.0001)
elif dom.ground_reference <= 0:
raise ValueError("Log profile cannot be used with negative z values")
else:
scaled_depth = np.abs(np.divide(depth_v0.vector()[:]+dom.ground_reference,(dom.ground_reference)))
ustar = self.k/np.log(np.mean(self.vel_height)/dom.ground_reference)
self.unit_reference_velocity = np.multiply(ustar/self.k,np.log(scaled_depth))
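        # Sanity sketch: ustar is chosen as k/log(vel_height/z0) above, so wherever
        # the scaled depth equals vel_height/z0 the unit reference velocity is 1 and
        # PrepareVelocity scales it to exactly HH_vel.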
ux_com, uy_com, uz_com = self.PrepareVelocity(self.dom.inflow_angle)
self.ux.vector()[:] = ux_com
self.uy.vector()[:] = uy_com
self.uz.vector()[:] = uz_com
### Assigning Velocity
self.bc_velocity = Function(self.fs.V)
if self.dom.dim == 3:
self.fs.VelocityAssigner.assign(self.bc_velocity,[self.ux,self.uy,self.uz])
else:
self.fs.VelocityAssigner.assign(self.bc_velocity,[self.ux,self.uy])
### Create Pressure Boundary Function
self.bc_pressure = Function(self.fs.Q)
### Create Initial Guess
self.fprint("Assigning Initial Guess")
self.u0 = Function(self.fs.W)
self.fs.SolutionAssigner.assign(self.u0,[self.bc_velocity,self.bc_pressure])
### Setup the boundary Conditions ###
self.SetupBoundaries()
self.DebugOutput()
self.fprint("Boundary Condition Setup",special="footer")
class TurbSimInflow(LogLayerInflow):
def __init__(self,dom,fs,farm):
super(TurbSimInflow, self).__init__(dom,fs,farm)
### Get the path for turbsim data ###
if self.turbsim_path is None:
raise ValueError("Please provide the path to the turbsim data")
### Load Turbsim Data ###
uTotal = np.load(self.turbsim_path+'turb_u.npy')
vTotal = np.load(self.turbsim_path+'turb_v.npy')
wTotal = np.load(self.turbsim_path+'turb_w.npy')
### Extract number of data points ###
ny = np.shape(uTotal)[1]
nz = np.shape(uTotal)[0]
nt = np.shape(uTotal)[2]
### Create the data bounds ###
y = np.linspace(self.dom.y_range[0], self.dom.y_range[1], ny)
z = np.linspace(self.dom.z_range[0], self.dom.z_range[1], nz)
t = np.linspace(0.0, self.final_time, nt)
### Build interpolating functions ###
self.interp_u = RegularGridInterpolator((z, y, t), uTotal)
self.interp_v = RegularGridInterpolator((z, y, t), vTotal)
self.interp_w = RegularGridInterpolator((z, y, t), wTotal)
### Locate Boundary DOFS indexes ###
# Define tolerance
tol = 1e-6
##### FIX MAKE WORK FOR ALL BOUNDARY INFLOW ####
        # Iterate and find the boundary IDs
self.boundaryIDs = []
for k, pos in enumerate(self.V0_coords):
if pos[0] < self.dom.x_range[0] + tol:
self.boundaryIDs.append(k)
self.UpdateVelocity(0.0)
self.DebugOutput()
def UpdateVelocity(self, simTime):
# Define tolerance
tol = 1e-6
loc_ux = self.ux.vector().get_local()
loc_uy = self.uy.vector().get_local()
loc_uz = self.uz.vector().get_local()
# Interpolate a value at each boundary coordinate
for k in self.boundaryIDs:
# Get the position corresponding to this boundary id
pos = self.V0_coords[k, :]
# The interpolation point specifies a 3D (z, y, time) point
xi = np.array([pos[2], pos[1], simTime])
# This method breaks in parallel
# self.ux.vector()[k] = self.interp_u(xi)
# self.uy.vector()[k] = self.interp_v(xi)
# self.uz.vector()[k] = self.interp_w(xi)
# Get the interpolated value at this point
loc_ux[k] = self.interp_u(xi)
loc_uy[k] = self.interp_v(xi)
loc_uz[k] = self.interp_w(xi)
# This is safer in parallel
self.ux.vector()[:] = (loc_ux)
self.uy.vector()[:] = (loc_uy)
self.uz.vector()[:] = (loc_uz)
### Assigning Velocity
self.bc_velocity = Function(self.fs.V)
if self.dom.dim == 3:
self.fs.VelocityAssigner.assign(self.bc_velocity,[self.ux,self.uy,self.uz])
else:
self.fs.VelocityAssigner.assign(self.bc_velocity,[self.ux,self.uy])
self.SetupBoundaries()
```
#### File: WindSE/windse/__init__.py
```python
import os
import __main__
### Get the name of program importing this package ###
if hasattr(__main__,"__file__"):
main_file = os.path.basename(__main__.__file__)
else:
main_file = "ipython"
from windse.ParameterManager import windse_parameters
def initialize(loc,updated_parameters=[]):
"""
This function initializes all the submodules in WindSE.
Args:
loc (str): This string is the location of the .yaml parameters file.
"""
windse_parameters.Load(loc,updated_parameters=updated_parameters)
global BaseHeight, CalculateDiskTurbineForces, UpdateActuatorLineForce, RadialChordForce, Optimizer#, ReducedFunctional
if windse_parameters["general"].get("dolfin_adjoint", False) or main_file in ["sphinx-build", "__main__.py"]:
from windse.dolfin_adjoint_helper import BaseHeight, CalculateDiskTurbineForces, UpdateActuatorLineForce, RadialChordForce#, ReducedFunctional
from windse.OptimizationManager import Optimizer
else:
from windse.helper_functions import BaseHeight, CalculateDiskTurbineForces, UpdateActuatorLineForce, RadialChordForce
global BoxDomain, CylinderDomain, CircleDomain, RectangleDomain, ImportedDomain, InterpolatedCylinderDomain, InterpolatedBoxDomain, PeriodicDomain
from windse.DomainManager import BoxDomain, CylinderDomain, CircleDomain, RectangleDomain, ImportedDomain, InterpolatedCylinderDomain, InterpolatedBoxDomain, PeriodicDomain
global GridWindFarm, RandomWindFarm, ImportedWindFarm, EmptyWindFarm
from windse.WindFarmManager import GridWindFarm, RandomWindFarm, ImportedWindFarm, EmptyWindFarm
global RefineMesh, WarpMesh
from windse.RefinementManager import RefineMesh, WarpMesh
global LinearFunctionSpace, TaylorHoodFunctionSpace
from windse.FunctionSpaceManager import LinearFunctionSpace, TaylorHoodFunctionSpace
global PowerInflow, UniformInflow, LogLayerInflow, TurbSimInflow
from windse.BoundaryManager import PowerInflow, UniformInflow, LogLayerInflow, TurbSimInflow
global StabilizedProblem, TaylorHoodProblem, IterativeSteady, UnsteadyProblem
from windse.ProblemManager import StabilizedProblem, TaylorHoodProblem, IterativeSteady, UnsteadyProblem
global SteadySolver, IterativeSteadySolver, UnsteadySolver, MultiAngleSolver, TimeSeriesSolver
from windse.SolverManager import SteadySolver, IterativeSteadySolver, UnsteadySolver, MultiAngleSolver, TimeSeriesSolver
```
#### File: windse/objective_functions/PlaneBlockage.py
```python
import os
import __main__
### Get the name of program importing this package ###
if hasattr(__main__,"__file__"):
main_file = os.path.basename(__main__.__file__)
else:
main_file = "ipython"
### This checks if we are just doing documentation ###
if not main_file in ["sphinx-build", "__main__.py"]:
from dolfin import *
from dolfin_adjoint import *
########################################################
### Additional import statements ###
import numpy as np
### Declare Unique name
name = "plane_blockage"
### Set default keyword argument values ###
keyword_defaults = {
"axis": 2,
"thickness": "rmax",
"center" : 250,
"offset_by_mean": False
}
### Define objective function
def objective(solver, inflow_angle = 0.0, first_call=False, **kwargs):
'''
This is a simple blockage metric that integrates the velocity deficit in
a plane in front of or above the farm.
Keyword arguments:
        axis: the orientation of the plane as an axis index: 2 (the z axis) for above, 0 (the x axis) for in front
thickness: how thick of a plane to integrate over
center: distance along the axis where the plane is centered
'''
### Extract keyword arguments
axis = int(kwargs.pop("axis"))
thickness = kwargs.pop("thickness")
center = kwargs.pop("center")
if thickness == "rmax":
thickness = solver.problem.dom.mesh.rmax()
### Get bounds of integration ###
lb = center - thickness/2.0
ub = center + thickness/2.0
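    # For example (illustrative numbers): with axis=2 and center=250 (the defaults)
    # and a thickness of 10, the integration slab spans z in [245, 255].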
# ### Create the Mesh Function to hold the region of integration
# region = CompiledSubDomain("x[axis]>=lb && x[axis]<=ub", lb=lb, ub=ub, axis=axis)
# plane_marker = MeshFunction("size_t", solver.problem.dom.mesh, solver.problem.dom.mesh.topology().dim())
# plane_marker.set_all(0)
# region.mark(plane_marker,1)
# File("test_"+repr(center)+".pvd")<<plane_marker
# ### Create measure
plane_marker = Expression('x[axis] < lb ? 0.0 : (x[axis] > ub ? 0.0 : 1.0)', lb=lb, ub=ub, axis=axis, degree=1)
dx = Measure('dx', domain=solver.problem.dom.mesh)
V = assemble(plane_marker*dx)
if V <= 1e-10:
J = np.nan
print("Warning: No area of integration for plane blockage, refine mesh or increase thickness.")
else:
### Compute velocity deficit
# u_ref = solver.problem.bd.bc_velocity[0]
# u = solver.problem.u_k[0]
# ud = (u - u_ref)/u_ref
# ud = u
### Evaluate objective ###
J = assemble(plane_marker*solver.problem.u_k[0]*dx)/V
return J
```
#### File: WindSE/windse/ParameterManager.py
```python
import __main__
import os
import yaml
import warnings
import copy
### Get the name of program importing this package ###
if hasattr(__main__,"__file__"):
main_file = os.path.basename(__main__.__file__)
else:
main_file = "ipython"
### This checks if we are just doing documentation ###
if not main_file in ["sphinx-build", "__main__.py"]:
import datetime
import numpy as np
from math import ceil
import shutil
import dolfin
import sys
import ast
import difflib
import inspect
# set_log_level(LogLevel.CRITICAL)
######################################################
### Collect all options and define general options ###
######################################################
### This is a special class that allows prints to go to both file and terminal
class Logger(object):
def __init__(self, filename, std, rank):
self.__dict__ = std.__dict__.copy()
self.terminal = std
self.rank = rank
if self.rank == 0:
self.log = open(filename, "a")
self.log.seek(0)
self.log.truncate()
def write(self, message):
self.terminal.write(message)
if self.rank == 0:
self.log.write(message)
def flush(self):
self.terminal.flush()
if self.rank == 0:
self.log.flush()
pass
def isatty(self):
return self.terminal.isatty()
class Parameters(dict):
"""
Parameters is a subclass of Python's *dict* that adds
functions specific to windse.
"""
def __init__(self):
super(Parameters, self).__init__()
self.current_tab = 0
self.tagged_output = {}
self.windse_path = os.path.dirname(os.path.realpath(__file__))
self.defaults = yaml.load(open(self.windse_path+"/default_parameters.yaml"),Loader=yaml.SafeLoader)
### Update self with the defaults ###
defaults_bak = copy.deepcopy(self.defaults)
self.update(defaults_bak)
### Include the defaults from all the objectives ###
import windse.objective_functions as obj_funcs
self.obj_names = obj_funcs.objective_functions.keys()
self["optimization"]["objective_type"] = obj_funcs.objective_kwargs
# print(dir(obj_funcs))
# print(obj_funcs.alm_power())
# exit()
def TerminalUpdate(self,dic,keys,value):
if len(keys) > 1:
next_dic = dic.setdefault(keys[0],{})
self.TerminalUpdate(next_dic,keys[1:],value)
elif len(keys) == 1:
current_value = dic.get(keys[0],"")
if isinstance(current_value,int):
dic[keys[0]] = int(value)
elif isinstance(current_value,float):
dic[keys[0]] = float(value)
elif isinstance(current_value,str):
dic[keys[0]] = value
elif isinstance(current_value,list):
dic[keys[0]] = ast.literal_eval(value)
def CheckParameters(self,updates,defaults,out_string=""):
default_keys = defaults.keys()
for key in updates.keys():
split_key = key.split("_#")[0]
if split_key not in default_keys:
suggestion = difflib.get_close_matches(key, default_keys, n=1)
if suggestion:
raise KeyError(out_string + key + " is not a valid parameter, did you mean: "+suggestion[0])
else:
raise KeyError(out_string + key + " is not a valid parameter")
elif isinstance(updates[key],dict):
in_string =out_string + key + ":"
self.CheckParameters(updates[key],defaults[split_key],out_string=in_string)
def NestedUpdate(self,dic,subdic=None):
if subdic is None:
target_dic = self
else:
target_dic = subdic
for key, value in dic.items():
if isinstance(value,dict):
target_dic[key] = self.NestedUpdate(value,subdic=target_dic[key])
else:
target_dic[key] = value
return target_dic
def Load(self, loc,updated_parameters=[]):
"""
This function loads the parameters from the .yaml file.
It should only be accessed once, from the :meth:`windse.initialize` function.
Args:
loc (str): This string is the location of the .yaml parameters file.
"""
# Create an MPI communicator and initialize rank and num_procs
self.comm = dolfin.MPI.comm_world
self.rank = self.comm.Get_rank()
self.num_procs = self.comm.Get_size()
### Load the yaml file (requires PyYaml)
if isinstance(loc,dict):
self.fprint("Loading from dictionary")
yaml_file = loc
else:
self.fprint("Loading: "+loc)
yaml_file = yaml.load(open(loc),Loader=yaml.SafeLoader)
### update any parameters if supplied ###
for p in updated_parameters:
keys_list = p.split(":")
self.TerminalUpdate(yaml_file,keys_list[:-1],keys_list[-1])
### Check for incorrect parameters ###
self.CheckParameters(yaml_file,self)
self.fprint("Parameter Check Passed")
### Check if specific parameters were provided ###
yaml_bc = yaml_file.get("boundary_conditions",{})
self.default_bc_names = True
if yaml_bc.get("boundary_names",{}):
self.default_bc_names = False
self.default_bc_types = True
if yaml_bc.get("boundary_types",{}):
self.default_bc_types = False
### Setup objective functions if needed ###
yaml_op = yaml_file.get("optimization",{})
objective_type = yaml_op.pop("objective_type", None)
### Load in the default objective dictionaries
import windse.objective_functions as obj_funcs
### Replace the dictionary defaults with the real default
if objective_type is None:
objective_type = self.defaults["optimization"]["objective_type"]
### Process the objective keyword arguments
if isinstance(objective_type,str):
objective_type = {objective_type: obj_funcs.objective_kwargs[objective_type]}
elif isinstance(objective_type,list):
new_objective_type = {}
for obj in objective_type:
new_objective_type[obj] = obj_funcs.objective_kwargs[obj]
objective_type = new_objective_type
elif isinstance(objective_type,dict):
### make sure to add in any default values the user may not have set for the objectives
for key, value in objective_type.items():
objective_split = key.split("_#")[0]
obj_default = obj_funcs.objective_kwargs[objective_split]
for k, v in obj_default.items():
if k not in value.keys():
value[k] = v
### Set the parameters ###
self.update(self.NestedUpdate(yaml_file))
self["optimization"]["objective_type"] = objective_type
### Create Instances of the general options ###
for key, value in self["general"].items():
setattr(self,key,value)
### Check if dolfin_adjoint is unnecessary or required ###
opt_gradient = yaml_file.get("optimization",{}).get("gradient",False)
opt_taylor = yaml_file.get("optimization",{}).get("taylor_test",False)
opt_optimize = yaml_file.get("optimization",{}).get("optimize",False)
self.performing_opt_calc = opt_gradient or opt_taylor or opt_optimize
if self.performing_opt_calc and not self.dolfin_adjoint:
raise ValueError("Asked to perform gradient, Taylor test, or optimization but general:dolfin_adjoint is set to False. These operations will not work without dolfin_adjoint.")
elif not self.performing_opt_calc and self.dolfin_adjoint:
warnings.warn("general:dolfin_adjoint is set to True but no optimization parameters provided. This will cause unneeded overhead.")
# print(self.dolfin_adjoint)
# for module in sys.modules:
# if "adjoint" in module:
# print(module)
### set default name ###
if self.name is None:
_, yaml_name = os.path.split(loc)
self.name = yaml_name.split(".")[0]
### Set up the folder Structure ###
timestamp=datetime.datetime.today().strftime('%Y%m%d_%H%M%S')
fancytimestamp=datetime.datetime.today().strftime('%Y/%m/%d_%H:%M:%S')
if self.preappend_datetime:
self.name = timestamp+"-"+self.name
self["general"]["name"]=self.name
self.folder = self.output_folder+self.name+"/"
self["general"]["folder"] = self.folder
# Create all needed directories ahead of time
if self.rank == 0:
# Try to create the parent folder
os.makedirs(self.folder, exist_ok=True)
# Try to create all sub folders within this parent
subfolder_list = ['data',
'functions',
'input_files',
'mesh',
'plots',
'timeSeries',
'profiling']
for sub in subfolder_list:
os.makedirs('%s/%s' % (self.folder, sub), exist_ok=True)
os.makedirs('%s/data/alm' % (self.folder), exist_ok=True)
os.makedirs('%s/data/alm/rotor_force' % (self.folder), exist_ok=True)
os.makedirs('%s/data/alm/angle_of_attack' % (self.folder), exist_ok=True)
# Wait until rank 0 has created the directory structure
self.comm.barrier()
# ### Make sure folder exists ###
# comm = MPI.comm_world
# rank = comm.Get_rank()
# if not os.path.exists(self.folder) and rank == 0: os.makedirs(self.folder)
# if not os.path.exists(self.folder+"input_files/") and rank == 0: os.makedirs(self.folder+"input_files/")
### Setup the logger ###
self.log = self.folder+"log.txt"
sys.stdout = Logger(self.log, sys.stdout, self.rank)
sys.stderr = Logger(self.log, sys.stderr, self.rank)
### Copy params file to output folder ###
if isinstance(loc,str):
shutil.copy(loc,self.folder+"input_files/")
### Create checkpoint if required ###
# if self.save_file_type == "hdf5":
# self.Hdf=HDF5File(MPI.mpi_comm(), self.folder+"checkpoint/checkpoint.h5", "w")
### Print some more stuff
self.fprint("General Parameter Information", special="header")
self.fprint("Run Name: {0}".format(self.name))
self.fprint("Run Time Stamp: {0}".format(fancytimestamp))
self.fprint("Output Folder: {0}".format(self.folder))
if updated_parameters:
self.fprint("Updated Parameter:")
for i,p in enumerate(updated_parameters):
self.fprint("{:d}: {:}".format(i,p),offset=1)
self.fprint("Parameters Setup", special="footer")
def Read(self):
"""
This function reads the current state of the parameters object
and prints it in an easy-to-read way.
"""
for group in self:
print(group)
max_length = 0
for key in self[group]:
max_length = max(max_length,len(key))
max_length = max_length
for key in self[group]:
print(" "+key+": "+" "*(max_length-len(key))+repr(self[group][key]))
def Save(self, func, filename, subfolder="",val=0,file=None,filetype="default"):
"""
This function is used to save the various dolfin.Functions created
by windse. It should only be accessed internally.
Args:
func (dolfin.Function): The Function to be saved
filename (str): the name of the function
:Keyword Arguments:
* **subfolder** (*str*): where to save the files within the output folder
* **val** (*float*): used for saving a series of outputs. Use val=0 for the first save.
"""
self.fprint("Saving: {0}".format(filename))
# if not isinstance(init_func,Function):
# func = Function(func)
# else:
# func = init_func
### Name the function in the meta data, This should probably be done at creation
old_filename = func.name()
func.rename(filename,filename)
if filetype == "default":
filetype = self.output_type
if file is None:
### Make sure the folder exists
if not os.path.exists(self.folder+subfolder) and self.rank == 0: os.makedirs(self.folder+subfolder)
if filetype == "pvd":
file_string = self.folder+subfolder+filename+".pvd"
out = dolfin.File(file_string)
out << (func,val)
elif filetype == "xdmf":
file_string = self.folder+subfolder+filename+".xdmf"
out = dolfin.XDMFFile(file_string)
out.write(func,val)
func.rename(old_filename,old_filename)
return out
else:
if filetype == "pvd" or isinstance(func,type(dolfin.Mesh)):
file << (func,val)
elif filetype == "xdmf":
file.write(func,val)
func.rename(old_filename,old_filename)
return file
def save_csv(self, filename, data=None, subfolder="", header=None, mode='w'):
### Check Processor ###
if self.rank == 0:
### Set the output folder ###
out_f = subfolder
### Check if folder exists ###
if not os.path.exists(out_f): os.makedirs(out_f, exist_ok=True)
### Open the file ###
f = open(out_f+filename+".csv",mode)
### Write the header if present ###
if header is not None:
f.write(header)
f.write("\n")
### Write the data ###
if data is not None:
np.savetxt(f,data, delimiter=', ')
### Close the file ###
f.close()
dolfin.MPI.comm_world.barrier()
def fprint(self,string,tab=None,offset=0,special=None):
"""
This is just a fancy print function that will tab according to where
we are in the solve
Args:
string (str): the string for printing
:Keyword Arguments:
* **tab** (*int*): the tab level
"""
### Check Processor ###
if self.rank == 0:
### Check if tab length has been overridden
if tab is None:
tab = self.current_tab
### Check if we are starting or ending a section
if special=="header":
self.current_tab += 1
self.fprint("",tab=tab)
elif special =="footer":
self.current_tab -= 1
tab -= 1
self.fprint("",tab=tab+1)
### Apply Offset if provided ###
tab += offset
### Create Tabbed string ###
tabbed = "| "*tab
### Apply Tabbed string ###
if isinstance(string,str):
string = tabbed+string
else:
string = tabbed+repr(string)
### Print ###
# print(string, flush=True)
print(string)
sys.stdout.flush()
if special=="header":
self.fprint("",tab=tab+1)
def tag_output(self, key, value, collective_output=None):
### Process value ###
if not isinstance(value,int):
value = float(value)
if self.num_procs > 1:
send_data = np.float64(value)
mpi_buff = np.zeros(self.num_procs, dtype=np.float64)
self.comm.Gather(send_data, mpi_buff, root=0)
differing_opinions = False
if not np.all(np.isclose(mpi_buff, mpi_buff[0])):
differing_opinions = True
if differing_opinions or collective_output is not None:
if collective_output == 'sum' or 'sum' in key:
value = np.sum(mpi_buff)
elif collective_output == 'avg' or 'avg' in key:
value = np.mean(mpi_buff)
elif collective_output == 'max' or 'max' in key:
value = np.amax(mpi_buff)
elif collective_output == 'min' or 'min' in key:
value = np.amin(mpi_buff)
else:
print('WARNING: tagging %s in parallel may result in disagreement between processors.' % (key))
value = float(value)
if self.rank == 0:
### Grab the name of the module that called this function ###
stack = inspect.stack()[1][0]
mod = inspect.getmodule(stack)
the_module = mod.__name__.split(".")[-1]
the_class = stack.f_locals["self"].__class__.__name__
the_method = stack.f_code.co_name
### This will tell exactly where this function was called from ###
# print("I was called by {}:{}.{}()".format(the_module, the_class, the_method))
### Check if that module has called before and add the dictionary entries ###
if the_module in self.tagged_output.keys():
self.tagged_output[the_module].update({key: value})
else:
self.tagged_output.update({the_module: {key: value}})
### Update the yaml file ###
with open(self.folder+"tagged_output.yaml","w") as file:
yaml.dump(self.tagged_output, file, sort_keys=False)
### Print the new dict ###
# print(self.tagged_output)
windse_parameters = Parameters()
```
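`Load` accepts override strings of the form `group:key:value` and applies them through `TerminalUpdate`. A standalone sketch of that convention on a plain dict, with hypothetical parameter names (the real method additionally casts the value to the type already stored):
```python
params = {"general": {"name": "run", "output_type": "pvd"}}

override = "general:name:my_run"        # same format as an updated_parameters entry
*keys, value = override.split(":")

target = params
for key in keys[:-1]:
    target = target.setdefault(key, {})  # walk/create the nested groups
target[keys[-1]] = value

print(params["general"]["name"])         # -> my_run
```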
#### File: WindSE/windse/RefinementManager.py
```python
import time
import numpy as np
def CreateRefinementList(dom, farm, refine_params):
farm_num = refine_params["farm_num"]
farm_type = refine_params["farm_type"]
farm_factor = refine_params["farm_factor"]
turbine_num = refine_params["turbine_num"]
turbine_type = refine_params["turbine_type"]
turbine_factor = refine_params["turbine_factor"]
refine_custom = refine_params["refine_custom"] ### Need to fix for if the domain is scaled
refine_power_calc = refine_params["refine_power_calc"]
refine_list = []
if farm_num > 0:
bbox = farm.CalculateFarmBoundingBox()
for i in range(farm_num,0,-1):
# expand_factor = 1+(farm_factor-1)*(i)
expand_factor = (farm_factor)**(i)
if farm_type == 'box':
RD = max(farm.RD)
bbox[2] = [bbox[2][0]-RD,bbox[2][1]+RD]
refine_list.append(["box",[bbox,expand_factor]])
elif farm_type == 'cylinder':
RD = max(farm.RD)
x0 = (bbox[0][1]+bbox[0][0])/2.0
y0 = (bbox[1][1]+bbox[1][0])/2.0
if dom.dim == 3:
z0 = bbox[2][0]-RD
center = [x0,y0,z0]
height = bbox[2][1]-bbox[2][0]+2*RD
else:
center = [x0,y0]
height = 0
radius = np.sqrt((bbox[0][1]-bbox[0][0])**2+(bbox[1][1]-bbox[1][0])**2)/2.0
refine_list.append(["cylinder",[center,radius,height,expand_factor]])
elif farm_type == 'stream':
RD = max(farm.RD)
x0 = bbox[0][0]-RD
y0 = (bbox[1][1]+bbox[1][0])/2.0
if dom.dim == 3:
z0 = (min(farm.z)+max(farm.z))/2.0
center = [x0,y0,z0]
radius = np.sqrt((bbox[1][1]-bbox[1][0])**2+(bbox[2][1]-bbox[2][0])**2)/2.0
else:
center = [x0,y0]
radius = (bbox[1][1]-bbox[1][0])/2.0
length = bbox[0][1]-bbox[0][0]+6*RD
theta = dom.inflow_angle
pivot_offset = 3*max(farm.RD)/2.0
refine_list.append(["stream",[center,radius,length,theta,pivot_offset,expand_factor]])
if turbine_num > 0 and farm.numturbs > 0:
for i in range(turbine_num,0,-1):
expand_factor = (turbine_factor)**(i)
if turbine_type == 'simple':
radius = max(farm.RD)
refine_list.append(["simple",[radius,expand_factor]])
elif turbine_type == 'sphere':
radius = max(farm.RD)
refine_list.append(["sphere",[radius,expand_factor]])
elif turbine_type == 'wake':
radius = max(farm.RD)
length = 5*radius
theta = dom.inflow_angle
refine_list.append(["wake",[radius,length,theta,expand_factor]])
elif turbine_type == 'tear':
radius = max(farm.RD)
theta = dom.inflow_angle
refine_list.append(["tear",[radius,theta,expand_factor]])
if refine_power_calc:
radius = max(farm.RD)
length = radius/5.0
theta = dom.inflow_angle
centered = True
refine_list.append(["wake",[radius,length,theta,expand_factor,centered]])
if refine_custom is not None:
refine_list = refine_list+refine_custom
return refine_list
def RefineMesh(dom,farm):
### Define the possible operations ###
refine_dict = {"full": dom.Refine,
"box": dom.BoxRefine,
"cylinder": dom.CylinderRefine,
"stream": dom.StreamRefine,
"simple": farm.SimpleRefine,
"sphere": farm.SphereRefine,
"tear": farm.TearRefine,
"wake": farm.WakeRefine
}
### Alias the print command ###
fprint = dom.params.fprint
### Convert Parameters to a list of refine instructions ###
refine_params = dom.params["refine"]
refine_list = CreateRefinementList(dom,farm,refine_params)
### Step through refine instructions ###
num = len(refine_list)
for i, refine_step in enumerate(refine_list):
fprint("Refining Mesh Step {:d} of {:d}".format(i+1,num), special="header")
step_start = time.time()
refine_type = refine_step[0]
refine_args = refine_step[1]
refine_func = refine_dict[refine_type]
refine_func(*refine_args)
step_stop = time.time()
fprint("Step {:d} of {:d} Finished: {:1.2f} s".format(i+1,num,step_stop-step_start), special="footer")
def WarpMesh(dom):
warp_type = dom.params["refine"]["warp_type"]
warp_strength = dom.params["refine"]["warp_strength"]
warp_height = dom.params["refine"]["warp_height"]
warp_percent = dom.params["refine"]["warp_percent"]
if warp_type == "smooth":
dom.WarpSmooth(warp_strength)
elif warp_type == "split":
dom.WarpSplit(warp_height*dom.xscale,warp_percent)
```
|
{
"source": "jefanya14/Bot",
"score": 2
}
|
#### File: Bot/userbot/util.py
```python
import math
import os
import re
import time
from telethon import events
from telethon.tl.functions.messages import GetPeerDialogsRequest
# the secret configuration specific things
ENV = bool(os.environ.get("ENV", False))
if ENV:
from sample_config import Config
else:
if os.path.exists("config.py"):
from config import Development as Config
def admin_cmd(**args):
pattern = args.get("pattern")
allow_sudo = args.get("allow_sudo", False)
# get the pattern from the decorator
if pattern is not None:
if pattern.startswith("\\#"):
# special fix for snip.py
args["pattern"] = re.compile(pattern)
else:
args["pattern"] = re.compile(Config.COMMAND_HAND_LER + pattern)
args["outgoing"] = True
# should this command be available for other users?
if allow_sudo:
args["from_users"] = list(Config.SUDO_USERS)
# Mutually exclusive with outgoing (can only set one of either).
args["incoming"] = True
del args["allow_sudo"]
# error handling condition check
elif "incoming" in args and not args["incoming"]:
args["outgoing"] = True
# add blacklist chats, UB should not respond in these chats
args["blacklist_chats"] = True
black_list_chats = list(Config.UB_BLACK_LIST_CHAT)
if black_list_chats:
args["chats"] = black_list_chats
if "allow_edited_updates" in args and args["allow_edited_updates"]:
del args["allow_edited_updates"]
return events.NewMessage(**args)
async def is_read(borg, entity, message, is_out=None):
"""
Returns True if the given message (or id) has been read.
If an id is given, is_out needs to be a bool.
"""
is_out = getattr(message, "out", is_out)
if not isinstance(is_out, bool):
raise ValueError(
"Message was id but is_out not provided or not a bool")
message_id = getattr(message, "id", message)
if not isinstance(message_id, int):
raise ValueError("Failed to extract id from message")
dialog = (await borg(GetPeerDialogsRequest([entity]))).dialogs[0]
max_id = dialog.read_outbox_max_id if is_out else dialog.read_inbox_max_id
return message_id <= max_id
async def progress(current, total, event, start, type_of_ps):
"""Generic progress_callback for both
upload.py and download.py"""
now = time.time()
diff = now - start
if round(diff % 10.00) == 0 or current == total:
percentage = current * 100 / total
speed = current / diff
elapsed_time = round(diff) * 1000
time_to_completion = round((total - current) / speed) * 1000
estimated_total_time = elapsed_time + time_to_completion
progress_str = "[{0}{1}]\nPercent: {2}%\n".format(
"".join(["█" for i in range(math.floor(percentage / 5))]),
"".join(["░" for i in range(20 - math.floor(percentage / 5))]),
round(percentage, 2),
)
tmp = progress_str + "{0} of {1}\nETA: {2}".format(
humanbytes(current), humanbytes(total),
time_formatter(estimated_total_time))
await event.edit("{}\n {}".format(type_of_ps, tmp))
def humanbytes(size):
"""Input size in bytes,
outputs in a human readable format"""
# https://stackoverflow.com/a/49361727/4723940
if not size:
return ""
# 2 ** 10 = 1024
power = 2**10
raised_to_pow = 0
dict_power_n = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
while size > power:
size /= power
raised_to_pow += 1
return str(round(size, 2)) + " " + dict_power_n[raised_to_pow] + "B"
def time_formatter(milliseconds: int) -> str:
"""Inputs time in milliseconds, to get beautified time,
as string"""
seconds, milliseconds = divmod(int(milliseconds), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = (((str(days) + "d, ") if days else "") +
((str(hours) + "h, ") if hours else "") +
((str(minutes) + "m, ") if minutes else "") +
((str(seconds) + "s, ") if seconds else "") +
((str(milliseconds) + "ms, ") if milliseconds else ""))
return tmp[:-2]
```
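`humanbytes` and `time_formatter` are pure helpers, so a short usage sketch is enough to show their output format:
```python
print(humanbytes(1536))          # 1.5 KiB
print(humanbytes(5 * 2 ** 30))   # 5.0 GiB
print(time_formatter(3723000))   # 1h, 2m, 3s
```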
|
{
"source": "JeFaProductions/bombgame2",
"score": 3
}
|
#### File: bombgame2/bombgame/ai.py
```python
import numpy as np
from . import astar
SEARCH_TARGET = 0
MOVE = 1
class AI:
def __init__(self, player):
self.player = player
self.path = []
self.state = SEARCH_TARGET
self.weight_self = 3
self.weight_enemy = 6
self.weight_crossroad = 3
self.map_positions = np.empty((0, 0))
self.bomb_times = np.empty((0, 0))
def __update_map_positions(self, map):
if map.size != self.map_positions.shape:
width, height = map.size
self.map_positions = np.empty((width, height, 2))
self.map_positions[:, :, 0] = np.arange(width) \
.reshape(1, width).repeat(height, 0)
self.map_positions[:, :, 1] = np.arange(height) \
.reshape(height, 1).repeat(width, 1)
def __update_bomb_times(self, bombs, map):
if map.size != self.bomb_times.shape:
self.bomb_times = np.empty(map.size, dtype=np.int)
self.bomb_times[:, :] = 1e16
# define the four directions west, east, south, north
directions = np.array([(1, 0), (-1, 0), (0, 1), (0, -1)])
for bomb in bombs:
pos = bomb.pos
self.bomb_times[pos[0], pos[1]] = bomb.time
for dir in directions:
# try to spread the explosions as far as possible
for delta in range(1, bomb.range):
npos = pos + dir * delta
# check if the position is valid, if not stop explosion
# spread here
if not map.is_valid(npos) or map.is_blocked(npos) or \
map.has_explosion(npos):
break
self.bomb_times[npos[0], npos[1]] = bomb.time
def update(self, world):
self.player.drop_bomb = False
self.player.move[:] = 0
if self.state == MOVE:
if self.path:
next_pos = self.path.pop(0)
if world.map.is_blocked(next_pos) or world.map.has_explosion(next_pos):
self.path = []
self.state = SEARCH_TARGET
next_pos = np.array(next_pos, dtype=np.int)
self.player.move = next_pos - self.player.pos
else:
self.player.drop_bomb = True
self.state = SEARCH_TARGET
if self.state == SEARCH_TARGET:
# init score board; each tile gets a score and the tile with the
# maximum score is chosen as the target
score = np.zeros(world.map.size)
# get mask of tiles which are not blocked
unblock = ~world.map.blocked
width, height = score.shape
# create array of tile positions, create lazily
self.__update_map_positions(world.map)
self.__update_bomb_times(world.bombs, world.map)
# calculate distances of this player to all other tiles (manhattan)
self_dist = np.abs(self.map_positions - self.player.pos).sum(2)
# normalize distances into interval [0,1]
self_dist /= self_dist.max()
# make shortest distances have the greatest value
self_dist -= 1
self_dist *= -1
# check if there are any other players than this one
if len(world.players) > 1:
# calculate distances of all enemies to all other tiles
enemy_dist = []
for enemy in world.players:
# check if this player is not the one controlled by ai
if enemy.id != self.player.id:
diff = self.map_positions - enemy.pos
dist = np.abs(diff).sum(2)
enemy_dist.append(dist)
# convert distance to numpy array
enemy_dist = np.array(enemy_dist)
# find element wise minimum of all player distances
enemy_dist = np.min(enemy_dist, axis=0)
# normalize distances into interval [0,1]
enemy_dist /= enemy_dist.max()
# make shortest distances have the greatest value
enemy_dist -= 1
enemy_dist *= -1
else:
# no enemies, distances are zero
enemy_dist = np.zeros((width, height))
# detect how many neighbouring unblocked tiles each tile has
crossroads = np.zeros((width, height))
# add +1 if left neighbour is not blocked
crossroads[1:, :] += unblock[:-1, :] * 1
# add +1 if right neighbour is not blocked
crossroads[:-1, :] += unblock[1:, :] * 1
# add +1 if upper neighbour is not blocked
crossroads[:, 1:] += unblock[:, :-1] * 1
# add +1 if lower neighbour is not blocked
crossroads[:, :-1] += unblock[:, 1:] * 1
# normalize into interval [0,1]
crossroads /= 4
# calculate score as weighted sum
score += self.weight_self * self_dist
score += self.weight_enemy * enemy_dist
score += self.weight_crossroad * crossroads
# set all blocked tiles to zero
score[world.map.blocked] = 0
def is_valid(node, path):
return world.map.is_valid(node) and \
not world.map.is_blocked(node) and \
not world.map.has_explosion(node) and \
self.bomb_times[node[0], node[1]] - len(path) - 1 > 0
found = False
iterations = 0
while not found and iterations < 10:
# retrieve tile with maximum score
target = np.unravel_index(np.argmax(score), score.shape)
# set score to 0
score[target[0], target[1]] = 0
# search path with astar
self.path = astar.search(self.player.pos, target,
is_valid=is_valid)
if self.path:
self.state = MOVE
found = True
iterations += 1
if not found:
print('No path found!')
```
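The crossroad term in `AI.update` counts walkable neighbours for every tile using only shifted boolean arrays. A self-contained sketch of that trick on a toy 3x3 map:
```python
import numpy as np

# True = walkable tile, False = blocked (toy example)
unblock = np.array([[True,  True,  False],
                    [True,  False, True ],
                    [True,  True,  True ]])

crossroads = np.zeros(unblock.shape)
crossroads[1:, :]  += unblock[:-1, :]   # neighbour at -1 along axis 0
crossroads[:-1, :] += unblock[1:, :]    # neighbour at +1 along axis 0
crossroads[:, 1:]  += unblock[:, :-1]   # neighbour at -1 along axis 1
crossroads[:, :-1] += unblock[:, 1:]    # neighbour at +1 along axis 1

print(crossroads / 4)  # normalised into [0, 1], exactly as in AI.update
```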
#### File: bombgame2/bombgame/objects.py
```python
import numpy as np
class Grid:
def __init__(self, size, value=None):
self.size = size
self.data = [value for _ in range(size[0] * size[1])]  # fill with the given default value
def __getitem__(self, idx):
x, y = idx
idx = y * self.size[0] + x
return self.data[idx]
def __setitem__(self, idx, value):
x, y = idx
idx = y * self.size[0] + x
self.data[idx] = value
class TileMap:
def __init__(self, size, tileSize):
self.size = size
self.tileSize = tileSize
width, height = size
self.background = np.zeros((width, height), dtype=np.int)
self.blocked = self.background == 0
self.explosions = Grid(size)
def is_valid(self, pos):
return pos[0] >= 0 and pos[0] < self.size[0] and\
pos[1] >= 0 and pos[1] < self.size[1]
def is_blocked(self, pos):
return self.blocked[pos[0], pos[1]]
def set_blocked(self, pos, value):
self.blocked[pos[0], pos[1]] = value
def has_explosion(self, pos):
return self.get_explosion(pos) is not None
def get_explosion(self, pos):
return self.explosions[pos[0], pos[1]]
def set_explosion(self, pos, value):
self.explosions[pos[0], pos[1]] = value
class Explosion:
def __init__(self, pos=(0, 0), time=10, owner=None):
self.pos = np.array(pos, dtype=np.int)
self.time = time
self.owner = owner
class Bomb:
def __init__(self, pos=(0, 0), time=10, owner=None, range=3):
self.pos = np.array(pos, dtype=np.int)
self.time = time
self.owner = owner
self.range = range
class Player:
def __init__(self, id, pos=(0, 0), lifes=1, kills=0, hits=0,
max_bombs=5):
self.id = id
self.prev_pos = np.array(pos, dtype=np.int)
self.pos = np.array(pos, dtype=np.int)
self.render_pos = np.array(pos, dtype=np.float64)
self.lifes = lifes
self.kills = kills
self.hits = hits
self.move = np.array((0, 0), dtype=np.int)
self.drop_bomb = False
self.bomb_count = 0
self.max_bombs = max_bombs
self.sprites = {}
def is_dead(self):
return self.lifes == 0
class World:
def __init__(self):
self.map = None
self.bombs = []
self.players = []
self.explosions = []
```
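A tiny usage sketch for `Grid`, which stores a 2D field in a flat list indexed by `y * width + x`:
```python
grid = Grid((4, 3))        # width=4, height=3
grid[2, 1] = "explosion"   # column x=2, row y=1 -> flat index 1 * 4 + 2 = 6
print(grid[2, 1])          # explosion
print(grid[0, 0])          # None (empty cell)
```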
#### File: bombgame2/bombgame/recursive_bt_maze.py
```python
import os
import random
import numpy as np
class RecursiveBTMaze:
def __init__(self, width, height):
if width % 2 == 0 or height % 2 == 0:
raise ValueError("Width and height need to be odd.")
self.width = width
self.height = height
self.go = {'N': np.array([0, 2]),
'E': np.array([2, 0]),
'S': np.array([0, -2]),
'W': np.array([-2, 0])}
self.go_half = {key: (0.5 * value).astype(np.int) for key, value in self.go.items()}
self.opposite = {'N': 'S', 'E': 'W', 'S': 'N', 'W': 'E'}
# 0: path, 1: wall.
self.data = np.ones((height, width), dtype=np.int)
self.stack = []
index = np.array([random.randint(0, self.height - 1),
random.randint(0, self.width - 1)])
index[index % 2 == 0] += 1
self.stack.append([index, self.shuffle_directions()])
def generate(self):
while self.next():
pass
def next(self, borders=False):
if self.stack:
index, directions = self.stack.pop()
stack_size = len(self.stack)
directions_size = len(directions)
while directions:
direction = directions.pop()
new_index = index + self.go[direction]
# Special case at the borders.
if borders:
if self.cell_valid(index + self.go_half[direction]) and not self.cell_valid(new_index):
if random.choice([0, 1]):
y, x = index + self.go_half[direction]
self.data[y, x] = 0
if self.cell_valid(new_index) and not self.cell_visited(new_index):
self.stack.append([index, directions])
self.cell_move(index, new_index)
self.stack.append([new_index, self.shuffle_directions()])
break
if directions_size == 4 and not directions and len(self.stack) == stack_size:
self.random_break(index)
return True
else:
return False
def random_break(self, index):
for direction in self.shuffle_directions():
new_index = index + self.go[direction]
if self.cell_valid(new_index) and self.cell_value(index + self.go_half[direction]) == 1:
self.cell_move(index, new_index)
break
def cell_value(self, index):
y, x = index
return self.data[y, x]
def cell_visited(self, index):
return self.cell_value(index) != 1
def cell_valid(self, index):
y, x = index
if y < 0 or y >= self.height or x < 0 or x >= self.width:
return False
return True
def cell_move(self, index, new_index):
y, x = new_index
self.data[y, x] = 0
y, x = (index + 0.5 * (new_index - index)).astype(np.int)
self.data[y, x] = 0
def shuffle_directions(self):
return random.sample(list(self.go.keys()), len(self.go))  # list() needed: random.sample rejects dict views on newer Pythons
def itermaze(self):
return self.__iter2d__(self.data)
@staticmethod
def __iter2d__(data):
for i in range(data.shape[0]):
for j in range(data.shape[1]):
yield np.array([i, j]), data[i, j]
def __str__(self):
data = -1 * np.ones((self.height + 2, self.width + 2))
out = ''
wall = '#'
path = '0'
border = '+'
data[1:-1, 1:-1] = self.data
for index, value in self.__iter2d__(data):
if index[1] == 0:
out += os.linesep
if value == -1:
out += border
elif value == 0:
out += path
elif value == 1:
out += wall
return out
```
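A short usage sketch of the maze generator; width and height both have to be odd, as enforced by the constructor:
```python
maze = RecursiveBTMaze(21, 11)   # width=21, height=11, both odd
maze.generate()                  # run the recursive backtracker to completion
print(maze)                      # ASCII rendering: '#' walls, '0' paths, '+' border

# itermaze() yields (index, value) pairs if you need the raw grid
open_cells = sum(1 for _, value in maze.itermaze() if value == 0)
print(open_cells, "open cells")
```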
|
{
"source": "JeFaProductions/TextAdventure2",
"score": 3
}
|
#### File: TextAdventure2/tead/action.py
```python
class ActionBuilder:
def __init__(self, world, gui):
self._gui = gui
self._world = world
self._actions = {
'createItem' : self._createItem,
'destroyItem' : self._destroyItem,
'openDoor' : self._openDoor,
'closeDoor' : self._closeDoor,
'printText' : self._printText
}
self._conditions = {
    'usedItem' : self._usedItem
}
# make sure the working lists exist before the first createAction/createCondition call
self._clear()
def _clear(self):
self._builtActions = []
self._builtConditions = []
def _createItem(self, param):
pass
def _destroyItem(self, param):
pass
def _openDoor(self, param):
assert('direction' in param)
direction = param['direction']
self._world.doors[direction].locked = False
def _closeDoor(self, param):
assert('direction' in param)
direction = param['direction']
self._world.doors[direction].locked = True
def _printText(self, param):
assert('text' in param)
self._gui.outputln(param['text'])
def createAction(self, action, param=dict()):
''' Creates a lambda function with the given action and parameters.
:param action, string that specifies the type of action that will be created.
:param param, a dictionary that specifies parameters for the action that
will be created.
:return the builder itself so calls can be chained; the action is stored until build() is called.
'''
if not action in self._actions:
return
self._builtActions.append(lambda : self._actions[action](param))
return self
def _usedItem(self, param):
assert('items' in param and 'itemName' in param)
for i in param['items']:
if i == param['itemName']:
return True
return False
def createCondition(self, condition, param=dict()):
''' Creates a lambda function with the given condition and parameters.
:param condition, string that specifies the type of condition that will be created.
:param param, a dictionary that specifies parameters for the action that
will be created.
:return the builder itself so calls can be chained; the condition is stored until build() is called.
'''
if not condition in self._conditions:
return
self._builtConditions.append(lambda : self._conditions[condition](param))
return self
def build(self):
conds = self._builtConditions
acts = self._builtActions
def actionCB(event):
for cond in conds:
if not cond():
return
for act in acts:
act()
self._clear()
return actionCB
```
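A sketch of how the builder is meant to be chained, using a hypothetical stub in place of the real GUI object (only `outputln` is needed for the `printText` action, and the world argument is unused here):
```python
class StubGui:
    def outputln(self, text):
        print(text)

builder = ActionBuilder(world=None, gui=StubGui())

# queue a printText action, then build the callback that the event system will call
callback = builder.createAction('printText', {'text': 'The chest creaks open.'}).build()
callback(event=None)   # -> prints "The chest creaks open."
```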
#### File: TextAdventure2/tead/event.py
```python
import queue
ROOM_ENTERED = 'roomEntered'
class Event:
def __init__(self, eventType='', userParam=dict()):
self.type = eventType
self.userParam = userParam
class EventSystem:
def __init__(self):
self._eventQueue = queue.Queue()
self._eventHandlers = dict()
def registerEventHander(self, eventType, callback):
''' Register a handler to be called on the given event type.
eventType specifies the type of event the handler should process.
callback specifies the function that should be called on the event.
Its function header should look like "def myCallback(event):"
Returns the ID of the handler.
'''
if not eventType in self._eventHandlers:
self._eventHandlers[eventType] = []
handlerID = len(self._eventHandlers[eventType])
self._eventHandlers[eventType].append(callback)
return handlerID
def unregisterEventHandler(self, eventType, handlerID):
''' Unregister a handler, so it won't be called on the specified event.
eventType specifies the type of event the handler should process.
handlerID specifies the ID of the handler, which should be unregistered.
The ID was returned by the corresponding register-function.
Returns True on success, else False.
'''
if not eventType in self._eventHandlers:
return False
if handlerID >= len(self._eventHandlers[eventType]):
return False
self._eventHandlers[eventType].pop(handlerID)
return True
def createEvent(self, event):
self._eventQueue.put_nowait(event)
def processEvents(self):
while not self._eventQueue.empty():
event = self._eventQueue.get_nowait()
# check if eventhandler wants to process event
if not event.type in self._eventHandlers:
continue
for cb in self._eventHandlers[event.type]:
cb(event)
```
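A minimal usage sketch of the event system above, assuming `EventSystem`, `Event` and `ROOM_ENTERED` are imported from `tead.event`:
```python
def on_room_entered(event):
    print("entered:", event.userParam.get("room"))

events = EventSystem()
handler_id = events.registerEventHander(ROOM_ENTERED, on_room_entered)

events.createEvent(Event(ROOM_ENTERED, {"room": "cellar"}))
events.processEvents()   # -> entered: cellar

events.unregisterEventHandler(ROOM_ENTERED, handler_id)
```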
#### File: TextAdventure2/tead/game.py
```python
import enum
import tead.event as evt
_DIRECTION = enum.Enum(
"_DIRECTION", "NORTH, SOUTH, EAST, WEST"
)
class Item:
def __init__(self):
self.id = None
self.name = None
class Player:
def __init__(self):
self.inventory = dict()
def addItem(self, item):
self.inventory[item.id] = item
def removeItem(self, itemID):
item = self.inventory[itemID]
del self.inventory[itemID]
return item
class Door:
def __init__(self, locked=False):
self.locked = locked
self.nextRoom = None
class Room:
def __init__(self):
self.id = None
self.name = None
self.doors = {
_DIRECTION.NORTH: None,
_DIRECTION.EAST: None,
_DIRECTION.SOUTH: None,
_DIRECTION.WEST: None
}
self.items = dict()
self._eventListener = []
def addItem(self, item):
self.items[item.id] = item
def removeItem(self, itemID):
item = self.items[itemID]
del self.items[itemID]
return item
def hasDoor(self, direction):
return self.doors[direction] is not None
def canPassDoor(self, direction):
return self.hasDoor(direction) and not self.doors[direction].locked
class World:
def __init__(self, eventSystem, gui):
self.currentRoomID = None
self.player = Player()
self.rooms = dict()
self._eventSystem = eventSystem
self._gui = gui
def addRoom(self, room):
self.rooms[room.id] = room
def gotoDirectionStr(self, directionStr):
directionStr = directionStr.upper()
if directionStr not in _DIRECTION.__members__:
self._gui.outputln('Invalid direction.')
return
direction = _DIRECTION[directionStr]
self.gotoDirection(direction)
def gotoDirection(self, direction):
assert(self.currentRoomID in self.rooms)
currRoom = self.rooms[self.currentRoomID]
if not currRoom.hasDoor(direction):
self._gui.outputln('There is no door in this direction.')
return
if currRoom.doors[direction].locked:
self._gui.outputln('The door is locked.')
return
self.currentRoomID = currRoom.doors[direction].nextRoom
self._eventSystem.createEvent(evt.Event(evt.ROOM_ENTERED,
{'room': self.rooms[self.currentRoomID]}))
```
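A small wiring sketch for the room/door data model (no world, GUI or event system needed just to check door logic); note that `_DIRECTION` is module-private, so external callers would normally go through `gotoDirectionStr`:
```python
hall, cellar = Room(), Room()
hall.id, cellar.id = "hall", "cellar"

door = Door(locked=False)
door.nextRoom = "cellar"
hall.doors[_DIRECTION.EAST] = door

print(hall.hasDoor(_DIRECTION.EAST))      # True
print(hall.canPassDoor(_DIRECTION.EAST))  # True
print(hall.canPassDoor(_DIRECTION.WEST))  # False (no door there)
```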
#### File: TextAdventure2/tead/gui.py
```python
import tkinter as tk
import os
# Global configuration parameters to easily change the style.
config = dict(
# Global "theme" for all widgets.
theme=dict(
bg="black",
fg="white",
selectbackground="grey",
insertbackground="white",
borderwidth=3,
font="Courier 9 bold",
highlightthickness=0,
highlightbackground="yellow",
inactiveselectbackground="grey",
),
# File path, text, etc.
files=dict(
icon_file_win="res/icon.ico",
icon_file_linux="res/icon.xbm",
welcome_text_file="res/welcome_text",
title="TEAD",
default_width=800,
default_height=600,
default_infoleft="https://github.com/JeFaProductions/TextAdventure2",
default_infocenter="Text Adventure 2",
default_inforight="GUI prototype",
default_prompt=">"
)
)
class ReadonlyText(tk.Frame):
def __init__(self, master=None):
super().__init__(master)
self.text = tk.Text(self)
self.text.config(state="disabled", wrap=tk.WORD)
self.text.config(**config["theme"])
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.text.grid(row=0, column=0, sticky=tk.N + tk.E + tk.S + tk.W)
def delete(self, indexfrom, indexto):
"""
Deletes text in the output field.
:param indexfrom: Tkinter index
:param indexto: Tkinter index
"""
self.text.config(state="normal")
self.text.delete(indexfrom, indexto)
self.text.config(state="disabled")
def put(self, text):
self.text.config(state="normal")
self.text.insert("end", text)
self.text.config(state="disabled")
def putln(self, text):
"""
Adds a linebreak to the text and prints it on the output field.
:param text: String
"""
self.put(text + os.linesep)
class TextOutput(ReadonlyText):
def __init__(self, master=None):
super().__init__(master)
class Inventory(ReadonlyText):
def __init__(self, master=None):
super().__init__(master)
self.selectedrow = 1
self.addItem("inventory item 1")
self.addItem("inventory item 2")
self.addItem("inventory item 3")
self.deleteItem("inventory item 2")
self.text.bind("<Down>", self._onDown)
self.text.bind("<Up>", self._onUp)
self.text.bind("<Right>", lambda e: "break")
self.text.bind("<Left>", lambda e: "break")
self.text.bind("<Button-1>", lambda e: "break")
self.text.bind("<Double-Button-1>", lambda e: "break")
self.text.bind("<B1-Motion>", lambda e: "break")
self._rowmark()
def _getrowrange(self, row):
"""
Returns the from and to index for the desired row.
:param row: String with the desired row.
:return: List with from and to index.
"""
fromindex = row + ".0"
toindex = row + ".end"
return [fromindex, toindex]
def _rowmark(self, row=1):
"""
Sets the new row mark.
:param row: New row number as integer.
"""
old = self._getrowrange(str(self.selectedrow))
new = self._getrowrange(str(row))
self.text.tag_remove("sel", old[0], old[1])
self.text.tag_add("sel", new[0], new[1])
self.selectedrow = row
def _onUp(self, event):
if self.selectedrow != 1:
self._rowmark(self.selectedrow - 1)
return "break"
def _onDown(self, event):
end = int(float(self.text.index("end"))) - 2
if self.selectedrow < end:
self._rowmark(self.selectedrow + 1)
return "break"
def focus(self):
"""
Sets the focus to the text field.
"""
self.text.focus()
def addItem(self, name):
self.putln(name)
def deleteItem(self, name):
pos = self.text.search(name, "1.0", "end")
if self.selectedrow == int(float(pos)):
if self.selectedrow > 1:
    self._onUp(None)   # _onUp/_onDown take an event argument; None works since it is unused
else:
    self._onDown(None)
self.delete(pos, pos + "+1lines")
class TextInput(tk.Frame):
def __init__(self, master=None):
super().__init__(master)
self.text = tk.Text(self)
self.text.config(height=1, wrap="none")
self.text.config(**config["theme"])
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.text.grid(row=0, column=0, sticky=tk.N + tk.E + tk.S + tk.W)
self.text.bind("<Return>", self._onReturn)
self._onReturnCB = None
def _onReturn(self, event):
if self._onReturnCB is not None:
self._onReturnCB(self.text.get(1.0, tk.END))
self.text.delete(1.0, tk.END)
return "break"
def setOnReturn(self, cb):
self._onReturnCB = cb
def focus(self):
"""
Sets the focus to the text input field.
"""
self.text.focus()
class Prompt(tk.Text):
def __init__(self, master=None):
super().__init__(master)
self.setPrompt(config["files"]["default_prompt"])
self.config(width=2, height=1, state="disabled")
self.config(**config["theme"])
def setPrompt(self, prompt):
if len(prompt) == 1:
self.config(state="normal")
self.delete("1.0", "1.end")
self.insert("1.0", prompt)
self.config(state="disabled")
class InfoBar(tk.Frame):
bgcolor = "grey"
def __init__(self, master=None):
super().__init__(master)
self.config(height=20, bg=self.bgcolor)
font = config["theme"]["font"]
self.infoleft = tk.Label(self)
self.infoleft.config(bg=self.bgcolor, font=font)
self.infoleft.grid(row=0, column=0, sticky=tk.W)
self.infocenter = tk.Label(self)
self.infocenter.config(bg=self.bgcolor, font=font)
self.infocenter.grid(row=0, column=1)
self.inforight = tk.Label(self)
self.inforight.config(bg=self.bgcolor, font=font)
self.inforight.grid(row=0, column=2, sticky=tk.E)
self.rowconfigure(0, weight=1)
self.columnconfigure(1, weight=1)
self.outleft(config["files"]["default_infoleft"])
self.outcenter(config["files"]["default_infocenter"])
self.outright(config["files"]["default_inforight"])
self._setupevents()
def _setupevents(self):
# Move window
self.bind("<ButtonPress-1>", self._startMove)
self.bind("<ButtonRelease-1>", self._stopMove)
self.bind("<B1-Motion>", self._onMotion)
def _startMove(self, event):
self.x = event.x
self.y = event.y
def _stopMove(self, event):
self.x = None
self.y = None
def _onMotion(self, event):
deltax = event.x - self.x
deltay = event.y - self.y
x = self.winfo_toplevel().winfo_x() + deltax
y = self.winfo_toplevel().winfo_y() + deltay
self.winfo_toplevel().geometry("+%s+%s" % (x, y))
def outleft(self, text):
self.infoleft.config(text=text)
def outcenter(self, text):
self.infocenter.config(text=text)
def outright(self, text):
self.inforight.config(text=text)
class MainWindow(tk.Frame):
def __init__(self, master=None):
super().__init__(master)
self.infobar = None
self.textout = None
self.inventory = None
self.textprompt = None
self.textin = None
self._setupstyle()
self._setupframes()
self._setupevents()
welcomefile = config["files"]["welcome_text_file"]
with open(welcomefile) as f:
text = f.read()
self.output(text)
self.textin.focus()
def _setupstyle(self):
self.config(bg="black")
self.master.title(config["files"]["title"])
if os.name == "nt":
self.master.wm_iconbitmap(
bitmap=os.path.normpath(config["files"]["icon_file_win"]))
else:
self.master.wm_iconbitmap(
bitmap="@" + os.path.normpath(config["files"]["icon_file_linux"]))
self.master.geometry('{}x{}'.format(
config["files"]["default_width"],
config["files"]["default_height"]))
def _setupframes(self):
self.master.config(bg="black", borderwidth=1)
# Make it resizable
self.master.rowconfigure(0, weight=1)
self.master.columnconfigure(0, weight=1)
# Only one column in main frame
self.columnconfigure(0, weight=1) # Resizable
self.rowconfigure(1, weight=1) # midframe resizable
self.grid(row=0, column=0, sticky=tk.N + tk.E + tk.S + tk.W)
self.infobar = InfoBar(self)
self.infobar.grid(row=0, column=0, sticky=tk.N + tk.E + tk.S + tk.W)
midframe = tk.Frame(self)
# All resizable
midframe.rowconfigure(0, weight=1)
midframe.columnconfigure(0, weight=1)
midframe.columnconfigure(1, weight=2)
self.textout = TextOutput(midframe)
self.textout.grid(row=0, column=0, sticky=tk.N + tk.E + tk.S + tk.W)
self.inventory = Inventory(midframe)
self.inventory.grid(row=0, column=1, sticky=tk.N + tk.E + tk.S + tk.W)
midframe.grid(row=1, column=0, sticky=tk.N + tk.E + tk.W + tk.S)
bottomframe = tk.Frame(self)
bottomframe.columnconfigure(1, weight=1)
self.textprompt = Prompt(bottomframe)
self.textprompt.grid(row=0, column=0)
self.textin = TextInput(bottomframe)
self.textin.grid(row=0, column=1, sticky=tk.N + tk.W + tk.E + tk.S)
bottomframe.grid(row=2, column=0, sticky=tk.N + tk.E + tk.W + tk.S)
def _setupevents(self):
# GUI events
self.master.bind("<Escape>", self._onEscape)
self.textin.text.bind("<Tab>", self._onTab)
self.textin.text.bind("<FocusIn>", self._onTextinFocus)
self.inventory.text.bind("<FocusIn>", self._onInventoryFocus)
def _onEscape(self, event):
self.master.quit()
def _onTab(self, event):
if self.textin.text.focus_get():
self.inventory.focus()
return "break"
def _onTextinFocus(self, event):
self.textprompt.setPrompt(">")
def _onInventoryFocus(self, event):
self.textprompt.setPrompt("#")
def output(self, text):
"""
Prints the the text on the output text field.
:param text: String
"""
self.textout.put(text)
def outputln(self, text):
self.textout.putln(text)
def setOnReturn(self, cb):
"""
Sets the callback function for return input.
:param cb: Function that is called on return.
"""
self.textin.setOnReturn(cb)
```
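A minimal launch sketch for the GUI; it assumes the `res/` files referenced in `config` exist, since `MainWindow` loads the icon and welcome text on startup:
```python
import tkinter as tk

root = tk.Tk()
window = MainWindow(root)

# echo whatever the player types back into the output pane
window.setOnReturn(lambda text: window.outputln("You typed: " + text.strip()))

root.mainloop()
```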
|
{
"source": "Jefaxe/kilt",
"score": 2
}
|
#### File: Jefaxe/kilt/examples.py
```python
def lambd_update():
import kilt
# check lambdynamiclights version
mod = kilt.search("light dy")[0]
print("The latest version of {} is {}".format(mod.name, mod.version))
def caffein_install():
import kilt
# download sodium, lithium, and phosphor
mods = kilt.search(search_array=["sodium", "lithium", "phosphor"])
for i in mods:
i.download()
def open_wiki():
import kilt
import webbrowser
webbrowser.open(kilt._doc)
def specific_install():
import kilt
mod = kilt.search("lithium")[0]
mod.download(specific_version="mc1.16.5-0.6.3")
def search_by_id():
import kilt
mod = kilt.search(mod_id="AZomiSrC")[0]
print("{} is on version {}".format(mod.name, mod.version))
def search_array():
import kilt
mods = kilt.search(logging_level=0, search_array=["hydrogen", "galacticaft rewoven"])
for mod in mods:
print(mod.name)
def change_configs():
from kilt import config
config.global_level = 0
import kilt
kilt.search("zoom")
def facets_search():
import kilt.labrinth as m
mod = m.get(logging_level=0, mcversions=["1.14"], license_="MIT", server_side="unsupported")[0]
mod.web_open("home")
facets_search()
```
|
{
"source": "jefcolbi/django-magic-notifier",
"score": 2
}
|
#### File: django-magic-notifier/magic_notifier/consumers.py
```python
import binascii
import ctypes
import json
import logging
import traceback
from datetime import date, datetime, timedelta
from pathlib import Path
from channels.db import database_sync_to_async
from channels.generic.websocket import WebsocketConsumer
from django.core.exceptions import ObjectDoesNotExist
from django.utils import timezone
from notif.models import Notification
from notif.models_serializers import NotificationSerializer
from rest_framework.authtoken.models import Token
logger = logging.getLogger("notif")
class PushNotifConsumer(WebsocketConsumer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.token: str = None
self.user = None
def connect(self):
try:
self.accept()
self.token = self.scope["url_route"]["kwargs"]["token"]
db_tok = Token.objects.get(key=self.token)
self.user = db_tok.user
self.user.settings.push_channel = self.channel_name
self.user.settings.save()
logger.info("Accepted")
except Exception as e:
traceback.print_exc()
logger.error(traceback.format_exc())
def disconnect(self, close_code):
try:
self.user.settings.push_channel = None
self.user.settings.save()
except Exception as e:
print(traceback.format_exc())
logger.error(traceback.format_exc())
def receive(self, text_data):
event = json.loads(text_data)
logger.info("{} >> {}".format(self.user, text_data))
event_handler = getattr(self, event["type"].lower().replace(".", "_"), None)
if callable(event_handler):
event_handler(event)
def notify(self, data: dict):
self.send(json.dumps(data))
def notification(self, data: dict):
self.send(json.dumps(data))
def unread(self, event: dict):
notifs = Notification.objects.filter(user=self.user, read__isnull=True)
event["count"] = len(notifs)
event["notifications"] = NotificationSerializer(notifs, many=True).data
self.send(json.dumps(event))
def markread(self, event: dict):
notifs = Notification.objects.filter(user=self.user, read__isnull=True)
notifs.update(read=timezone.now())
event["success"] = True
self.send(json.dumps(event))
```
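`PushNotifConsumer` reads the auth token from `scope["url_route"]["kwargs"]["token"]`, so the websocket route must capture a `token` kwarg. A hypothetical Channels 3-style routing sketch (the URL prefix is an assumption, not part of this package):
```python
from django.urls import re_path
from magic_notifier.consumers import PushNotifConsumer

websocket_urlpatterns = [
    # the path prefix is hypothetical; only the <token> kwarg is required by the consumer
    re_path(r"^ws/notifications/(?P<token>\w+)/$", PushNotifConsumer.as_asgi()),
]
```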
#### File: django-magic-notifier/magic_notifier/models.py
```python
import json
from operator import mod
from django import VERSION as DJANGO_VERSION
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.deletion import CASCADE
from django.db.models.fields.related import ForeignKey, OneToOneField
from django.utils.translation import gettext as _
if DJANGO_VERSION[:2] >= (3, 1):  # JSONField lives in django.db.models from Django 3.1 onwards
from django.db.models import JSONField
else:
try:
from django.contrib.postgres.fields import JSONField
except:
class JSONField(models.TextField):
"""Simple JSON field that stores python structures as JSON strings
on database.
"""
def from_db_value(self, value, *args, **kwargs):
return self.to_python(value)
def to_python(self, value):
"""
Convert the input JSON value into python structures, raises
django.core.exceptions.ValidationError if the data can't be converted.
"""
if self.blank and not value:
return None
if isinstance(value, str):
try:
return json.loads(value)
except Exception as e:
raise ValidationError(str(e))
else:
return value
def validate(self, value, model_instance):
"""Check value is a valid JSON string, raise ValidationError on
error."""
if isinstance(value, str):
super(JSONField, self).validate(value, model_instance)
try:
json.loads(value)
except Exception as e:
raise ValidationError(str(e))
def get_prep_value(self, value):
"""Convert value to JSON string before save"""
try:
return json.dumps(value)
except Exception as e:
raise ValidationError(str(e))
def value_from_object(self, obj):
"""Return value dumped to string."""
val = super(JSONField, self).value_from_object(obj)
return self.get_prep_value(val)
from .settings import NOTIFIER_AVAILABLE_MODES, NOTIFIER_DEFAULT_MODE
User = get_user_model()
class Notification(models.Model):
id = models.BigAutoField(primary_key=True)
user: models.ForeignKey = models.ForeignKey(
User, models.CASCADE, null=True, blank=True, related_name="magic_notifications"
)
text: models.TextField = models.TextField()
type: models.CharField = models.CharField(max_length=30)
sub_type: models.CharField = models.CharField(max_length=30, null=True, blank=True)
link: models.CharField = models.CharField(_("The link associated"), max_length=255)
image: models.ImageField = models.ImageField(upload_to="notifications")
actions: JSONField = JSONField(default=dict)
data: JSONField = JSONField(default=dict)
read: models.DateTimeField = models.DateTimeField(null=True, blank=True)
sent: models.DateTimeField = models.DateTimeField(auto_now_add=True)
mode: models.CharField = models.CharField(
max_length=10, default=NOTIFIER_DEFAULT_MODE, choices=NOTIFIER_AVAILABLE_MODES
)
def __str__(self):
if self.user and self.user.name:
user_name = self.user.name
else:
user_name = ""
return "{} Notif #{}".format(user_name, self.id)
def save(self, *args, **kwargs):
return super().save(*args, **kwargs)
def mark_read(self):
from django.utils import timezone
self.read = timezone.now()
self.save()
class NotifyProfile(models.Model):
id = models.BigAutoField(primary_key=True)
phone_number: models.CharField = models.CharField(max_length=20, null=True, blank=True)
current_channel: models.CharField = models.CharField(max_length=255, null=True, blank=True)
user: models.OneToOneField = models.OneToOneField(User, models.CASCADE)
```
#### File: django-magic-notifier/magic_notifier/notifier.py
```python
import logging
import traceback
from typing import Optional, Union
from django.contrib.auth import get_user_model
from django.db import models
from magic_notifier.emailer import Emailer
from magic_notifier.pusher import Pusher
from magic_notifier.settings import NOTIFIER_THREADED
from magic_notifier.smser import ExternalSMS
User = get_user_model()
logger = logging.getLogger("notifier")
def notify(
vias: list,
subject: str = None,
receivers: Union[str, list, models.QuerySet, models.Manager] = None,
template: str = None,
context: dict = None,
final_message: str = None,
email_gateway: str = 'default',
sms_gateway: Optional[str] = None,
files: list = None,
threaded: bool = None,
):
"""This function send a notification via the method specified in parameter vias
:param vias: accepted values are email,sms,push
:param subject: the subject of the notification, ignored when send by sms
:param receivers: it can be a list, queryset or manager of users. if a string is passed it must be *admins* to send to (super) admins, *staff* to send to staff only, *all* to all users, *all-staff* to all users minus staff and *all-admins* to all users excepted admins
:param template: the name of the template to use. Default None
:param context: the context to be passed to the template. Note that the context is auto-filled with the user the notification is currently being sent to, under the key 'user'. Default None
:param final_message: the final message to be sent as the notification content, must be sent if template is None, template is ignored if it is sent. Default None
:param email_gateway: the email gateway to use. Default 'default'
:param sms_gateway: the sms gateway to use. Default to None
:param files: list of files to be sent. accept file-like objects, tuple, file path. Default None
:param threaded: if True, the notification is sent in background else sent with the current thread. Default to NOTIFIER["THREADED"] settings
:return:
"""
logger.debug(f"Sending {subject} to {receivers} via {vias}")
threaded = threaded if threaded is not None else NOTIFIER_THREADED
context = {} if context is None else context
assert subject, "subject not defined"
if isinstance(receivers, str):
if receivers in ["admins", "staff", "all", "all-staff", "all-admins"]:
if receivers == "admins":
receivers = User.objects.filter(is_superuser=True)
elif receivers == "staff":
receivers = User.objects.filter(is_staff=True)
elif receivers == "all":
receivers = User.objects.all()
elif receivers == "all-staff":
receivers = User.objects.exclude(is_staff=True)
elif receivers == "all-admins":
receivers = User.objects.exclude(is_superuser=True)
else:
raise ValueError(f"'{receivers}' is not an allowed value for receivers arguments")
assert isinstance(receivers, (list, models.Manager, models.QuerySet)), f"receivers must be a list at this point not {receivers}"
for via in vias:
try:
if via == "email":
em = Emailer(
subject,
list(receivers),
template,
context,
email_gateway,
threaded=threaded,
final_message=final_message,
files=files
)
em.send()
elif via == "sms":
ex_sms = ExternalSMS(receivers,context, threaded=threaded,
template=template, final_message=final_message,
sms_gateway=sms_gateway)
ex_sms.send()
elif via == "push":
assert template, "template variable can't be None or empty"
pusher = Pusher(
subject, receivers, template, context, threaded=threaded
)
pusher.send()
else:
logger.error(f"Unknown sending method {via}")
except:
logger.error(traceback.format_exc())
```
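A usage sketch of `notify` based on its signature and docstring; the subject, message and template name are made up for illustration:
```python
# send a plain-text email to all superusers, without going through a template
notify(
    ["email"],
    subject="Deployment finished",
    receivers="admins",
    final_message="The new release is live.",
)

# send the same notification via email and push using a template (the template name is hypothetical)
notify(
    ["email", "push"],
    subject="Deployment finished",
    receivers="staff",
    template="deploy_done",
    context={"release": "1.2.0"},
)
```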
#### File: magic_notifier/sms_clients/cgsms_client.py
```python
import logging
import requests
from django.conf import settings
from .base import BaseSmsClient
logger = logging.getLogger("notifier")
class CGSmsClient(BaseSmsClient):
@classmethod
def send(cls, number: str, text: str, **kwargs):
sub_account = settings.NOTIFIER["SMS"]["GATEWAYS"]["CGS"]["SUB_ACCOUNT"]
sub_account_pass = settings.NOTIFIER["SMS"]["GATEWAYS"]["CGS"]["SUB_ACCOUNT_PASSWORD"]
params = {
"sub_account": sub_account,
"sub_account_pass": sub_account_pass,
"action": "send_sms",
"message": text,
"recipients": number,
}
res = requests.get("http://cheapglobalsms.com/api_v1", params=params)
return res
```
|
{
"source": "jefcolbi/njembe",
"score": 2
}
|
#### File: njembe/njembe/__main__.py
```python
from sys import exit
from njembe import VERSION
from njembe.models import Documentation, Step, db
from njembe.config import LOG_FILE, WORKING_FILE, EXPORT_FOLDER, EDITOR
import os
import click
import logging
import datetime
@click.group()
@click.version_option(VERSION)
def njembe():
pass
@njembe.command('open')
def init_doc():
"""
Initialize a new documentation project.
"""
query = Documentation.select().where(Documentation.closed==False)
if query.exists():
logging.error('Can\'t open a new documentation when another one is opened')
exit(0)
title = input('Enter the documentation title: ')
documentation = Documentation.create(title=title)
click.echo('Documentation created')
@njembe.command('close')
def close_doc():
"""
Close the current documentation project.
"""
try:
documentation = Documentation.select().where(Documentation.closed==False).order_by(Documentation.created_date.desc()).get()
documentation.closed = True
documentation.save()
except Documentation.DoesNotExist:
logging.info('No project to close')
@njembe.command('command')
@click.argument('command', nargs=-1, required=True)
def add_step(command):
"""
Add a new step to the documentation.
"""
try:
documentation = Documentation.select().where(Documentation.closed==False).order_by(Documentation.created_date.desc()).get()
except Documentation.DoesNotExist:
logging.info('No existing documentation')
logging.info('Creating a new documentation...')
documentation = Documentation.create(title='Untitled')
documentation.save()
logging.info('Document created')
step = Step.create(documentation=documentation, command=' '.join(command), position=(documentation.steps + 1))
if EDITOR:
os.system(f'{EDITOR} {WORKING_FILE}')
else:
logging.warning('env variable $EDITOR is not set; falling back to "editor". Set it to your favorite editor.')
os.system(f'editor {WORKING_FILE}')
if os.path.exists(WORKING_FILE):
with open(WORKING_FILE) as tmp:
step.description = tmp.read()
os.remove(WORKING_FILE)
step.save()
documentation.steps += 1
documentation.save()
@njembe.command('list')
def show_projects():
"""
Show the documentation projects saved on your computer.
"""
projects = Documentation.select()
for project in projects:
click.echo(f'{project.id}: {project.title} [{"Closed" if project.closed else "Open"}]')
@njembe.command('export')
@click.pass_context
def export_project(ctx):
"""
Export specific documentation in a folder
"""
ctx.invoke(show_projects)
try:
doc_id = int(input('Enter the documentation ID: '))
documentation = Documentation.get_by_id(doc_id)
steps = Step.select().where(Step.documentation==doc_id).order_by(Step.position.asc())
file_to_write = os.path.join(EXPORT_FOLDER, f'njembe_doc_{documentation.id}.nj')
doc = []
doc.append(f'Title: {documentation.title}\n')
doc.append(f'Created at: {documentation.created_date.strftime("%d-%m-%Y, %H:%M:%S")}\n')
doc.append(f'Steps: {documentation.steps}\n')
doc.append(f'{"-"*30}\n\n')
if steps:
for step in steps:
doc.append(f'Step {step.position}: {step.description}\n')
doc.append(f'Command: {step.command}\n')
doc.append('\n')
doc_to_write = ''.join(doc)
with open(file_to_write, 'w') as doc_file:
doc_file.write(doc_to_write)
else:
doc.append('No steps in this documentation')
doc_to_write = ''.join(doc)
with open(file_to_write, 'w') as doc_file:
doc_file.write(doc_to_write)
click.echo(f'Documentation available at {file_to_write}')
except ValueError:
click.echo('Wrong value')
return
except Documentation.DoesNotExist:
click.echo('This documentation doesn\'t exist')
if __name__ == "__main__":
# Create data folder
if not os.path.exists(EXPORT_FOLDER):
os.mkdir(EXPORT_FOLDER)
os.mkdir(os.path.join(EXPORT_FOLDER, 'logs'))
db.create_tables([Documentation, Step])
logging.basicConfig(filename=LOG_FILE, level=logging.ERROR,
format='%(asctime)s [%(levelname)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
njembe(prog_name='njembe')
```
|
{
"source": "jefcolbi/pyconafrica",
"score": 2
}
|
#### File: pyconafrica/ghana19/team.py
```python
from colorama import init, Style, Fore, Back
init()
TEAM = [
{
'name': '<NAME>',
'role': 'Chair of the organising committee.',
'bio': \
"""Marlene is a Director of the Python Software Foundation,
and a co-founder of ZimboPy, a Zimbabwean non-profit that
empowers women to pursue careers in technology.
She lives in Harare and has an active role in assisting the growth of Python
communities locally and across Africa."""
},
{
'name': '<NAME>',
'role': 'Chair of the Python Software Community in Ghana.',
'bio': \
"""Best known for helping make Ghana a space-faring nation, Aaron has been contributing
his widow's mite to the tech community for over seven years.
He's a member of the Python Software Foundation Grants committee and
helps promote Python-related activities around the globe."""
},
{
'name': '<NAME>',
'role': 'Aisha is a Cloud Systems Engineer at TechData.',
'bio': \
"""She's a former board member of the Python Nigeria Community, a Python Software Foundation fellow,
Django Software Foundation member and winner of the 2016 Malcolm Tredinnick Memorial award.
Aisha is passionate about mentoring African women through PyLadies and DjangoGirls.
She's on Twitter as @AishaXBello."""
},
{
'name': '<NAME>',
'role': 'Treasurer.',
'bio': \
"""Michael is a professional accountant with keen interest in Data Science and Financial Inclusion.
He is a co-founder and Executive Board member of the Python Software Community in Ghana,
and works with students and professionals as a career mentor, educator and a community builder."""
},
{
'name': '<NAME>',
'role': '',
'bio': \
"""Abigail is the current Lead of PyLadies Ghana, a mentorship group with a focus on helping
more women become active participants and leaders in the Python open-source community.
She has been involved in organising and coaching at several Django Girls events in Ghana and
hopes to empower more women in the field of technology through these initiatives."""
},
{
'name': '<NAME>',
'role': 'Talks committee lead.',
'bio': \
"""Noah is a software developer, a member of ICT4D.at and UI designer. He's a co-founder and
executive board member of the Python Software Community in Ghana,
and a member of the Django Software Foundation. Noah has been involved in the organisation
of several events including Django Girls & PyCon Ghana. He's on Twitter: @plasmadray """
},
{
'name': '<NAME>',
'role': 'Mannie is a computer scientist, graphic designer and software developer.',
'bio': \
"""He's also a community builder, having kick-started the initiatives and user groups under
the Python Software Community in Ghana. He volunteers as a mentor for people hoping
to get into software development and organises events and workshops around the country.
He's on Twitter: @mawy_7."""
},
{
'name': '<NAME>',
'role': 'Daniele works at Divio and currently lives in the Netherlands.',
'bio': \
"""He is a core developer of the Django Project and has been an active volunteer organiser
in the Python community for several years. In recent years he has been involved in
the organisation of several editions of DjangoCon Europe, PyCon UK and PyCon Namibia."""
}
]
def get_team():
"""
This function returns the team of organizers
"""
return TEAM
def print_team():
"""
This function prints the team of organizers
"""
print("The organising team of PyCon Africa 2019")
print('----------------------------------------\n')
for member in TEAM:
print(Back.YELLOW + Fore.BLACK + member['name'] + Back.RESET + Fore.RESET)
if member['role']:
print(' '+ Back.WHITE + Fore.BLUE + ' {}'.format(member['role']) + Back.RESET + Fore.RESET)
print(' Bio: {}\n'.format(member['bio']))
print("\n\nLet's clap for them \U0001F44F \U0001F44F \U0001F44F")
if __name__ == "__main__":
print_team()
```
|
{
"source": "jefcolbi/template-data",
"score": 2
}
|
#### File: management/commands/restore_data.py
```python
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from pathlib import Path
from template_data.models import TemplateData
from template_data.management.commands.add_data import DataMixin
import json
class Command(DataMixin, BaseCommand):
"""Restore template data from a JSON backup file"""
def add_arguments(self, parser):
parser.add_argument("file", type=str, help="The json file to restore the data from")
def handle(self, *args, **options):
file_name = options['file']
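# The JSON file is assumed (based on the loop below) to be a list of single-key objects
# mapping a template key to its details, e.g. (values illustrative only):
#   [{"site_name": {"type": "str", "value": "My Site", "lang": "en",
#                   "page": "home", "inherit_page": null}}]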
try:
data = {}
with (settings.BASE_DIR / file_name).open() as fp:
data = json.load(fp)
for item in data:
for key, details in item.items():
self.add_data_in_db(key, details['type'], details['value'], details['lang'], details['page'],
details['inherit_page'])
break
print(f"restored {file_name}")
except Exception as e:
import traceback
traceback.print_exc()
```
|
{
"source": "jefdaj/treecl-nix",
"score": 2
}
|
#### File: treecl-nix/treeCl/collection.py
```python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import next
from builtins import hex
from builtins import str
from builtins import zip
from builtins import range
from builtins import object
# standard lib
import glob
import hashlib
import itertools
import json
import os
import random
import sys
import tempfile
from functools import reduce
# third party
import numpy as np
import phylo_utils
from scipy.spatial.distance import squareform
from tree_distance import PhyloTree
# treeCl
from .alignment import Alignment
from .concatenation import Concatenation
from .constants import SORT_KEY, ISPY3
from .distance_matrix import DistanceMatrix
from .errors import optioncheck, directorycheck
from . import tasks
from .partition import Partition
from .parutils import SequentialJobHandler
from .utils import fileIO, setup_progressbar, model_translate, smooth_freqs, create_gamma_model, flatten_list
from .utils.decorators import lazyprop
from .utils.misc import binom_coeff
# set up logging
import logging
logger = logging.getLogger(__name__)
default_jobhandler = SequentialJobHandler()
def gapmask(simseqs, origseqs):
"""
:param simseqs: list of (header, sequence) tuples of simulated sequences [no gaps]
:param origseqs: list of (header, sequence) tuples of original sequences
:return: list of (header, sequence) tuples of simulated sequences with gaps copied from the original alignment
"""
import numpy as np
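# Worked example (illustrative): gapmask([('s1', 'ACGT')], [('s1', 'A-GT')]) copies the
# gap at position 1 of the original sequence into the simulated one, returning [('s1', 'A-GT')].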
simdict = dict(simseqs)
origdict = dict(origseqs)
for k in origdict:
origseq = np.array(list(origdict[k]))
gap_pos = np.where(origseq=='-')
simseq = np.array(list(simdict[k]))
simseq[gap_pos] = '-'
simdict[k] = ''.join(simseq)
return list(simdict.items())
def transform_fn(table, amount=2.0):
tmp = table**(1.0/amount)
return tmp / tmp.sum(1)[:,np.newaxis]
class NoRecordsError(Exception):
def __init__(self, file_format, input_dir):
self.file_format = file_format
self.input_dir = input_dir
def __str__(self):
msg = ('No records were found in {0} matching\n'
'\tfile_format = {1}\n'.format(self.input_dir,
self.file_format))
return msg
class RecordsHandler(object):
def __init__(
self,
records=None,
input_dir=None,
param_dir=None,
trees_dir=None,
file_format='phylip',
header_grep=None,
show_progress=True,
):
self._records = None
self._input_files = None
self.show_progress=show_progress
if records is not None:
self.records = records
elif input_dir is not None:
input_dir = os.path.abspath(input_dir)
directorycheck(input_dir)
optioncheck(file_format, ['fasta', 'phylip'])
self.records = self.read_alignments(input_dir,
file_format,
header_grep)
self.input_dir = input_dir
else:
raise ValueError('Provide a list of records, '
'or the path to a set of alignments')
if param_dir is not None and trees_dir is None:
self.read_parameters(param_dir)
elif trees_dir is not None and param_dir is None:
self.read_trees(trees_dir)
if not self.records:
raise NoRecordsError(file_format, input_dir)
def __len__(self):
if hasattr(self, 'records'):
return len(self.records)
return 0
def __getitem__(self, i):
if hasattr(self, 'records'):
return self.records[i]
@property
def records(self):
""" Returns a list of records in SORT_KEY order """
return [self._records[i] for i in range(len(self._records))]
@records.setter
def records(self, records):
""" Sets a dictionary of records keyed by SORT_KEY order """
self._records = dict(enumerate(records))
@lazyprop
def trees(self):
""" Returns a list of trees in SORT_KEY order """
try:
return [rec.tree for rec in self]
except ValueError:
return []
@lazyprop
def names(self):
"""
Returns a list of sequence record names in SORT_KEY order
"""
try:
return [rec.name for rec in self]
except ValueError:
return []
@lazyprop
def distances(self):
try:
return [rec.parameters.partitions.distances for rec in self]
except ValueError:
return []
@lazyprop
def variances(self):
try:
return [rec.parameters.partitions.variances for rec in self]
except ValueError:
return []
@lazyprop
def frequencies(self):
try:
return [rec.parameters.partitions.frequencies for rec in self]
except ValueError:
return []
@lazyprop
def alphas(self):
try:
return [rec.parameters.partitions.alpha for rec in self]
except ValueError:
return []
@lazyprop
def datatypes(self):
try:
return ['dna' if rec.is_dna() else 'protein' for rec in self]
except ValueError:
return []
@lazyprop
def lengths(self):
try:
return [len(rec) for rec in self]
except ValueError:
return []
@lazyprop
def headers(self):
try:
return [rec.get_names() for rec in self]
except ValueError:
return []
@lazyprop
def mrp_tree(self):
trees = [tree.newick if hasattr(tree, 'newick') else tree for tree in self.trees]
return Alignment().get_mrp_supertree(trees)
def read_alignments(self, input_dir, file_format, header_grep=None):
""" Get list of alignment files from an input directory *.fa, *.fas and
*.phy files only
Stores in self.files """
compression = ['', 'gz', 'bz2']
if file_format == 'fasta':
extensions = ['fa', 'fas', 'fasta']
elif file_format == 'phylip':
extensions = ['phy']
else:
extensions = []
extensions = list('.'.join([x,y]) if y else x for x,y in itertools.product(extensions, compression))
files = fileIO.glob_by_extensions(input_dir, extensions)
files.sort(key=SORT_KEY)
self._input_files = files
records = []
if self.show_progress:
pbar = setup_progressbar("Loading files", len(files), simple_progress=True)
pbar.start()
for i, f in enumerate(files):
if f.endswith('.gz') or f.endswith('.bz2'):
fd, tmpfile = tempfile.mkstemp()
with fileIO.freader(f, f.endswith('.gz'), f.endswith('.bz2')) as reader,\
fileIO.fwriter(tmpfile) as writer:
for line in reader:
if ISPY3:
line = line.decode()
writer.write(line)
try:
record = Alignment(tmpfile, file_format, True)
except ValueError:
record = Alignment(tmpfile, file_format, False)
finally:
os.close(fd)
os.unlink(tmpfile)
else:
try:
record = Alignment(f, file_format, True)
except RuntimeError:
record = Alignment(f, file_format, False)
if header_grep:
try:
datatype = 'dna' if record.is_dna() else 'protein'
record = Alignment([(header_grep(x), y) for (x, y) in record.get_sequences()], datatype)
except TypeError:
raise TypeError("Couldn't apply header_grep to header\n"
"alignment number={}, name={}\n"
"header_grep={}".format(i, fileIO.strip_extensions(f), header_grep))
except RuntimeError:
print('RuntimeError occurred processing alignment number={}, name={}'
.format(i, fileIO.strip_extensions(f)))
raise
record.name = (fileIO.strip_extensions(f))
records.append(record)
if self.show_progress:
pbar.update(i)
if self.show_progress:
pbar.finish()
return records
def read_trees(self, input_dir):
""" Read a directory full of tree files, matching them up to the
already loaded alignments """
if self.show_progress:
pbar = setup_progressbar("Loading trees", len(self.records))
pbar.start()
for i, rec in enumerate(self.records):
hook = os.path.join(input_dir, '{}.nwk*'.format(rec.name))
filename = glob.glob(hook)
try:
with fileIO.freader(filename[0]) as infile:
tree = infile.read().decode('utf-8')
d = dict(ml_tree=tree)
rec.parameters.construct_from_dict(d)
except (IOError, IndexError):
continue
finally:
if self.show_progress:
pbar.update(i)
if self.show_progress:
pbar.finish()
def read_parameters(self, input_dir):
""" Read a directory full of json parameter files, matching them up to the
already loaded alignments """
if self.show_progress:
pbar = setup_progressbar("Loading parameters", len(self.records))
pbar.start()
for i, rec in enumerate(self.records):
hook = os.path.join(input_dir, '{}.json*'.format(rec.name))
filename = glob.glob(hook)
try:
with fileIO.freader(filename[0]) as infile:
d = json.loads(infile.read().decode('utf-8'))
rec.parameters.construct_from_dict(d)
except (IOError, IndexError):
continue
finally:
if self.show_progress:
pbar.update(i)
if self.show_progress:
pbar.finish()
def write_parameters(self, output_dir, gz=False):
if not os.path.exists(output_dir):
try:
os.makedirs(output_dir)
except IOError as err:
sys.stderr.write(err.message)
raise err
for rec in self.records:
with fileIO.fwriter(os.path.join(output_dir, '{}.json'.format(rec.name)), gz=gz) as outfile:
rec.parameters.write(outfile, indent=4)
class RecordsCalculatorMixin(object):
def calc_distances(self, indices=None, task_interface=None, jobhandler=default_jobhandler, batchsize=1,
show_progress=True):
"""
Calculate fast approximate intra-alignment pairwise distances and variances using
ML (requires ML models to have been set up using `calc_trees`).
:return: None (all side effects)
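Hedged usage sketch (the jobhandler is illustrative): c.calc_distances(jobhandler=SequentialJobHandler())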
"""
if indices is None:
indices = list(range(len(self)))
if task_interface is None:
task_interface = tasks.MLDistanceTaskInterface()
records = [self[i] for i in indices]
# Assemble argument lists
args, to_delete = task_interface.scrape_args(records)
# Dispatch
msg = '{} estimation'.format(task_interface.name) if show_progress else ''
map_result = jobhandler(task_interface.get_task(), args, msg, batchsize)
# Process results
with fileIO.TempFileList(to_delete):
# pbar = setup_progressbar('Processing results', len(map_result))
# j = 0
# pbar.start()
for rec, result in zip(records, map_result):
rec.parameters.partitions.distances = result['partitions'][0]['distances']
rec.parameters.partitions.variances = result['partitions'][0]['variances']
rec.parameters.nj_tree = result['nj_tree']
# pbar.update(j+1)
# j += 1
# pbar.finish()
def calc_trees(self, indices=None, task_interface=None, jobhandler=default_jobhandler, batchsize=1,
show_progress=True, **kwargs):
"""
Infer phylogenetic trees for the loaded Alignments
:param indices: Only run inference on the alignments at these given indices
:param task_interface: Inference tool specified via TaskInterface (default RaxmlTaskInterface)
:param jobhandler: Launch jobs via this JobHandler (default SequentialJobHandler; also available are
ThreadpoolJobHandler and ProcesspoolJobHandler for running inference in parallel)
:param batchsize: Batch size for Thread- or ProcesspoolJobHandlers)
:param kwargs: Remaining arguments to pass to the TaskInterface
:return: None
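Hedged usage sketch (task interface and kwargs are illustrative):
c.calc_trees(task_interface=tasks.RaxmlTaskInterface(), threads=1)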
"""
if indices is None:
indices = list(range(len(self)))
if task_interface is None:
task_interface = tasks.RaxmlTaskInterface()
records = [self[i] for i in indices]
# Scrape args from records
args, to_delete = task_interface.scrape_args(records, **kwargs)
# Dispatch work
msg = '{} Tree estimation'.format(task_interface.name) if show_progress else ''
map_result = jobhandler(task_interface.get_task(), args, msg, batchsize)
# Process results
with fileIO.TempFileList(to_delete):
for rec, result in zip(records, map_result):
#logger.debug('Result - {}'.format(result))
rec.parameters.construct_from_dict(result)
def get_inter_tree_distances(self, metric, jobhandler=default_jobhandler,
normalise=False, min_overlap=4, overlap_fail_value=0,
batchsize=1, show_progress=True):
""" Generate a distance matrix from a fully-populated Collection.
Can silence progressbars with show_progress=False option
:param metric: str. Tree distance metric to use. Choice of 'euc', 'geo', 'rf', 'wrf'.
:param jobhandler: treeCl.Jobhandler. Choice of SequentialJobHandler, ThreadpoolJobHandler, or
ProcesspoolJobHandler.
:param normalise: Bool. Whether to normalise the tree distance to the size of the leaf set.
:param min_overlap: int. Trees with fewer leaves in common than this threshold will not have their distance
calculated, but instead the distance returned will be the value in `overlap_fail_value`.
:param overlap_fail_value: Any. The distance between trees with fewer leaves in common than `min_overlap`
is set to this value.
:param batchsize: int. Number of jobs to process in a batch when using a ProcesspoolJobHandler or a
ThreadpoolJobHandler.
:return: treeCl.DistanceMatrix.
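Hedged usage sketch (metric names come from the mapping below):
dm = c.get_inter_tree_distances('geo')
dm_rf = c.get_inter_tree_distances('fastrf') (the 'fast' variants assume all trees share the same leaf set)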
"""
metrics = {'euc': tasks.EuclideanTreeDistance,
'geo': tasks.GeodesicTreeDistance,
'rf': tasks.RobinsonFouldsTreeDistance,
'wrf': tasks.WeightedRobinsonFouldsTreeDistance,
'fasteuc': tasks.EqualLeafSetEuclideanTreeDistance,
'fastgeo': tasks.EqualLeafSetGeodesicTreeDistance,
'fastrf': tasks.EqualLeafSetRobinsonFouldsTreeDistance,
'fastwrf': tasks.EqualLeafSetWeightedRobinsonFouldsTreeDistance}
optioncheck(metric, list(metrics.keys()))
task_interface = metrics[metric]()
if metric.startswith('fast'):
trees = (PhyloTree(newick, False) for newick in self.trees)
else:
trees = self.trees
args = task_interface.scrape_args(trees, normalise, min_overlap, overlap_fail_value)
logger.debug('{}'.format(args))
msg = task_interface.name if show_progress else ''
array = jobhandler(task_interface.get_task(), args, msg, batchsize, nargs=binom_coeff(len(trees)))
return DistanceMatrix.from_array(squareform(array), self.names)
class Collection(RecordsHandler, RecordsCalculatorMixin):
""" Call:
c = Collection(input_dir, file_format, datatype, tmpdir ...)
c.calc_distances(), c.calc_TC_trees(), ...
dm = c.distance_matrix('geo')
cl = Clustering(dm)
k = cl.spectral(4, prune='estimate', local_scale=7)
p = Partition(k) """
def species_set(self):
return reduce(lambda x, y: set(x) | set(y),
(rec.get_names() for rec in self.records))
def num_species(self):
""" Returns the number of species found over all records
"""
all_headers = reduce(lambda x, y: set(x) | set(y),
(rec.get_names() for rec in self.records))
return len(all_headers)
def permuted_copy(self, partition=None):
""" Return a copy of the collection with all alignment columns permuted
"""
def take(n, iterable):
return [next(iterable) for _ in range(n)]
if partition is None:
partition = Partition([1] * len(self))
index_tuples = partition.get_membership()
alignments = []
for ix in index_tuples:
concat = Concatenation(self, ix)
sites = concat.alignment.get_sites()
random.shuffle(sites)
d = dict(zip(concat.alignment.get_names(), [iter(x) for x in zip(*sites)]))
new_seqs = [[(k, ''.join(take(l, d[k]))) for k in d] for l in concat.lengths]
for seqs, datatype, name in zip(new_seqs, concat.datatypes, concat.names):
alignment = Alignment(seqs, datatype)
alignment.name = name
alignments.append(alignment)
return self.__class__(records=sorted(alignments, key=lambda x: SORT_KEY(x.name)))
def concatenate(self, indices):
return Concatenation(self, indices)
class Scorer(object):
def __init__(self, collection, cache_dir, task_interface):
"""
Coordinates scoring of (usually) multilocus alignments in a partition
"""
self.collection = collection
self.cache_dir = cache_dir
self.task_interface = task_interface
self.cache = {}
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
if not os.path.exists(cache_dir):
raise IOError('\'{}\' does not exist'.format(cache_dir))
def get_id(self, grp):
"""
Return a hash of the tuple of indices that specify the group
"""
thehash = hex(hash(grp))
if ISPY3: # use default encoding to get bytes
thehash = thehash.encode()
return self.cache.get(grp, hashlib.sha1(thehash).hexdigest())
def check_work_done(self, grp):
"""
Check for the existence of alignment and result files.
"""
id_ = self.get_id(grp)
concat_file = os.path.join(self.cache_dir, '{}.phy'.format(id_))
result_file = os.path.join(self.cache_dir, '{}.{}.json'.format(id_, self.task_interface.name))
return os.path.exists(concat_file), os.path.exists(result_file)
def write_group(self, grp, overwrite=False, **kwargs):
"""
Write the concatenated alignment to disk in the location specified by
self.cache_dir
"""
id_ = self.get_id(grp)
alignment_done, result_done = self.check_work_done(grp)
self.cache[grp] = id_
al_filename = os.path.join(self.cache_dir, '{}.phy'.format(id_))
qfile_filename = os.path.join(self.cache_dir, '{}.partitions.txt'.format(id_))
if overwrite or not (alignment_done or result_done):
conc = self.collection.concatenate(grp)
al = conc.alignment
al.write_alignment(al_filename, 'phylip', True)
q = conc.qfile(**kwargs)
with open(qfile_filename, 'w') as fl:
fl.write(q + '\n')
def get_group_result(self, grp, **kwargs):
"""
Retrieve the results for a group. Needs this to already be calculated -
errors out if result not available.
"""
id_ = self.get_id(grp)
self.cache[grp] = id_
# Check if this file is already processed
alignment_written, results_written = self.check_work_done(grp)
if not results_written:
if not alignment_written:
self.write_group(grp, **kwargs)
logger.error('Alignment {} has not been analysed - run analyse_cache_dir'.format(id_))
raise ValueError('Missing result')
else:
with open(self.get_result_file(id_)) as fl:
return json.load(fl)
def get_result_file(self, id_):
f = os.path.join(self.cache_dir, id_ + '.phy')
return f.replace('.phy', '.{}.json'.format(self.task_interface.name))
def write_partition(self, p, overwrite=False, **kwargs):
for grp in p.get_membership():
self.write_group(grp, overwrite, **kwargs)
def analyse_cache_dir(self, jobhandler=None, batchsize=1, **kwargs):
"""
Scan the cache directory and launch analysis for all unscored alignments
using associated task handler. KWargs are passed to the tree calculating
task managed by the TaskInterface in self.task_interface.
Example kwargs:
TreeCollectionTaskInterface: scale=1, guide_tree=None,
niters=10, keep_topology=False
RaxmlTaskInterface: -------- partition_files=None, model=None, threads=1
FastTreeTaskInterface: ----- No kwargs
"""
if jobhandler is None:
jobhandler = SequentialJobHandler()
files = glob.glob(os.path.join(self.cache_dir, '*.phy'))
#logger.debug('Files - {}'.format(files))
records = []
outfiles = []
dna = self.collection[0].is_dna() # THIS IS ONLY A GUESS AT SEQ TYPE!!
for infile in files:
id_ = fileIO.strip_extensions(infile)
outfile = self.get_result_file(id_)
#logger.debug('Looking for {}: {}'.format(outfile, os.path.exists(outfile)))
if not os.path.exists(outfile):
record = Alignment(infile, 'phylip', True)
records.append(record)
outfiles.append(outfile)
if len(records) == 0:
return []
args, to_delete = self.task_interface.scrape_args(records, outfiles=outfiles, **kwargs)
# logger.debug('Args - {}'.format(args))
with fileIO.TempFileList(to_delete):
result = jobhandler(self.task_interface.get_task(), args, 'Cache dir analysis', batchsize)
for (out, res) in zip(outfiles, result):
if not os.path.exists(out) and res:
with open(out, 'w') as outfl:
json.dump(res, outfl)
return result
def get_partition_score(self, p):
"""
Assumes analysis is done and written to id.json!
"""
scores = []
for grp in p.get_membership():
try:
result = self.get_group_result(grp)
scores.append(result['likelihood'])
except ValueError:
scores.append(None)
return sum(scores)
def get_partition_trees(self, p):
"""
Return the trees associated with a partition, p
"""
trees = []
for grp in p.get_membership():
try:
result = self.get_group_result(grp)
trees.append(result['ml_tree'])
except ValueError:
trees.append(None)
logger.error('No tree found for group {}'.format(grp))
return trees
def get_partition_members(self, p):
result = []
for grp in p.get_membership():
members = [self.collection[i].name for i in grp]
result.append(members)
return result
def get_partition_results(self, p):
results = []
for grp in p.get_membership():
try:
result = self.get_group_result(grp)
results.append(result)
except ValueError:
results.append(None)
logger.error('No result found for group {}'.format(grp))
return results
def clean_cache(self):
files = glob.glob(os.path.join(self.cache_dir, '*.json'))
for f in files:
id_ = fileIO.strip_extensions(f)
alfile = os.path.join(self.cache_dir, '{}.phy'.format(id_))
qfile = os.path.join(self.cache_dir, '{}.partitions.txt'.format(id_))
if os.path.exists(alfile): os.remove(alfile)
if os.path.exists(qfile): os.remove(qfile)
alfile = os.path.join(self.cache_dir, '{}.phy.reduced'.format(id_))
qfile = os.path.join(self.cache_dir, '{}.partitions.txt.reduced'.format(id_))
if os.path.exists(alfile): os.remove(alfile)
if os.path.exists(qfile): os.remove(qfile)
def simulate(self, partition, outdir, jobhandler=default_jobhandler, batchsize=1, **kwargs):
"""
Simulate a set of alignments from the parameters inferred on a partition
:param partition:
:return:
"""
results = self.get_partition_results(partition)
DEFAULT_DNA_MODEL = 'GTR'
DEFAULT_PROTEIN_MODEL = 'LG08'
# Collect argument list
args = [None] * len(self.collection)
for result in results:
if len(result['partitions']) > 1:
places = dict((j,i) for (i,j) in enumerate(rec.name for rec in self.collection.records))
for partition in result['partitions'].values():
place = places[partition['name']]
model = partition.get('model')
freqs = partition.get('frequencies')
rates = partition.get('rates')
alpha = partition.get('alpha')
tree = str(result['ml_tree'])
if model is None:
model = DEFAULT_DNA_MODEL if self.collection[place].is_dna() else DEFAULT_PROTEIN_MODEL
if freqs is not None:
freqs = smooth_freqs(freqs)
args[place] = (len(self.collection[place]),
model_translate(model),
freqs,
alpha,
tree,
rates)
else:
model = result['partitions']['0'].get('model')
freqs = result['partitions']['0'].get('frequencies')
rates = result['partitions']['0'].get('rates')
alpha = result['partitions']['0'].get('alpha')
tree = str(result['ml_tree'])
if freqs is not None:
freqs = smooth_freqs(freqs)
use_default_model = (model is None)
for i in range(len(self.collection)):
if use_default_model:
model = DEFAULT_DNA_MODEL if self.collection[i].is_dna() else DEFAULT_PROTEIN_MODEL
args[i] = (len(self.collection[i]),
model_translate(model),
freqs,
alpha,
tree,
rates)
# Distribute work
msg = 'Simulating'
map_result = jobhandler(tasks.simulate_task, args, msg, batchsize)
# Process results
for i, result in enumerate(map_result):
orig = self.collection[i]
simseqs = gapmask(result.items(), orig.get_sequences())
al = Alignment(simseqs, alphabet=('protein' if orig.is_protein() else 'dna'))
outfile = os.path.join(outdir, orig.name + '.phy')
al.write_alignment(outfile, 'phylip', True)
class Optimiser(object):
""" Perform the Classification-Expectation-Maximisation (CEM) algorithm (1)
Brief:
Optimise the assignment of N data points to K groups by cycling through 3 steps
- Expectation - Calculate probabilities of group membership for each data point x_i
to group P_k, based on current model parameters
- Classification - Assign each data point x_i to one group P_k according to the
probability of membership
- Maximisation - Update model parameters according to new membership
(1) Celeux,G. and Govaert,G. (1992) A classification EM algorithm for clustering
and two stochastic versions. Comput. Stat. Data Anal.,14,315-332"""
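# Hedged usage sketch (names are illustrative; assumes `coll` is a Collection whose
# per-locus trees and parameters are already loaded, so the gamma models can be built):
#   scorer = Scorer(coll, cache_dir='cem_cache', task_interface=tasks.RaxmlTaskInterface())
#   opt = Optimiser(scorer, numgrp=4)
#   opt.set_partition(opt.random_partition(4))
#   for _ in range(10):  # E -> C -> M, repeated until the assignment stabilises
#       partition, lnl, perlocus_lnl = opt.iterate()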
def __init__(self, scorer, numgrp, partition=None, **kwargs):
self.scorer = scorer
self.numgrp = numgrp
self.partition = None
self.prev_partition = None
if partition is not None:
self.set_partition(partition)
self.insts = self.init_perlocus_likelihood_objects(**kwargs)
self.names_to_indices = dict((rec.name, i) for (i, rec) in enumerate(scorer.collection))
self.iterations = 0
self.log = []
self.lktable = None
self.table = None
def expect(self, use_proportions=True):
""" The Expectation step of the CEM algorithm """
changed = self.get_changed(self.partition, self.prev_partition)
lk_table = self.generate_lktable(self.partition, changed, use_proportions)
self.table = self.likelihood_table_to_probs(lk_table)
def classify(self, table, weighted_choice=False, transform=None):
""" The Classification step of the CEM algorithm """
assert table.shape[1] == self.numgrp
if weighted_choice:
if transform is not None:
probs = transform_fn(table.copy(), transform) #
else:
probs = table.copy()
cmprobs = probs.cumsum(1)
logger.info('Probabilities\n{}'.format(probs))
r = np.random.random(cmprobs.shape[0])
search = np.apply_along_axis(np.searchsorted, 1, cmprobs, r) # Not very efficient
assignment = np.diag(search)
else:
probs = table
assignment = np.where(probs==probs.max(1)[:, np.newaxis])[1]
logger.info('Assignment\n{}'.format(assignment))
assignment = self._fill_empty_groups(probs, assignment) # don't want empty groups
new_partition = Partition(tuple(assignment))
self.set_partition(new_partition)
def maximise(self, **kwargs):
""" The Maximisation step of the CEM algorithm """
self.scorer.write_partition(self.partition)
self.scorer.analyse_cache_dir(**kwargs)
self.likelihood = self.scorer.get_partition_score(self.partition)
self.scorer.clean_cache()
changed = self.get_changed(self.partition, self.prev_partition)
self.update_perlocus_likelihood_objects(self.partition, changed)
return self.partition, self.likelihood, sum(inst.get_likelihood() for inst in self.insts)
def iterate(self, use_proportions=True, weighted_choice=False, transform=None, **kwargs):
self.expect(use_proportions)
self.classify(self.table, weighted_choice, transform)
self.iterations += 1
result = self.maximise(**kwargs)
self.log.append(result)
return result
def random_partition(self, ngroups):
items = len(self.scorer.collection)
r = np.zeros(items)
r[:ngroups] = np.arange(ngroups)
r[ngroups:] = np.random.randint(ngroups, size=items-ngroups)
np.random.shuffle(r)
return Partition(tuple(r))
def set_partition(self, partition):
"""
Store the partition in self.partition, and
move the old self.partition into self.prev_partition
"""
assert len(partition) == self.numgrp
self.partition, self.prev_partition = partition, self.partition
def init_perlocus_likelihood_objects(self, **kwargs):
c = self.scorer.collection
insts = []
for rec in c:
gamma = create_gamma_model(rec, list(c.species_set() - set(rec.get_names())), **kwargs)
gamma.set_tree(rec.tree)
insts.append(gamma)
return insts
def get_cluster_at_index(self, i):
"""
Return the cluster membership of locus i, according to current
assignment
"""
return self.partition.partition_vector[i]
def get_changed(self, p1, p2):
"""
Return the loci that are in clusters that have changed between
partitions p1 and p2
"""
if p1 is None or p2 is None:
return list(range(len(self.insts)))
return set(flatten_list(set(p1) - set(p2)))
def update_perlocus_likelihood_objects_old(self, partition, changed):
results = self.scorer.get_partition_results(partition)
UNPARTITIONED=False
for result in results:
subdict = result['partitions']
if len(subdict) > 1:
for k in subdict:
p = subdict[k]
index = self.names_to_indices[p['name']]
if index in changed:
inst = self.insts[index]
self._update_likelihood_model(inst, p, result['ml_tree'])
else:
UNPARTITIONED=True # not nice, but I'm in a hurry
if UNPARTITIONED:
prev_partition = self.partition
for i in changed:
cluster = self.get_cluster_at_index(i)
inst = self.insts[i]
result = results[cluster]
p = result['partitions']['0']
self._update_likelihood_model(inst, p, result['ml_tree'])
def update_perlocus_likelihood_objects(self, partition, changed):
for grp in partition:
result = self.scorer.get_group_result(grp)
tree = result['ml_tree']
for i in grp:
self._update_likelihood_model(self.insts[i], result['partitions']['0'], tree)
# self.insts[i].set_tree(tree)
# self.insts[i].update_alpha(result['partitions']['0']['alpha'])
def _update_likelihood_model(self, inst, partition_parameters, tree):
"""
Set parameters of likelihood model - inst -
using values in dictionary - partition_parameters -,
and - tree -
"""
# Build transition matrix from dict
model = partition_parameters['model']
freqs = partition_parameters.get('frequencies')
if model == 'LG':
subs_model = phylo_utils.models.LG(freqs)
elif model == 'WAG':
subs_model = phylo_utils.models.WAG(freqs)
elif model == 'GTR':
rates = partition_parameters.get('rates')
subs_model = phylo_utils.models.GTR(rates, freqs, True)
else:
raise ValueError("Can't handle this model: {}".format(model))
tm = phylo_utils.markov.TransitionMatrix(subs_model)
# Read alpha value
alpha = partition_parameters['alpha']
inst.set_tree(tree)
inst.update_alpha(alpha)
inst.update_transition_matrix(tm)
def generate_lktable(self, partition, changed, use_proportions=True):
trees = self.scorer.get_partition_trees(partition)
# Try to call up table from previous step
prev_lktable = self.lktable
if use_proportions:
sizes = np.array([len(x) for x in partition.get_membership()], dtype=np.double)
total = partition.num_elements()
logproportions = np.log(sizes/total)
else:
logproportions = np.zeros(partition.num_elements())
lktable = np.zeros((len(self.insts), self.numgrp))
for i, gamma in enumerate(self.insts):
if i in changed or prev_lktable is None:
for j, t in enumerate(trees):
gamma.set_tree(t)
lktable[i, j] = logproportions[j] + gamma.get_likelihood()
else:
lktable[i] = prev_lktable[i]
self.lktable = lktable
return lktable
def likelihood_table_to_probs(self, lktable):
"""
Calculates this formula (1), given the log of the numerator as input
t_k(x_i) = p_k * f(x_i, a_k) / sum_{k=1}^{K} [ p_k * f(x_i, a_k) ]
x_i is data point i
P_k is cluster k of K
t_k is the posterior probability of x_i belonging to P_k
p_k is the prior probability of belonging to P_k (the proportional size of P_k)
f(x, a) is the likelihood of x with parameters a
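Note: the implementation below uses the log-sum-exp trick, i.e. a row-wise softmax of
the log-likelihoods. For example, a row [-1000.0, -1001.0] would underflow if
exponentiated directly; shifting by the row maximum gives exp([0.0, -1.0]) = [1.0, 0.368],
which normalises to approximately [0.731, 0.269].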
"""
m = lktable.max(1) # row max of lktable
shifted = lktable-m[:,np.newaxis] # shift lktable of log-likelihoods to a non-underflowing range
expsum = np.exp(shifted).sum(1) # convert logs to (scaled) normal space, and sum the rows
logexpsum = np.log(expsum)+m # convert back to log space, and undo the scaling
return np.exp(lktable - logexpsum[:, np.newaxis])
def _fill_empty_groups(self, probs, assignment):
new_assignment = np.array(assignment.tolist())
for k in range(probs.shape[1]):
if np.count_nonzero(assignment==k) == 0:
logger.info('Group {} became empty'.format(k))
# Group k is empty, so needs another group to transfer a member
# Base this on probability
ix = probs[:,k].argsort()[::-1]
for i in ix:
# i is our candidate for transfer
# but first check that moving it
# doesn't empty its current group
curr_grp = new_assignment[i]
curr_grp_count = np.count_nonzero(new_assignment==curr_grp)
if curr_grp_count < 2:
logger.info('Transferring item {} would cause group {} to become empty!'.format(i, curr_grp))
if curr_grp_count > 1:
new_assignment[i] = k
logger.info('Transferred item {} to group {}'.format(i, k))
break
return new_assignment
def _fill_empty_groups_old(self, probs, assignment):
""" Does the simple thing - if any group is empty, but needs to have at
least one member, assign the data point with highest probability of
membership """
new_assignment = np.array(assignment.tolist())
for k in range(self.numgrp):
if np.count_nonzero(assignment==k) == 0:
logger.info('Group {} became empty'.format(k))
best = np.where(probs[:,k]==probs[:,k].max())[0][0]
new_assignment[best] = k
new_assignment = self._fill_empty_groups(probs, new_assignment)
return new_assignment
def wipe_partition(self, partition):
""" Deletes analysis result of partition, e.g. so a repeat
optimisation of the same partition can be done with a
different model """
for grp in partition.get_membership():
grpid = self.scorer.get_id(grp)
cache_dir = self.scorer.cache_dir
prog = self.scorer.task_interface.name
filename = os.path.join(cache_dir, '{}.{}.json'.format(grpid, prog))
if os.path.exists(filename):
os.unlink(filename)
def likelihood_distance_matrix(self):
# Assume all parameters are already updated
dm = np.empty((len(self.scorer.collection), len(self.scorer.collection)))
for i, gamma in enumerate(self.insts):
for j, rec in enumerate(self.scorer.collection):
gamma.set_tree(rec.tree)
dm[i, j] = gamma.get_likelihood()
scaled = (dm - np.diag(dm)[:, np.newaxis])
return DistanceMatrix.from_array(-0.5 * (scaled + scaled.T))
```
#### File: treecl-nix/treeCl/parsers.py
```python
from __future__ import absolute_import
from builtins import zip
from builtins import range
from builtins import object
from pyparsing import Suppress, SkipTo, Word, Regex, Literal, OneOrMore, Group, LineEnd, CharsNotIn, nums, alphanums, ParseException
import logging
logger = logging.getLogger(__name__)
import os
import numpy as np
FLOAT = Word(nums + '.-').setParseAction(lambda x: float(x[0]))
INT = Word(nums).setParseAction(lambda x: int(x[0]))
WORD = Word(alphanums+'_')
SPACEDWORD = Word(alphanums+' _')
class PhymlParser(object):
"""
Simple phyml result parser. Assumes one of the standard models for nucleotide analyses.
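Hedged usage sketch (file names are illustrative phyml output names):
parser = PhymlParser()
model, alpha, lnl, freqs, rates = parser.parse('aln_phyml_stats.txt')
result = parser.to_dict('aln_phyml_stats.txt', 'aln_phyml_tree.txt')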
"""
def __init__(self):
self.MODEL_LABEL = Regex(r'Model of.*substitution:\s+')
self.ALPHA_LABEL = Regex(r'Gamma shape parameter:\s+')
self.LNL_LABEL = Regex(r'Log-likelihood:\s+')
self.F_LABEL = Regex(r'f\(([ACGT])\)=\s+')
self.R_LABEL = Regex(r'[ACGT]\s+<->\s+[ACGT]\s+')
self.TSTV_LABEL = Regex(r'Transition/transversion ratio.*:\s+')
self.model = Suppress(SkipTo(self.MODEL_LABEL)) + Suppress(self.MODEL_LABEL) + WORD
self.lnl = Suppress(SkipTo(self.LNL_LABEL)) + Suppress(self.LNL_LABEL) + FLOAT
self.alpha = Suppress(SkipTo(self.ALPHA_LABEL)) + Suppress(self.ALPHA_LABEL) + FLOAT
self.common = self.model + self.lnl + self.alpha
self.tstv = OneOrMore(Suppress(SkipTo(self.TSTV_LABEL)) + Suppress(self.TSTV_LABEL) + FLOAT)
self.freq = OneOrMore(Suppress(SkipTo(self.F_LABEL)) + Suppress(self.F_LABEL) + FLOAT)
self.rates = OneOrMore(Suppress(SkipTo(self.R_LABEL)) + Suppress(self.R_LABEL) + FLOAT)
self.gtr_specific = Group(self.freq) + Group(self.rates)
self.hky_specific = Group(self.tstv) + Group(self.freq)
def parse(self, filename):
model = None
alpha = None
lnl = None
freq = None
rates = None
with open(filename) as fl:
s = fl.read()
try:
model, lnl, alpha = self.common.parseString(s).asList()
except ParseException as err:
logger.error(err)
if model == 'JC69':
freq = [0.25, 0.25, 0.25, 0.25]
rates = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
elif model == 'K80':
freq = [0.25, 0.25, 0.25, 0.25]
try:
tstv = self.tstv.parseString(s).asList()
except ParseException as err:
logger.error(err)
rates = [1.0, tstv[0], 1.0, 1.0, tstv[0], 1.0]
elif model == 'F81':
try:
freq = self.freq.parseString(s).asList()
except ParseException as err:
logger.error(err)
rates = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
elif model == 'F84' or model == 'HKY85' or model == 'TN93':
parser = Group(self.tstv) + Group(self.freq)
try:
tstv, freq = parser.parseString(s).asList()
except ParseException as err:
logger.error(err)
if model == 'TN93':
rates = [1.0, tstv[0], 1.0, 1.0, tstv[1], 1.0]
else:
rates = [1.0, tstv[0], 1.0, 1.0, tstv[0], 1.0]
elif model == 'GTR':
parser = Group(self.freq) + Group(self.rates)
try:
freq, rates = parser.parseString(s).asList()
except ParseException as err:
logger.error(err)
return model, alpha, lnl, freq, rates
def to_dict(self, stats_filename, tree_filename):
model, alpha, lnl, freq, rates = self.parse(stats_filename)
try:
with open(tree_filename) as treefl:
tree = treefl.read().rstrip()
except IOError as err:
logger.error(err)
return
result = {'likelihood': lnl,
'partitions': {0: {'alpha': alpha,
'frequencies': freq,
'rates': rates,
'model': model}},
'ml_tree': tree}
return result
class RaxmlParser(object):
def __init__(self):
self.ALPHA_LABEL = Regex(r'alpha\[\d+\]:')
self.LNL_LABEL = Literal('Final GAMMA-based Score of best tree')
self.FRQ_LABEL = Regex(r'Base frequencies: (?=\d+)') ^ Regex(r'ML estimate base freqs\[\d+\]:')
self.NAMES_LABEL = Regex(r'Partition: \d+ with name:\s+')
self.RATES_LABEL = Regex(r'rates\[\d+\].+?:')
self.MODEL_LABEL = Literal('Substitution Matrix:')
self.alpha = OneOrMore(Suppress(SkipTo(self.ALPHA_LABEL)) + Suppress(self.ALPHA_LABEL) + FLOAT)
self.lnl = Suppress(SkipTo(self.LNL_LABEL)) + Suppress(self.LNL_LABEL) + FLOAT
self.frq = OneOrMore(Group(Suppress(SkipTo(self.FRQ_LABEL)) + Suppress(self.FRQ_LABEL) + OneOrMore(FLOAT)))
self.names = OneOrMore(Suppress(SkipTo(self.NAMES_LABEL)) + Suppress(self.NAMES_LABEL) + CharsNotIn('\n') + Suppress(LineEnd()))
self.rates = OneOrMore(Group(Suppress(SkipTo(self.RATES_LABEL)) + Suppress(self.RATES_LABEL) + OneOrMore(FLOAT)))
self.model = Suppress(SkipTo(self.MODEL_LABEL)) + Suppress(self.MODEL_LABEL) + WORD
MODEL_LABEL = Literal('Substitution Matrix:')
SCORE_LABEL = Literal('Final GAMMA likelihood:')
DESC_LABEL = Literal('Model Parameters of Partition')
NAME_LEADIN = Literal(', Name:')
DATATYPE_LEADIN = Literal(', Type of Data:')
ALPHA_LEADIN = Literal('alpha:')
TREELENGTH_LEADIN = Literal('Tree-Length:')
RATES_LABEL = Regex(r'rate \w <-> \w:')
FREQS_LABEL = Regex(r'freq pi\(\w\):')
BEST_LEADIN = Literal('Starting final GAMMA-based thorough Optimization on tree ')
PARTITION_LEADIN = Literal('Partition:')
INFERENCE_LEADIN = Literal('Inference[')
model = Suppress(SkipTo(MODEL_LABEL)) + Suppress(MODEL_LABEL) + WORD
likelihood = Suppress(SkipTo(SCORE_LABEL)) + Suppress(SCORE_LABEL) + FLOAT
description = (Suppress(SkipTo(DESC_LABEL)) +
Suppress(DESC_LABEL) + INT +
Suppress(NAME_LEADIN) +
SPACEDWORD +
Suppress(DATATYPE_LEADIN) +
WORD)
alpha = Suppress(ALPHA_LEADIN) + FLOAT
rates = Suppress(RATES_LABEL) + FLOAT
freqs = Suppress(FREQS_LABEL) + FLOAT
self.partition = OneOrMore(Suppress(SkipTo(PARTITION_LEADIN)) + Suppress(PARTITION_LEADIN) + INT)
self.inference = OneOrMore(Suppress(SkipTo(INFERENCE_LEADIN)) + Suppress(INFERENCE_LEADIN) + INT)
self.best = Suppress(SkipTo(BEST_LEADIN)) + Suppress(BEST_LEADIN) + INT
self._dash_f_e_parser = (Group(OneOrMore(model)) +
likelihood +
Group(OneOrMore(Group(description +
alpha +
Suppress(TREELENGTH_LEADIN) +
Suppress(FLOAT) +
Group(OneOrMore(rates)) +
Group(OneOrMore(freqs))
))))
def parse(self, filename):
with open(filename) as fl:
s = fl.read()
try:
best_index = self.best.parseString(s)[0]
except ParseException as err:
logger.error(err)
best_index = 0
try:
n_partitions = max(self.partition.parseString(s).asList()) + 1
except ParseException as err:
logger.error(err)
n_partitions = 1
try:
n_inferences = max(self.inference.parseString(s).asList()) + 1
except ParseException as err:
logger.error(err)
n_inferences = 1
try:
alphas = self.alpha.parseString(s).asList()
except ParseException as err:
logger.error(err)
alphas = [None]
try:
freqs = self.frq.parseString(s).asList()
except ParseException as err:
logger.error(err)
freqs = [None]
try:
names = self.names.parseString(s).asList()
except ParseException as err:
logger.error(err)
names = [None]
try:
rates = self.rates.parseString(s).asList()
except ParseException:
rates = None
try:
lnl = self.lnl.parseString(s).asList()
except ParseException as err:
logger.error(err)
lnl = [0]
alphas = np.array(alphas).reshape(n_inferences, n_partitions)[best_index].tolist()
if n_inferences > 1 and len(freqs) == n_inferences * n_partitions:
logger.debug('Reshaping freqs for multiple inference result')
freqs = np.array(freqs).reshape(n_inferences, n_partitions, len(freqs[0]))[best_index].tolist()
if rates is not None and n_inferences > 1 and len(rates) == n_inferences * n_partitions:
logger.debug('Reshaping rates for multiple inference result')
rates = np.array(rates).reshape(n_inferences, n_partitions, len(rates[0]))[best_index].tolist()
return alphas, freqs, names, rates, lnl
def _dash_f_e_to_dict(self, info_filename, tree_filename):
"""
Raxml provides an option to fit model params to a tree,
selected with -f e.
The output is different and needs a different parser.
"""
with open(info_filename) as fl:
models, likelihood, partition_params = self._dash_f_e_parser.parseFile(fl).asList()
with open(tree_filename) as fl:
tree = fl.read()
d = {'likelihood': likelihood, 'ml_tree': tree, 'partitions': {}}
for model, params in zip(models, partition_params):
subdict = {}
index, name, _, alpha, rates, freqs = params
subdict['alpha'] = alpha
subdict['name'] = name
subdict['rates'] = rates
subdict['frequencies'] = freqs
subdict['model'] = model
d['partitions'][index] = subdict
return d
def to_dict(self, info_filename, tree_filename, dash_f_e=False):
"""
Parse raxml output and return a dict
Option dash_f_e=True will parse the output of a raxml -f e run,
which has different output
"""
logger.debug('info_filename: {} {}'
.format(info_filename, '(FOUND)' if os.path.exists(info_filename) else '(NOT FOUND)'))
logger.debug('tree_filename: {} {}'
.format(tree_filename, '(FOUND)' if os.path.exists(tree_filename) else '(NOT FOUND)'))
if dash_f_e:
return self._dash_f_e_to_dict(info_filename, tree_filename)
else:
return self._to_dict(info_filename, tree_filename)
def _to_dict(self, info_filename, tree_filename):
alpha, freqs, names, rates, lnl = self.parse(info_filename)
try:
with open(tree_filename) as fl:
tree = fl.read().rstrip()
except IOError as err:
logger.error('No tree file - raxml analysis failed')
return
n_parts = len(alpha)
assert len(freqs) == n_parts
assert len(names) == n_parts
if rates is not None:
assert len(rates) == n_parts
result = {'likelihood': lnl[0],
'partitions': {},
'ml_tree': tree}
for i in range(n_parts):
subdict = {}
subdict['alpha'] = alpha[i]
subdict['frequencies'] = freqs[i]
subdict['name'] = names[i]
if rates is not None:
subdict['rates'] = rates[i]
result['partitions'][i] = subdict
return result
```
#### File: treecl-nix/treeCl/simulator.py
```python
from __future__ import print_function, division
from builtins import zip
from builtins import str
from builtins import range
from builtins import object
# standard library
import numbers
import shutil
import sys
# third party
import numpy as np
# treeCl
from .partition import Partition
from .tree import Tree
from . import errors
from .utils import fileIO, print_and_return
class Simulator(object):
"""
Simulate alignments from several trees.
Args:
class_list = a list with an entry for each class, which is the
(integer) number of genes in that class
permutations_list = a list with an entry for each class, which is the
(integer) number of permutations the class tree has
relative to the master tree (see master_tree)
num_species = number of leaves on the master tree
datatype = 'dna' or 'protein'
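Hedged usage sketch (values are illustrative; mirrors the __main__ block below and
assumes the ALF wrapper used by write_alf_params is importable):
sim = Simulator(class_list=[5, 5], permutations_list=[1, 2], nspecies=12,
datatype='protein', class_tree_permuter='nni', tmpdir='/tmp', outdir='./sim_output')
records = sim.run()
sim.write()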
"""
def __init__(
self,
class_list,
permutations_list,
nspecies,
subst_model=None,  # accepted but not referenced in this module
rate_model=None,  # accepted but not referenced in this module
datatype='protein',  # 'dna' or 'protein' (see class docstring); used as self.datatype below
master_tree_generator_method='yule',
master_tree=None,
class_tree_permuter='nni',
gene_length_kappa=1.7719,
gene_length_theta=279.9,
gene_length_min=10,
gamma_rate_param=None,
tmpdir='/tmp',  # working directory for the per-gene ALF runs; used as self.tmpdir below
outdir='./',
autocorrelated_relaxed_clock=False,
uncorrelated_relaxed_clock=False,
scale_rates=False,
verbosity=0,
):
# default
errors.optioncheck(master_tree_generator_method, ['yule', 'coal',
'rtree', 'custom'])
errors.optioncheck(class_tree_permuter, ['nni', 'spr', 'lgt', 'genetree'])
if master_tree is None and master_tree_generator_method == 'custom':
raise Exception('No custom tree was specified')
self.num_classes = len(class_list)
self.num_genes = sum(class_list)
self.class_list = class_list
self._master_tree = None
self.verbosity = verbosity
self.autocorrelated_relaxed_clock = autocorrelated_relaxed_clock
self.uncorrelated_relaxed_clock = uncorrelated_relaxed_clock
self.scale_rates = scale_rates
self.gene_trees = list()
if master_tree is None:
tree = self.generate_master_tree(master_tree_generator_method,
nspecies)
self.master_tree = tree
self.num_species = nspecies
else:
self.master_tree = master_tree
if len(master_tree) != nspecies:
msg = [
'Warning: supplied tree has {0} taxa.'.format(
len(master_tree)),
'Required number is {0}.\n'.format(nspecies),
'Resetting number of species to match the supplied tree.'
]
print(''.join(msg))
self.num_species = len(master_tree)
self.set_gene_lengths(gene_length_kappa, gene_length_theta,
gene_length_min)
self.gamma_rate_param = gamma_rate_param
self.permuter = class_tree_permuter
self.permutations_list = permutations_list
self.datatype = datatype
self.tmpdir = errors.directorymake(tmpdir)
self.outdir = outdir
self.generate_class_trees() # sets self.class_trees dict
self.make_alf_dirs() # sets self.alf_dirs dict
self.write_alf_params()
self.get_true_partition()
@property
def master_tree(self):
return self._master_tree
@master_tree.setter
def master_tree(self, tree):
self._master_tree = tree
def generate_master_tree(self, method, nspecies):
if method == 'yule':
tree = Tree.new_yule(nspecies)
tree.name = '{}_master_tree'.format(method)
return tree
elif method == 'coal':
tree = Tree.new_coal(nspecies)
tree.name = '{}_master_tree'.format(method)
return tree
elif method == 'rtree':
tree = Tree.new_rtree(nspecies)
tree.name = '{}_master_tree'.format(method)
return tree
def set_gene_lengths(
self,
kappa,
theta,
min_,
):
self.gene_length_kappa = kappa
self.gene_length_theta = theta
self.gene_length_min = min_
def generate_class_trees(self):
class_trees = {}
if self.permuter == 'genetree':
for k in range(self.num_classes):
class_trees[k + 1] = self.master_tree.sample_gene_tree(
scale_to=self.permutations_list[k])
else:
# Base trees for each class
for k in range(self.num_classes):
if self.permuter == 'nni':
t = self.master_tree.rnni(times=self.permutations_list[k])
t.name = 'class{}'.format(k + 1)
class_trees[k + 1] = t
elif self.permuter == 'spr':
t = self.master_tree.rspr(times=self.permutations_list[k],
disallow_sibling_sprs=True, keep_entire_edge=True)
t.name = 'class{}'.format(k + 1)
class_trees[k + 1] = t
elif self.permuter == 'lgt':
t = self.master_tree.rlgt(times=self.permutations_list[k],
disallow_sibling_lgts=True)
t.name = 'class{}'.format(k + 1)
class_trees[k + 1] = t
# Expand base class trees into individual trees
gene_trees = list()
for k in range(self.num_classes):
num_genes = self.class_list[k]
trees = list()
# populate the trees list
for _ in range(num_genes):
class_tree = class_trees[k + 1]
tree = Tree(class_tree.newick)
tree.name = class_tree.name
trees.append(tree)
# do per-tree rates/branch length adjustments
for i, tree in enumerate(trees, start=1):
if self.autocorrelated_relaxed_clock:
tree.autocorrelated_relaxed_clock(1, 0.01)
for node in tree.postorder_node_iter():
node.edge_length *= node.rate
tree.name += '_{}'.format(i)
elif self.uncorrelated_relaxed_clock:
tree.uncorrelated_relaxed_clock(1, 0.01 * tree.length())
for node in tree.postorder_node_iter():
node.edge_length *= node.rate
tree.name += '_{}'.format(i)
elif self.scale_rates:
coeff = np.random.uniform(0.666, 1.333)
tree.scale(coeff, inplace=True)
tree.name += '_{}'.format(i)
else:
tree.name += '_{}'.format(i)
gene_trees.extend(trees)
self.class_trees = class_trees
self.gene_trees = gene_trees
def make_alf_dirs(self):
alf_dirs = {}
for i, g in enumerate(self.gene_trees, start=1):
dirname = fileIO.join_path(self.tmpdir, g.name)
alf_dirs[i] = errors.directorymake(dirname)
self.alf_dirs = alf_dirs
def make_alf_dirs_(self):
""" DEPRECATED """
alf_dirs = {}
for k in range(self.num_classes):
dirname = fileIO.join_path(self.tmpdir, 'class{0:0>1}'.format(
k + 1))
alf_dirs[k + 1] = errors.directorymake(dirname)
self.alf_dirs = alf_dirs
def write_alf_params(self):
if not hasattr(self, 'alf_dirs'):
self.make_alf_dirs()
if not hasattr(self, 'class_trees'):
self.generate_class_trees()
alf_params = {}
for i, tree in enumerate(self.gene_trees, start=1):
alfdir = self.alf_dirs[i]
datatype = self.datatype
name = tree.name
num_genes = 1
seqlength = self.gene_length_min
gene_length_kappa = self.gene_length_kappa
gene_length_theta = self.gene_length_theta
alf_obj = ALF(tree=tree,
datatype=datatype, num_genes=num_genes,
seqlength=seqlength, gene_length_kappa=gene_length_kappa,
gene_length_theta=gene_length_theta, name=name, tmpdir=alfdir)
if isinstance(self.gamma_rate_param, numbers.Number):
alf_obj.params.rate_variation(self.gamma_rate_param)
if datatype == 'protein':
alf_obj.params.one_word_model('WAG')
else:
alf_obj.params.jc_model()
alf_params[i] = alf_obj
self.alf_params = alf_params
def write_alf_params_(self):
""" DEPRECATED """
if not hasattr(self, 'alf_dirs'):
self.make_alf_dirs()
if not hasattr(self, 'class_trees'):
self.generate_class_trees()
alf_params = {}
for k in range(self.num_classes):
alfdir = self.alf_dirs[k + 1]
tree = self.class_trees[k + 1]
datatype = self.datatype
name = 'class{0}'.format(k + 1)
num_genes = self.class_list[k]
seqlength = self.gene_length_min
gene_length_kappa = self.gene_length_kappa
gene_length_theta = self.gene_length_theta
alf_obj = ALF(tree=tree,
datatype=datatype, num_genes=num_genes,
seqlength=seqlength, gene_length_kappa=gene_length_kappa,
gene_length_theta=gene_length_theta, name=name, tmpdir=alfdir)
if datatype == 'protein':
alf_obj.params.one_word_model('WAG')
else:
alf_obj.params.jc_model()
alf_params[k + 1] = alf_obj
self.alf_params = alf_params
def clean(self):
if not hasattr(self, 'alf_dirs'):
return
for directory in self.alf_dirs.values():
shutil.rmtree(directory)
def run(self):
all_records = []
total_jobs = len(self.gene_trees)
for i, tree in enumerate(self.gene_trees, start=1):
if self.verbosity > 0:
print_and_return('Simulating {} ({:.1f}%)'.format(tree.name,
100 * i / total_jobs),
sys.stderr)
simulated_record = self.alf_params[i].run()[0]
simulated_record.name = tree.name
all_records.append(simulated_record)
self.result = all_records
self.clean()
return all_records
def run_(self):
""" DEPRECATED """
all_records = []
for k in range(self.num_classes):
simulated_records = self.alf_params[k + 1].run()
names = ['class{0}_{1:0>{2}}'.format(k + 1, i,
len(str(self.class_list[k]))) for i in range(1,
len(
simulated_records) + 1)]
for (rec, name) in zip(simulated_records, names):
rec.name = name
all_records.extend(simulated_records)
self.result = all_records
self.clean()
return all_records
def write(self):
if hasattr(self, 'result'):
errors.directorymake(self.outdir)
errors.directorymake(fileIO.join_path(self.outdir,
'base_class_trees'))
errors.directorymake(fileIO.join_path(self.outdir,
'gene_trees'))
for rec in self.result:
filename = fileIO.join_path(self.outdir, rec.name) + '.phy'
rec.write_phylip(filename, interleaved=True)
for i in range(self.num_classes):
tree = self.class_trees[i + 1]
name = 'base_tree_class{0:0>{1}}.nwk'.format(i + 1,
len(str(self.num_classes)))
filename = fileIO.join_path(self.outdir, 'base_class_trees',
name)
tree.write_to_file(filename)
for i, tree in enumerate(self.gene_trees, start=1):
filename = fileIO.join_path(self.outdir, 'gene_trees',
tree.name + '.nwk')
tree.write_to_file(filename)
self.master_tree.write_to_file(fileIO.join_path(self.outdir,
'master_tree.nwk'))
filename = fileIO.join_path(self.outdir, 'true_partition.txt')
with open(filename, 'w') as partition_file:
partition_file.write(repr(self.true_partition))
def get_true_partition(self):
l = []
for k in range(len(self.class_list)):
l.extend([k + 1] * self.class_list[k])
self.true_partition = Partition(l)
return self.true_partition
if __name__ == '__main__':
import argparse
prog = fileIO.basename(__file__)
parser = argparse.ArgumentParser(description='{0}'.format(prog))
parser.add_argument('classes', type=int, nargs='+')
parser.add_argument('-p', '--permutations', type=int, nargs='+')
parser.add_argument('-s', '--species', type=int, default=12)
parser.add_argument('-d', '--datatype', type=str, default='protein')
parser.add_argument('-g', '--tree_generator', type=str, default='yule')
parser.add_argument('-t', '--tree', type=str)
parser.add_argument('--permuter', type=str, default='lgt')
parser.add_argument('-l', '--gamma_params', type=float, nargs=2,
default=(1.7719, 279.9))
    parser.add_argument('-m', '--min_length', type=int, default=10)
parser.add_argument('--tmp', type=str, default='/tmp')
parser.add_argument('-o', '--output', type=str)
args = parser.parse_args()
if args.permutations is None:
args.permutations = [1 for _ in args.classes]
sim = Simulator(
class_list=args.classes,
permutations_list=args.permutations,
nspecies=args.species,
datatype=args.datatype,
master_tree_generator_method=args.tree_generator,
master_tree=args.tree,
class_tree_permuter=args.permuter,
gene_length_kappa=args.gamma_params[0],
gene_length_theta=args.gamma_params[1],
gene_length_min=args.min_length,
tmpdir=args.tmp,
outdir=args.output)
sim.run()
recs = sim.result
if args.output is not None:
sim.write()
```
#### File: treeCl/utils/enum.py
```python
from builtins import zip
from builtins import range
def enum(*sequential, **named):
"""creates an Enum type with given values"""
enums = dict(list(zip(sequential, list(range(len(sequential))))), **named)
enums['reverse'] = dict((value, key) for key, value in list(enums.items()))
return type('Enum', (object, ), enums)
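
# --- Illustrative usage (not part of the original module) ---
# A minimal sketch with made-up member names showing what the generated type
# looks like: positional names get indices 0..n-1, keyword names keep their
# given value, and 'reverse' maps values back to names.
def _enum_demo():
    Colours = enum('RED', 'GREEN', 'BLUE', PURPLE=10)
    return Colours.GREEN, Colours.reverse[10]  # -> (1, 'PURPLE')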
```
#### File: treeCl/utils/gapmasker.py
```python
from __future__ import print_function
from builtins import str
from builtins import object
from collections import defaultdict
class GapMasker(object):
def __init__(self, template):
self.template = template
self.gap_positions = self.get_gap_positions()
def get_gap_positions(self):
gap_positions = defaultdict(list)
names = self.template.headers
for name in names:
seq = self.template.mapping[name]
for pos, char in enumerate(seq):
if char == '-':
gap_positions[name].append(pos)
return gap_positions
def mask(self, target):
try:
self.check_seqs(target)
return self.write_gap_positions(target)
except Exception as e:
print(e)
return
def check_seqs(self, target):
if len(self.template) != len(target):
raise Exception('Alignments have different numbers of sequences')
if set(self.template.headers) != set(target.headers):
raise Exception('Sequence names don\'t match')
if self.template.seqlength != target.seqlength:
raise Exception('Alignments are different lengths')
def write_gap_positions(self, target):
for name in self.gap_positions:
if not name in target.headers:
raise Exception('Trying to write gaps to non-existent sequence: ',
name)
listseq = list(target.mapping[name])
for pos in self.gap_positions[name]:
listseq[pos] = '-'
seq = ''.join(str(x) for x in listseq)
target.mapping[name] = seq
seqs = []
for name in target.headers:
seqs.append(target.mapping[name])
target.sequences = seqs
target.update()
return target
```
#### File: treeCl/utils/misc.py
```python
from __future__ import division
from builtins import str
from progressbar import ProgressBar, Percentage, SimpleProgress, Timer, AdaptiveETA, Bar, FormatLabel
import numpy as np
import itertools
import random
from phylo_utils import seq_to_partials
from phylo_utils.markov import TransitionMatrix
from phylo_utils.models import LG, WAG, GTR
from phylo_utils.likelihood import GammaMixture
from Bio.Seq import Seq, UnknownSeq
from Bio.SeqRecord import SeqRecord
from Bio.Align import MultipleSeqAlignment
from collections import defaultdict
__all__ = ['concatenate',
'flatten_list',
'symmetrise',
'regex_search_extract',
'setup_progressbar',
'model_translate',
'smooth_freqs',
'grouper',
'insort_no_dup',
'alignment_to_partials',
'biopython_to_partials',
'create_gamma_model',
'weighted_choice',
'sample_wr']
def concatenate(alignments):
"""
Concatenates a list of Bio.Align.MultipleSeqAlignment objects.
If any sequences are missing the are padded with unknown data
(Bio.Seq.UnknownSeq).
Returns a single Bio.Align.MultipleSeqAlignment.
Limitations: any annotations in the sub-alignments are lost in
the concatenated alignment.
"""
# Get the full set of labels (i.e. sequence ids) for all the alignments
all_labels = set(seq.id for aln in alignments for seq in aln)
# Make a dictionary to store info as we go along
# (defaultdict is convenient -- asking for a missing key gives back an empty list)
tmp = defaultdict(list)
# Assume all alignments have same alphabet
alphabet = alignments[0]._alphabet
for aln in alignments:
length = aln.get_alignment_length()
# check if any labels are missing in the current alignment
these_labels = set(rec.id for rec in aln)
missing = all_labels - these_labels
# if any are missing, create unknown data of the right length,
# stuff the string representation into the tmp dict
for label in missing:
new_seq = UnknownSeq(length, alphabet=alphabet)
tmp[label].append(str(new_seq))
# else stuff the string representation into the tmp dict
for rec in aln:
tmp[rec.id].append(str(rec.seq))
# Stitch all the substrings together using join (most efficient way),
# and build the Biopython data structures Seq, SeqRecord and MultipleSeqAlignment
msa = MultipleSeqAlignment(SeqRecord(Seq(''.join(v), alphabet=alphabet), id=k, name=k, description=k)
for (k,v) in tmp.items())
return msa
def flatten_list(list_):
newlist = list()
x = newlist.extend
ap = newlist.append
for sublist in list_:
try:
x(sublist)
except TypeError: # if the "sublist" is non-iterable, append as a plain element
ap(sublist)
return newlist
def symmetrise(matrix, tri='upper'):
"""
Will copy the selected (upper or lower) triangle of a square matrix
to the opposite side, so that the matrix is symmetrical.
Alters in place.
"""
if tri == 'upper':
tri_fn = np.triu_indices
else:
tri_fn = np.tril_indices
size = matrix.shape[0]
matrix[tri_fn(size)[::-1]] = matrix[tri_fn(size)]
return matrix
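
# Illustrative usage (not part of the original module): a hypothetical 3x3
# matrix whose lower triangle is overwritten in place by the mirrored upper
# triangle, as described in the docstring above.
def _symmetrise_demo():
    m = np.array([[0., 1., 2.],
                  [9., 0., 3.],
                  [9., 9., 0.]])
    return symmetrise(m)  # -> [[0, 1, 2], [1, 0, 3], [2, 3, 0]]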
def regex_search_extract(search_attempt):
return search_attempt.group() if search_attempt else None
def setup_progressbar(msg, size, format_label=None, simple_progress=False):
if not msg.endswith(': '):
msg += ': '
if simple_progress:
widgets = [msg,
SimpleProgress(), ' ',
Bar(), ' ',
Timer(), ' ',
AdaptiveETA()]
else:
widgets = [msg,
Percentage(), ' ',
Bar(), ' ',
Timer(), ' ',
AdaptiveETA()]
if format_label is not None:
widgets.append(FormatLabel(format_label))
pbar = ProgressBar(widgets=widgets, maxval=size)
return pbar
def model_translate(model):
translation = {'LG' : 'LG08',
'WAG': 'WAG01'}
return translation.get(model, model)
def smooth_freqs(freqs):
"""
Smooths freqs vector, guarantees sum == 1
:param freqs: vector of frequencies
:return: vector of frequencies guaranteed to sum to 1
"""
s = sum(freqs)
return [f/s for f in freqs]
def grouper(n, iterable):
"""
>>> list(grouper(3, 'ABCDEFG'))
[['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
"""
iterable = iter(iterable)
return iter(lambda: list(itertools.islice(iterable, n)), [])
def insort_no_dup(lst, item):
    """
    If item is not in lst, add item to list at its sorted position
    """
    import bisect
    ix = bisect.bisect_left(lst, item)
    # guard against IndexError when item sorts after every existing element
    if ix == len(lst) or lst[ix] != item:
        lst[ix:ix] = [item]
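
# Illustrative usage (not part of the original module), with made-up values:
# 5 is inserted at its sorted position exactly once, the duplicate is ignored.
def _insort_no_dup_demo():
    xs = [1, 3, 7]
    insort_no_dup(xs, 5)
    insort_no_dup(xs, 5)
    return xs  # -> [1, 3, 5, 7]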
def alignment_to_partials(alignment, missing_data=None):
""" Generate a partials dictionary from a treeCl.Alignment """
partials_dict = {}
for (name, sequence) in alignment.get_sequences():
datatype = 'dna' if alignment.is_dna() else 'protein'
partials_dict[name] = seq_to_partials(sequence, datatype)
if missing_data is not None:
l = len(alignment)
for name in missing_data:
if name not in partials_dict:
partials_dict[name] = seq_to_partials('-'*l, datatype)
return partials_dict
def biopython_to_partials(alignment, datatype):
""" Generate a partials dictionary from a treeCl.Alignment """
partials_dict = {}
for seq in alignment:
partials_dict[seq.name] = seq_to_partials(seq, datatype)
return partials_dict
def create_gamma_model(alignment, missing_data=None, ncat=4):
""" Create a phylo_utils.likelihood.GammaMixture for calculating
likelihood on a tree, from a treeCl.Alignment and its matching
treeCl.Parameters """
model = alignment.parameters.partitions.model
freqs = alignment.parameters.partitions.frequencies
alpha = alignment.parameters.partitions.alpha
if model == 'LG':
subs_model = LG(freqs)
elif model == 'WAG':
subs_model = WAG(freqs)
elif model == 'GTR':
rates = alignment.parameters.partitions.rates
subs_model = GTR(rates, freqs, True)
else:
raise ValueError("Can't handle this model: {}".format(model))
tm = TransitionMatrix(subs_model)
gamma = GammaMixture(alpha, ncat)
gamma.init_models(tm, alignment_to_partials(alignment, missing_data))
return gamma
def weighted_choice(choices):
total = sum(w for c, w in choices)
r = random.uniform(0, total)
upto = 0
for c, w in choices:
if upto + w > r:
return c
upto += w
assert False, "Shouldn't get here"
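
# Illustrative usage (not part of the original module): with these hypothetical
# weights, 'a' is drawn roughly three times as often as 'b'.
def _weighted_choice_demo(n=1000):
    draws = [weighted_choice([('a', 3.0), ('b', 1.0)]) for _ in range(n)]
    return draws.count('a') / n  # -> approximately 0.75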
def sample_wr(lst):
"""
Sample from lst, with replacement
"""
arr = np.array(lst)
indices = np.random.randint(len(lst), size=len(lst))
sample = np.empty(arr.shape, dtype=arr.dtype)
for i, ix in enumerate(indices):
sample[i] = arr[ix]
return list(sample)
def binom_coeff(n):
"""
Calculate the binomial coefficient (n, 2), i.e. the number of distinct pairs possible
in a set of size n
:param n: size of set
:return: number of pairs
"""
return int(n * (n-1) / 2)
```
#### File: treeCl/wrappers/aligners.py
```python
from .abstract_wrapper import AbstractWrapper
__all__ = ['Muscle', 'Prank', 'FSA']
class Muscle(AbstractWrapper):
@property
def _default_exe(self):
return 'muscle'
def _set_help(self):
self(wait=True)
self._help = self.get_stderr()
class Prank(AbstractWrapper):
@property
def _default_exe(self):
return 'prank'
@property
def _hyphen_policy(self):
return 1
def _set_help(self):
self(help=True, wait=True)
self._help = self.get_stdout()
class FSA(AbstractWrapper):
@property
def _default_exe(self):
return 'fsa'
def _set_help(self):
self(help=True, wait=True)
self._help = self.get_stdout()
class Mafft(AbstractWrapper):
@property
def _default_exe(self):
return 'mafft'
def _set_help(self):
self(help=True, wait=True)
self._help = self.get_stdout()
```
#### File: treeCl/wrappers/phylogenetics.py
```python
from builtins import range
from .abstract_wrapper import AbstractWrapper
from ..utils import smooth_freqs
import re
class Raxml(AbstractWrapper):
@property
def _default_exe(self):
return 'raxmlHPC'
def _set_help(self):
self('-h', wait=True)
self._help = self.get_stdout()
class Phyml(AbstractWrapper):
@property
def _default_exe(self):
return 'phyml'
def _set_help(self):
self('-h', wait=True)
_help = self.get_stdout()
# Phyml's help string contains extra shell-escape characters to
# add underlining and boldness to the text. Let's get rid of these
import re
rgx = re.compile(r'\x1b\[00;0[0-9]m')
self._help = rgx.sub('', _help)
class FastTree(AbstractWrapper):
@property
def _default_exe(self):
return 'FastTree'
def _set_help(self):
self(wait=True)
self._help = self.get_stderr()
def parse_fasttree_output(s):
try:
loglk, alpha = (float(x) for x in re.search(r'Gamma\(\d+\) LogLk = ([0-9-.]+) alpha = ([0-9.]+)', s).groups())
except AttributeError:
raise AttributeError('Couldn\'t parse loglk and alpha')
return None
try:
freqs_match = re.search(r'GTR Frequencies:\s+([0-9.-]+)\s+([0-9.-]+)\s+([0-9.-]+)\s+([0-9.-]+)', s)
except AttributeError:
raise AttributeError('Couldn\'t parse GTR frequencies')
return None
if freqs_match:
freqs = []
for i in range(1, 5):
freqs.append(float(freqs_match.group(i)))
else:
freqs = []
try:
rates_match = re.search(
r'GTR rates\(ac ag at cg ct gt\)\s+([0-9.-]+)\s+([0-9.-]+)\s+([0-9.-]+)\s+([0-9.-]+)\s+([0-9.-]+)\s+([0-9.-]+)\s+',s)
except AttributeError:
raise AttributeError('Couldn\'t parse GTR rates')
return None
if rates_match:
rates = []
for i in range(1, 7):
rates.append(float(rates_match.group(i)))
else:
rates = []
result = {'likelihood': loglk, 'partitions': {0: {'alpha': alpha}}}
if freqs:
result['partitions'][0]['frequencies'] = smooth_freqs(freqs)
if rates:
result['partitions'][0]['rates'] = rates
return result
def parse_raxml_output(s):
ac=float(re.search(r'rate A <-> C:\s+([0-9.]+)', s).groups()[0])
ag=float(re.search(r'rate A <-> G:\s+([0-9.]+)', s).groups()[0])
at=float(re.search(r'rate A <-> T:\s+([0-9.]+)', s).groups()[0])
cg=float(re.search(r'rate C <-> G:\s+([0-9.]+)', s).groups()[0])
ct=float(re.search(r'rate C <-> T:\s+([0-9.]+)', s).groups()[0])
gt=float(re.search(r'rate G <-> T:\s+([0-9.]+)', s).groups()[0])
a=float(re.search(r'freq pi\(A\):\s+([0-9.]+)',s).groups()[0])
c=float(re.search(r'freq pi\(C\):\s+([0-9.]+)',s).groups()[0])
g=float(re.search(r'freq pi\(G\):\s+([0-9.]+)',s).groups()[0])
t=float(re.search(r'freq pi\(T\):\s+([0-9.]+)',s).groups()[0])
alpha = float(re.search(r'alpha:\s+([0-9.]+)' ,s).groups()[0])
loglk = float(re.search(r'Final GAMMA likelihood:\s+([0-9-.]+)', s).groups()[0])
return {
'likelihood': loglk,
'alpha': alpha,
'frequencies': [a, c, g, t],
'rates': [ac, ag, at, cg, ct, gt],
}
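
# Illustrative usage (not part of the original module): a synthetic string
# built only to match the regular expressions above; it is not genuine RAxML
# output, and the numbers are arbitrary.
def _parse_raxml_output_demo():
    s = ('rate A <-> C: 1.0\nrate A <-> G: 2.0\nrate A <-> T: 0.5\n'
         'rate C <-> G: 0.8\nrate C <-> T: 3.0\nrate G <-> T: 1.0\n'
         'freq pi(A): 0.25\nfreq pi(C): 0.25\nfreq pi(G): 0.25\nfreq pi(T): 0.25\n'
         'alpha: 0.5\nFinal GAMMA likelihood: -1234.5\n')
    return parse_raxml_output(s)  # -> dict with 'likelihood', 'alpha', 'frequencies', 'rates'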
```
|
{
"source": "JefeDryden/narya",
"score": 3
}
|
#### File: narya/datasets/tracking_dataset.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mxnet as mx
from mxnet import autograd
from mxnet import gluon
from gluoncv.data import VOCDetection
from gluoncv.data.transforms.presets.ssd import SSDDefaultTrainTransform
from gluoncv.data.transforms.presets.ssd import SSDDefaultValTransform
from gluoncv.data.batchify import Tuple, Stack, Pad
CLASSES = ["ball", "player"]
"""
Checking with Mxnet if a GPU is available or not.
"""
try:
a = mx.nd.zeros((1,), ctx=mx.gpu(0))
ctx = [mx.gpu(0)]
except:
ctx = [mx.cpu()]
class VOCFootball(VOCDetection):
"""Class for a tracking dataset. Allows to load pairs of image, bouding boxes, and
apply random transformation to them. The dataset is base on the gluoncv.data VOC format
dataset. You can also easily write your own custom dataset if your data are in a COCO format.
Arguments:
root: Path to folder storing the dataset
splits: List of tuples, list of combinations (type,name). e.g: ('foot','train'),('foot','test')
transform: A function that takes data and label and transforms them.
A transform function for object detection should take label into consideration,
because any geometric modification will require label to be modified.
        index_map: By default, the 20 classes are mapped into indices from 0 to 19.
            We can customize it by providing a str-to-int dict specifying how to map class names to indices.
            For advanced users only, when you want to swap the order of class labels.
preload_label: If True, then parse and load all labels into memory during initialization.
            It often accelerates loading but requires more memory.
            Typical preloaded labels take tens of MB.
You only need to disable it when your dataset is extremely large.
"""
def __init__(
self, root, splits, transform=None, index_map=None, preload_label=True
):
super(VOCFootball, self).__init__(
root, splits, transform, index_map, preload_label
)
def get_dataloader(
net, train_dataset, val_dataset, data_shape, batch_size, num_workers, ctx
):
"""Loads data from a dataset and returns mini-batches of data, for both the training
and the validation set.
Arguments:
net: the Gluon model you will train, used to generate fake anchors for target generation.
train_dataset: Training dataset. Note that numpy and mxnet arrays can be directly used as a Dataset.
val_dataset: Validation dataset. Note that numpy and mxnet arrays can be directly used as a Dataset.
data_shape: Tuple, the input_shape of the model
batch_size: Size of mini-batch.
num_workers: The number of multiprocessing workers to use for data preprocessing.
        ctx: Context(s) (CPU or GPU) on which the fake anchor data is generated.
Returns:
train_loader: Gluon training dataloader
val_loader: Gluon testing dataloader
Raises:
"""
width, height = data_shape
# use fake data to generate fixed anchors for target generation
with autograd.train_mode():
_, _, anchors = net(mx.nd.zeros((1, 3, height, width), ctx))
anchors = anchors.as_in_context(mx.cpu())
batchify_fn = Tuple(
Stack(), Stack(), Stack()
) # stack image, cls_targets, box_targets
train_loader = gluon.data.DataLoader(
train_dataset.transform(SSDDefaultTrainTransform(width, height, anchors)),
batch_size,
True,
batchify_fn=batchify_fn,
last_batch="rollover",
num_workers=num_workers,
)
val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
val_loader = gluon.data.DataLoader(
val_dataset.transform(SSDDefaultValTransform(width, height)),
batch_size,
False,
batchify_fn=val_batchify_fn,
last_batch="keep",
num_workers=num_workers,
)
return train_loader, val_loader
class TrackingDatasetBuilder:
    """Class for a tracking dataset. Allows loading pairs of image and bounding boxes, applying
    random transformations to them, and building the dataloaders you can then pass to a Gluon model.
Arguments:
dataset_path: Path to folder storing the dataset
batch_size: Size of mini-batch.
input_shape: Tuple, the input_shape of the model
net: the Gluon model you will train, used to generate fake anchors for target generation.
train_splits: List of tuples, list of combinations (type,name) for training
test_splits: List of tuples, list of combinations (type,name) for testing
num_workers: The number of multiprocessing workers to use for data preprocessing.
"""
def __init__(
self,
dataset_path,
batch_size,
input_shape,
net,
train_splits=[("foot", "train"), ("foot", "val")],
test_splits=[("foot", "test")],
num_workers=1,
):
self.train_dataset = VOCFootball(root=dataset_path, splits=train_splits)
self.valid_dataset = VOCFootball(root=dataset_path, splits=test_splits)
train_loader, val_loader = get_dataloader(
net,
self.train_dataset,
self.valid_dataset,
input_shape,
batch_size,
num_workers,
ctx,
)
self.train_dataloader = train_loader
self.valid_dataloader = val_loader
def _get_dataset(self):
return self.train_dataset, self.valid_dataset
def _get_dataloader(self):
return self.train_dataloader, self.valid_dataloader
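
# --- Illustrative wiring (not part of the original module) ---
# A minimal sketch mirroring the setup used in narya's tracker_train.py script;
# the dataset path and batch size below are placeholder values.
def _example_build_dataloaders(data_dir="VOCFormat/"):
    from narya.models.gluon_models import TrackerModel
    full_model = TrackerModel(pretrained=True, backbone="ssd_512_resnet50_v1_coco", ctx=ctx)
    builder = TrackingDatasetBuilder(dataset_path=data_dir,
                                     batch_size=2,
                                     input_shape=(512, 512),
                                     net=full_model.model)
    return builder._get_dataloader()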
```
#### File: narya/tracker/player_ball_tracker.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mxnet as mx
import os
import numpy as np
import tensorflow as tf
import cv2
from ..linker.multitracker import JDETracker
from ..utils.linker import tlwh_to_tlbr
from ..utils.vizualization import (
plot_tracking,
rgb_template_to_coord_conv_template,
merge_template,
)
from ..utils.homography import warp_image
from ..utils.image import np_img_to_torch_img, torch_img_to_np_img
from ..utils.utils import to_torch
class PlayerBallTracker:
    """Class for the Player and Ball tracking. It allows online tracking and re-identification of each detected player.
    Arguments:
        conf_tresh: Confidence threshold to keep tracked bounding boxes
        track_buffer: Number of frames to keep in memory for tracking re-identification
        K: Number of boxes to keep at each frame
        frame_rate: -
"""
def __init__(self, conf_tresh=0.5, track_buffer=30, K=100, frame_rate=30,ctx=None):
self.frame_rate = frame_rate
self.tracker = JDETracker(
conf_thres=conf_tresh, track_buffer=track_buffer, K=K, frame_rate=frame_rate, ctx=ctx
)
def get_tracking(
self,
imgs,
results=[],
begin_frame=0,
split_size=None,
verbose=True,
save_tracking_folder=None,
template=None,
frame_to_homo=None,
):
"""
Arguments:
imgs: List of np.array (images) to track
results: list of previous results, to resume tracking
begin_frame: int, starting frame, if you want to resume tracking
            split_size: if None, apply the tracking model to the full image. If it is an int, the image shape must be divisible by this int.
We then split the image to create n smaller images of shape (split_size,split_size), and apply the model
to those.
We then reconstruct the full images and the full predictions.
verbose: Boolean, to display tracking at each frame or not
            save_tracking_folder: Folder to save the tracking images
template: Football field, to warp it with the computed homographies on to the saved images
frame_to_homo: Dict mapping each frame id to a pred_homography and the method used to compute it.
Returns:
            results: List of results, each result being (frame_id, list of bbox coordinates, list of bbox ids)
frame_id: Id of the last tracked frame
Raises:
"""
# frame_to_homo: {id: (homo,method)}
results = results
frame_id = begin_frame
for image in imgs:
resized_image = cv2.resize(image, (512, 512))
online_targets, ball_bbox = self.tracker.update(
image, image, split_size=split_size, verbose=verbose
)
if ball_bbox is None:
online_boxs = []
online_tlwhs = []
online_ids = []
else:
online_tlwhs = [ball_bbox]
ball_bbox = tlwh_to_tlbr(ball_bbox)
online_boxs = [ball_bbox]
online_ids = [-1]
for t in online_targets:
tlwh = t.tlwh
tid = t.track_id
vertical = tlwh[2] / tlwh[3] > 1.6
if tlwh[2] * tlwh[3] > 100 and not vertical:
online_boxs.append(tlwh_to_tlbr(tlwh))
online_tlwhs.append(tlwh)
online_ids.append(tid)
results.append((frame_id + 1, online_boxs, online_ids))
if save_tracking_folder is not None:
if template is not None and frame_to_homo is not None:
pred_homo, method = frame_to_homo[frame_id + 1]
conv_template = cv2.resize(
rgb_template_to_coord_conv_template(template), (320, 320)
)
if method == "cv":
conv_template = warp_image(
conv_template, pred_homo, out_shape=(320, 320)
)
else:
conv_template = warp_image(
np_img_to_torch_img(conv_template),
to_torch(pred_homo),
method="torch",
)
conv_template = torch_img_to_np_img(conv_template[0])
conv_template = cv2.resize(conv_template, (512, 512)).astype(
"float32"
)
resized_image = merge_template(resized_image, conv_template * 255.0)
online_im = plot_tracking(
resized_image, online_tlwhs, online_ids, frame_id=frame_id, fps=1.0
)
cv2.imwrite(
os.path.join(
save_tracking_folder + "test_{:05d}.jpg".format(frame_id)
),
online_im,
)
frame_id += 1
return results, frame_id
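
# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming 'frames' is a list of np.array images; the
# constructor arguments shown are this module's defaults.
def _example_track_frames(frames):
    tracker = PlayerBallTracker(conf_tresh=0.5, track_buffer=30, K=100, frame_rate=30)
    results, last_frame_id = tracker.get_tracking(frames, split_size=None, verbose=False)
    return results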
```
#### File: narya/trainer/tracker_train.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mxnet as mx
import argparse
import logging
import os
import time
import gluoncv as gcv
from mxnet import autograd, gluon
from gluoncv.utils.metrics.voc_detection import VOC07MApMetric
from narya.models.gluon_models import TrackerModel
from narya.datasets.tracking_dataset import TrackingDatasetBuilder
parser = argparse.ArgumentParser(description="Training")
parser.add_argument("--data_dir", default="VOCFormat/", type=str)
parser.add_argument("--backbone", default="ssd_512_resnet50_v1_coco", type=str)
parser.add_argument("--batch_size", default=2, type=int)
parser.add_argument("--lr", default=0.001, type=float)
parser.add_argument("--epochs", default=100, type=int)
parser.add_argument("--weights", default=None, type=str)
opt = parser.parse_args()
args_no_wd = True
args_label_smooth = False
args_lr_decay_period = 0
args_epochs = opt.epochs
args_warmup_epochs = 0
args_num_samples = -1
args_batch_size = opt.batch_size
args_lr = opt.lr
args_lr_mode = "step"
args_lr_decay = 0.05
args_horovod = False
args_wd = 0.0005
args_momentum = 0.9
args_amp = False
args_save_prefix = "PlayerTracker_"
args_start_epoch = 0
args_mixup = False
args_no_mixup_epochs = 20
args_log_interval = 30
args_save_interval = 10
args_val_interval = 5
args_lr_decay_epoch = "30,40,60,80,90"
try:
a = mx.nd.zeros((1,), ctx=mx.gpu(0))
ctx = [mx.gpu(0)]
except:
ctx = [mx.cpu()]
print("-" * 10)
print("Building model")
print("-" * 10)
full_model = TrackerModel(pretrained=True, backbone=opt.backbone, ctx = ctx)
if opt.weights is not None:
full_model.load_weights(opt.weights)
net = full_model.model
preprocessing_fn = full_model.preprocessing
def save_params(net, best_map, current_map, epoch, save_interval, prefix):
current_map = float(current_map)
if current_map > best_map[0]:
best_map[0] = current_map
net.save_params("{:s}_best.params".format(prefix, epoch, current_map))
with open(prefix + "_best_map.log", "a") as f:
f.write("{:04d}:\t{:.4f}\n".format(epoch, current_map))
if save_interval and epoch % save_interval == 0:
net.save_params("{:s}_{:04d}_{:.4f}.params".format(prefix, epoch, current_map))
def validate(net, val_data, ctx, eval_metric):
"""Test on validation dataset."""
eval_metric.reset()
# set nms threshold and topk constraint
net.set_nms(nms_thresh=0.45, nms_topk=400)
net.hybridize(static_alloc=True, static_shape=True)
for batch in val_data:
data = gluon.utils.split_and_load(
batch[0], ctx_list=ctx, batch_axis=0, even_split=False
)
label = gluon.utils.split_and_load(
batch[1], ctx_list=ctx, batch_axis=0, even_split=False
)
det_bboxes = []
det_ids = []
det_scores = []
gt_bboxes = []
gt_ids = []
gt_difficults = []
for x, y in zip(data, label):
# get prediction results
ids, scores, bboxes = net(x)
det_ids.append(ids)
det_scores.append(scores)
# clip to image size
det_bboxes.append(bboxes.clip(0, batch[0].shape[2]))
# split ground truths
gt_ids.append(y.slice_axis(axis=-1, begin=4, end=5))
gt_bboxes.append(y.slice_axis(axis=-1, begin=0, end=4))
gt_difficults.append(
y.slice_axis(axis=-1, begin=5, end=6) if y.shape[-1] > 5 else None
)
# update metric
eval_metric.update(
det_bboxes, det_ids, det_scores, gt_bboxes, gt_ids, gt_difficults
)
return eval_metric.get()
def train(net, train_data, val_data, eval_metric, ctx):
"""Training pipeline"""
net.collect_params().reset_ctx(ctx)
if args_horovod:
hvd.broadcast_parameters(net.collect_params(), root_rank=0)
trainer = hvd.DistributedTrainer(
net.collect_params(),
"sgd",
{"learning_rate": args_lr, "wd": args_wd, "momentum": args_momentum},
)
else:
trainer = gluon.Trainer(
net.collect_params(),
"sgd",
{"learning_rate": args_lr, "wd": args_wd, "momentum": args_momentum},
update_on_kvstore=(False if args_amp else None),
)
if args_amp:
amp.init_trainer(trainer)
# lr decay policy
lr_decay = float(args_lr_decay)
lr_steps = sorted(
[float(ls) for ls in args_lr_decay_epoch.split(",") if ls.strip()]
)
mbox_loss = gcv.loss.SSDMultiBoxLoss()
ce_metric = mx.metric.Loss("CrossEntropy")
smoothl1_metric = mx.metric.Loss("SmoothL1")
# set up logger
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_path = args_save_prefix + "_train.log"
log_dir = os.path.dirname(log_file_path)
if log_dir and not os.path.exists(log_dir):
os.makedirs(log_dir)
fh = logging.FileHandler(log_file_path)
logger.addHandler(fh)
logger.info("Start training from [Epoch {}]".format(args_start_epoch))
best_map = [0]
for epoch in range(args_start_epoch, args_epochs):
while lr_steps and epoch >= lr_steps[0]:
new_lr = trainer.learning_rate * lr_decay
lr_steps.pop(0)
trainer.set_learning_rate(new_lr)
logger.info("[Epoch {}] Set learning rate to {}".format(epoch, new_lr))
ce_metric.reset()
smoothl1_metric.reset()
tic = time.time()
btic = time.time()
net.hybridize(static_alloc=True, static_shape=True)
for i, batch in enumerate(train_data):
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
# cls_targets = gluon.utils.split_and_load(batch[1][4:5], ctx_list=ctx, batch_axis=0)
# box_targets = gluon.utils.split_and_load(batch[1][:4], ctx_list=ctx, batch_axis=0)
cls_targets = gluon.utils.split_and_load(
batch[1], ctx_list=ctx, batch_axis=0
)
box_targets = gluon.utils.split_and_load(
batch[2], ctx_list=ctx, batch_axis=0
)
with autograd.record():
cls_preds = []
box_preds = []
for x in data:
cls_pred, box_pred, _ = net(x)
cls_preds.append(cls_pred)
box_preds.append(box_pred)
sum_loss, cls_loss, box_loss = mbox_loss(
cls_preds, box_preds, cls_targets, box_targets
)
if args_amp:
with amp.scale_loss(sum_loss, trainer) as scaled_loss:
autograd.backward(scaled_loss)
else:
autograd.backward(sum_loss)
# since we have already normalized the loss, we don't want to normalize
# by batch-size anymore
trainer.step(1)
if not args_horovod or hvd.rank() == 0:
local_batch_size = int(
args_batch_size // (hvd.size() if args_horovod else 1)
)
ce_metric.update(0, [l * local_batch_size for l in cls_loss])
smoothl1_metric.update(0, [l * local_batch_size for l in box_loss])
if args_log_interval and not (i + 1) % args_log_interval:
name1, loss1 = ce_metric.get()
name2, loss2 = smoothl1_metric.get()
logger.info(
"[Epoch {}][Batch {}], Speed: {:.3f} samples/sec, {}={:.3f}, {}={:.3f}".format(
epoch,
i,
args_batch_size / (time.time() - btic),
name1,
loss1,
name2,
loss2,
)
)
btic = time.time()
if not args_horovod or hvd.rank() == 0:
name1, loss1 = ce_metric.get()
name2, loss2 = smoothl1_metric.get()
logger.info(
"[Epoch {}] Training cost: {:.3f}, {}={:.3f}, {}={:.3f}".format(
epoch, (time.time() - tic), name1, loss1, name2, loss2
)
)
if (epoch % args_val_interval == 0) or (
args_save_interval and epoch % args_save_interval == 0
):
# consider reduce the frequency of validation to save time
map_name, mean_ap = validate(net, val_data, ctx, eval_metric)
val_msg = "\n".join(
["{}={}".format(k, v) for k, v in zip(map_name, mean_ap)]
)
logger.info("[Epoch {}] Validation: \n{}".format(epoch, val_msg))
current_map = float(mean_ap[-1])
else:
current_map = 0.0
save_params(
net, best_map, current_map, epoch, args_save_interval, args_save_prefix
)
print("-" * 10)
print("Building dataset")
print("-" * 10)
full_dataset = TrackingDatasetBuilder(
dataset_path=opt.data_dir,
batch_size=opt.batch_size,
input_shape=(512, 512),
net=net,
)
train_dataset, val_dataset = full_dataset._get_dataset()
eval_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
print("length of training dataset:", len(train_dataset))
print("length of validation dataset:", len(val_dataset))
train_dataloader, valid_dataloader = full_dataset._get_dataloader()
print("-" * 10)
print("Launching the training")
print("-" * 10)
train(net, train_dataloader, valid_dataloader, eval_metric, ctx)
```
|
{
"source": "JefeLitman/Biv2LabNN",
"score": 3
}
|
#### File: Biv2LabNN/Models/inception_modules.py
```python
from tensorflow import keras
def inception_naive(inputs, n_layer, output_filters):
if output_filters < inputs.shape[-1]:
        raise ValueError("It is not possible to reduce the filters with the inception A module, because it has a maxpool layer")
remaining_filters = output_filters - inputs.shape[-1]
if remaining_filters % 2 != 0:
remaining_filters += 1
x1 = keras.layers.Conv3D(filters=remaining_filters//2, kernel_size=1,padding="same",
activation="relu",name='incep-1_V1-1x1x1_'+str(n_layer))(inputs)
x2 = keras.layers.Conv3D(filters=remaining_filters//4, kernel_size=3,padding="same",
activation="relu",name='incep-2_V1-3x3x3_'+str(n_layer))(inputs)
x3 = keras.layers.Conv3D(filters=remaining_filters//4, kernel_size=5,padding="same",
activation="relu",name='incep-3_V1-5x5x5_'+str(n_layer))(inputs)
x4 = keras.layers.MaxPooling3D(pool_size=(1,1,1), strides=(1,1,1),
name='max_pool3d_incep-4_V1_1x1x1_'+str(n_layer))(inputs)
return keras.layers.Concatenate(axis=-1, name="concat_incep_V1_"+str(n_layer))([x1,x2,x3,x4])
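
# Illustrative usage (not part of the original module): builds a small Keras
# model around the naive inception block; the dummy input shape and the 32
# output filters are arbitrary choices.
def _inception_naive_demo():
    inputs = keras.Input(shape=(16, 32, 32, 8))
    outputs = inception_naive(inputs, n_layer=0, output_filters=32)
    return keras.Model(inputs, outputs)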
def inception_enhanced(inputs, n_layer, output_filters):
if output_filters < inputs.shape[-1]:
        raise ValueError("It is not possible to reduce the filters with the inception B module, because it has a maxpool layer")
remaining_filters = output_filters - inputs.shape[-1]
if remaining_filters % 2 != 0:
remaining_filters += 1
#First layer
x1 = keras.layers.Conv3D(filters=remaining_filters//2, kernel_size=1,padding="same",
activation="relu",name='incep-1_V2-1x1x1_'+str(n_layer)+'_0')(inputs)
x2 = keras.layers.Conv3D(filters=remaining_filters//4, kernel_size=1,padding="same",
activation="relu",name='incep-2_V2-1x1x1_'+str(n_layer)+'_0')(inputs)
x3 = keras.layers.Conv3D(filters=remaining_filters//4, kernel_size=1,padding="same",
activation="relu",name='incep-3_V2-1x1x1_'+str(n_layer)+'_0')(inputs)
x4 = keras.layers.MaxPooling3D(pool_size=(1,1,1), strides=(1,1,1),
name='max_pool3d_incep-4_V2_1x1x1_'+str(n_layer)+'_0')(inputs)
#Second layer
x2 = keras.layers.Conv3D(filters=x2.shape[-1], kernel_size=3,padding="same",
activation="relu",name='incep-2_v2-3x3x3_'+str(n_layer)+'_1')(x2)
x3 = keras.layers.Conv3D(filters=x3.shape[-1], kernel_size=5,padding="same",
activation="relu",name='incep-3_v2-5x5x5_'+str(n_layer)+'_1')(x3)
x4 = keras.layers.Conv3D(filters=x4.shape[-1], kernel_size=1,padding="same",
activation="relu",name='incep-4_v2-1x1x1_'+str(n_layer)+'_1')(x4)
return keras.layers.Concatenate(axis=-1, name="concat_incep_v2_"+str(n_layer))([x1,x2,x3,x4])
def inception_ours_naive(inputs, n_layer, output_filters):
x1 = keras.layers.Conv3D(filters=output_filters//2, kernel_size=1,padding="same",
activation="relu",name="conv_channels-1_naive_"+str(n_layer))(inputs)
x2 = keras.layers.Conv3D(filters=output_filters//2, kernel_size=1,padding="same",
activation="relu",name="conv_channels-2_naive_"+str(n_layer))(inputs)
return keras.layers.Concatenate(axis=-1, name="concat_conv_channels_"+str(n_layer))([x1,x2])
def convTrans3D(inputs, n_layer, output_filters):
return keras.layers.Conv3DTranspose(filters=output_filters, kernel_size=1, padding="same",
activation="relu", name="conv3D_Trans_"+str(n_layer))(inputs)
def conv3D_Channels(inputs, n_layer, output_filters):
return keras.layers.Conv3D(filters=output_filters, kernel_size=1, padding="same",
activation="relu", name="conv3D_channels_"+str(n_layer))(inputs)
def conv3D_asym(inputs, n_layer):
    """This method always returns 1 filter in the output"""
x = keras.layers.Reshape((inputs.shape[1],inputs.shape[2]*inputs.shape[3],inputs.shape[4]),
name="reshape_"+str(n_layer))(inputs)
x = keras.layers.Conv3D(filters=inputs.shape[-1]//4, kernel_size=(1,1,3), padding="same",
activation="relu", name="conv3D_simplified_"+str(n_layer))(x)
return None
```
|
{
"source": "JefeLitman/CochesRedNeuronalFDD",
"score": 3
}
|
#### File: JefeLitman/CochesRedNeuronalFDD/Red_Neuronal.py
```python
import numpy as np
class capa_neuronal():
def __init__(self, n_conexiones, n_neuronas):
self.b = np.random.random((1, n_neuronas))
self.W = np.random.random((n_conexiones, n_neuronas))
class red_neuronal():
def __init__(self,topologia,fun_act,der_fun_act):
self.red_neuronal = self.crear_red(topologia)
        self.fun_act = fun_act  # Activation function
        self.dfun_act = der_fun_act  # Derivative of the activation function
def crear_red(self,topologia):
red = []
for capa in range(len(topologia[:-1])):
red.append(capa_neuronal(topologia[capa],topologia[capa+1]))
return red
def transmitir_entradas(self,entradas):
        # 'entradas' (the inputs) must be a matrix
salida = [(None,entradas)]
for capa in range(len(self.red_neuronal)):
z = salida[-1][1] @ self.red_neuronal[capa].W + self.red_neuronal[capa].b
a = self.fun_act(z)
salida.append((z,a))
return salida
def retropropagacion(self,salida,der_coste):
deltas = []
for capa in reversed(range(len(self.red_neuronal))):
            a = salida[capa + 1][1]  # index 1 holds the activation 'a' of the next layer
if (capa == len(self.red_neuronal) - 1):
deltas.insert(0, der_coste * self.dfun_act(a))
else:
deltas.insert(0, deltas[0] @ self.red_neuronal[capa+1].W.T * self.dfun_act(a))
return deltas
def descenso_gradiente(self,salida,deltas,tasa_aprendizaje=0.5):
for capa in range(len(self.red_neuronal)):
self.red_neuronal[capa].b = self.red_neuronal[capa].b - deltas[capa] * tasa_aprendizaje
self.red_neuronal[capa].W = self.red_neuronal[capa].W - salida[capa][1].T @ deltas[capa] * tasa_aprendizaje
    def activar(self,entradas,objetivo,valor_dfun_coste,tasa_aprendizaje=0.5):
        """valor_dfun_coste must be a single value or a list, depending on the
        number of outputs (a list of partial derivatives with the same index
        and order as the input variables).
        entradas and objetivo must already be normalized and be of the same
        size and type.
        """
if (np.array_equal(entradas,objetivo)):
return entradas
else:
salidas = self.transmitir_entradas(entradas)
deltas = self.retropropagacion(salidas,valor_dfun_coste)
self.descenso_gradiente(salidas,deltas,tasa_aprendizaje)
return salidas[-1][1]
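
# --- Illustrative usage (not part of the original file) ---
# A minimal sketch, assuming a sigmoid activation and a quadratic cost, that
# runs one training step on a single hypothetical sample; the topology and
# learning rate are arbitrary choices.
def _ejemplo_entrenamiento():
    sigmoide = lambda z: 1.0 / (1.0 + np.exp(-z))
    d_sigmoide = lambda a: a * (1.0 - a)      # derivative expressed in terms of the activation
    red = red_neuronal([2, 3, 1], sigmoide, d_sigmoide)
    entrada = np.array([[0.0, 1.0]])
    objetivo = np.array([[1.0]])
    salida = red.transmitir_entradas(entrada)[-1][1]
    derivada_coste = salida - objetivo        # derivative of the cost 0.5 * (salida - objetivo) ** 2
    return red.activar(entrada, objetivo, derivada_coste, tasa_aprendizaje=0.5)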
```
|
{
"source": "JefeLitman/emargame",
"score": 3
}
|
#### File: emargame/corrupcion_malversacion/models.py
```python
from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency as c, currency_range
)
from itertools import cycle
from random import choice
author = '<NAME> & <NAME> & <NAME>'
doc = """
Your app description
"""
class Constants(BaseConstants):
name_in_url = 'malversacion'
players_per_group = 5
num_rounds = 10
dotacion = c(3000)
multiplicador = 2
class Subsession(BaseSubsession):
def creating_session(self):
tipos_grupo = cycle([1,2,3])
for grupo in self.get_groups():
grupo.id_grupo = next(tipos_grupo)
class Group(BaseGroup):
    id_grupo = models.IntegerField(doc="""Identifier of the group type of its members.
    1 - President chosen at random
    2 - President chosen by competition
    3 - President chosen by vote""")
    orden_llegada = models.StringField(doc="""String of characters that stores the order of
    arrival of the players in the different rounds:
    e.g. 'id_jugador_xid_jugador_y' or '231...' """)
BolsaPublica = models.CurrencyField(min=0,max=Constants.dotacion)
CuentaPrivadaPresidente = models.CurrencyField(min=0,max=Constants.dotacion)
contador = models.IntegerField()
def inicializar_orden_llegada(self):
self.orden_llegada = ""
def contador_jugadores(self):
contador = 5
for id in self.orden_llegada:
if int(id) in [1,2,3,4,5]:
contador = contador - 1
return contador
def get_jugadores_aceptaron(self):
jugadores = []
for j in self.get_players():
if (j.in_round(1).consentimiento):
jugadores.append(j)
return jugadores
def set_presidente(self,presidente):
presidente.es_presidente = True
for otros in presidente.get_others_in_group():
otros.es_presidente = False
def set_Presidente_Azar(self):
jugadores = self.get_jugadores_aceptaron()
presidente = choice(jugadores)
self.set_presidente(presidente)
def set_presidente_competencia(self):
jugadores = self.get_jugadores_aceptaron()
puntajes = [j.puntaje for j in jugadores]
for jugador in jugadores:
if (jugador.puntaje == max(puntajes)):
presidente = jugador
self.set_presidente(presidente)
def agregar_jugador(self, jugador):
extra = self.contador_jugadores()
jugador.puntaje = jugador.puntaje + extra
self.orden_llegada = self.orden_llegada + str(jugador.id_in_group)
def set_presidente_votacion(self):
jugadores = self.get_jugadores_aceptaron()
votos = [p.voto for p in jugadores]
contador = 0
for i in jugadores:
if votos.count( 'Jugador ' + str( i.id_in_group)) >= int(len(jugadores)/2) +1 :
presidente = i
break
else:
contador = contador + 1
if contador == len(jugadores):
return False
else:
self.set_presidente(presidente)
return True
def set_presidente_votacion_azar(self):
jugadores = self.get_jugadores_aceptaron()
votos = [p.voto for p in jugadores]
numero_votos = [votos.count('Jugador ' + str(j.id_in_group)) for j in jugadores]
posibles_presidentes =[]
for i,cantidad in enumerate(numero_votos):
if cantidad == max(numero_votos):
posibles_presidentes.append(i+1)
id_presidente = choice(posibles_presidentes)
presidente = self.get_player_by_id(id_presidente)
self.set_presidente(presidente)
def calcularGananciasJugadores(self):
jugadores = self.get_jugadores_aceptaron()
rentabilidad = (self.BolsaPublica * Constants.multiplicador)/len(jugadores)
for j in jugadores:
if j.es_presidente == True:
j.cuenta = rentabilidad + self.CuentaPrivadaPresidente
else:
j.cuenta = rentabilidad
j.payoff = j.cuenta
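
# Worked example for calcularGananciasJugadores (hypothetical numbers): with the
# five consenting players of a group, a public pot of 2000 and 1000 kept in the
# president's private account, each player receives 2000 * 2 / 5 = 800 and the
# president receives 800 + 1000 = 1800.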
class Player(BasePlayer):
propuesta = models.LongStringField(max_length=140)
cuenta = models.CurrencyField()
es_presidente = models.BooleanField()
puntaje = models.IntegerField()
voto = models.StringField()
opinion = models.BooleanField(choices=[[True, 'Si' ], [False, 'No']])
nombre= models.StringField()
celular= models.IntegerField()
correo= models.StringField()
genero = models.StringField(choices=['Femenino','Masculino'])
edad = models.IntegerField()
semestre = models.IntegerField()
participacion = models.BooleanField(choices=[[True, 'Si' ], [False, 'No']])
estudiante = models.BooleanField(choices=[[True, 'Si' ], [False, 'No']])
carrera= models.StringField(blank=True)
universidad= models.StringField(blank=True)
consentimiento = models.BooleanField(choices=[[True, 'Si autorizo'], [False, 'No autorizo']])
profesion = models.StringField(blank=True)
```
#### File: emargame/debates/pages.py
```python
from otree.api import Currency as c, currency_range
from ._builtin import Page, WaitPage
from .models import Constants
class Presentacion(Page):
pass
class Equipo(Page):
#timeout_seconds = 5
form_model = 'player'
form_fields = ['I1AF','I1EC']
class Conocimiento(Page):
#timeout_seconds = 5
form_model = 'player'
form_fields = ['I2AF','I2EC']
class Coherencia(Page):
#timeout_seconds = 5
form_model = 'player'
form_fields = ['I3AF','I3EC']
class Vocabulario(Page):
#timeout_seconds = 5
form_model = 'player'
form_fields = ['I4AF','I4EC']
class Escucha(Page):
#timeout_seconds = 5
form_model = 'player'
form_fields = ['I5AF','I5EC']
class esperaTodosJugadores(WaitPage):
wait_for_all_groups = True
class Calculos(WaitPage):
def after_all_players_arrive(self):
self.subsession.calculopromIAF()
self.subsession.calculopromIEC()
class Resultados(Page):
    # Should we also set? timeout_seconds = 60
pass
class Ganador(Page):
def vars_for_template(self):
return {
'CasaAFavor': self.subsession.calcularCasaGanadora()[0],
'CasaEnContra': self.subsession.calcularCasaGanadora()[1]
}
class Gracias(Page):
    # Should we also set? timeout_seconds = 30
pass
page_sequence = [
Presentacion,
Equipo,
Conocimiento,
Coherencia,
Vocabulario,
Escucha,
esperaTodosJugadores,
Calculos,
Resultados,
Ganador,
Gracias,
]
```
#### File: emargame/paradoja_votante/models.py
```python
from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer,
Currency as c, currency_range
)
from random import randint
from random import random
from sklearn.preprocessing import MinMaxScaler
author = '<NAME> & <NAME>'
doc = """
Your app description
"""
class Constants(BaseConstants):
name_in_url = 'paradoja_votante'
players_per_group = None
num_rounds = 20
Costo = c(500)
Multiplicador = c(500)
class Subsession(BaseSubsession):
Reinicio = models.BooleanField()
TSIN = models.BooleanField()
N_Azules = models.FloatField()
N_Rojos = models.FloatField()
N_Verdes = models.FloatField()
V_Azul = models.IntegerField()
V_Rojo = models.IntegerField()
V_Verde = models.IntegerField()
Ganador = models.StringField()
def creating_session(self):
self.set_preferencias_jugadores()
self.set_n_jugadores()
def set_variables_subsesion(self, ronda, rondas_totales, ConSin):
        # Setting the restart variable
self.Reinicio = ronda > rondas_totales / 2
        # Setting the TMAS variable
if (ConSin):
if (ronda <= rondas_totales / 2):
self.TSIN = False
else:
self.TSIN = True
else:
if (ronda <= rondas_totales / 2):
self.TSIN = True
else:
self.TSIN = False
def set_preferencias_jugadores(self):
for j in self.get_players():
Preferencias = [1,2,3]
j.P_Azul = Preferencias.pop(randint(0,len(Preferencias)-1))
j.P_Rojo = Preferencias.pop(randint(0,len(Preferencias)-1))
j.P_Verde = Preferencias[0]
    def set_n_jugadores(self):
        """Get the distribution of supporters of each party"""
contA, contR, contV,contNo = 0, 0, 0, 0
l = len(self.get_players())
for j in self.get_players():
if j.P_Azul == 3:
contA = contA + 1
elif j.P_Rojo == 3:
contR = contR + 1
else:
contV = contV + 1
self.N_Azules = 100*float("{0:.2f}".format(contA/l))
self.N_Verdes = 100*float("{0:.2f}".format(contV/l))
self.N_Rojos = 100*float("{0:.2f}".format(contR/l))
def get_distribucion_preferencias(self):
distribucion_preferencias = {'Azul':self.N_Azules, 'Verde': self.N_Verdes, 'Rojo': self.N_Rojos}
Valores = list(distribucion_preferencias.values())
Valores.sort(reverse=True)
Llaves = list(distribucion_preferencias.keys())
orden_candidatos = []
for v in Valores:
for l in Llaves:
if v == distribucion_preferencias.get(l):
orden_candidatos.append(l)
preferencias = [orden_candidatos,Valores]
return preferencias
def set_ganador(self):
contA, contR, contV,contVN = 0, 0, 0,0
for j in self.get_players():
if j.Voto_Azul == True:
contA = contA + 1
elif j.Voto_Rojo == True:
contR = contR + 1
elif j.Voto_Verde == True:
contV = contV + 1
else:
contVN = contVN+1
Votos = {"Azul":contA + random(),"Verde":contV + random(),"Rojo":contR + random()}
Valores = list(Votos.values())
Valores.sort()
Llaves = list(Votos.keys())
for v in Valores:
for l in Llaves:
if v == Votos.get(l):
self.Ganador = l
def getPagosTotalesJugadores(self):
jugadores = self.get_players()
PagosTotalesJugadores = []
for j in jugadores:
PagosTotalesJugadores.append([j.TotalPagos])
return PagosTotalesJugadores
def getPuntajesCalificaciones(self):
Puntajes = self.getPagosTotalesJugadores()
scaler = MinMaxScaler(feature_range=(3.0, 5.0))
Calificaciones = scaler.fit_transform(Puntajes)
return Calificaciones
def setNotas(self):
jugadores = self.get_players()
calificaciones = self.getPuntajesCalificaciones()
for j in range(len(jugadores)):
jugadores[j].Calificacion = calificaciones[j]
class Group(BaseGroup):
pass
class Player(BasePlayer):
Codigo = models.StringField()
P_Azul = models.IntegerField(min=1, max=3)
P_Rojo = models.IntegerField(min=1, max=3)
P_Verde = models.IntegerField(min=1, max=3)
Voto_Azul = models.BooleanField(blank=True)
Voto_Rojo = models.BooleanField(blank=True)
Voto_Verde = models.BooleanField(blank=True)
VotoNo = models.BooleanField(blank=True)
Pagos = models.CurrencyField(min=0, max=1500)
TotalPagos = models.CurrencyField()
Preferencia_ganador = models.IntegerField(min=1, max=3)
Calificacion = models.FloatField()
def setPagos(self):
self.Pagos = self.Preferencia_ganador * Constants.Multiplicador - Constants.Costo * c(not self.VotoNo)
self.payoff = self.Pagos
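        # Worked example (hypothetical numbers): if the winning party is the
        # player's top preference (3) and the player voted, Pagos = 3 * 500 - 500 = 1000;
        # an abstaining player with the same preference gets 3 * 500 = 1500.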
def setTotalPagos(self):
        # The sum of all payoffs across all rounds
self.TotalPagos = sum([p.Pagos for p in self.in_all_rounds()])
def get_orden_preferencias(self):
Candidatos = {'Azul':self.P_Azul ,'Rojo':self.P_Rojo,'Verde':self.P_Verde}
Valores = list(Candidatos.values())
Valores.sort(reverse=True)
Llaves = list(Candidatos.keys())
orden_candidatos = []
for v in Valores:
for l in Llaves:
if v == Candidatos.get(l):
orden_candidatos.append(l)
return orden_candidatos
def set_preferencia_ganador(self, ganador):
Candidatos = {'Azul':self.P_Azul ,'Rojo':self.P_Rojo,'Verde':self.P_Verde}
self.Preferencia_ganador = Candidatos.get(ganador)
def set_votacion_aleatorio(self):
votos = [self.Voto_Azul,self.Voto_Rojo,self.Voto_Verde,self.VotoNo]
if True in votos:
self.set_false()
else:
posicion_true = randint(0, len(votos) - 1)
votos[posicion_true] = True
if posicion_true == 0:
self.Voto_Azul = True
elif posicion_true == 1:
self.Voto_Rojo = True
elif posicion_true == 2:
                self.Voto_Verde = True
else:
self.VotoNo = True
self.set_false()
def set_false(self):
if self.Voto_Azul == None:
self.Voto_Azul = False
if self.Voto_Rojo == None:
self.Voto_Rojo = False
if self.Voto_Verde == None:
self.Voto_Verde = False
if self.VotoNo == None:
self.VotoNo = False
```
|
{
"source": "jefemagril/fermipy",
"score": 2
}
|
#### File: fermipy/fermipy/defaults.py
```python
from __future__ import absolute_import, division, print_function
import copy
from collections import OrderedDict
import numpy as np
import astropy
from astropy.coordinates import SkyCoord
import fermipy.skymap
from fermipy.data_struct import MutableNamedTuple
def make_default_dict(d):
o = {}
for k, v in d.items():
o[k] = copy.deepcopy(v[0])
return o
def make_default_tuple(d):
vals = [(k, copy.deepcopy(v[0])) for k, v in d.items()]
return MutableNamedTuple(vals)
def make_attrs_class(typename, d):
import attr
vals = {}
for k, v in d.items():
if v[2] == float:
vals[k] = attr.ib(
default=v[0], validator=attr.validators.instance_of(v[2]))
else:
vals[k] = attr.ib(default=v[0])
C = attr.make_class(typename, vals)
return C()
DIFF_FLUX_UNIT = ':math:`\mathrm{cm}^{-2}~\mathrm{s}^{-1}~\mathrm{MeV}^{-1}`'
FLUX_UNIT = ':math:`\mathrm{cm}^{-2}~\mathrm{s}^{-1}`'
ENERGY_FLUX_UNIT = ':math:`\mathrm{MeV}~\mathrm{cm}^{-2}~\mathrm{s}^{-1}`'
# Options that are common to several sections
common = {
'multithread': (False, 'Split the calculation across number of processes set by nthread option.', bool),
'nthread': (None, 'Number of processes to create when multithread is True. If None then one process '
'will be created for each available core.', int),
'model': (None, 'Dictionary defining the spatial/spectral properties of the test source. '
'If model is None the test source will be a PointSource with an Index 2 power-law spectrum.', dict),
'free_background': (False, 'Leave background parameters free when performing the fit. If True then any '
'parameters that are currently free in the model will be fit simultaneously '
'with the source of interest.', bool),
'fix_shape': (False, 'Fix spectral shape parameters of the source of interest. If True then only '
'the normalization parameter will be fit.', bool),
'free_radius': (None, 'Free normalizations of background sources within this angular distance in degrees '
'from the source of interest. If None then no sources will be freed.', float),
'make_plots': (False, 'Generate diagnostic plots.', bool),
'use_weights' : (False, 'Used weighted version of maps in making plots.', bool),
'write_fits': (True, 'Write the output to a FITS file.', bool),
'write_npy': (True, 'Write the output dictionary to a numpy file.', bool),
'loge_bounds': (None, 'Restrict the analysis to an energy range (emin,emax) in '
'log10(E/MeV) that is a subset of the analysis energy range. '
'By default the full analysis energy range will be used. If '
'either emin/emax are None then only an upper/lower bound on '
                    'the energy range will be applied.', list),
}
# Options for defining input data files
data = {
'evfile': (None, 'Path to FT1 file or list of FT1 files.', str),
'scfile': (None, 'Path to FT2 (spacecraft) file.', str),
'ltcube': (None, 'Path to livetime cube. If none a livetime cube will be generated with ``gtmktime``.', str),
'cacheft1': (True, 'Cache FT1 files when performing binned analysis. If false then only the counts cube is retained.', bool),
}
# Options for data selection.
selection = {
'emin': (None, 'Minimum Energy (MeV)', float),
'emax': (None, 'Maximum Energy (MeV)', float),
'logemin': (None, 'Minimum Energy (log10(MeV))', float),
'logemax': (None, 'Maximum Energy (log10(MeV))', float),
'tmin': (None, 'Minimum time (MET).', int),
'tmax': (None, 'Maximum time (MET).', int),
'zmax': (None, 'Maximum zenith angle.', float),
'evclass': (None, 'Event class selection.', int),
'evtype': (None, 'Event type selection.', int),
'convtype': (None, 'Conversion type selection.', int),
'phasemin': (None, 'Minimum pulsar phase', float),
'phasemax': (None, 'Maximum pulsar phase', float),
'target': (None, 'Choose an object on which to center the ROI. '
               'This option takes precedence over ra/dec or glon/glat.', str),
'ra': (None, '', float),
'dec': (None, '', float),
'glat': (None, '', float),
'glon': (None, '', float),
'radius': (None, 'Radius of data selection. If none this will be automatically set from the ROI size.', float),
'filter': (None, 'Filter string for ``gtmktime`` selection.', str),
'roicut': ('no', '', str)
}
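
# --- Illustrative usage (not part of fermipy's public API) ---
# A minimal sketch showing how the (default, docstring, type) tuples above
# collapse into plain default values via the helpers defined at the top of
# this module.
def _example_selection_defaults():
    opts = make_default_dict(selection)
    return opts['zmax'], opts['roicut']   # -> (None, 'no')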
# Options for ROI model.
model = {
'src_radius':
(None,
'Radius of circular region in degrees centered on the ROI that selects '
'sources for inclusion in the model. If this parameter is none then no '
'selection is applied. This selection is ORed with the ``src_roiwidth`` selection.',
float),
'src_roiwidth':
(None,
'Width of square region in degrees centered on the ROI that selects '
'sources for inclusion in the model. If this parameter is none then no '
'selection is applied. This selection will be ORed with the ``src_radius`` selection.', float),
'src_radius_roi':
(None,
'Half-width of ``src_roiwidth`` selection. This parameter can be used in '
'lieu of ``src_roiwidth``.',
float),
'isodiff': (None, 'Set the path to one or more isotropic templates. A separate component will be '
'generated for each item in this list.', list),
'galdiff': (None, 'Set the path to one or more galactic IEM mapcubes. A separate component will be '
'generated for each item in this list.', list),
'limbdiff': (None, '', list),
'diffuse': (None, '', list),
'diffuse_xml': (None, '', list),
'sources': (None, '', list),
'extdir': (None, 'Set a directory that will be searched for extended source FITS templates. Template files in this directory '
                'will take precedence over catalog source templates with the same name.', str),
'diffuse_dir': (None, '', list),
'catalogs': (None, '', list),
'merge_sources':
(True, 'Merge properties of sources that appear in multiple '
'source catalogs. If merge_sources=false then subsequent sources with '
'the same name will be ignored.', bool),
'assoc_xmatch_columns':
(['3FGL_Name'], 'Choose a set of association columns on which to '
'cross-match catalogs.', list),
'extract_diffuse': (
False, 'Extract a copy of all mapcube components centered on the ROI.',
bool)
}
# Options for configuring likelihood analysis
gtlike = {
'irfs': (None, 'Set the IRF string.', str),
'edisp': (True, 'Enable the correction for energy dispersion.', bool),
'edisp_disable': (None,
'Provide a list of sources for which the edisp '
'correction should be disabled.',
list),
'minbinsz': (0.05, 'Set the minimum bin size used for resampling diffuse maps.', float),
'rfactor': (2, '', int),
'convolve': (True, '', bool),
'resample': (True, '', bool),
'srcmap': (None, 'Set the source maps file. When defined this file will be used instead of the '
'local source maps file.', str),
'bexpmap': (None, '', str),
'bexpmap_roi': (None, '', str),
'srcmap_base': (None, 'Set the baseline source maps file. This will be used to generate a scaled source map.', str),
    'bexpmap_base': (None, 'Set the baseline all-sky exposure map file. This will be used to generate a scaled source map.', str),
    'bexpmap_roi_base': (None, 'Set the baseline ROI exposure map file. This will be used to generate a scaled source map.', str),
'use_external_srcmap': (False, 'Use an external precomputed source map file.', bool),
'use_scaled_srcmap': (False, 'Generate source map by scaling an external srcmap file.', bool),
'wmap': (None, 'Likelihood weights map.', str),
'llscan_npts': (20, 'Number of evaluation points to use when performing a likelihood scan.', int),
'src_expscale': (None, 'Dictionary of exposure corrections for individual sources keyed to source name. The exposure '
'for a given source will be scaled by this value. A value of 1.0 corresponds to the nominal exposure.', dict),
'expscale': (None, 'Exposure correction that is applied to all sources in the analysis component. '
'This correction is superseded by `src_expscale` if it is defined for a source.', float),
}
# Options for generating livetime cubes
ltcube = {
'binsz': (1.0, 'Set the angular bin size for generating livetime cubes.', float),
'phibins': (0, 'Set the number of phi bins for generating livetime cubes.', int),
'dcostheta': (0.025, 'Set the inclination angle binning represented as the cosine of the off-axis angle.', float),
'use_local_ltcube': (False, 'Generate a livetime cube in the vicinity of the ROI using interpolation. '
'This option disables LT cube generation with gtltcube.', bool),
}
# Options for binning.
binning = {
'projtype': ('WCS', 'Projection mode (WCS or HPX).', str),
'proj': ('AIT', 'Spatial projection for WCS mode.', str),
'coordsys': ('CEL', 'Coordinate system of the spatial projection (CEL or GAL).', str),
'npix':
(None,
'Number of pixels. If none then this will be set from ``roiwidth`` '
'and ``binsz``.', int),
'roiwidth': (10.0,
'Width of the ROI in degrees. The number of pixels in each spatial dimension will be set from ``roiwidth`` / ``binsz`` (rounded up).',
float),
'binsz': (0.1, 'Spatial bin size in degrees.', float),
'binsperdec': (8, 'Number of energy bins per decade.', float),
'enumbins': (
None,
'Number of energy bins. If none this will be inferred from energy '
'range and ``binsperdec`` parameter.', int),
'hpx_ordering_scheme': ('RING', 'HEALPix Ordering Scheme', str),
'hpx_order': (10, 'Order of the map (int between 0 and 12, included)', int),
'hpx_ebin': (True, 'Include energy binning', bool)
}
# Options related to I/O and output file bookkeeping
fileio = {
'outdir': (None, 'Path of the output directory. If none this will default to the directory containing the configuration file.', str),
'scratchdir': ('/scratch', 'Path to the scratch directory. If ``usescratch`` is True then a temporary working directory '
'will be created under this directory.', str),
'workdir': (None, 'Path to the working directory.', str),
'logfile': (None, 'Path to log file. If None then log will be written to fermipy.log.', str),
'savefits': (True, 'Save intermediate FITS files.', bool),
'workdir_regex': (['\.fits$|\.fit$|\.xml$|\.npy$'],
'Stage files to the working directory that match at least one of the regular expressions in this list. '
'This option only takes effect when ``usescratch`` is True.', list),
'outdir_regex': (['\.fits$|\.fit$|\.xml$|\.npy$|\.png$|\.pdf$|\.yaml$'],
'Stage files to the output directory that match at least one of the regular expressions in this list. '
'This option only takes effect when ``usescratch`` is True.', list),
'usescratch': (
False, 'Run analysis in a temporary working directory under ``scratchdir``.', bool),
}
logging = {
'prefix': ('', 'Prefix that will be appended to the logger name.', str),
'chatter': (3, 'Set the chatter parameter of the STs.', int),
'verbosity': (3, '', int)
}
# Options related to likelihood optimizer
optimizer = {
'optimizer':
('MINUIT', 'Set the optimization algorithm to use when maximizing the '
'likelihood function.', str),
'tol': (1E-3, 'Set the optimizer tolerance.', float),
    'max_iter': (100, 'Maximum number of iterations for the Newton\'s method fitter.', int),
'init_lambda': (1E-4, 'Initial value of damping parameter for step size calculation '
'when using the NEWTON fitter. A value of zero disables damping.', float),
'retries': (3, 'Set the number of times to retry the fit when the fit quality is less than ``min_fit_quality``.', int),
'min_fit_quality': (2, 'Set the minimum fit quality.', int),
'verbosity': (0, '', int)
}
fit_output = {
'edm': (None, 'Estimated distance to maximum of log-likelihood function.', float),
'fit_status': (None, 'Optimizer return code (0 = ok).', int, 'int'),
'fit_quality': (None, 'Fit quality parameter for MINUIT and NEWMINUIT optimizers (3 - Full accurate covariance matrix, '
'2 - Full matrix, but forced positive-definite (i.e. not accurate), '
'1 - Diagonal approximation only, not accurate, '
'0 - Error matrix not calculated at all)', int, 'int'),
'covariance': (None, 'Covariance matrix between free parameters of the fit.', np.ndarray),
'correlation': (None, 'Correlation matrix between free parameters of the fit.', np.ndarray),
    'dloglike': (None, 'Improvement in log-likelihood value.', float),
    'loglike': (None, 'Post-fit log-likelihood value.', float),
'values': (None, 'Vector of best-fit parameter values (unscaled).', np.ndarray),
'errors': (None, 'Vector of parameter errors (unscaled).', np.ndarray),
'config': (None, 'Copy of input configuration to this method.', dict),
}
# MC options
mc = {
'seed': (None, '', int)
}
# ROI Optimization
roiopt = {
'npred_threshold': (1.0, '', float),
'npred_frac': (0.95, '', float),
'shape_ts_threshold':
(25.0, 'Threshold on source TS used for determining the sources '
'that will be fit in the third optimization step.', float),
'max_free_sources':
(5, 'Maximum number of sources that will be fit simultaneously in '
'the first optimization step.', int),
'skip':
(None, 'List of str source names to skip while optimizing.', list)
}
roiopt_output = {
'loglike0': (None, 'Pre-optimization log-likelihood value.', float),
'loglike1': (None, 'Post-optimization log-likelihood value.', float),
    'dloglike': (None, 'Improvement in log-likelihood value.', float),
'config': (None, 'Copy of input configuration to this method.', dict),
}
# Residual Maps
residmap = {
'model': common['model'],
'exclude': (None, 'List of sources that will be removed from the model when '
'computing the residual map.', list),
'loge_bounds': common['loge_bounds'],
'make_plots': common['make_plots'],
'use_weights': common['use_weights'],
'write_fits': common['write_fits'],
'write_npy': common['write_npy'],
}
# TS Map
tsmap = {
'model': common['model'],
'exclude': (None, 'List of sources that will be removed from the model when '
'computing the TS map.', list),
'multithread': common['multithread'],
'nthread': common['nthread'],
'max_kernel_radius': (3.0, 'Set the maximum radius of the test source kernel. Using a '
'smaller value will speed up the TS calculation at the loss of '
'accuracy.', float),
'loge_bounds': common['loge_bounds'],
'make_plots': common['make_plots'],
'write_fits': common['write_fits'],
'write_npy': common['write_npy'],
}
# TS Cube
tscube = {
'model': common['model'],
'do_sed': (True, 'Compute the energy bin-by-bin fits', bool),
'nnorm': (10, 'Number of points in the likelihood v. normalization scan', int),
'norm_sigma': (5.0, 'Number of sigma to use for the scan range ', float),
'cov_scale_bb': (-1.0, 'Scale factor to apply to global fitting '
'cov. matrix in broadband fits. ( < 0 -> no prior ) ', float),
'cov_scale': (-1.0, 'Scale factor to apply to broadband fitting cov. '
'matrix in bin-by-bin fits ( < 0 -> fixed ) ', float),
    'tol': (1E-3, 'Criteria for fit convergence (estimated vertical distance to min < tol).', float),
    'max_iter': (30, 'Maximum number of iterations for the Newton\'s method fitter.', int),
    'tol_type': (0, 'Absolute (0) or relative (1) criteria for convergence.', int),
'remake_test_source': (False, 'If true, recomputes the test source image (otherwise just shifts it)', bool),
'st_scan_level': (0, 'Level to which to do ST-based fitting (for testing)', int),
'init_lambda': (0, 'Initial value of damping parameter for newton step size calculation. A value of zero disables damping.', float),
}
# Options for Source Finder
sourcefind = {
'model': common['model'],
'min_separation': (1.0,
'Minimum separation in degrees between sources detected in each '
'iteration. The source finder will look for the maximum peak '
'in the TS map within a circular region of this radius.', float),
'sqrt_ts_threshold': (5.0, 'Source threshold in sqrt(TS). Only peaks with sqrt(TS) '
'exceeding this threshold will be used as seeds for new '
'sources.', float),
'max_iter': (5, 'Maximum number of source finding iterations. The source '
'finder will continue adding sources until no additional '
'peaks are found or the number of iterations exceeds this '
'number.', int),
'sources_per_iter': (4, 'Maximum number of sources that will be added in each '
'iteration. If the number of detected peaks in a given '
'iteration is larger than this number, only the N peaks with '
'the largest TS will be used as seeds for the current '
'iteration.', int),
'tsmap_fitter': ('tsmap', 'Set the method for generating the TS map. Valid options are tsmap or tscube.', str),
'free_params': (None, '', list),
'multithread': common['multithread'],
'nthread': common['nthread'],
}
# Options for lightcurve analysis
lightcurve = {
'outdir': (None, r'Store all data in this directory (e.g. "30days"). If None then use current directory.', str),
'use_local_ltcube': (True, 'Generate a fast LT cube.', bool),
'use_scaled_srcmap': (False, 'Generate approximate source maps for each time bin by scaling '
'the current source maps by the exposure ratio with respect to that time bin.', bool),
'save_bin_data': (True, 'Save analysis directories for individual time bins. If False then only '
'the analysis results table will be saved.', bool),
'binsz': (86400.0, 'Set the lightcurve bin size in seconds.', float),
'shape_ts_threshold': (16.0, 'Set the TS threshold at which shape parameters of '
'sources will be freed. If a source is detected with TS less than this '
'value then its shape parameters will be fixed to values derived from the '
'analysis of the full time range.', float),
'nbins': (None, 'Set the number of lightcurve bins. The total time range will be evenly '
'split into this number of time bins.', int),
'time_bins': (None, 'Set the lightcurve bin edge sequence in MET. This option '
'takes precedence over binsz and nbins.', list),
'free_background': common['free_background'],
'free_radius': common['free_radius'],
'free_sources': (None, 'List of sources to be freed. These sources will be added to the list of sources '
'satisfying the free_radius selection.', list),
'free_params': (None, 'Set the parameters of the source of interest that will be re-fit in each time bin. '
'If this list is empty then all parameters will be freed.', list),
'max_free_sources':
(5, 'Maximum number of sources that will be fit simultaneously with the source of interest.', int),
'make_plots': common['make_plots'],
'write_fits': common['write_fits'],
'write_npy': common['write_npy'],
'multithread': common['multithread'],
'nthread': common['nthread'],
'systematic': (0.02, 'Systematic correction factor for TS:subscript:`var`. See Sect. 3.6 in 2FGL for details.', float),
}
# Output for lightcurve Analysis
lightcurve_output = OrderedDict((
    ('name', (None, 'Name of source.', str)),
('tmin', (None, 'Lower edge of time bin in MET.', np.ndarray)),
('tmax', (None, 'Upper edge of time bin in MET.', np.ndarray)),
('fit_success', (None, 'Did the likelihood fit converge? True if yes.',
np.ndarray)),
('config', ({}, 'Copy of the input configuration to this method.', dict)),
('ts_var', (None, r'TS of variability. Should be distributed as :math:`\chi^2` with '
':math:`n-1` degrees of freedom, where :math:`n` is the number of time bins.', float)),
))
# Options for SED analysis
sed = {
    'bin_index': (2.0, 'Spectral index that will be used when fitting the energy distribution within an energy bin.', float),
'use_local_index': (False, 'Use a power-law approximation to the shape of the global spectrum in '
'each bin. If this is false then a constant index set to `bin_index` '
'will be used.', bool),
'free_background': common['free_background'],
'free_radius': common['free_radius'],
'free_pars': (None, 'Set the parameters of the source of interest that will be freed when performing '
'the global fit. By default all parameters will be freed.', list),
'ul_confidence': (0.95, 'Confidence level for flux upper limit.',
float),
'cov_scale': (3.0, 'Scale factor that sets the strength of the prior on nuisance '
'parameters that are free. Setting this to None disables the prior.', float),
'make_plots': common['make_plots'],
'write_fits': common['write_fits'],
'write_npy': common['write_npy'],
}
# Output for SED analysis
sed_output = OrderedDict((
('loge_min', (None, 'Lower edges of SED energy bins (log10(E/MeV)).',
np.ndarray)),
('loge_max', (None, 'Upper edges of SED energy bins (log10(E/MeV)).',
np.ndarray)),
('loge_ctr', (None, 'Centers of SED energy bins (log10(E/MeV)).',
np.ndarray)),
('loge_ref', (None, 'Reference energies of SED energy bins (log10(E/MeV)).',
np.ndarray)),
('e_min', (None, 'Lower edges of SED energy bins (MeV).',
np.ndarray)),
('e_max', (None, 'Upper edges of SED energy bins (MeV).',
np.ndarray)),
('e_ctr', (None, 'Centers of SED energy bins (MeV).', np.ndarray)),
('e_ref', (None, 'Reference energies of SED energy bins (MeV).',
np.ndarray)),
('ref_flux', (None, 'Flux of the reference model in each bin (%s).' %
FLUX_UNIT, np.ndarray)),
('ref_eflux', (None, 'Energy flux of the reference model in each bin (%s).' %
ENERGY_FLUX_UNIT, np.ndarray)),
('ref_dnde', (None, 'Differential flux of the reference model evaluated at the bin center (%s)' %
DIFF_FLUX_UNIT, np.ndarray)),
('ref_dnde_e_min', (None, 'Differential flux of the reference model evaluated at the lower bin edge (%s)' %
DIFF_FLUX_UNIT, np.ndarray)),
('ref_dnde_e_max', (None, 'Differential flux of the reference model evaluated at the upper bin edge (%s)' %
DIFF_FLUX_UNIT, np.ndarray)),
('ref_e2dnde', (None, 'E^2 x the differential flux of the reference model evaluated at the bin center (%s)' %
ENERGY_FLUX_UNIT, np.ndarray)),
('ref_npred', (None, 'Number of predicted counts in the reference model in each bin.',
np.ndarray)),
('norm', (None, 'Normalization in each bin in units of the reference model.',
np.ndarray)),
('flux', (None, 'Flux in each bin (%s).' %
FLUX_UNIT, np.ndarray)),
('eflux', (None, 'Energy flux in each bin (%s).' %
ENERGY_FLUX_UNIT, np.ndarray)),
('dnde', (None, 'Differential flux in each bin (%s).' %
DIFF_FLUX_UNIT, np.ndarray)),
('e2dnde', (None, 'E^2 x the differential flux in each bin (%s).' %
ENERGY_FLUX_UNIT, np.ndarray)),
('dnde_err', (None, '1-sigma error on dnde evaluated from likelihood curvature.',
np.ndarray)),
('dnde_err_lo', (None, 'Lower 1-sigma error on dnde evaluated from the profile likelihood (MINOS errors).',
np.ndarray)),
('dnde_err_hi', (None, 'Upper 1-sigma error on dnde evaluated from the profile likelihood (MINOS errors).',
np.ndarray)),
('dnde_ul95', (None, '95% CL upper limit on dnde evaluated from the profile likelihood (MINOS errors).',
np.ndarray)),
('dnde_ul', (None, 'Upper limit on dnde evaluated from the profile likelihood using a CL = ``ul_confidence``.',
np.ndarray)),
('e2dnde_err', (None, '1-sigma error on e2dnde evaluated from likelihood curvature.',
np.ndarray)),
('e2dnde_err_lo', (None, 'Lower 1-sigma error on e2dnde evaluated from the profile likelihood (MINOS errors).',
np.ndarray)),
('e2dnde_err_hi', (None, 'Upper 1-sigma error on e2dnde evaluated from the profile likelihood (MINOS errors).',
np.ndarray)),
('e2dnde_ul95', (None, '95% CL upper limit on e2dnde evaluated from the profile likelihood (MINOS errors).',
np.ndarray)),
('e2dnde_ul', (None, 'Upper limit on e2dnde evaluated from the profile likelihood using a CL = ``ul_confidence``.',
np.ndarray)),
('ts', (None, 'Test statistic.', np.ndarray)),
('loglike', (None, 'Log-likelihood of model for the best-fit amplitude.',
np.ndarray)),
('npred', (None, 'Number of model counts.', np.ndarray)),
('fit_quality', (None, 'Fit quality parameter for MINUIT and NEWMINUIT optimizers (3 - Full accurate covariance matrix, '
'2 - Full matrix, but forced positive-definite (i.e. not accurate), '
'1 - Diagonal approximation only, not accurate, '
'0 - Error matrix not calculated at all).', np.ndarray)),
('fit_status', (None, 'Fit status parameter (0=ok).', np.ndarray)),
('index', (None, 'Spectral index of the power-law model used to fit this bin.',
np.ndarray)),
('norm_scan', (None, 'Array of NxM normalization values for the profile likelihood scan in N '
'energy bins and M scan points. A row-wise multiplication with '
'any of ``ref`` columns can be used to convert this matrix to the '
'respective unit.',
np.ndarray)),
('dloglike_scan', (None, 'Array of NxM delta-loglikelihood values for the profile likelihood '
'scan in N energy bins and M scan points.', np.ndarray)),
('loglike_scan', (None, 'Array of NxM loglikelihood values for the profile likelihood scan '
'in N energy bins and M scan points.', np.ndarray)),
('param_covariance', (None, 'Covariance matrix for the best-fit spectral parameters of the source.',
np.ndarray)),
('param_names', (None, 'Array of names for the parameters in the global spectral parameterization of this source.',
np.ndarray)),
('param_values', (None, 'Array of parameter values.', np.ndarray)),
('param_errors', (None, 'Array of parameter errors.', np.ndarray)),
('model_flux', (None, 'Dictionary containing the differential flux uncertainty '
'band of the best-fit global spectral parameterization for the '
'source.', dict)),
('config', (None, 'Copy of input configuration to this method.', dict)),
))
# Options for extension analysis
extension = {
    'spatial_model': ('RadialGaussian', 'Spatial model that will be used to test the source '
'extension. The spatial scale parameter of the '
'model will be set such that the 68% containment radius of '
'the model is equal to the width parameter.', str),
'width': (None, 'Sequence of values in degrees for the likelihood scan over spatial extension '
'(68% containment radius). If this argument is None then the scan points will '
'be determined from width_min/width_max/width_nstep.', list),
'fit_position': (False, 'Perform a simultaneous fit to the source position and extension.', bool),
'width_min': (0.01, 'Minimum value in degrees for the likelihood scan over spatial extent.', float),
'width_max': (1.0, 'Maximum value in degrees for the likelihood scan over spatial extent.', float),
'width_nstep': (21, 'Number of scan points between width_min and width_max. '
'Scan points will be spaced evenly on a logarithmic scale '
'between `width_min` and `width_max`.', int),
'free_background': common['free_background'],
'fix_shape': common['fix_shape'],
'free_radius': common['free_radius'],
'fit_ebin': (False, 'Perform a fit for the angular extension in each analysis energy bin.', bool),
'update': (False, 'Update this source with the best-fit model for spatial '
'extension if TS_ext > ``tsext_threshold``.', bool),
'save_model_map': (False, 'Save model counts cubes for the best-fit model of extension.', bool),
    'sqrt_ts_threshold': (None, 'Threshold on sqrt(TS_ext) that will be applied when ``update`` is True. If None then no '
'threshold is applied.', float),
'psf_scale_fn': (None, 'Tuple of two vectors (logE,f) defining an energy-dependent PSF scaling function '
'that will be applied when building spatial models for the source of interest. '
'The tuple (logE,f) defines the fractional corrections f at the sequence of energies '
'logE = log10(E/MeV) where f=0 corresponds to no correction. The correction function f(E) is evaluated '
'by linearly interpolating the fractional correction factors f in log(E). The '
'corrected PSF is given by P\'(x;E) = P(x/(1+f(E));E) where x is the angular separation.',
tuple),
'make_tsmap': (True, 'Make a TS map for the source of interest.', bool),
'make_plots': common['make_plots'],
'write_fits': common['write_fits'],
'write_npy': common['write_npy'],
}
# Options for localization analysis
localize = {
'nstep': (5, 'Number of steps in longitude/latitude that will be taken '
'when refining the source position. The bounds of the scan '
'range are set to the 99% positional uncertainty as '
'determined from the TS map peak fit. The total number of '
'sampling points will be nstep**2.', int),
'dtheta_max': (0.5, 'Half-width of the search region in degrees used for the first pass of the localization search.', float),
'free_background': common['free_background'],
'fix_shape': common['fix_shape'],
'free_radius': common['free_radius'],
'update': (True, 'Update the source model with the best-fit position.', bool),
'make_plots': common['make_plots'],
'write_fits': common['write_fits'],
'write_npy': common['write_npy'],
}
# Output for localization analysis
localize_output = OrderedDict((
('name', (None, 'Name of source.', str)),
('file', (None, 'Name of output FITS file.', str)),
('config', ({}, 'Copy of the input configuration to this method.', dict)),
# Position
('ra', (np.nan, 'Right ascension of best-fit position (deg).', float)),
('dec', (np.nan, 'Declination of best-fit position (deg).', float)),
('glon', (np.nan, 'Galactic Longitude of best-fit position (deg).', float)),
('glat', (np.nan, 'Galactic Latitude of best-fit position (deg).', float)),
('xpix', (np.nan, 'Longitude pixel coordinate of best-fit position.', float)),
('ypix', (np.nan, 'Latitude pixel coordinate of best-fit position.', float)),
('deltax', (np.nan, 'Longitude offset from old position (deg).', float)),
('deltay', (np.nan, 'Latitude offset from old position (deg).', float)),
('skydir', (None, '', astropy.coordinates.SkyCoord,
'`~astropy.coordinates.SkyCoord`')),
('ra_preloc', (np.nan, 'Right ascension of pre-localization position (deg).', float)),
('dec_preloc', (np.nan, 'Declination of pre-localization position (deg).', float)),
('glon_preloc', (np.nan,
'Galactic Longitude of pre-localization position (deg).', float)),
('glat_preloc', (np.nan,
'Galactic Latitude of pre-localization position (deg).', float)),
# Positional Errors
('ra_err', (np.nan, 'Std. deviation of positional uncertainty in right ascension (deg).', float)),
('dec_err', (np.nan, 'Std. deviation of positional uncertainty in declination (deg).', float)),
('glon_err', (np.nan, 'Std. deviation of positional uncertainty in galactic longitude (deg).', float)),
('glat_err', (np.nan, 'Std. deviation of positional uncertainty in galactic latitude (deg).', float)),
('pos_offset', (np.nan, 'Angular offset (deg) between the old and new (localized) source positions.', float)),
('pos_err', (np.nan, '1-sigma positional uncertainty (deg).', float)),
('pos_r68', (np.nan, '68% positional uncertainty (deg).', float)),
('pos_r95', (np.nan, '95% positional uncertainty (deg).', float)),
('pos_r99', (np.nan, '99% positional uncertainty (deg).', float)),
('pos_err_semimajor', (np.nan,
'1-sigma uncertainty (deg) along major axis of uncertainty ellipse.', float)),
('pos_err_semiminor', (np.nan,
'1-sigma uncertainty (deg) along minor axis of uncertainty ellipse.', float)),
('pos_angle', (np.nan, 'Position angle of uncertainty ellipse with respect to major axis.', float)),
('pos_ecc', (np.nan,
'Eccentricity of uncertainty ellipse defined as sqrt(1-b**2/a**2).', float)),
('pos_ecc2', (np.nan,
'Eccentricity of uncertainty ellipse defined as sqrt(a**2/b**2-1).', float)),
('pos_gal_cov', (np.nan * np.ones((2, 2)),
'Covariance matrix of positional uncertainties in local projection in galactic coordinates.',
np.ndarray)),
('pos_gal_corr', (np.nan * np.ones((2, 2)),
'Correlation matrix of positional uncertainties in local projection in galactic coordinates.',
np.ndarray)),
('pos_cel_cov', (np.nan * np.ones((2, 2)),
'Covariance matrix of positional uncertainties in local projection in celestial coordinates.',
np.ndarray)),
('pos_cel_corr', (np.nan * np.ones((2, 2)),
'Correlation matrix of positional uncertainties in local projection in celestial coordinates.',
np.ndarray)),
# Maps
('tsmap', (None, '', fermipy.skymap.Map)),
('tsmap_peak', (None, '', fermipy.skymap.Map)),
# Miscellaneous
('loglike_init', (np.nan, 'Log-Likelihood of model before localization.', float)),
('loglike_base', (np.nan, 'Log-Likelihood of model after initial spectral fit.', float)),
('loglike_loc', (np.nan, 'Log-Likelihood of model after localization.', float)),
('dloglike_loc', (np.nan,
'Difference in log-likelihood before and after localization.', float)),
('fit_success', (True, '', bool)),
('fit_inbounds', (True, '', bool)),
('fit_init', (None, '', dict)),
('fit_scan', (None, '', dict)),
))
# Output for extension analysis
extension_output = OrderedDict((
('name', (None, 'Name of source.', str)),
('file', (None, 'Name of output FITS file.', str)),
('config', ({}, 'Copy of the input configuration to this method.', dict)),
# Extension
('width', (None, 'Vector of width (intrinsic 68% containment radius) values (deg).',
np.ndarray)),
('dloglike', (None, 'Delta-log-likelihood values for each point in the profile likelihood scan.',
np.ndarray)),
('loglike', (None, 'Log-likelihood values for each point in the scan over the spatial extension.',
np.ndarray)),
('loglike_ptsrc', (np.nan,
'Log-Likelihood value of the best-fit point-source model.', float)),
('loglike_ext', (np.nan, 'Log-Likelihood of the best-fit extended source model.', float)),
('loglike_init', (np.nan, 'Log-Likelihood of model before extension fit.', float)),
('loglike_base', (np.nan, 'Log-Likelihood of model after initial spectral fit.', float)),
('ext', (np.nan, 'Best-fit extension (68% containment radius) (deg).', float)),
('ext_err_hi', (np.nan,
'Upper (1-sigma) error on the best-fit extension (deg).', float)),
('ext_err_lo', (np.nan,
'Lower (1-sigma) error on the best-fit extension (deg).', float)),
('ext_err', (np.nan, 'Symmetric (1-sigma) error on the best-fit extension (deg).', float)),
('ext_ul95', (np.nan, '95% CL upper limit on the spatial extension (deg).', float)),
('ts_ext', (np.nan, 'Test statistic for the extension hypothesis.', float)),
# Extension vs. Energy
('ebin_e_min', (None, '', np.ndarray)),
('ebin_e_ctr', (None, '', np.ndarray)),
('ebin_e_max', (None, '', np.ndarray)),
('ebin_ext', (None, 'Best-fit extension as measured in each energy bin (intrinsic 68% containment radius) (deg).',
np.ndarray)),
('ebin_ext_err', (None,
'Symmetric (1-sigma) error on best-fit extension in each energy bin (deg).',
np.ndarray)),
('ebin_ext_err_hi', (None,
'Upper (1-sigma) error on best-fit extension in each energy bin (deg).',
np.ndarray)),
('ebin_ext_err_lo', (None,
'Lower (1-sigma) error on best-fit extension in each energy bin (deg).',
np.ndarray)),
('ebin_ext_ul95', (None,
'95% CL upper limit on best-fit extension in each energy bin (deg).',
np.ndarray)),
('ebin_ts_ext', (None,
'Test statistic for extension hypothesis in each energy bin.',
np.ndarray)),
('ebin_dloglike', (None, 'Delta-log-likelihood values for scan over the spatial extension in each energy bin.',
np.ndarray)),
('ebin_loglike', (None, 'Log-likelihood values for scan over the spatial extension in each energy bin.',
np.ndarray)),
('ebin_loglike_ptsrc', (None,
'Log-Likelihood value of the best-fit point-source model in each energy bin.',
np.ndarray)),
('ebin_loglike_ext', (None, 'Log-Likelihood value of the best-fit extended source model in each energy bin.',
np.ndarray)),
# Position
('ra', localize_output['ra']),
('dec', localize_output['dec']),
('glon', localize_output['glon']),
('glat', localize_output['glat']),
('ra_err', localize_output['ra_err']),
('dec_err', localize_output['dec_err']),
('glon_err', localize_output['glon_err']),
('glat_err', localize_output['glat_err']),
('pos_offset', localize_output['pos_offset']),
('pos_err', localize_output['pos_err']),
('pos_r68', localize_output['pos_r68']),
('pos_r95', localize_output['pos_r95']),
('pos_r99', localize_output['pos_r99']),
('pos_err_semimajor', localize_output['pos_err_semimajor']),
('pos_err_semiminor', localize_output['pos_err_semiminor']),
('pos_angle', localize_output['pos_angle']),
# Maps
('tsmap', (None, '', fermipy.skymap.Map)),
('ptsrc_tot_map', (None, '', fermipy.skymap.Map)),
('ptsrc_src_map', (None, '', fermipy.skymap.Map)),
('ptsrc_bkg_map', (None, '', fermipy.skymap.Map)),
('ext_tot_map', (None, '', fermipy.skymap.Map)),
('ext_src_map', (None, '', fermipy.skymap.Map)),
('ext_bkg_map', (None, '', fermipy.skymap.Map)),
# Miscellaneous
('source_fit', ({}, 'Dictionary with parameters of the best-fit extended source model.', dict)),
))
# Options for plotting
plotting = {
'loge_bounds': (None, '', list),
'catalogs': (None, '', list),
'graticule_radii': (None, 'Define a list of radii at which circular graticules will be drawn.', list),
'format': ('png', '', str),
'cmap': ('magma', 'Set the colormap for 2D plots.', str),
'cmap_resid': ('RdBu_r', 'Set the colormap for 2D residual plots.', str),
'figsize': ([8.0, 6.0], 'Set the default figure size.', list),
'label_ts_threshold':
(0., 'TS threshold for labeling sources in sky maps. If None then no sources will be labeled.', float),
'interactive': (False, 'Enable interactive mode. If True then plots will be drawn after each plotting command.', bool),
}
# Source dictionary
source_meta_output = OrderedDict((
('name', (None, 'Name of the source.', str)),
('Source_Name', (None, 'Name of the source.', str)),
('SpatialModel', (None, 'Spatial model.', str)),
('SpatialWidth', (None, 'Spatial size parameter.', float)),
('SpatialType', (None, 'Spatial type string. This corresponds to the type attribute of '
'the spatialModel component in the XML model.', str)),
('SourceType', (None, 'Source type string (PointSource or DiffuseSource).', str)),
('SpectrumType', (None, 'Spectrum type string. This corresponds to the type attribute of '
'the spectrum component in the XML model (e.g. PowerLaw, LogParabola, etc.).', str)),
('Spatial_Filename',
(None, 'Path to spatial template associated to this source.', str)),
('Spectrum_Filename', (None,
'Path to file associated to the spectral model of this source.', str)),
('correlation', ({}, 'Dictionary of correlation coefficients.', dict)),
('model_counts', (None, 'Vector of predicted counts for this source in each analysis energy bin.',
np.ndarray)),
    ('model_counts_wt', (None, 'Vector of weighted predicted counts for this source in each analysis energy bin.',
np.ndarray)),
('sed', (None, 'Output of SED analysis. See :ref:`sed` for more information.', dict)),
))
source_pos_output = OrderedDict((
('ra', (np.nan, 'Right ascension of the source (deg).', float)),
('dec', (np.nan, 'Declination of the source (deg).', float)),
('glon', (np.nan, 'Galactic longitude of the source (deg).', float)),
('glat', (np.nan, 'Galactic latitude of the source (deg).', float)),
('ra_err', localize_output['ra_err']),
('dec_err', localize_output['dec_err']),
('glon_err', localize_output['glon_err']),
('glat_err', localize_output['glat_err']),
('pos_err', localize_output['pos_err']),
('pos_r68', localize_output['pos_r68']),
('pos_r95', localize_output['pos_r95']),
('pos_r99', localize_output['pos_r99']),
('pos_err_semimajor', localize_output['pos_err_semimajor']),
('pos_err_semiminor', localize_output['pos_err_semiminor']),
('pos_angle', localize_output['pos_angle']),
('pos_gal_cov', localize_output['pos_gal_cov']),
('pos_gal_corr', localize_output['pos_gal_corr']),
('pos_cel_cov', localize_output['pos_cel_cov']),
('pos_cel_corr', localize_output['pos_cel_corr']),
('offset_ra', (np.nan, 'Right ascension offset from ROI center in local celestial projection (deg).', float)),
('offset_dec', (np.nan, 'Declination offset from ROI center in local celestial projection (deg).', float)),
('offset_glon', (np.nan, 'Galactic longitude offset from ROI center in local galactic projection (deg).', float)),
('offset_glat', (np.nan, 'Galactic latitude offset from ROI center in local galactic projection (deg).', float)),
('offset_roi_edge', (np.nan, 'Distance from the edge of the ROI (deg). Negative (positive) values '
'indicate locations inside (outside) the ROI.', float)),
('offset', (np.nan, 'Angular offset from ROI center (deg).', float)),
))
source_flux_output = OrderedDict((
('param_names', (np.zeros(10, dtype='S32'),
'Names of spectral parameters.', np.ndarray)),
('param_values', (np.empty(10, dtype=float) * np.nan,
'Spectral parameter values.', np.ndarray)),
('param_errors', (np.empty(10, dtype=float) * np.nan,
'Spectral parameters errors.', np.ndarray)),
('ts', (np.nan, 'Source test statistic.', float)),
('loglike', (np.nan, 'Log-likelihood of the model evaluated at the best-fit normalization of the source.', float)),
('loglike_scan', (np.array(
[np.nan]), 'Log-likelihood values for scan of source normalization.', np.ndarray)),
('dloglike_scan', (np.array(
[np.nan]), 'Delta Log-likelihood values for scan of source normalization.', np.ndarray)),
('eflux_scan', (np.array(
[np.nan]), 'Energy flux values for scan of source normalization.', np.ndarray)),
('flux_scan', (np.array(
[np.nan]), 'Flux values for scan of source normalization.', np.ndarray)),
('norm_scan', (np.array(
[np.nan]), 'Normalization parameters values for scan of source normalization.', np.ndarray)),
('npred', (np.nan, 'Number of predicted counts from this source integrated over the analysis energy range.', float)),
    ('npred_wt', (np.nan, 'Weighted number of predicted counts from this source integrated over the analysis energy range.', float)),
('pivot_energy', (np.nan, 'Decorrelation energy in MeV.', float)),
('flux', (np.nan, 'Photon flux (%s) integrated over analysis energy range' % FLUX_UNIT,
float)),
('flux100', (np.nan, 'Photon flux (%s) integrated from 100 MeV to 316 GeV.' % FLUX_UNIT,
float)),
('flux1000', (np.nan, 'Photon flux (%s) integrated from 1 GeV to 316 GeV.' % FLUX_UNIT,
float)),
('flux10000', (np.nan, 'Photon flux (%s) integrated from 10 GeV to 316 GeV.' % FLUX_UNIT,
float)),
('flux_err', (np.nan, 'Photon flux uncertainty (%s) integrated over analysis energy range' % FLUX_UNIT,
float)),
('flux100_err', (np.nan, 'Photon flux uncertainty (%s) integrated from 100 MeV to 316 GeV.' % FLUX_UNIT,
float)),
('flux1000_err', (np.nan, 'Photon flux uncertainty (%s) integrated from 1 GeV to 316 GeV.' % FLUX_UNIT,
float)),
('flux10000_err', (np.nan, 'Photon flux uncertainty (%s) integrated from 10 GeV to 316 GeV.' % FLUX_UNIT,
float)),
('flux_ul95', (np.nan, '95%' + ' CL upper limit on the photon flux (%s) integrated over analysis energy range' % FLUX_UNIT,
float)),
('flux100_ul95', (np.nan, '95%' + ' CL upper limit on the photon flux (%s) integrated from 100 MeV to 316 GeV.' % FLUX_UNIT,
float)),
('flux1000_ul95', (np.nan, '95%' + ' CL upper limit on the photon flux (%s) integrated from 1 GeV to 316 GeV.' % FLUX_UNIT,
float)),
('flux10000_ul95', (np.nan, '95%' + ' CL upper limit on the photon flux (%s) integrated from 10 GeV to 316 GeV.' % FLUX_UNIT,
float)),
('eflux', (np.nan, 'Energy flux (%s) integrated over analysis energy range' % ENERGY_FLUX_UNIT,
float)),
('eflux100', (np.nan, 'Energy flux (%s) integrated from 100 MeV to 316 GeV.' % ENERGY_FLUX_UNIT,
float)),
('eflux1000', (np.nan, 'Energy flux (%s) integrated from 1 GeV to 316 GeV.' % ENERGY_FLUX_UNIT,
float)),
('eflux10000', (np.nan, 'Energy flux (%s) integrated from 10 GeV to 316 GeV.' % ENERGY_FLUX_UNIT,
float)),
('eflux_err', (np.nan, 'Energy flux uncertainty (%s) integrated over analysis energy range' % ENERGY_FLUX_UNIT,
float)),
('eflux100_err', (np.nan, 'Energy flux uncertainty (%s) integrated from 100 MeV to 316 GeV.' % ENERGY_FLUX_UNIT,
float)),
('eflux1000_err', (np.nan, 'Energy flux uncertainty (%s) integrated from 1 GeV to 316 GeV.' % ENERGY_FLUX_UNIT,
float)),
('eflux10000_err', (np.nan, 'Energy flux uncertainty (%s) integrated from 10 GeV to 316 GeV.' % ENERGY_FLUX_UNIT,
float)),
('eflux_ul95', (np.nan, '95%' + ' CL upper limit on the energy flux (%s) integrated over analysis energy range' % ENERGY_FLUX_UNIT,
float)),
('eflux100_ul95', (np.nan, '95%' + ' CL upper limit on the energy flux (%s) integrated from 100 MeV to 316 GeV.' % ENERGY_FLUX_UNIT,
float)),
('eflux1000_ul95', (np.nan, '95%' + ' CL upper limit on the energy flux (%s) integrated from 1 GeV to 316 GeV.' % ENERGY_FLUX_UNIT,
float)),
('eflux10000_ul95', (np.nan, '95%' + ' CL upper limit on the energy flux (%s) integrated from 10 GeV to 316 GeV.' % ENERGY_FLUX_UNIT,
float)),
('dnde', (np.nan, 'Differential photon flux (%s) evaluated at the pivot energy.' % DIFF_FLUX_UNIT,
float)),
('dnde100', (np.nan, 'Differential photon flux (%s) evaluated at 100 MeV.' % DIFF_FLUX_UNIT,
float)),
('dnde1000', (np.nan, 'Differential photon flux (%s) evaluated at 1 GeV.' % DIFF_FLUX_UNIT,
float)),
('dnde10000', (np.nan, 'Differential photon flux (%s) evaluated at 10 GeV.' % DIFF_FLUX_UNIT,
float)),
('dnde_err', (np.nan, 'Differential photon flux uncertainty (%s) evaluated at the pivot energy.' % DIFF_FLUX_UNIT,
float)),
('dnde100_err', (np.nan, 'Differential photon flux uncertainty (%s) evaluated at 100 MeV.' % DIFF_FLUX_UNIT,
float)),
('dnde1000_err', (np.nan, 'Differential photon flux uncertainty (%s) evaluated at 1 GeV.' % DIFF_FLUX_UNIT,
float)),
('dnde10000_err', (np.nan, 'Differential photon flux uncertainty (%s) evaluated at 10 GeV.' % DIFF_FLUX_UNIT,
float)),
('dnde_index', (np.nan, 'Logarithmic slope of the differential photon spectrum evaluated at the pivot energy.',
float)),
('dnde100_index', (np.nan, 'Logarithmic slope of the differential photon spectrum evaluated at 100 MeV.',
float)),
    ('dnde1000_index', (np.nan, 'Logarithmic slope of the differential photon spectrum evaluated at 1 GeV.',
float)),
('dnde10000_index', (np.nan, 'Logarithmic slope of the differential photon spectrum evaluated at 10 GeV.',
float)),
))
source_output = OrderedDict(list(source_meta_output.items()) +
list(source_pos_output.items()) +
list(source_flux_output.items()))
# Top-level dictionary for output file
file_output = OrderedDict((
('roi', (None, 'A dictionary containing information about the ROI as a whole.', dict)),
('sources', (None, 'A dictionary containing information about individual sources in the model (diffuse and point-like). '
'Each element of this dictionary maps to a single source in the ROI model.', dict)),
('config', (None, 'The configuration dictionary of the :py:class:`~fermipy.gtanalysis.GTAnalysis` instance.', dict)),
('version', (None, 'The version of the Fermipy package that was used to run the analysis. This is automatically generated from the git release tag.', str))
))
```
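The option dictionaries above all follow the same `(default, description, type)` tuple convention (a few entries carry an extra formatting hint as a fourth element). As a rough illustration (a minimal sketch, not fermipy's own configuration machinery), such a dictionary can be collapsed into a plain mapping of defaults and used to type-check user overrides; `selection_like` and `build_config` below are hypothetical names used only for this example.
```python
# Minimal sketch: turn (default, description, type) option tuples into a
# concrete config dict and validate user-supplied overrides against the
# declared types.  Not fermipy's actual configuration code.
from collections import OrderedDict

selection_like = OrderedDict([
    ('binsz', (0.1, 'Spatial bin size in degrees.', float)),
    ('roiwidth', (10.0, 'Width of the ROI in degrees.', float)),
    ('coordsys', ('CEL', 'Coordinate system of the spatial projection.', str)),
])


def build_config(option_tuples, overrides=None):
    """Return {name: value} seeded from the defaults and updated from overrides."""
    config = {name: spec[0] for name, spec in option_tuples.items()}
    for name, value in (overrides or {}).items():
        if name not in option_tuples:
            raise KeyError('Unknown option: %s' % name)
        expected_type = option_tuples[name][2]
        if value is not None and not isinstance(value, expected_type):
            raise TypeError('%s expects a %s' % (name, expected_type.__name__))
        config[name] = value
    return config


print(build_config(selection_like, {'binsz': 0.05}))
```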
#### File: fermipy/diffuse/gt_split_and_bin.py
```python
from __future__ import absolute_import, division, print_function
import os
import math
import yaml
from fermipy.jobs.utils import is_null
from fermipy.jobs.link import Link
from fermipy.jobs.chain import Chain
from fermipy.jobs.scatter_gather import ScatterGather
from fermipy.jobs.slac_impl import make_nfs_path
from fermipy.diffuse.utils import create_inputlist
from fermipy.diffuse.name_policy import NameFactory
from fermipy.diffuse import defaults as diffuse_defaults
from fermipy.diffuse.binning import EVT_TYPE_DICT
from fermipy.diffuse.job_library import Gtlink_select,\
Gtlink_bin, Gtexpcube2_SG
from fermipy.diffuse.gt_coadd_split import CoaddSplit_SG
NAME_FACTORY = NameFactory()
def make_full_path(basedir, outkey, origname):
"""Make a full file path"""
return os.path.join(basedir, outkey, os.path.basename(
origname).replace('.fits', '_%s.fits' % outkey))
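# Illustrative note: with basedir='/data/counts_cubes', outkey='key00' and
# origname='/ft1/events.fits', make_full_path returns
# '/data/counts_cubes/key00/events_key00.fits'.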
class SplitAndBin(Chain):
"""Small class to split and bin data according to some user-provided specification
    This chain consists of multiple `Link` objects:
select-energy-EBIN-ZCUT : `Gtlink_select`
Initial splitting by energy bin and zenith angle cut
select-type-EBIN-ZCUT-FILTER-TYPE : `Gtlink_select`
Refinement of selection from event types
bin-EBIN-ZCUT-FILTER-TYPE : `Gtlink_bin`
Final binning of the data for each event type
"""
appname = 'fermipy-split-and-bin'
linkname_default = 'split-and-bin'
usage = '%s [options]' % (appname)
description = 'Run gtselect and gtbin together'
default_options = dict(data=diffuse_defaults.diffuse['data'],
comp=diffuse_defaults.diffuse['comp'],
hpx_order_max=diffuse_defaults.diffuse['hpx_order_ccube'],
ft1file=(None, 'Input FT1 file', str),
evclass=(128, 'Event class bit mask', int),
outdir=('counts_cubes_cr', 'Base name for output files', str),
outkey=(None, 'Key for this particular output file', str),
pfiles=(None, 'Directory for .par files', str),
scratch=(None, 'Scratch area', str),
dry_run=(False, 'Print commands but do not run them', bool))
__doc__ += Link.construct_docstring(default_options)
def __init__(self, **kwargs):
"""C'tor
"""
super(SplitAndBin, self).__init__(**kwargs)
self.comp_dict = None
def _map_arguments(self, args):
"""Map from the top-level arguments to the arguments provided to
        the individual links """
comp_file = args.get('comp', None)
datafile = args.get('data', None)
if is_null(comp_file):
return
if is_null(datafile):
return
NAME_FACTORY.update_base_dict(datafile)
outdir = args.get('outdir', None)
outkey = args.get('outkey', None)
ft1file = args['ft1file']
if is_null(outdir) or is_null(outkey):
return
pfiles = os.path.join(outdir, outkey)
self.comp_dict = yaml.safe_load(open(comp_file))
coordsys = self.comp_dict.pop('coordsys')
full_out_dir = make_nfs_path(os.path.join(outdir, outkey))
for key_e, comp_e in sorted(self.comp_dict.items()):
emin = math.pow(10., comp_e['log_emin'])
emax = math.pow(10., comp_e['log_emax'])
enumbins = comp_e['enumbins']
zmax = comp_e['zmax']
zcut = "zmax%i" % comp_e['zmax']
evclassstr = NAME_FACTORY.base_dict['evclass']
kwargs_select = dict(zcut=zcut,
ebin=key_e,
psftype='ALL',
coordsys=coordsys,
mktime='none')
selectfile_energy = make_full_path(outdir, outkey, NAME_FACTORY.select(**kwargs_select))
linkname = 'select-energy-%s-%s' % (key_e, zcut)
self._set_link(linkname, Gtlink_select,
infile=ft1file,
outfile=selectfile_energy,
zmax=zmax,
emin=emin,
emax=emax,
evclass=NAME_FACTORY.evclassmask(evclassstr),
pfiles=pfiles,
logfile=os.path.join(full_out_dir, "%s.log" % linkname))
if 'evtclasses' in comp_e:
evtclasslist_vals = comp_e['evtclasses']
else:
evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]
for evtclassval in evtclasslist_vals:
for psf_type, psf_dict in sorted(comp_e['psf_types'].items()):
linkname_select = 'select-type-%s-%s-%s-%s' % (
key_e, zcut, evtclassval, psf_type)
linkname_bin = 'bin-%s-%s-%s-%s' % (key_e, zcut, evtclassval, psf_type)
hpx_order = psf_dict['hpx_order']
kwargs_bin = kwargs_select.copy()
kwargs_bin['psftype'] = psf_type
selectfile_psf = make_full_path(
outdir, outkey, NAME_FACTORY.select(**kwargs_bin))
binfile = make_full_path(outdir, outkey, NAME_FACTORY.ccube(**kwargs_bin))
self._set_link(linkname_select, Gtlink_select,
infile=selectfile_energy,
outfile=selectfile_psf,
zmax=zmax,
emin=emin,
emax=emax,
evtype=EVT_TYPE_DICT[psf_type],
evclass=NAME_FACTORY.evclassmask(evtclassval),
pfiles=pfiles,
logfile=os.path.join(full_out_dir, "%s.log" % linkname_select))
self._set_link(linkname_bin, Gtlink_bin,
coordsys=coordsys,
hpx_order=hpx_order,
evfile=selectfile_psf,
outfile=binfile,
emin=emin,
emax=emax,
enumbins=enumbins,
pfiles=pfiles,
logfile=os.path.join(full_out_dir, "%s.log" % linkname_bin))
class SplitAndBin_SG(ScatterGather):
"""Small class to generate configurations for SplitAndBin
"""
appname = 'fermipy-split-and-bin-sg'
usage = "%s [options]" % (appname)
description = "Prepare data for diffuse all-sky analysis"
clientclass = SplitAndBin
job_time = 1500
default_options = dict(comp=diffuse_defaults.diffuse['comp'],
data=diffuse_defaults.diffuse['data'],
hpx_order_max=diffuse_defaults.diffuse['hpx_order_ccube'],
ft1file=(None, 'Input FT1 file', str),
scratch=(None, 'Path to scratch area', str))
__doc__ += Link.construct_docstring(default_options)
def build_job_configs(self, args):
"""Hook to build job configurations
"""
job_configs = {}
comp_file = args.get('comp', None)
if comp_file is not None:
comp_dict = yaml.safe_load(open(comp_file))
coordsys = comp_dict.pop('coordsys')
for v in comp_dict.values():
v['coordsys'] = coordsys
else:
return job_configs
NAME_FACTORY.update_base_dict(args['data'])
inputfiles = create_inputlist(args['ft1file'])
outdir_base = os.path.join(NAME_FACTORY.base_dict['basedir'], 'counts_cubes')
for idx, infile in enumerate(inputfiles):
key = <KEY>
output_dir = os.path.join(outdir_base, key)
try:
os.mkdir(output_dir)
except OSError:
pass
logfile = make_nfs_path(os.path.join(output_dir, 'scatter_%s.log' % key))
job_configs[key] = comp_dict.copy()
job_configs[key].update(dict(ft1file=infile,
comp=args['comp'],
hpx_order_max=args['hpx_order_max'],
outdir=outdir_base,
outkey=key,
logfile=logfile,
pfiles=output_dir))
return job_configs
class SplitAndBinChain(Chain):
"""Chain to run split and bin and then make exposure cubes
This chain consists of:
split-and-bin : `SplitAndBin_SG`
Chain to make the binned counts maps for each input file
coadd-split : `CoaddSplit_SG`
        Link to co-add the binned counts map files
expcube2 : `Gtexpcube2_SG`
Link to make the corresponding binned exposure maps
"""
appname = 'fermipy-split-and-bin-chain'
linkname_default = 'split-and-bin-chain'
usage = '%s [options]' % (appname)
description = 'Run split-and-bin, coadd-split and exposure'
default_options = dict(data=diffuse_defaults.diffuse['data'],
comp=diffuse_defaults.diffuse['comp'],
ft1file=diffuse_defaults.diffuse['ft1file'],
hpx_order_ccube=diffuse_defaults.diffuse['hpx_order_ccube'],
hpx_order_expcube=diffuse_defaults.diffuse['hpx_order_expcube'],
scratch=diffuse_defaults.diffuse['scratch'],
dry_run=diffuse_defaults.diffuse['dry_run'])
__doc__ += Link.construct_docstring(default_options)
def _map_arguments(self, args):
"""Map from the top-level arguments to the arguments provided to
        the individual links """
data = args.get('data')
comp = args.get('comp')
ft1file = args.get('ft1file')
scratch = args.get('scratch', None)
dry_run = args.get('dry_run', None)
self._set_link('split-and-bin', SplitAndBin_SG,
comp=comp, data=data,
hpx_order_max=args.get('hpx_order_ccube', 9),
ft1file=ft1file,
scratch=scratch,
dry_run=dry_run)
self._set_link('coadd-split', CoaddSplit_SG,
comp=comp, data=data,
ft1file=ft1file)
self._set_link('expcube2', Gtexpcube2_SG,
comp=comp, data=data,
hpx_order_max=args.get('hpx_order_expcube', 5),
dry_run=dry_run)
def register_classes():
"""Register these classes with the `LinkFactory` """
SplitAndBin.register_class()
SplitAndBin_SG.register_class()
SplitAndBinChain.register_class()
```
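The chains above are driven by a per-component specification loaded with `yaml.safe_load`. The sketch below shows the shape of that specification as inferred from the keys read in `SplitAndBin._map_arguments` and `SplitAndBin_SG.build_job_configs` (`coordsys`, `log_emin`, `log_emax`, `enumbins`, `zmax`, `psf_types`/`hpx_order`, optional `evtclasses`); the energy-bin names and numeric values are placeholders, not a recommended binning.
```python
# Hedged sketch of a split-and-bin component specification; key names follow
# the code above, values are illustrative only.
import yaml

component_spec = {
    'coordsys': 'GAL',
    'E0': {
        'log_emin': 2.0,   # log10(E/MeV)
        'log_emax': 2.5,
        'enumbins': 4,
        'zmax': 80,
        'psf_types': {'PSF3': {'hpx_order': 5}},
    },
    'E1': {
        'log_emin': 2.5,
        'log_emax': 3.0,
        'enumbins': 4,
        'zmax': 90,
        'evtclasses': ['P8R3_SOURCE'],    # optional per-bin event classes
        'psf_types': {'PSF2': {'hpx_order': 6}, 'PSF3': {'hpx_order': 6}},
    },
}

print(yaml.safe_dump(component_spec, default_flow_style=False))
```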
#### File: fermipy/fermipy/gtutils.py
```python
from __future__ import absolute_import, division, print_function
import copy
from functools import wraps
import numpy as np
import pyLikelihood as pyLike
from SrcModel import SourceModel
from AnalysisBase import AnalysisBase
from LikelihoodState import LikelihoodState
import pyIrfLoader
pyIrfLoader.Loader_go()
_funcFactory = pyLike.SourceFactory_funcFactory()
import BinnedAnalysis
import SummedLikelihood
from fermipy import utils
from fermipy import model_utils
evtype_string = {
4: 'PSF0',
8: 'PSF1',
16: 'PSF2',
32: 'PSF3'
}
def bitmask_to_bits(mask):
bits = []
for i in range(32):
if mask & (2**i):
bits += [2**i]
return bits
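# Example: bitmask_to_bits(12) returns [4, 8], i.e. the PSF0 and PSF1 event
# types in the evtype_string mapping above.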
DEFAULT_SCALE_DICT = {'value': 1000.0,
'scale': 1.0, 'min': 0.001, 'max': 1000.0}
DEFAULT_NORM_DICT = {'value': 1E-12, 'scale': 1.0, 'min': 1E-5, 'max': 1000.0}
DEFAULT_INTEGRAL_DICT = {'value': 1E-6,
'scale': 1.0, 'min': 1E-5, 'max': 1000.0}
DEFAULT_INDEX_DICT = {'value': 2.0, 'scale': -1.0, 'min': 0.0, 'max': 5.0}
FUNCTION_NORM_PARS = {}
FUNCTION_PAR_NAMES = {}
FUNCTION_DEFAULT_PARS = {
'PowerLaw': {
'Index': DEFAULT_INDEX_DICT,
'Scale': DEFAULT_SCALE_DICT,
'Prefactor': DEFAULT_NORM_DICT},
'PowerLaw2': {
'Index': DEFAULT_INDEX_DICT,
'LowerLimit': {'value': 100.0, 'scale': 1.0, 'min': 20.0, 'max': 1000000.},
'UpperLimit': {'value': 100000.0, 'scale': 1.0, 'min': 20.0, 'max': 1000000.},
'Integral': DEFAULT_INTEGRAL_DICT},
'BrokenPowerLaw': {
'Index1': DEFAULT_INDEX_DICT,
'Index2': DEFAULT_INDEX_DICT,
'BreakValue': DEFAULT_SCALE_DICT,
'Prefactor': DEFAULT_NORM_DICT},
'BrokenPowerLaw2': {
'Index1': DEFAULT_INDEX_DICT,
'Index2': DEFAULT_INDEX_DICT,
'LowerLimit': {'value': 100.0, 'scale': 1.0, 'min': 20.0, 'max': 1000000.},
'UpperLimit': {'value': 100000.0, 'scale': 1.0, 'min': 20.0, 'max': 1000000.},
'BreakValue': DEFAULT_SCALE_DICT,
'Integral': DEFAULT_INTEGRAL_DICT},
'BPLExpCutoff': {
'Index1': DEFAULT_INDEX_DICT,
'Index2': DEFAULT_INDEX_DICT,
'BreakValue': DEFAULT_SCALE_DICT,
'Prefactor': DEFAULT_NORM_DICT},
'SmoothBrokenPowerLaw': {
'Index1': DEFAULT_INDEX_DICT,
'Index2': DEFAULT_INDEX_DICT,
'BreakValue': DEFAULT_SCALE_DICT,
'Prefactor': DEFAULT_NORM_DICT,
'Beta': {'value': 0.2, 'scale': 1.0, 'min': 0.01, 'max': 10.0}},
'PLSuperExpCutoff': {
'Cutoff': DEFAULT_SCALE_DICT,
'Index1': {'value': 2.0, 'scale': -1.0, 'min': 0.0, 'max': 5.0},
'Index2': {'value': 1.0, 'scale': 1.0, 'min': 0.0, 'max': 2.0},
'Prefactor': DEFAULT_NORM_DICT,
},
'LogParabola': {
'norm': DEFAULT_NORM_DICT,
'alpha': {'value': 2.0, 'scale': 1.0, 'min': -5.0, 'max': 5.0},
'beta': {'value': 0.0, 'scale': 1.0, 'min': -2.0, 'max': 2.0},
'Eb': DEFAULT_SCALE_DICT},
'SpatialMap': {
'Prefactor': {'value': 1.0, 'scale': 1.0, 'min': 1.0, 'max': 1.0}},
'ConstantValue': {
'Normalization': {'value': 1.0, 'scale': 1.0, 'min': 1E-5, 'max': 1000.0}},
'FileFunction': {
'Normalization': {'value': 1.0, 'scale': 1.0, 'min': 1E-5, 'max': 1000.0}},
'Gaussian': {
'Mean': {'value': 1000.0, 'scale': 1.0, 'min': 1E-5, 'max': 1E5},
'Sigma': {'value': 100.0, 'scale': 1.0, 'min': 10., 'max': 1E5},
'Prefactor': DEFAULT_NORM_DICT},
}
def init_function_pars():
global FUNCTION_PAR_NAMES
global FUNCTION_NORM_PARS
global FUNCTION_DEFAULT_PARS
FUNCTION_PAR_NAMES = {}
FUNCTION_NORM_PARS = {}
funcFactory = pyLike.SourceFactory_funcFactory()
names = pyLike.StringVector()
funcFactory.getFunctionNames(names)
for fname in names:
pars = FUNCTION_DEFAULT_PARS.setdefault(fname, {})
par_names = FUNCTION_PAR_NAMES.setdefault(fname, [])
if 'EblAtten' in fname and fname[len('EblAtten::'):] in FUNCTION_DEFAULT_PARS:
pars.update(FUNCTION_DEFAULT_PARS[fname[len('EblAtten::'):]])
fn = funcFactory.create(fname)
try:
FUNCTION_NORM_PARS[fname] = fn.normPar().getName()
except Exception:
FUNCTION_NORM_PARS[fname] = None
params = pyLike.ParameterVector()
fn.getParams(params)
for i, p in enumerate(params):
pname = p.getName()
par_names += [pname]
if pname == 'Scale':
pars.setdefault(pname, DEFAULT_SCALE_DICT)
elif pname == 'Prefactor':
pars.setdefault(pname, DEFAULT_NORM_DICT)
else:
pars.setdefault(pname, {})
bounds = p.getBounds()
par_dict = dict(name=pname,
value=p.getValue(),
min=bounds[0],
max=bounds[1],
scale=1.0,
free=False)
par_dict.update(copy.deepcopy(pars[pname]))
par_dict['name'] = pname
pars[pname] = par_dict
def get_function_par_names(function_type):
if not FUNCTION_NORM_PARS:
init_function_pars()
    if function_type not in FUNCTION_PAR_NAMES:
raise Exception('Invalid Function Type: %s' % function_type)
return copy.deepcopy(FUNCTION_PAR_NAMES[function_type])
def get_function_norm_par_name(function_type):
if not FUNCTION_NORM_PARS:
init_function_pars()
return FUNCTION_NORM_PARS[function_type]
def get_function_defaults(function_type):
if not FUNCTION_NORM_PARS:
init_function_pars()
return copy.deepcopy(FUNCTION_DEFAULT_PARS[function_type])
def build_piecewise_powerlaw(fn, spectral_pars):
ppl = pyLike.PiecewisePowerLaw.cast(fn)
index_l = spectral_pars['IndexL']['value']
index_h = spectral_pars['IndexH']['value']
i = 0
energies = pyLike.DoubleVector()
dndes = pyLike.DoubleVector()
while True:
try:
energy = spectral_pars['Energy%i'%i]['value']
dnde = spectral_pars['dNdE%i'%i]['value']
energies.push_back(energy)
dndes.push_back(dnde)
i += 1
except KeyError:
break
ppl.addParams(index_l, index_h, dndes, energies)
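# The spectral_pars dictionary consumed above is expected to provide 'IndexL'
# and 'IndexH' plus consecutively numbered 'Energy<i>'/'dNdE<i>' pairs; the
# loop stops at the first missing index.  A purely illustrative layout:
#   {'IndexL': {'value': -2.0}, 'IndexH': {'value': -3.0},
#    'Energy0': {'value': 100.0},  'dNdE0': {'value': 1e-10},
#    'Energy1': {'value': 1000.0}, 'dNdE1': {'value': 1e-12}}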
def create_spectrum_from_dict(spectrum_type, spectral_pars, fn=None):
"""Create a Function object from a parameter dictionary.
Parameters
----------
spectrum_type : str
String identifying the spectrum type (e.g. PowerLaw).
spectral_pars : dict
Dictionary of spectral parameters.
"""
if fn is None:
fn = pyLike.SourceFactory_funcFactory().create(str(spectrum_type))
if spectrum_type == 'PiecewisePowerLaw':
build_piecewise_powerlaw(fn, spectral_pars)
for k, v in spectral_pars.items():
v.setdefault('scale', 1.0)
v.setdefault('min', v['value'] * 1E-3)
v.setdefault('max', v['value'] * 1E3)
par = fn.getParam(str(k))
vmin = min(float(v['value']), float(v['min']))
vmax = max(float(v['value']), float(v['max']))
par.setValue(float(v['value']))
par.setBounds(vmin, vmax)
par.setScale(float(v['scale']))
if 'free' in v and int(v['free']) != 0:
par.setFree(True)
else:
par.setFree(False)
fn.setParam(par)
return fn
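# Hedged usage sketch (requires the Fermi ScienceTools pyLikelihood runtime;
# the parameter values are illustrative, not recommended defaults):
#   pars = {'Prefactor': {'value': 1e-12, 'scale': 1.0, 'free': True},
#           'Index': {'value': 2.0, 'scale': -1.0},
#           'Scale': {'value': 1000.0}}
#   fn = create_spectrum_from_dict('PowerLaw', pars)
# Any 'scale'/'min'/'max' entry omitted from a parameter dict is filled with
# the defaults applied in the loop above.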
def gtlike_spectrum_to_dict(spectrum):
""" Convert a pyLikelihood object to a python dictionary which can
be easily saved to a file."""
parameters = pyLike.ParameterVector()
spectrum.getParams(parameters)
d = dict(spectrum_type=spectrum.genericName())
for p in parameters:
pname = p.getName()
pval = p.getTrueValue()
perr = abs(p.error() * p.getScale()) if p.isFree() else np.nan
d[pname] = np.array([pval, perr])
if d['spectrum_type'] == 'FileFunction':
ff = pyLike.FileFunction_cast(spectrum)
d['file'] = ff.filename()
return d
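# Shape of the returned dictionary (illustrative): for a PowerLaw spectrum it
# resembles {'spectrum_type': 'PowerLaw', 'Prefactor': array([val, err]),
# 'Index': array([val, err]), 'Scale': array([val, nan])}, where the error is
# NaN for fixed parameters; FileFunction spectra carry an extra 'file' key.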
def gtlike_spectrum_to_vectors(spectrum):
""" Convert a pyLikelihood object to a python dictionary which can
be easily saved to a file."""
parameters = pyLike.ParameterVector()
spectrum.getParams(parameters)
npar = max(parameters.size(), 10)
o = {'param_names': np.zeros(npar, dtype='S32'),
'param_values': np.empty(npar, dtype=float) * np.nan,
'param_errors': np.empty(npar, dtype=float) * np.nan,
}
for i, p in enumerate(parameters):
o['param_names'][i] = p.getName()
o['param_values'][i] = p.getTrueValue()
perr = abs(p.error() * p.getScale()) if p.isFree() else np.nan
o['param_errors'][i] = perr
return o
def get_function_pars_dict(fn):
pars = get_function_pars(fn)
pars_dict = {p['name']: p for p in pars}
return pars_dict
def get_function_pars(fn):
"""Extract the parameters of a pyLikelihood function object
(value, scale, bounds).
Parameters
----------
fn : pyLikelihood.Function
Returns
-------
pars : list
"""
pars = []
par_names = pyLike.StringVector()
fn.getParamNames(par_names)
for pname in par_names:
par = fn.getParam(pname)
bounds = par.getBounds()
perr = par.error() if par.isFree() else np.nan
pars += [dict(name=pname,
value=par.getValue(),
error=perr,
min=bounds[0],
max=bounds[1],
free=par.isFree(),
scale=par.getScale())]
return pars
def get_params_dict(like):
params = get_params(like)
params_dict = {}
for p in params:
params_dict.setdefault(p['src_name'], [])
params_dict[p['src_name']] += [p]
return params_dict
def get_params(like):
params = []
for src_name in like.sourceNames():
src = like[src_name].src
spars, ppars = get_source_pars(src)
for p in spars:
p['src_name'] = src_name
params += [p]
for p in ppars:
p['src_name'] = src_name
params += [p]
return params
def get_priors(like):
"""Extract priors from a likelihood object."""
npar = len(like.params())
vals = np.ones(npar)
errs = np.ones(npar)
has_prior = np.array([False] * npar)
for i, p in enumerate(like.params()):
prior = like[i].log_prior()
if prior is None:
continue
par_names = pyLike.StringVector()
prior.getParamNames(par_names)
        if 'Mean' not in par_names:
            raise Exception('Failed to find Mean in prior parameters.')
        if 'Sigma' not in par_names:
            raise Exception('Failed to find Sigma in prior parameters.')
for t in par_names:
if t == 'Mean':
vals[i] = prior.parameter(t).getValue()
if t == 'Sigma':
errs[i] = prior.parameter(t).getValue()
has_prior[i] = True
return vals, errs, has_prior
def get_source_pars(src):
"""Extract the parameters associated with a pyLikelihood Source object.
"""
fnmap = src.getSrcFuncs()
keys = fnmap.keys()
if 'Position' in keys:
ppars = get_function_pars(src.getSrcFuncs()[str('Position')])
elif 'SpatialDist' in keys:
ppars = get_function_pars(src.getSrcFuncs()[str('SpatialDist')])
else:
raise Exception('Failed to extract spatial parameters.')
fn = src.getSrcFuncs()[str('Spectrum')]
spars = get_function_pars(fn)
for i, p in enumerate(ppars):
ppars[i]['is_norm'] = False
for i, p in enumerate(spars):
if fn.normPar().getName() == p['name']:
spars[i]['is_norm'] = True
else:
spars[i]['is_norm'] = False
return spars, ppars
def savefreestate(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
free_params = self.get_free_param_vector()
o = func(self, *args, **kwargs)
self.set_free_param_vector(free_params)
return o
return wrapper
def savestate(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
saved_state = LikelihoodState(self.like)
o = func(self, *args, **kwargs)
saved_state.restore()
return o
return wrapper
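# Intended use (illustrative): both decorators wrap methods of an analysis
# object that exposes `self.like`, `get_free_param_vector` and
# `set_free_param_vector`.  savefreestate restores the free/fixed state of the
# parameters after the call, savestate restores the full LikelihoodState, e.g.
#
#   class SomeAnalysis(object):          # hypothetical example class
#       @savestate
#       @savefreestate
#       def _fit_something(self, *args, **kwargs):
#           ...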
class FreeParameterState(object):
def __init__(self, gta):
self._gta = gta
self._free = gta.get_free_param_vector()
def restore(self):
self._gta.set_free_param_vector(self._free)
class SourceMapState(object):
def __init__(self, like, names):
self._srcmaps = {}
self._like = like
for name in names:
self._srcmaps[name] = []
for c in self._like.components:
self._srcmaps[name] += [c.logLike.sourceMap(str(name)).model()]
def restore(self):
for name in self._srcmaps.keys():
for i, c in enumerate(self._like.components):
c.logLike.setSourceMapImage(str(name),
self._srcmaps[name][i])
class SummedLikelihood(SummedLikelihood.SummedLikelihood):
def nFreeParams(self):
"""Count the number of free parameters in the active model."""
nF = 0
pars = self.params()
for par in pars:
if par.isFree():
nF += 1
return nF
def optimize(self, verbosity=3, tol=None, optimizer=None, optObject=None):
self._syncParams()
if optimizer is None:
optimizer = self.optimizer
if tol is None:
tol = self.tol
if optObject is None:
optFactory = pyLike.OptimizerFactory_instance()
myOpt = optFactory.create(optimizer, self.logLike)
else:
myOpt = optObject
myOpt.find_min_only(verbosity, tol, self.tolType)
self.saveBestFit()
def Ts2(self, srcName, reoptimize=False, approx=True,
tol=None, MaxIterations=10, verbosity=0):
srcName = str(srcName)
if verbosity > 0:
print("*** Start Ts_dl ***")
source_attributes = self.components[0].getExtraSourceAttributes()
self.syncSrcParams()
freeParams = pyLike.DoubleVector()
self.components[0].logLike.getFreeParamValues(freeParams)
logLike1 = -self()
for comp in self.components:
comp.scaleSource(srcName, 1E-10)
comp._ts_src = comp.logLike.getSource(srcName)
free_flag = comp._ts_src.spectrum().normPar().isFree()
if reoptimize:
comp._ts_src.spectrum().normPar().setFree(False)
self.syncSrcParams()
logLike0 = -self()
if tol is None:
tol = self.tol
if reoptimize:
if verbosity > 0:
print("** Do reoptimize")
optFactory = pyLike.OptimizerFactory_instance()
myOpt = optFactory.create(self.optimizer, self.composite)
Niter = 1
while Niter <= MaxIterations:
try:
myOpt.find_min(0, tol)
break
except RuntimeError as e:
print(e)
if verbosity > 0:
print("** Iteration :", Niter)
Niter += 1
else:
if approx:
try:
self._renorm()
except ZeroDivisionError:
pass
self.syncSrcParams()
logLike0 = max(-self(), logLike0)
Ts_value = 2 * (logLike1 - logLike0)
for comp in self.components:
comp.scaleSource(srcName, 1E10)
if reoptimize:
comp._ts_src.spectrum().normPar().setFree(free_flag)
self.syncSrcParams(srcName)
comp.logLike.setFreeParamValues(freeParams)
comp.model = SourceModel(comp.logLike)
for src in source_attributes:
comp.model[src].__dict__.update(source_attributes[src])
self.model = self.components[0].model
return Ts_value
def _renorm(self, factor=None):
if factor is None:
freeNpred, totalNpred = self._npredValues()
deficit = self.total_nobs() - totalNpred
self.renormFactor = 1. + deficit / freeNpred
else:
self.renormFactor = factor
if self.renormFactor < 1:
self.renormFactor = 1
srcNames = self.sourceNames()
for src in srcNames:
if src == self.components[0]._ts_src.getName():
continue
parameter = self.normPar(src)
if (parameter.isFree() and
self.components[0]._isDiffuseOrNearby(src)):
oldValue = parameter.getValue()
newValue = oldValue * self.renormFactor
# ensure new value is within parameter bounds
xmin, xmax = parameter.getBounds()
if xmin <= newValue and newValue <= xmax:
parameter.setValue(newValue)
class BinnedAnalysis(BinnedAnalysis.BinnedAnalysis):
def __init__(self, binnedData, srcModel=None, optimizer='Drmngb',
use_bl2=False, verbosity=0, psfcorr=True, convolve=True,
resample=True, resamp_fact=2, minbinsz=0.1, wmap=None):
AnalysisBase.__init__(self)
if srcModel is None:
srcModel, optimizer = self._srcDialog()
self.binnedData = binnedData
self.srcModel = srcModel
self.optimizer = optimizer
if use_bl2:
self.logLike = pyLike.BinnedLikelihood2(binnedData.countsMap,
binnedData.observation,
binnedData.srcMaps,
True, psfcorr, convolve,
resample,
resamp_fact,
minbinsz)
else:
if wmap is None or wmap == "none":
self.logLike = pyLike.BinnedLikelihood(binnedData.countsMap,
binnedData.observation,
binnedData.srcMaps,
True, psfcorr, convolve,
resample,
resamp_fact,
minbinsz)
self._wmap = None
else:
self._wmap = pyLike.WcsMapLibrary.instance().wcsmap(wmap, "")
self._wmap.setInterpolation(False)
self._wmap.setExtrapolation(True)
self.logLike = pyLike.BinnedLikelihood(binnedData.countsMap,
self._wmap,
binnedData.observation,
binnedData.srcMaps,
True, psfcorr, convolve,
resample,
resamp_fact,
minbinsz)
self.verbosity = verbosity
self.logLike.initOutputStreams()
self.logLike.readXml(srcModel, _funcFactory, False, True, False)
self.model = SourceModel(self.logLike, srcModel)
self.energies = np.array(self.logLike.energies())
self.e_vals = np.sqrt(self.energies[:-1] * self.energies[1:])
self.nobs = self.logLike.countsSpectrum()
self.sourceFitPlots = []
self.sourceFitResids = []
def scaleSource(self, srcName, scale):
src = self.logLike.getSource(srcName)
old_scale = src.spectrum().normPar().getScale()
src.spectrum().normPar().setScale(old_scale * scale)
self.logLike.syncParams()
def Ts2(self, srcName, reoptimize=False, approx=True,
tol=None, MaxIterations=10, verbosity=0):
"""Computes the TS value for a source indicated by "srcName."
If "reoptimize=True" is selected this function will reoptimize
the model up to "MaxIterations" given the tolerance "tol"
        (default is the tolerance selected for the overall fit). If
        "approx=True" is selected (the default) it will renormalize the
model (see _renorm).
"""
saved_state = LikelihoodState(self)
if verbosity > 0:
print("*** Start Ts_dl ***")
source_attributes = self.getExtraSourceAttributes()
self.logLike.syncParams()
src = self.logLike.getSource(srcName)
self._ts_src = src
freeParams = pyLike.DoubleVector()
self.logLike.getFreeParamValues(freeParams)
logLike1 = self.logLike.value()
self.scaleSource(srcName, 1E-10)
logLike0 = self.logLike.value()
if tol is None:
tol = self.tol
if reoptimize:
if verbosity > 0:
print("** Do reoptimize")
optFactory = pyLike.OptimizerFactory_instance()
myOpt = optFactory.create(self.optimizer, self.logLike)
Niter = 1
while Niter <= MaxIterations:
try:
myOpt.find_min(0, tol)
break
except RuntimeError as e:
print(e)
if verbosity > 0:
print("** Iteration :", Niter)
Niter += 1
else:
if approx:
try:
self._renorm()
except ZeroDivisionError:
pass
self.logLike.syncParams()
logLike0 = max(self.logLike.value(), logLike0)
Ts_value = 2 * (logLike1 - logLike0)
self.scaleSource(srcName, 1E10)
self.logLike.setFreeParamValues(freeParams)
self.model = SourceModel(self.logLike)
for src in source_attributes:
self.model[src].__dict__.update(source_attributes[src])
saved_state.restore()
self.logLike.value()
return Ts_value
def _isDiffuseOrNearby(self, srcName):
if (self[srcName].src.getType() in ['Diffuse','Composite'] or
self._ts_src.getType() in ['Diffuse','Composite']):
return True
elif self._separation(self._ts_src, self[srcName].src) < self.maxdist:
return True
return False
```
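The `savefreestate` decorator defined above captures the free-parameter vector before a method runs and restores it afterwards. Below is a hedged sketch of that pattern on a hypothetical stand-in object (`DummyAnalysis` is an assumption; the real target is a fermipy analysis object, which needs the Fermi ScienceTools); it is only meant to run in the same module where `savefreestate` is defined.
```python
from functools import wraps  # also required by the decorators defined above


class DummyAnalysis(object):
    """Hypothetical stand-in exposing only the two methods savefreestate uses."""

    def __init__(self):
        self._free = [1.0, 2.0]

    def get_free_param_vector(self):
        return list(self._free)

    def set_free_param_vector(self, v):
        self._free = list(v)

    @savefreestate
    def perturb(self):
        # temporarily zero the free parameters; the decorator restores them
        self._free = [0.0, 0.0]
        return sum(self._free)


a = DummyAnalysis()
print(a.perturb())                 # 0.0 while perturbed
print(a.get_free_param_vector())   # [1.0, 2.0] -- original state restored
```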
#### File: fermipy/fermipy/model_utils.py
```python
from __future__ import absolute_import, division, print_function
import os
import copy
import yaml
import numpy as np
from fermipy import utils
def get_function_par_names(name):
"""Get the list of parameters associated with a function.
Parameters
----------
name : str
Name of the function.
"""
fn_spec = get_function_spec(name)
return copy.deepcopy(fn_spec['par_names'])
def get_function_norm_par_name(name):
"""Get the normalization parameter associated with a function.
Parameters
----------
name : str
Name of the function.
"""
fn_spec = get_function_spec(name)
return fn_spec['norm_par']
def get_function_defaults(name):
fn_spec = get_function_spec(name)
return copy.deepcopy(fn_spec['defaults'])
def get_function_spec(name):
"""Return a dictionary with the specification of a function:
parameter names and defaults (value, bounds, scale, etc.).
Returns
-------
par_names : list
List of parameter names for this function.
norm_par : str
Name of normalization parameter.
default : dict
Parameter defaults dictionary.
"""
if not hasattr(get_function_spec, 'fndict'):
modelfile = os.path.join('$FERMIPY_ROOT',
'data', 'models.yaml')
modelfile = os.path.expandvars(modelfile)
        get_function_spec.fndict = yaml.safe_load(open(modelfile))
    if name not in get_function_spec.fndict:
raise Exception('Invalid Function Name: %s' % name)
return get_function_spec.fndict[name]
def get_source_type(spatial_type):
"""Translate a spatial type string to a source type."""
if spatial_type == 'SkyDirFunction':
return 'PointSource'
else:
return 'DiffuseSource'
def get_spatial_type(spatial_model):
"""Translate a spatial model string to a spatial type."""
if spatial_model in ['SkyDirFunction', 'PointSource',
'Gaussian']:
return 'SkyDirFunction'
elif spatial_model in ['SpatialMap']:
return 'SpatialMap'
elif spatial_model in ['RadialGaussian', 'RadialDisk']:
try:
import pyLikelihood
if hasattr(pyLikelihood, 'RadialGaussian'):
return spatial_model
else:
return 'SpatialMap'
except Exception:
return spatial_model
else:
return spatial_model
def extract_pars_from_dict(name, src_dict):
par_names = get_function_par_names(name)
o = {}
for k in par_names:
o[k] = {}
if not k in src_dict:
continue
v = src_dict.pop(k)
if isinstance(v, dict):
o[k] = v.copy()
else:
o[k] = {'name': k, 'value': v}
return o
def create_pars_from_dict(name, pars_dict, rescale=True, update_bounds=False):
"""Create a dictionary for the parameters of a function.
Parameters
----------
name : str
Name of the function.
pars_dict : dict
Existing parameter dict that will be merged with the
default dictionary created by this method.
rescale : bool
Rescale parameter values.
"""
o = get_function_defaults(name)
pars_dict = pars_dict.copy()
for k in o.keys():
if not k in pars_dict:
continue
v = pars_dict[k]
if not isinstance(v, dict):
v = {'name': k, 'value': v}
o[k].update(v)
kw = dict(update_bounds=update_bounds,
rescale=rescale)
if 'min' in v or 'max' in v:
kw['update_bounds'] = False
if 'scale' in v:
kw['rescale'] = False
o[k] = make_parameter_dict(o[k], **kw)
return o
def make_parameter_dict(pdict, fixed_par=False, rescale=True,
update_bounds=False):
"""
Update a parameter dictionary. This function will automatically
set the parameter scale and bounds if they are not defined.
Bounds are also adjusted to ensure that they encompass the
parameter value.
"""
o = copy.deepcopy(pdict)
o.setdefault('scale', 1.0)
if rescale:
value, scale = utils.scale_parameter(o['value'] * o['scale'])
o['value'] = np.abs(value) * np.sign(o['value'])
o['scale'] = np.abs(scale) * np.sign(o['scale'])
if 'error' in o:
o['error'] /= np.abs(scale)
if update_bounds:
o['min'] = o['value'] * 1E-3
o['max'] = o['value'] * 1E3
if fixed_par:
o['min'] = o['value']
o['max'] = o['value']
if float(o['min']) > float(o['value']):
o['min'] = o['value']
if float(o['max']) < float(o['value']):
o['max'] = o['value']
return o
def cast_pars_dict(pars_dict):
"""Cast the bool and float elements of a parameters dict to
the appropriate python types.
"""
o = {}
for pname, pdict in pars_dict.items():
o[pname] = {}
for k, v in pdict.items():
if k == 'free':
o[pname][k] = bool(int(v))
elif k == 'name':
o[pname][k] = v
else:
o[pname][k] = float(v)
return o
def pars_dict_to_vectors(function_name, pars_dict):
o = {'param_names': np.zeros(10, dtype='S32'),
'param_values': np.empty(10, dtype=float) * np.nan,
'param_errors': np.empty(10, dtype=float) * np.nan,
}
par_names = get_function_par_names(function_name)
for i, p in enumerate(par_names):
value = pars_dict[p]['value'] * pars_dict[p]['scale']
scale = pars_dict[p]['error'] * pars_dict[p]['scale']
o['param_names'][i] = p
o['param_values'][i] = value
o['param_errors'][i] = scale
return o
```
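As a small illustration of `make_parameter_dict` above, here is a hypothetical prefactor-style parameter (the numbers are invented); running it assumes fermipy is importable, since the module relies on `fermipy.utils.scale_parameter`.
```python
pdict = {'name': 'Prefactor', 'value': 2.3e-13, 'scale': 1.0, 'free': True}
out = make_parameter_dict(pdict, update_bounds=True)
# the value is rescaled into a convenient range and bounds are filled in
print(out['value'], out['scale'], out['min'], out['max'])
```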
|
{
"source": "jefequien/PSPNet-Keras",
"score": 3
}
|
#### File: src/disc/disc.py
```python
from __future__ import print_function
from __future__ import division
from os.path import splitext, join, isfile
from os import environ
from math import ceil
import argparse
import numpy as np
from scipy import misc, ndimage
from keras.applications.resnet50 import ResNet50
from keras.layers import Input, Dense, Flatten
from keras.optimizers import SGD
from keras import backend as K
from keras.models import Model, load_model
import tensorflow as tf
from utils import image_utils
from utils.data import open_file
class Discriminator(object):
"""Discriminator for classes"""
    def __init__(self, lr=1e-4, checkpoint=None):
        """Instantiate a ResNet discriminator."""
        print("checkpoint %s" % checkpoint)
if checkpoint is None:
print("Building Resnet discriminator")
self.model = self.build_model(lr)
else:
print("Loading from checkpoint %s" % checkpoint)
self.model = load_model(checkpoint)
self.input_shape = (473,473)
def build_model(self, lr):
inp = Input((473,473,4))
resnet = ResNet50(input_tensor=inp, weights=None, include_top=False)
x = Flatten()(resnet.outputs[0])
output = Dense(1, activation='sigmoid')(x)
model = Model(inputs=inp, outputs=output)
sgd = SGD(lr=lr, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd,
loss="binary_crossentropy",
metrics=['accuracy'])
return model
def predict(self, img, prediction, category):
"""
Predict segmentation for an image.
Arguments:
img: must be rowsxcolsx3
prediction: must be rowsxcolsxN-1
category: must be 1 ... N
"""
img_resized = misc.imresize(img, self.input_shape)
img_preprocessed = image_utils.preprocess_image(img_resized)
input_data = prepare_disc_data(img_preprocessed, prediction, category)
input_data = input_data[np.newaxis, :, :, :] # Append sample dimension for keras
prediction = self.model.predict(input_data)[0]
return prediction
def prepare_disc_data(img, prediction, category):
s = prediction[category-1]
s = image_utils.scale(s, img.shape[:2])
s = s > 0.5
s = s * 255
data = np.concatenate((img, s[:,:,np.newaxis]), axis=2)
return data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input_path', type=str, default='example_images/ade20k.jpg',
                        help='Path to the input image')
    parser.add_argument('-p', '--prediction', type=str, default='example_results/ade20k.h5',
                        help='Path to the prediction file')
    parser.add_argument('-c', '--category', type=int, default=1,
                        help='Category index (1-based) to evaluate')
parser.add_argument('--id', default="0")
args = parser.parse_args()
environ["CUDA_VISIBLE_DEVICES"] = args.id
sess = tf.Session()
K.set_session(sess)
with sess.as_default():
img = misc.imread(args.input_path)
prediction = open_file(args.prediction, ftype="ap")
print(args)
disc = Discriminator()
prob = disc.predict(img, prediction, args.category)
print(prob)
```
#### File: src/pspnet/layers_builder.py
```python
from __future__ import print_function
from math import ceil
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers import BatchNormalization, Activation, Input, Dropout, ZeroPadding2D, Lambda
from keras.layers.merge import Concatenate, Add
from keras.models import Model
from keras.optimizers import SGD
learning_rate = 1e-4 # Layer specific learning rate
# Weight decay not implemented
def BN(name=""):
return BatchNormalization(momentum=0.95, name=name, epsilon=1e-5)
def Interp(x, shape):
from keras.backend import tf as ktf
new_height, new_width = shape
resized = ktf.image.resize_images(x, [new_height, new_width],
align_corners=True)
return resized
def residual_conv(prev, level, pad=1, lvl=1, sub_lvl=1, modify_stride=False):
lvl = str(lvl)
sub_lvl = str(sub_lvl)
names = ["conv"+lvl+"_" + sub_lvl + "_1x1_reduce",
"conv"+lvl+"_" + sub_lvl + "_1x1_reduce_bn",
"conv"+lvl+"_" + sub_lvl + "_3x3",
"conv"+lvl+"_" + sub_lvl + "_3x3_bn",
"conv"+lvl+"_" + sub_lvl + "_1x1_increase",
"conv"+lvl+"_" + sub_lvl + "_1x1_increase_bn"]
if modify_stride is False:
prev = Conv2D(64 * level, (1, 1), strides=(1, 1), name=names[0],
use_bias=False)(prev)
elif modify_stride is True:
prev = Conv2D(64 * level, (1, 1), strides=(2, 2), name=names[0],
use_bias=False)(prev)
prev = BN(name=names[1])(prev)
prev = Activation('relu')(prev)
prev = ZeroPadding2D(padding=(pad, pad))(prev)
prev = Conv2D(64 * level, (3, 3), strides=(1, 1), dilation_rate=pad,
name=names[2], use_bias=False)(prev)
prev = BN(name=names[3])(prev)
prev = Activation('relu')(prev)
prev = Conv2D(256 * level, (1, 1), strides=(1, 1), name=names[4],
use_bias=False)(prev)
prev = BN(name=names[5])(prev)
return prev
def short_convolution_branch(prev, level, lvl=1, sub_lvl=1, modify_stride=False):
lvl = str(lvl)
sub_lvl = str(sub_lvl)
names = ["conv" + lvl+"_" + sub_lvl + "_1x1_proj",
"conv" + lvl+"_" + sub_lvl + "_1x1_proj_bn"]
if modify_stride is False:
prev = Conv2D(256 * level, (1, 1), strides=(1, 1), name=names[0],
use_bias=False)(prev)
elif modify_stride is True:
prev = Conv2D(256 * level, (1, 1), strides=(2, 2), name=names[0],
use_bias=False)(prev)
prev = BN(name=names[1])(prev)
return prev
def empty_branch(prev):
return prev
def residual_short(prev_layer, level, pad=1, lvl=1, sub_lvl=1, modify_stride=False):
prev_layer = Activation('relu')(prev_layer)
block_1 = residual_conv(prev_layer, level,
pad=pad, lvl=lvl, sub_lvl=sub_lvl,
modify_stride=modify_stride)
block_2 = short_convolution_branch(prev_layer, level,
lvl=lvl, sub_lvl=sub_lvl,
modify_stride=modify_stride)
added = Add()([block_1, block_2])
return added
def residual_empty(prev_layer, level, pad=1, lvl=1, sub_lvl=1):
prev_layer = Activation('relu')(prev_layer)
block_1 = residual_conv(prev_layer, level, pad=pad,
lvl=lvl, sub_lvl=sub_lvl)
block_2 = empty_branch(prev_layer)
added = Add()([block_1, block_2])
return added
def ResNet(inp, layers):
# Names for the first couple layers of model
names = ["conv1_1_3x3_s2",
"conv1_1_3x3_s2_bn",
"conv1_2_3x3",
"conv1_2_3x3_bn",
"conv1_3_3x3",
"conv1_3_3x3_bn"]
# Short branch(only start of network)
cnv1 = Conv2D(64, (3, 3), strides=(2, 2), padding='same', name=names[0],
use_bias=False)(inp) # "conv1_1_3x3_s2"
bn1 = BN(name=names[1])(cnv1) # "conv1_1_3x3_s2/bn"
relu1 = Activation('relu')(bn1) # "conv1_1_3x3_s2/relu"
cnv1 = Conv2D(64, (3, 3), strides=(1, 1), padding='same', name=names[2],
use_bias=False)(relu1) # "conv1_2_3x3"
bn1 = BN(name=names[3])(cnv1) # "conv1_2_3x3/bn"
relu1 = Activation('relu')(bn1) # "conv1_2_3x3/relu"
cnv1 = Conv2D(128, (3, 3), strides=(1, 1), padding='same', name=names[4],
use_bias=False)(relu1) # "conv1_3_3x3"
bn1 = BN(name=names[5])(cnv1) # "conv1_3_3x3/bn"
relu1 = Activation('relu')(bn1) # "conv1_3_3x3/relu"
res = MaxPooling2D(pool_size=(3, 3), padding='same',
strides=(2, 2))(relu1) # "pool1_3x3_s2"
# ---Residual layers(body of network)
"""
Modify_stride --Used only once in first 3_1 convolutions block.
changes stride of first convolution from 1 -> 2
"""
# 2_1- 2_3
res = residual_short(res, 1, pad=1, lvl=2, sub_lvl=1)
for i in range(2):
res = residual_empty(res, 1, pad=1, lvl=2, sub_lvl=i+2)
# 3_1 - 3_3
res = residual_short(res, 2, pad=1, lvl=3, sub_lvl=1, modify_stride=True)
for i in range(3):
res = residual_empty(res, 2, pad=1, lvl=3, sub_lvl=i+2)
    if layers == 50:
# 4_1 - 4_6
res = residual_short(res, 4, pad=2, lvl=4, sub_lvl=1)
for i in range(5):
res = residual_empty(res, 4, pad=2, lvl=4, sub_lvl=i+2)
    elif layers == 101:
# 4_1 - 4_23
res = residual_short(res, 4, pad=2, lvl=4, sub_lvl=1)
for i in range(22):
res = residual_empty(res, 4, pad=2, lvl=4, sub_lvl=i+2)
else:
print("This ResNet is not implemented")
# 5_1 - 5_3
res = residual_short(res, 8, pad=4, lvl=5, sub_lvl=1)
for i in range(2):
res = residual_empty(res, 8, pad=4, lvl=5, sub_lvl=i+2)
res = Activation('relu')(res)
return res
def interp_block(prev_layer, level, feature_map_shape, str_lvl=1, ):
str_lvl = str(str_lvl)
names = [
"conv5_3_pool"+str_lvl+"_conv",
"conv5_3_pool"+str_lvl+"_conv_bn"
]
kernel = (10*level, 10*level)
strides = (10*level, 10*level)
prev_layer = AveragePooling2D(kernel, strides=strides)(prev_layer)
prev_layer = Conv2D(512, (1, 1), strides=(1, 1), name=names[0],
use_bias=False)(prev_layer)
prev_layer = BN(name=names[1])(prev_layer)
prev_layer = Activation('relu')(prev_layer)
prev_layer = Lambda(Interp, arguments={'shape': feature_map_shape})(prev_layer)
return prev_layer
def build_pyramid_pooling_module(res, input_shape):
"""Build the Pyramid Pooling Module."""
# ---PSPNet concat layers with Interpolation
feature_map_size = tuple(int(ceil(input_dim / 8.0)) for input_dim in input_shape)
print("PSP module will interpolate to a final feature map size of %s" % (feature_map_size, ))
interp_block1 = interp_block(res, 6, feature_map_size, str_lvl=1)
interp_block2 = interp_block(res, 3, feature_map_size, str_lvl=2)
interp_block3 = interp_block(res, 2, feature_map_size, str_lvl=3)
interp_block6 = interp_block(res, 1, feature_map_size, str_lvl=6)
# concat all these layers. resulted shape=(1,feature_map_size_x,feature_map_size_y,4096)
res = Concatenate()([res,
interp_block6,
interp_block3,
interp_block2,
interp_block1])
return res
def build_pspnet(nb_classes, resnet_layers, input_shape, activation='softmax'):
"""Build PSPNet."""
print("Building a PSPNet based on ResNet %i expecting inputs of shape %s predicting %i classes with activation %s" % (resnet_layers, input_shape, nb_classes, activation))
inp = Input((input_shape[0], input_shape[1], 3))
res = ResNet(inp, layers=resnet_layers)
psp = build_pyramid_pooling_module(res, input_shape)
x = Conv2D(512, (3, 3), strides=(1, 1), padding="same", name="conv5_4",
use_bias=False)(psp)
x = BN(name="conv5_4_bn")(x)
x = Activation('relu')(x)
x = Dropout(0.1)(x)
x = Conv2D(nb_classes, (1, 1), strides=(1, 1), name="conv6")(x)
x = Lambda(Interp, arguments={'shape': (input_shape[0], input_shape[1])})(x)
x = Activation(activation)(x)
model = Model(inputs=inp, outputs=x)
# Solver
loss = ""
if activation == 'softmax':
loss = 'categorical_crossentropy'
elif activation == 'sigmoid':
loss = 'binary_crossentropy'
sgd = SGD(lr=learning_rate, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd,
loss=loss,
metrics=['accuracy'])
return model
```
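A minimal sketch of building a model with `build_pspnet` above for the common ADE20K configuration (150 classes, ResNet-50 trunk, 473x473 inputs); it assumes the Keras/TensorFlow versions this repository targets, since `Interp` imports `tf` through `keras.backend`.
```python
model = build_pspnet(nb_classes=150, resnet_layers=50,
                     input_shape=(473, 473), activation='softmax')
model.summary()  # prints the full layer graph
```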
#### File: src/pspnet/pspnet_utils.py
```python
import numpy as np
from keras.models import Model
# For printing the activations in each layer
# Useful for debugging
def debug(model, data):
names = [layer.name for layer in model.layers]
for name in names[:]:
print_activation(model, name, data)
def print_activation(model, layer_name, data):
intermediate_layer_model = Model(inputs=model.input,
outputs=model.get_layer(layer_name).output)
io = intermediate_layer_model.predict(data)
print(layer_name, array_to_str(io))
def array_to_str(a):
return "{} {} {} {} {}".format(a.dtype, a.shape, np.min(a),
np.max(a), np.mean(a))
```
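A toy illustration of `print_activation` above; the small stand-in convolutional model below is an assumption used only so the snippet runs without a full PSPNet.
```python
import numpy as np
from keras.layers import Input, Conv2D
from keras.models import Model

inp = Input((32, 32, 3))
out = Conv2D(4, (3, 3), padding='same', name='probe')(inp)
toy = Model(inputs=inp, outputs=out)
# prints dtype, shape, min, max and mean of the 'probe' activation
print_activation(toy, 'probe', np.zeros((1, 32, 32, 3), dtype=np.float32))
```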
#### File: src/vis/vis_project.py
```python
import argparse
import os
import random
import uuid
import time
import numpy as np
import pandas as pd
import utils
from utils.data import DataSource
from utils.evaluator import Evaluator
from vis_image import ImageVisualizer
TMP_DIR = "tmp/"
IMAGES_DIR = "tmp/images/"
if not os.path.exists(IMAGES_DIR):
os.makedirs(IMAGES_DIR)
class ProjectVisualizer:
def __init__(self, project, datasource, MAX=100, evaluator=None):
self.project = project
self.image_visualizer = ImageVisualizer(project, datasource)
self.evaluator = evaluator
self.MAX = MAX
fname = "{}_{}.html".format(project, int(time.time()))
self.output_path = os.path.join(TMP_DIR, fname)
self.init_output_file(datasource)
def init_output_file(self, datasource):
head = str(datasource.config)
body = ""
html = "<html><head>" + head + "</head><body>" + body + "</body></html>"
with open(self.output_path, 'w') as f:
f.write(html)
# Print link to output file
root = "/data/vision/oliva/scenedataset/"
abs_path = os.path.abspath(self.output_path)
rel_path = os.path.relpath(abs_path, root)
        print("http://places.csail.mit.edu/{}".format(rel_path))
def visualize_images(self, im_list, category=None):
for n, line in enumerate(im_list[:self.MAX]):
            print(n, line)
self.add_image_section(line, category=category)
def add_image_section(self, line, category=None):
im = line.split()[0]
image_tags = []
paths = self.image_visualizer.visualize(im)
order = np.arange(0,150)
if category is None:
paths1 = self.image_visualizer.visualize_all_categories(im)
paths.update(paths1)
order = paths["order"]
del paths["order"]
else:
paths2 = self.image_visualizer.visualize_category(im, category)
paths.update(paths2)
for key in paths:
tag = self.get_image_tag(paths[key])
image_tags.append(tag)
# Results
results = self.evaluator.get_results(im)
# Build section
title = "{} {}".format(self.project, line)
img_section = ' '.join(image_tags)
results_section = self.build_results_section(results, order)
section = "<br><br>{}<br><br>{}<br>{}".format(title, img_section, results_section)
# Append to body
with open(self.output_path, 'r') as f:
html = f.read()
new_html = html.replace("</body>", "{}</body>".format(section))
with open(self.output_path, 'w') as f:
f.write(new_html)
def build_results_section(self, results, order):
keys = []
values = []
for key in results.keys():
keys.append(key)
values.append(results[key])
values = np.stack(values)
sorted_values = values[:,order]
df = pd.DataFrame(sorted_values, index=keys, columns=order+1)
html = df.to_html()
return html
def get_image_tag(self, path):
if os.path.isabs(path):
# Symlink into tmp image directory
path = self.symlink(path)
path = os.path.relpath(path, os.path.dirname(self.output_path))
return "<img src=\"{}\" height=\"256px\">".format(path)
def symlink(self, path):
fn = "{}.jpg".format(uuid.uuid4().hex)
dst = os.path.join(IMAGES_DIR, fn)
os.symlink(path, dst)
return dst
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--name', type=str, required=True, help="Name of run")
parser.add_argument('-p', '--project', type=str, required=True, help="Project name")
parser.add_argument("--prediction", type=str, required=True, help="")
parser.add_argument('-r', '--randomize', action='store_true', default=False, help="Randomize image list")
parser.add_argument('-i', '--im_list', type=str, help="Specific image list")
parser.add_argument('-N', '--number', type=int, default=10, help="Number of images")
    parser.add_argument('-s', '--start', type=int, default=0, help="Start index into the image list")
parser.add_argument('-c', '--category', type=int, help="Category")
args = parser.parse_args()
# Configuration
config = utils.get_config(args.project)
if args.prediction is not None:
config["pspnet_prediction"] = args.prediction
datasource = DataSource(config)
evaluator = Evaluator(args.name, args.project, datasource) # Evaluation results
vis = ProjectVisualizer(args.project, datasource, MAX=args.number, evaluator=evaluator)
# Image List
im_list = None
if args.im_list:
# Open specific image list
im_list = utils.open_im_list(args.im_list)
elif args.category:
im_list = evaluator.get_im_list_by_category(args.category)
else:
# Open default image list
im_list = utils.open_im_list(config["im_list"])
if args.randomize:
# Shuffle image list
random.seed(3)
random.shuffle(im_list)
im_list = im_list[args.start:]
vis.visualize_images(im_list, category=args.category)
```
|
{
"source": "jefernathan/Python",
"score": 4
}
|
#### File: jefernathan/Python/ex098.py
```python
from time import sleep
def contador(i, f, p):
print(f'Contagem de {i} até {f} de {p} em {p}')
if p < 0:
p *= -1
if p == 0:
p = 1
if i > f:
for x in range(i, f - p, -p):
print(x, end=' ')
sleep(0.25)
print()
else:
for y in range(i, f + p, p):
print(y, end=' ')
sleep(0.25)
print()
contador(1, 10, 1)
sleep(0.5)
contador(10, 0, 2)
sleep(0.5)
print('Agora você: ')
inicio = int(input('Início: '))
fim = int(input('Fim: '))
passo = int(input('Passo: '))
contador(inicio, fim, passo)
```
#### File: jefernathan/Python/ex101.py
```python
def voto(n):
from datetime import date
global idade
idade = date.today().year - n
if 18 <= idade < 70:
return 'OBRIGATÓRIO'
    elif 16 <= idade < 18 or idade >= 70:
return 'OPCIONAL'
else:
return 'NEGADO'
idade = int()
nascimento = int(input('Em que ano você nasceu? '))
voto = voto(nascimento)
print(f'Com {idade} anos o voto é {voto}')
```
#### File: jefernathan/Python/ex104.py
```python
def leiaint(leia):
num = ' '
while not num.isnumeric():
num = input(leia)
if not num.isnumeric():
print('\033[;31mERRO, TENTE NOVAMENTE\033[m')
    return int(num)
n = leiaint('Digite um número: ')
print(f'Você acabou de digitar o numero {n}')
```
#### File: Python/ex109/moeda.py
```python
def aumentar(n=0, p=0, formato=False):
"""
Somar porcentagem
:param n: número a ser somado
:param p: porcentagem a ser somada
    :param formato: (opcional) mostrar a moeda
:return: resultado
"""
n = float(n)
resultado = n + (n * p / 100)
return moeda(resultado) if formato else resultado
def dimimuir(n=0, p=0, formato=False):
"""
Subtrair porcentagem
    :param n: número a ser subtraído
    :param p: porcentagem a ser subtraída
    :param formato: (opcional) mostrar a moeda
:return: resultado final
"""
n = float(n)
resultado = n - (n * p / 100)
return moeda(resultado) if formato else resultado
def dobro(n=0, formato=False):
"""
Dobrar número
:param n: número a ser dobrado
    :param formato: (opcional) mostrar a moeda
:return: resultado
"""
n = float(n)
n += n
return moeda(n) if formato else n
def metade(n=0, formato=False):
"""
Dividir número pela metade
:param n: número a ser dividido
    :param formato: (opcional) mostrar a moeda
:return: resultado da divisão
"""
n = float(n)
n = n / 2
return moeda(n) if formato else n
def moeda(n=0, moeda='R$'):
"""
Moeda comercial
:param n: valor do dinheiro
:param moeda: Tipo de moeda
    :return: valor com a moeda
"""
n = float(n)
return f'{moeda}{n:.2f}'.replace('.', ',')
```
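A quick usage sketch of the helpers above, assuming the file is importable as `moeda` (the expected values follow directly from the arithmetic in each function).
```python
import moeda

preco = 50.0
print(moeda.aumentar(preco, 10, True))   # R$55,00
print(moeda.dimimuir(preco, 10, True))   # R$45,00
print(moeda.dobro(preco, True))          # R$100,00
print(moeda.metade(preco, True))         # R$25,00
```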
#### File: utilidadesCeV/dados/__init__.py
```python
def leiadinheiro(texto):
while True:
resultado = input(texto).strip().replace(',', '.')
if resultado.isalpha():
print(f'\033[;31mErro, \"{resultado}\" não é um número\033[m')
else:
return float(resultado)
```
#### File: ex115/opcoes/__init__.py
```python
def menu():
print(f'{"-"*30}\n{"MENU PRINCIPAL": ^30}\n{"-"*30}')
print('''\033[33m 1- \033[35mVer pessoas cadastradas
\033[33m 2- \033[35mCadastrar nova pessoa
\033[33m 3- \033[35mSair do sistema\033[m''')
print('-'*30)
while True:
try:
retorno = int(input('Escolha uma opção: '))
except ValueError:
print('\033[31mErro, o valor digitado não é um número\033[m')
else:
if retorno in (1, 2, 3):
return retorno
else:
print('\033[31mErro, escolha uma opção valida\033[m')
```
#### File: opcoes/opcao2/__init__.py
```python
def cadastrar_pessoa():
print(f'{"-"*30}\n{"NOVO CADASTRO": ^30}\n{"-"*30}')
pessoas = open("ex115/pessoas.txt", "a")
temp = list()
temp.append(str(input('Nome: ')).strip().capitalize())
temp.append(';')
while True:
idade = input('Idade: ').strip()
if idade.isnumeric():
temp.append(idade)
temp.append('\n')
break
else:
print('\033[31mERRO, insira sua idade\033[m')
    pessoas.writelines(temp)
    pessoas.close()
```
|
{
"source": "Jefersonalves/diario-oficial",
"score": 3
}
|
#### File: gazette/spiders/pr_sao_jose_pinhais.py
```python
import re
from datetime import date, datetime
from urllib.parse import urlencode
import dateparser
import w3lib.url
from scrapy import Request
from gazette.items import Gazette
from gazette.spiders.base import BaseGazetteSpider
class PrSaoJosePinhaisSpider(BaseGazetteSpider):
TERRITORY_ID = "4125506"
allowed_domains = ["diariooficial.sjp.pr.gov.br"]
name = "pr_sao_jose_pinhais"
BASE_URL = "http://diariooficial.sjp.pr.gov.br/"
GAZETTE_ELEMENT_CSS = ".container-publicacao .item-publicacao"
DATE_XPATH = './/div[contains(@class, "item-label") and text()="Publicado em"]/following-sibling::div[1]/text()'
LAST_PAGE_CSS = ".item-paginacao a:last-child::attr(href)"
def start_requests(self):
params = {"entidade": 12526, "pg": 1}
if hasattr(self, "start_date"):
params.update(
{
"dt_publicacao_de": self.start_date.strftime("%d/%m/%Y"),
"dt_publicacao_ate": date.today().strftime("%d/%m/%Y"),
}
)
yield Request(f"{self.BASE_URL}?{urlencode(params)}")
def parse(self, response):
for element in response.css(self.GAZETTE_ELEMENT_CSS):
url = element.css("a::attr(href)").extract_first()
date = dateparser.parse(
element.xpath(self.DATE_XPATH).extract_first(), languages=["pt"]
).date()
yield Gazette(
date=date,
file_urls=[url],
is_extra_edition=False,
territory_id=self.TERRITORY_ID,
power="executive",
scraped_at=datetime.utcnow(),
)
current_page = w3lib.url.url_query_parameter(response.url, "pg")
if (
not response.css(self.LAST_PAGE_CSS)
.extract_first()
.endswith("pg=" + current_page)
):
next_url = w3lib.url.add_or_replace_parameter(
response.url, "pg", str(int(current_page) + 1)
)
yield Request(next_url)
```
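A hedged sketch of driving the spider above programmatically instead of via `scrapy crawl`; it assumes the surrounding querido-diario project layout so that `gazette.spiders.pr_sao_jose_pinhais` and its base classes resolve.
```python
from scrapy.crawler import CrawlerProcess

from gazette.spiders.pr_sao_jose_pinhais import PrSaoJosePinhaisSpider

process = CrawlerProcess(settings={"LOG_LEVEL": "INFO"})
process.crawl(PrSaoJosePinhaisSpider)
process.start()  # blocks until the crawl finishes
```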
#### File: gazette/spiders/ro_porto_velho.py
```python
import datetime as dt
import json
from dateparser import parse
from dateutil.rrule import MONTHLY, rrule
from scrapy.http import Request
from scrapy.selector import Selector
from gazette.items import Gazette
from gazette.spiders.base import BaseGazetteSpider
class RoPortoVelho(BaseGazetteSpider):
TERRITORY_ID = "1100205"
BASE_URL = "https://www.portovelho.ro.gov.br/dom/datatablearquivosmes/"
AVAILABLE_FROM = dt.datetime(2007, 1, 1)
name = "ro_porto_velho"
allowed_domains = ["portovelho.ro.gov.br"]
def start_requests(self):
interval = rrule(MONTHLY, dtstart=self.AVAILABLE_FROM, until=dt.date.today())[
::-1
]
for date in interval:
yield Request(f"{self.BASE_URL}{date.year}/{date.month}")
def parse(self, response):
paragraphs = json.loads(response.body_as_unicode())["aaData"]
for paragraph, *_ in paragraphs:
selector = Selector(text=paragraph)
url = selector.css("p a ::attr(href)").extract_first()
text = selector.css("p strong ::text")
is_extra_edition = text.extract_first().startswith("Suplemento")
            date = text.re_first(r"\d{1,2} de \w+ de \d{4}")
date = parse(date, languages=["pt"]).date()
yield Gazette(
date=date,
file_urls=[url],
is_extra_edition=is_extra_edition,
power="executive_legislative",
)
```
|
{
"source": "Jefersonalves/fala-parlamentar",
"score": 3
}
|
#### File: fala-parlamentar/fala_parlamentar/fala_deputado.py
```python
import re
import requests
from datetime import datetime
import pandas as pd
from bs4 import BeautifulSoup
from unicodedata import normalize
from xml.etree import ElementTree
def remove_acentos(text):
"""
remove os acentos do texto
"""
return normalize('NFKD', text).encode('ASCII', 'ignore').decode('ASCII')
def limpa_url(url_text):
"""
remove caracteres indesejados nas urls
"""
url_text = re.sub('\t', '', url_text)
url_text = re.sub('\n', '', url_text)
url_text = re.sub('\r', '', url_text)
return url_text
def get_transcricao_discurso_camara(url, nomeParlamentar):
"""
obtém via webscraping a transcrição de um discurso do deputado dada a url e o nome parlamentar
"""
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
texto = None
titulo = None
try:
texto_element = soup.select('#content > p')[0]
texto = texto_element.text.strip()
data = re.search(r'[0-9]{2}/[0-9]{2}/[0-9]{4}', url).group(0)
titulo = 'Pronunciamento de {} em {}'.format(nomeParlamentar, data)
except:
pass
discurso = {'titulo': titulo, 'texto': texto}
return discurso
def get_discursos_deputado(nomeParlamentar, data_inicio, data_fim):
"""
    obtém os discursos de um deputado dado o nome parlamentar e retorna uma lista dos mesmos
Parâmetros
----------
nomeParlamentar : string
nome parlamentar do deputado
- Ex: "RODRIGO MAIA"
data_inicio : string
data de início no formato AAAA-MM-DD
- Ex: '2019-01-01'
data_fim : string
data fim no formato AAAA-MM-DD
- Ex: '2019-12-31'
Retornos
-------
list_discursos_deputado : list of dicts
Lista dos discursos do senador no intervalo de tempo fornecido
- Ex: [
{
...
}
]
"""
data_inicio_datetime = datetime.strptime(data_inicio, "%Y-%m-%d")
data_inicio_formated = data_inicio_datetime.strftime("%d-%m-%Y")
data_fim_datetime = datetime.strptime(data_fim, "%Y-%m-%d")
data_fim_formated = data_fim_datetime.strftime("%d-%m-%Y")
#obtenção das urls dos discursos via webscraping e busca por parlamentar
nomeOrador = re.sub(' ', '+', remove_acentos(nomeParlamentar)).lower()
url_busca_deputado = 'https://www.camara.leg.br/internet/sitaqweb/resultadoPesquisaDiscursos.asp?txOrador={}&txPartido=&txUF=&dtInicio={}&dtFim={}&txTexto=&txSumario=&basePesq=plenario&CampoOrdenacao=dtSessao&PageSize=10000&TipoOrdenacao=DESC&btnPesq=Pesquisar#'.format(nomeOrador, data_inicio_formated, data_fim_formated)
r = requests.get(url_busca_deputado)
#extração dos elementos que contém as urls dos discursos
soup = BeautifulSoup(r.text, 'html.parser')
disc_tags_even = soup.findAll(class_ = 'even')
disc_tags_odd = soup.findAll(class_ = 'odd')
disc_tags = disc_tags_even + disc_tags_odd
#extração das urls
link_tags = [tag.find('a') for tag in disc_tags]
urls = []
for tag in link_tags:
try:
urls.append(tag['href'])
except:
pass
#limpeza e adição de prefixo nas urls
url_prefix = 'https://www.camara.leg.br/internet/sitaqweb/'
links_discursos = [url_prefix+limpa_url(url) for url in urls]
list_discursos_deputado = []
for link in links_discursos:
discurso = get_transcricao_discurso_camara(link, nomeParlamentar)
list_discursos_deputado.append(
{
'urlTexto': requests.utils.requote_uri(link),
'transcricao': discurso['texto'],
'titulo': discurso['titulo']
}
)
return list_discursos_deputado
```
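A short usage sketch of `get_discursos_deputado` above; it performs live requests against camara.leg.br, so the number of speeches returned depends entirely on the chosen deputy and period.
```python
discursos = get_discursos_deputado("RODRIGO MAIA", "2019-02-01", "2019-02-28")
for d in discursos[:3]:
    print(d["titulo"])
    print(d["urlTexto"])
```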