max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
quasimodo/assertion_normalization/subject_removal_submodule.py | Aunsiels/CSK | 16 | 12789451 |
from quasimodo.data_structures.submodule_interface import SubmoduleInterface
from quasimodo.data_structures.subject import Subject
import logging
to_remove = ["it", "some", "one", "we", "a", "b", "c",
"d", "e", "f", "g", "h", "i", "j", "k", "l", "m",
"n", "o", "p", "q", "r", "s", "t", "u", "v", "w",
"x", "y", "z", "you", "he", "she", "they", "me",
"an", "someone", "mr", "ms", "anyone", "1", "2",
"3", "4", "5", "6", "7", "8", "9"]
class SubjectRemovalSubmodule(SubmoduleInterface):
def __init__(self, module_reference):
super().__init__()
self._module_reference = module_reference
self._name = "Subject Removal"
def process(self, input_interface):
logging.info("Start removing some subjects")
subjects = set(input_interface.get_subjects())
for subject in to_remove:
subject = Subject(subject)
if subject in subjects:
subjects.remove(subject)
subjects = list(subjects)
return input_interface.replace_subjects(subjects)
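# Illustrative behaviour (hypothetical data, not taken from the Quasimodo test suite):
# if the incoming interface holds Subject("elephant"), Subject("it") and Subject("x"),
# only Subject("elephant") survives, because "it" and the single letters are in to_remove.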
| 2.40625 | 2 |
Nodes/Statements/Iterative/LegacyFor.py | Mohsin-Ul-Islam/YAPL | 0 | 12789452 | class Node:
def __init__(self, initialization, condition, increment, body):
self.initialization = initialization
self.condition = condition
self.increment = increment
self.body = body
def visit(self, context):
rvalue = None
self.initialization.visit(context)
while self.condition.visit(context):
rvalue = self.body.visit(context)
self.increment.visit(context)
return rvalue
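# Illustrative mapping (assumed host-language syntax): this AST node models a C-style loop
#     for (i = 0; i < 3; i = i + 1) { body }
# visit() evaluates `initialization` once, re-checks `condition` before every pass,
# evaluates `increment` after each body pass, and returns the value produced by the last
# body visit (or None when the body never runs).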
| 3.09375 | 3 |
Lecture/Lecture06-Sorting/linearSearch.py | tonysulfaro/CSE-331 | 2 | 12789453 |
def linearSearch3(a,key):
for i in range(len(a)):
if(a[i]== key):
return i
return None
def recLinearSearch(alist, l, r, key):
if r<l:
return -1
if alist[l]==key:
return l
return recLinearSearch(alist,l+1,r,key)
# Recursive function to search x in arr[l..r]
def recSearch(arr, l, r, x):
if r < l:
return -1
if arr[l] == x:
return l
return recSearch(arr, l + 1, r, x)
def linearSearch2(a,v):
i=0
while i < len(a) and v !=a[i]:
i=i+1
if i>= len(a):
i=None
else:
return i
def linearSearch(a, value):
position = 0
found = False
while position < len(a) and not found:
if a[position] == value:
found = True
position += 1
return found
# def linearSearch(a, value):
# position = 0
# found = False
# while position < len(a) and #your code goes here
# #your code goes here
# #your code goes here
# #your code goes here
# #your code goes here
#
#alist = [2, 4, 93, 17, 77, 31, 44, 55, 20]
alist = [2, 4, 93, 17]
if not(linearSearch2(alist,100)==None):
print("Found it at index: ", linearSearch2(alist,17))
else:
print("Not found")
n=len(alist)
if linearSearch(alist,100):
print("Found")
else:
print("Not found")
print(recLinearSearch(alist,0,n-1,100))
blist = [54,26,93]
| 4.125 | 4 |
scrapybot/urls/da.py | roadt/scrapybot | 0 | 12789454 |
from . import mongodb
def art(**kwargs):
'''generate urls of art with s= (search text), u= (","-separated author_names), t= (","-separated tags) '''
with mongodb() as db:
cond = {}
_fill_cond(cond, kwargs)
urls = map(lambda e: e['url'], db.art.find(cond, {'url': 1}))
return { 'url': urls }
def art_to_down(**kwargs):
'''generate urls of art which not yet downloaded
with s= (search text), u= (","-separated author_names), t= (","-separated tags) '''
with mongodb() as db:
cond = { "file_url" : {'$eq' :None}}
cond = _fill_cond(cond, kwargs)
urls = map(lambda e: e['url'], db.art.find(cond, {'url': 1}))
return { 'url' : urls}
def _fill_cond(cond, kwargs):
if 's' in kwargs:
cond['$text'] = { '$search' : kwargs.get('s') }
if 'u' in kwargs:
cond['author_name'] = { '$in' : kwargs.get('u').split(',') }
if 't' in kwargs:
cond['tag_names'] = { '$in' : kwargs.get('t').split(',') }
return cond
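# Minimal usage sketch (hypothetical arguments; assumes the mongodb() helper imported above
# yields a database with an `art` collection):
#
#     result = art(s="dragon", u="alice,bob", t="fantasy")
#     # _fill_cond builds the query
#     #   {'$text': {'$search': 'dragon'},
#     #    'author_name': {'$in': ['alice', 'bob']},
#     #    'tag_names': {'$in': ['fantasy']}}
#     # and result['url'] is a lazy map over the matching documents' `url` fields.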
| 2.8125 | 3 |
t_time.py | ibramuppal/learning_tensorflow | 0 | 12789455 | import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
#hello_constant = tf.constant('Hello World!')
#with tf.Session() as sess:
# output = sess.run(hello_constant)
# print(output)
# this is how you create an input field in Tensorflow
# not the tf.string part but the tf.placeholder part
# the tf.string part is just the type of placeholder that this specific case is
x = tf.placeholder(tf.string)
y = tf.placeholder(tf.int32)
z = tf.placeholder(tf.float32)
with tf.Session() as sess:
#output = sess.run(x, feed_dict={x: 'Hello World' })
output = sess.run([x, y, z], feed_dict={x: 'Test String', y:123, z: 45.67})
print(output)
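# Note: tf.placeholder and tf.Session are TensorFlow 1.x APIs; under TensorFlow 2.x this
# script only works through the compatibility layer (tf.compat.v1.placeholder/Session
# after calling tf.compat.v1.disable_eager_execution()).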
| 3.0625 | 3 |
tests/test_lambda_assigning.py | GibranHL0/lime-lynter | 0 | 12789456 | """Test the lambda assigning visitor."""
import ast
import pytest
from lime_lynter.Violations.correctness import LambdaAssigningViolation
from lime_lynter.Visitors.Correctness.correctness import LambdaAssigningVisitor
lambda_assigning = """
f = lambda x: 2 * x
"""
@pytest.mark.parametrize('code', [lambda_assigning])
def test_lambda_assigning(
code,
):
"""
Test lambda assigning.
Args:
code: Sample code.
"""
tree = ast.parse(code)
visitor = LambdaAssigningVisitor()
visitor.run(tree)
for violation in visitor.violations:
    assert isinstance(violation, LambdaAssigningViolation)
assert len(visitor.violations) == 1
| 2.890625 | 3 |
utils/losses.py | BaekduChoi/DescreenGAN | 0 | 12789457 |
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__),'.'))
from torch import nn
import torch
from torch.nn import functional as F
from torch.nn.utils import spectral_norm as SN
import torchvision
def klvloss(mu,logvar) :
return torch.mean(-0.5*torch.sum(1+logvar-mu.pow(2)-logvar.exp()))
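# klvloss is the usual VAE regulariser: for a diagonal Gaussian q = N(mu, sigma^2) with
# logvar = log(sigma^2), KL(q || N(0, I)) = -0.5 * sum(1 + logvar - mu^2 - exp(logvar)),
# which is exactly the per-sample quantity summed and then averaged over the batch above.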
"""
Loss for LSGAN
"""
class LSGANLoss(object) :
def __init__(self,device) :
super().__init__()
self.loss = nn.MSELoss()
self.device = device
def get_label(self,prediction,is_real) :
if is_real :
return torch.ones_like(prediction)
else :
return torch.zeros_like(prediction)
def __call__(self,prediction,is_real) :
label = self.get_label(prediction,is_real)
label = label.to(self.device)
return self.loss(prediction,label)
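# Minimal usage sketch (illustrative tensors, not part of the training loop in this repo):
#
#     criterion = LSGANLoss(device=torch.device("cpu"))
#     pred = torch.randn(4, 1)              # discriminator scores for one batch
#     d_loss_real = criterion(pred, True)   # MSE against a tensor of ones
#     d_loss_fake = criterion(pred, False)  # MSE against a tensor of zeros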
"""
Loss for relativistic average LSGAN
"""
class RaLSGANLoss(object) :
def __init__(self,device) :
super().__init__()
self.loss = nn.MSELoss()
self.device = device
def __call__(self,real,fake) :
avg_real = torch.mean(real,dim=0,keepdim=True)
avg_fake = torch.mean(fake,dim=0,keepdim=True)
loss1 = self.loss(real-avg_fake,torch.ones_like(real).to(self.device))
loss2 = self.loss(fake-avg_real,-torch.ones_like(fake).to(self.device))
return loss1+loss2
"""
Loss for hingeGAN discriminator
"""
class HingeGANLossD(object) :
def __init__(self) :
super().__init__()
def __call__(self,prediction,is_real) :
if is_real :
loss = F.relu(1-prediction)
else :
loss = F.relu(1+prediction)
return loss.mean()
"""
Loss for hingeGAN generator
"""
class HingeGANLossG(object) :
def __init__(self) :
super().__init__()
def __call__(self,prediction) :
return -prediction.mean()
"""
Loss for relativistic average hingeGAN
"""
class RaHingeGANLoss(object) :
def __init__(self) :
super().__init__()
def __call__(self,real,fake) :
avg_real = torch.mean(real,dim=0,keepdim=True)
avg_fake = torch.mean(fake,dim=0,keepdim=True)
dxr = real - avg_fake
dxf = fake - avg_real
loss1 = F.relu(1-dxr)
loss2 = F.relu(1+dxf)
return (loss1+loss2).mean()
"""
Code adapted from https://gist.github.com/alper111/8233cdb0414b4cb5853f2f730ab95a49
"""
class PerceptualLoss(torch.nn.Module):
def __init__(self, resize=True, loss_type = 'mse', feature_layers=[3], before_act=False):
super().__init__()
assert loss_type in ['mse','l1']
self.loss = F.l1_loss if loss_type == 'l1' else F.mse_loss
# self.bl = (torchvision.models.vgg19(pretrained=True).features[:27].eval())
# for p in self.bl.parameters():
# p.requires_grad = False
blocks = []
if before_act :
blocks.append(torchvision.models.vgg19(pretrained=True).features[:3].eval())
blocks.append(torchvision.models.vgg19(pretrained=True).features[3:8].eval())
blocks.append(torchvision.models.vgg19(pretrained=True).features[8:17].eval())
blocks.append(torchvision.models.vgg19(pretrained=True).features[17:26].eval())
else :
blocks.append(torchvision.models.vgg19(pretrained=True).features[:4].eval())
blocks.append(torchvision.models.vgg19(pretrained=True).features[4:9].eval())
blocks.append(torchvision.models.vgg19(pretrained=True).features[9:18].eval())
blocks.append(torchvision.models.vgg19(pretrained=True).features[18:27].eval())
for bl in blocks :
for p in bl.parameters():
p.requires_grad = False
self.blocks = torch.nn.ModuleList(blocks)
self.feature_layers = feature_layers
self.transform = torch.nn.functional.interpolate
self.resize = resize
self.register_buffer("mean", torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
self.register_buffer("std", torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
def forward(self, input, target):
if input.shape[1] != 3:
input = input.repeat(1, 3, 1, 1)
target = target.repeat(1, 3, 1, 1)
input = (input-self.mean) / self.std
target = (target-self.mean) / self.std
if self.resize:
input = self.transform(input, mode='bilinear', size=(224, 224), align_corners=False)
target = self.transform(target, mode='bilinear', size=(224, 224), align_corners=False)
x = input
y = target
loss = 0.
for i,block in enumerate(self.blocks) :
x = block(x.clone())
y = block(y.clone())
if i in self.feature_layers :
loss += self.loss(x,y)
return loss | 2.28125 | 2 |
leiaapi/generated/api/annotation_api.py | labinnovationdocapost/leia-api-python-sdk | 0 | 12789458 |
# coding: utf-8
"""
LEIA RESTful API for AI
Leia API # noqa: E501
OpenAPI spec version: 1.0.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from leiaapi.generated.api_client import ApiClient
class AnnotationApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_annotation(self, body, token, annotation_type, document_id, **kwargs): # noqa: E501
"""Creates an annotation # noqa: E501
Creates an annotation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_annotation(body, token, annotation_type, document_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object body: The prediction that should be associated to document in this annotation, in free form json (required)
:param str token: The login token obtained via GET /login/{api_key} (required)
:param AnnotationTypes annotation_type: The type of the annotation (required)
:param str document_id: The id of the document to annotate (required)
:param str name: The name of the annotation (for information purposes only)
:param list[str] tags: The tags of the annotation
:return: Annotation
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_annotation_with_http_info(body, token, annotation_type, document_id, **kwargs) # noqa: E501
else:
(data) = self.create_annotation_with_http_info(body, token, annotation_type, document_id, **kwargs) # noqa: E501
return data
def create_annotation_with_http_info(self, body, token, annotation_type, document_id, **kwargs): # noqa: E501
"""Creates an annotation # noqa: E501
Creates an annotation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_annotation_with_http_info(body, token, annotation_type, document_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object body: The prediction that should be associated to document in this annotation, in free form json (required)
:param str token: The login token obtained via GET /login/{api_key} (required)
:param AnnotationTypes annotation_type: The type of the annotation (required)
:param str document_id: The id of the document to annotate (required)
:param str name: The name of the annotation (for information purposes only)
:param list[str] tags: The tags of the annotation
:return: Annotation
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'token', 'annotation_type', 'document_id', 'name', 'tags'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_annotation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_annotation`") # noqa: E501
# verify the required parameter 'token' is set
if ('token' not in params or
params['token'] is None):
raise ValueError("Missing the required parameter `token` when calling `create_annotation`") # noqa: E501
# verify the required parameter 'annotation_type' is set
if ('annotation_type' not in params or
params['annotation_type'] is None):
raise ValueError("Missing the required parameter `annotation_type` when calling `create_annotation`") # noqa: E501
# verify the required parameter 'document_id' is set
if ('document_id' not in params or
params['document_id'] is None):
raise ValueError("Missing the required parameter `document_id` when calling `create_annotation`") # noqa: E501
collection_formats = {}
path_params = {}
if 'document_id' in params:
path_params['document_id'] = params['document_id'] # noqa: E501
query_params = []
if 'annotation_type' in params:
query_params.append(('annotation_type', params['annotation_type'])) # noqa: E501
if 'name' in params:
query_params.append(('name', params['name'])) # noqa: E501
if 'tags' in params:
query_params.append(('tags', params['tags'])) # noqa: E501
collection_formats['tags'] = 'multi' # noqa: E501
header_params = {}
if 'token' in params:
header_params['token'] = params['token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/annotation/{document_id}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Annotation', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_annotation(self, token, annotation_id, **kwargs): # noqa: E501
"""Deletes an annotation # noqa: E501
Deletes an annotation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_annotation(token, annotation_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str token: The login token obtained via GET /login/{api_key} (required)
:param str annotation_id: The id of the annotation (for information purposes only) (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_annotation_with_http_info(token, annotation_id, **kwargs) # noqa: E501
else:
(data) = self.delete_annotation_with_http_info(token, annotation_id, **kwargs) # noqa: E501
return data
def delete_annotation_with_http_info(self, token, annotation_id, **kwargs): # noqa: E501
"""Deletes an annotation # noqa: E501
Deletes an annotation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_annotation_with_http_info(token, annotation_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str token: The login token obtained via GET /login/{api_key} (required)
:param str annotation_id: The id of the annotation (for information purposes only) (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['token', 'annotation_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_annotation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'token' is set
if ('token' not in params or
params['token'] is None):
raise ValueError("Missing the required parameter `token` when calling `delete_annotation`") # noqa: E501
# verify the required parameter 'annotation_id' is set
if ('annotation_id' not in params or
params['annotation_id'] is None):
raise ValueError("Missing the required parameter `annotation_id` when calling `delete_annotation`") # noqa: E501
collection_formats = {}
path_params = {}
if 'annotation_id' in params:
path_params['annotation_id'] = params['annotation_id'] # noqa: E501
query_params = []
header_params = {}
if 'token' in params:
header_params['token'] = params['token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/annotation/{annotation_id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_annotation(self, token, annotation_id, **kwargs): # noqa: E501
"""Retrieves an annotation # noqa: E501
Retrieves an annotation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_annotation(token, annotation_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str token: The login token obtained via GET /login/{api_key} (required)
:param str annotation_id: The id of the annotation (for information purposes only) (required)
:return: Annotation
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_annotation_with_http_info(token, annotation_id, **kwargs) # noqa: E501
else:
(data) = self.get_annotation_with_http_info(token, annotation_id, **kwargs) # noqa: E501
return data
def get_annotation_with_http_info(self, token, annotation_id, **kwargs): # noqa: E501
"""Retrieves an annotation # noqa: E501
Retrieves an annotation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_annotation_with_http_info(token, annotation_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str token: The login token obtained via GET /login/{api_key} (required)
:param str annotation_id: The id of the annotation (for information purposes only) (required)
:return: Annotation
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['token', 'annotation_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_annotation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'token' is set
if ('token' not in params or
params['token'] is None):
raise ValueError("Missing the required parameter `token` when calling `get_annotation`") # noqa: E501
# verify the required parameter 'annotation_id' is set
if ('annotation_id' not in params or
params['annotation_id'] is None):
raise ValueError("Missing the required parameter `annotation_id` when calling `get_annotation`") # noqa: E501
collection_formats = {}
path_params = {}
if 'annotation_id' in params:
path_params['annotation_id'] = params['annotation_id'] # noqa: E501
query_params = []
header_params = {}
if 'token' in params:
header_params['token'] = params['token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/annotation/{annotation_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Annotation', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_annotations(self, token, **kwargs): # noqa: E501
"""Retrieves annotations (paginated) # noqa: E501
Retrieves annotations # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_annotations(token, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str token: The login token obtained via GET /login/{api_key} (required)
:param str annotation_id: If specified, filters the annotations id
:param AnnotationTypes annotation_type: If specified, filters the annotations by type
:param str name: If specified, filters the annotations by name
:param list[str] tags: If specified, filters the annotations by tag
:param str document_id: If specified, filters the annotations attached to a given document
:param datetime created_after: If specified, keeps only annotations created after given UTC timestamp (ISO 8601 format : yyyy-MM-ddThh:mm:ss)
:param datetime created_before: If specified, keeps only annotations created before given UTC timestamp (ISO 8601 format : yyyy-MM-ddThh:mm:ss)
:param int offset: Number of the first annotation to send (pagination)
:param int limit: Maximum number of annotation to send (pagination)
:return: list[Annotation]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_annotations_with_http_info(token, **kwargs) # noqa: E501
else:
(data) = self.get_annotations_with_http_info(token, **kwargs) # noqa: E501
return data
def get_annotations_with_http_info(self, token, **kwargs): # noqa: E501
"""Retrieves annotations (paginated) # noqa: E501
Retrieves annotations # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_annotations_with_http_info(token, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str token: The login token obtained via GET /login/{api_key} (required)
:param str annotation_id: If specified, filters the annotations id
:param AnnotationTypes annotation_type: If specified, filters the annotations by type
:param str name: If specified, filters the annotations by name
:param list[str] tags: If specified, filters the annotations by tag
:param str document_id: If specified, filters the annotations attached to a given document
:param datetime created_after: If specified, keeps only annotations created after given UTC timestamp (ISO 8601 format : yyyy-MM-ddThh:mm:ss)
:param datetime created_before: If specified, keeps only annotations created before given UTC timestamp (ISO 8601 format : yyyy-MM-ddThh:mm:ss)
:param int offset: Number of the first annotation to send (pagination)
:param int limit: Maximum number of annotation to send (pagination)
:return: list[Annotation]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['token', 'annotation_id', 'annotation_type', 'name', 'tags', 'document_id', 'created_after', 'created_before', 'offset', 'limit'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_annotations" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'token' is set
if ('token' not in params or
params['token'] is None):
raise ValueError("Missing the required parameter `token` when calling `get_annotations`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'annotation_id' in params:
query_params.append(('annotation_id', params['annotation_id'])) # noqa: E501
if 'annotation_type' in params:
query_params.append(('annotation_type', params['annotation_type'])) # noqa: E501
if 'name' in params:
query_params.append(('name', params['name'])) # noqa: E501
if 'tags' in params:
query_params.append(('tags', params['tags'])) # noqa: E501
collection_formats['tags'] = 'multi' # noqa: E501
if 'document_id' in params:
query_params.append(('document_id', params['document_id'])) # noqa: E501
if 'created_after' in params:
query_params.append(('created_after', params['created_after'])) # noqa: E501
if 'created_before' in params:
query_params.append(('created_before', params['created_before'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
header_params = {}
if 'token' in params:
header_params['token'] = params['token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/annotation', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Annotation]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def tag_annotation(self, token, annotation_id, tag, **kwargs): # noqa: E501
"""Tags an annotation # noqa: E501
Tags an annotation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.tag_annotation(token, annotation_id, tag, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str token: The login token obtained via GET /login/{api_key} (required)
:param str annotation_id: The id of the annotation (required)
:param str tag: The tag to add to the annotation (required)
:return: Annotation
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.tag_annotation_with_http_info(token, annotation_id, tag, **kwargs) # noqa: E501
else:
(data) = self.tag_annotation_with_http_info(token, annotation_id, tag, **kwargs) # noqa: E501
return data
def tag_annotation_with_http_info(self, token, annotation_id, tag, **kwargs): # noqa: E501
"""Tags an annotation # noqa: E501
Tags an annotation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.tag_annotation_with_http_info(token, annotation_id, tag, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str token: The login token obtained via GET /login/{api_key} (required)
:param str annotation_id: The id of the annotation (required)
:param str tag: The tag to add to the annotation (required)
:return: Annotation
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['token', 'annotation_id', 'tag'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method tag_annotation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'token' is set
if ('token' not in params or
params['token'] is None):
raise ValueError("Missing the required parameter `token` when calling `tag_annotation`") # noqa: E501
# verify the required parameter 'annotation_id' is set
if ('annotation_id' not in params or
params['annotation_id'] is None):
raise ValueError("Missing the required parameter `annotation_id` when calling `tag_annotation`") # noqa: E501
# verify the required parameter 'tag' is set
if ('tag' not in params or
params['tag'] is None):
raise ValueError("Missing the required parameter `tag` when calling `tag_annotation`") # noqa: E501
collection_formats = {}
path_params = {}
if 'annotation_id' in params:
path_params['annotation_id'] = params['annotation_id'] # noqa: E501
if 'tag' in params:
path_params['tag'] = params['tag'] # noqa: E501
query_params = []
header_params = {}
if 'token' in params:
header_params['token'] = params['token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/annotation/{annotation_id}/tag/{tag}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Annotation', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def untag_annotation(self, token, annotation_id, tag, **kwargs): # noqa: E501
"""Untags an annotation # noqa: E501
Untags an annotation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.untag_annotation(token, annotation_id, tag, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str token: The login token obtained via GET /login/{api_key} (required)
:param str annotation_id: The id of the annotation (required)
:param str tag: The tag to delete from the annotation (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.untag_annotation_with_http_info(token, annotation_id, tag, **kwargs) # noqa: E501
else:
(data) = self.untag_annotation_with_http_info(token, annotation_id, tag, **kwargs) # noqa: E501
return data
def untag_annotation_with_http_info(self, token, annotation_id, tag, **kwargs): # noqa: E501
"""Untags an annotation # noqa: E501
Untags an annotation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.untag_annotation_with_http_info(token, annotation_id, tag, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str token: The login token obtained via GET /login/{api_key} (required)
:param str annotation_id: The id of the annotation (required)
:param str tag: The tag to delete from the annotation (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['token', 'annotation_id', 'tag'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method untag_annotation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'token' is set
if ('token' not in params or
params['token'] is None):
raise ValueError("Missing the required parameter `token` when calling `untag_annotation`") # noqa: E501
# verify the required parameter 'annotation_id' is set
if ('annotation_id' not in params or
params['annotation_id'] is None):
raise ValueError("Missing the required parameter `annotation_id` when calling `untag_annotation`") # noqa: E501
# verify the required parameter 'tag' is set
if ('tag' not in params or
params['tag'] is None):
raise ValueError("Missing the required parameter `tag` when calling `untag_annotation`") # noqa: E501
collection_formats = {}
path_params = {}
if 'annotation_id' in params:
path_params['annotation_id'] = params['annotation_id'] # noqa: E501
if 'tag' in params:
path_params['tag'] = params['tag'] # noqa: E501
query_params = []
header_params = {}
if 'token' in params:
header_params['token'] = params['token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/annotation/{annotation_id}/tag/{tag}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_annotation(self, body, token, annotation_id, **kwargs): # noqa: E501
"""Updates an annotation # noqa: E501
Updates an annotation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_annotation(body, token, annotation_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object body: The new prediction that should be associated to document in this annotation, in free form json (required)
:param str token: The login token obtained via GET /login/{api_key} (required)
:param str annotation_id: The id of the annotation to modify (required)
:param str name: The new name of the annotation (won't change if not set)
:return: Annotation
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_annotation_with_http_info(body, token, annotation_id, **kwargs) # noqa: E501
else:
(data) = self.update_annotation_with_http_info(body, token, annotation_id, **kwargs) # noqa: E501
return data
def update_annotation_with_http_info(self, body, token, annotation_id, **kwargs): # noqa: E501
"""Updates an annotation # noqa: E501
Updates an annotation # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_annotation_with_http_info(body, token, annotation_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object body: The new prediction that should be associated to document in this annotation, in free form json (required)
:param str token: The login token obtained via GET /login/{api_key} (required)
:param str annotation_id: The id of the annotation to modify (required)
:param str name: The new name of the annotation (won't change if not set)
:return: Annotation
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'token', 'annotation_id', 'name'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_annotation" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_annotation`") # noqa: E501
# verify the required parameter 'token' is set
if ('token' not in params or
params['token'] is None):
raise ValueError("Missing the required parameter `token` when calling `update_annotation`") # noqa: E501
# verify the required parameter 'annotation_id' is set
if ('annotation_id' not in params or
params['annotation_id'] is None):
raise ValueError("Missing the required parameter `annotation_id` when calling `update_annotation`") # noqa: E501
collection_formats = {}
path_params = {}
if 'annotation_id' in params:
path_params['annotation_id'] = params['annotation_id'] # noqa: E501
query_params = []
if 'name' in params:
query_params.append(('name', params['name'])) # noqa: E501
header_params = {}
if 'token' in params:
header_params['token'] = params['token'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/annotation/{annotation_id}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Annotation', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 1.9375 | 2 |
src/minifier.py | kraifpatrik/Xpanda | 0 | 12789459 | # -*- coding: utf-8 -*-
import re
def minify(code: str) -> str:
# Mark end of directives
code = re.sub(r"([^\S\n]*#[^\n]+)", "%NEWLINE%\\1%NEWLINE%", code)
# Remove comments
code = re.sub(r"//[^\n]*", "", code)
code = re.sub(r"/\*(?:\*[^/]|[^*])*\*/", "", code)
# Remove newlines
code = re.sub(r"\n+", " ", code)
# Collapse whitespace
code = re.sub(r"\s{2,}", " ", code)
for c in r"+-*%/!~|&=$<>[]{}().:,;?":
code = re.sub(f"\\s*\{c}\\s*", c, code)
# Add in newlines for directives
code = code.replace("%NEWLINE%", "\n")
code = re.sub(r"\n\s+", "\n", code)
return code.strip()
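# Minimal usage sketch (illustrative input, not from the Xpanda test suite):
#
#     src = "// helper\nfloat foo(float x) {\n    return x * 2.0;\n}\n#define FOO 1\n"
#     print(minify(src))
#     # float foo(float x){return x*2.0;}
#     # #define FOO 1
#
# Comments disappear, whitespace around operators and braces is collapsed, and
# preprocessor-style directives are kept on their own lines.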
| 2.96875 | 3 |
foodProj/foodApp/admin.py | cs-fullstack-2019-spring/django-authentication-cw-ChelsGreg | 0 | 12789460 |
from django.contrib import admin
from .models import FoodModel
# Register your models here.
admin.site.register(FoodModel) | 1.296875 | 1 |
registration/migrations/0006_alter_child_birth_date.py | screw-pack/hazel | 0 | 12789461 |
# Generated by Django 3.2.4 on 2021-06-11 12:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('registration', '0005_auto_20210611_0904'),
]
operations = [
migrations.AlterField(
model_name='child',
name='birth_date',
field=models.CharField(max_length=20),
),
]
| 1.617188 | 2 |
Complexity/complexity.py | dykra/ComplexityEstimator | 0 | 12789462 |
# -*- coding: utf-8 -*-
import numpy as np
import signal
import sys
from Complexity.executionHelpers import exec_fun_with_time, create_loggers_helper, exec_fun_without_output
from Complexity.exceptions import TimeOutException
import importlib
import time
def create_loggers():
import logging
logger = create_loggers_helper(logging.getLogger(__name__))
logger.setLevel(logging.ERROR)
return logger
def complexity_estimate(structure, fun, create_struct_fun, time, given_array):
logger = create_loggers()
logger.debug("I am in complexity_estimate.")
np_array__n = np.array(list(range(1000, 100000000, 1000)))  # array of N values
array_time = pack_create_array(time, np_array__n, structure, fun, create_struct_fun)
np_array_time = np.array(array_time)
names_array = ["n", "n * log(n)", "n^2"]
if np_array_time.size < np_array__n.size:
np_array__n = np.delete(np_array__n, list(range(np_array_time.size, np_array__n.size)))
np_tmp_array = np.array([np.var(np.log(np_array_time / np_array__n)),
np.var(np.log(np_array_time / (np_array__n * np.log(np_array__n)))),
np.var(np.log(np_array_time / np_array__n ** 2))]
)
logger.info("complexity_ estimate finished correctly.")
if not given_array:
if np_tmp_array.min() == 0:
return [i for (i, x) in enumerate(np_tmp_array) if x == 0], names_array
return np_tmp_array.argmin(), names_array # gives the index corresponding to the minimum
if np_tmp_array.min() == 0:
return [i for (i, x) in enumerate(np_tmp_array) if x == 0], names_array, array_time, np_array__n
return np_tmp_array.argmin(), names_array, array_time, np_array__n # gives the index corresponding to the minimum
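# Why the minimum-variance-of-log criterion works (illustrative): if the measured time
# follows t(n) ~ c * f(n) for the right model f, then log(t(n) / f(n)) ~ log(c) is roughly
# constant over all n, so its variance is near zero; for the wrong model the ratio drifts
# with n and the variance grows. E.g. for t(n) = 5 * n, the "n" entry gives log(5)
# everywhere (variance ~ 0), while the "n^2" entry gives log(5) - log(n), which varies.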
def dead_line(timeout, *args, **kwargs):
print("Timeout is " + "%s" % timeout)
def decorate(f):
logger = create_loggers()
logger.info("I am decorating " + f.__name__)
def handler(signum, frame):
logger.warning("TimeOut exception is going to be raised in " + f.__name__)
raise TimeOutException("Deadline for creating table.")
def new_f(*args, **kwargs):
signal.signal(signal.SIGALRM, handler)
signal.alarm(timeout)
return f(*args, **kwargs)
new_f.func_name = f.__name__
return new_f
return decorate
def give_complexity(file, fun, create_struct_fun, time):
(minimum, names_array) = complexity_estimate(file, fun, create_struct_fun, time, False)
if isinstance(minimum, list):
logger = create_loggers()
logger.info("Few results are highly possible")
return [names_array[i] for i in minimum]
return names_array[minimum]
def pack_create_array(deadline, np_array__n, file, fun, create_struct_fun):
@dead_line(deadline)
def create_array_with_times(np_array__n, file, fun, create_struct_fun):
logger = create_loggers()
logger.info("I am in create_array")
np_array_time = []
for i in range(np_array__n.size):
try:
# time_execution = exec_fun_with_time(file, create_struct_fun, fun, np_array__n[i])
module = importlib.import_module(file)
function = getattr(module, create_struct_fun)
structure = function(np_array__n[i])
time1 = time.time()
exec_fun_without_output(file, fun, structure)
time2 = time.time()
time_execution = (time2 - time1) * 1000000
np_array_time.append(time_execution)
except TimeOutException:
logger.warning("Creating time array was interrupted.")
break
except Exception as e:
print(e.args)
logger.error("Exception was raised in create_array")
sys.exit(1)
return np_array_time
return create_array_with_times(np_array__n, file, fun, create_struct_fun)
| 2.578125 | 3 |
store/migrations/0009_auto_20200502_1612.py | it5prasoon/ShoppingCart | 2 | 12789463 | # Generated by Django 2.0 on 2020-05-02 10:42
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('store', '0008_variation'),
]
operations = [
migrations.AddField(
model_name='variation',
name='image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='images', to='store.Product'),
),
migrations.AddField(
model_name='variation',
name='price',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=100, null=True),
),
]
| 1.53125 | 2 |
apps/usuarios/models.py | Marthox1999/ProyectoDesarrolloII | 0 | 12789464 | from django.db import models
import hashlib
from django.core.validators import RegexValidator
from inventario.models import DetallesProducto
class Cliente(models.Model):
TIPO_DOC = {
('PAS','Pasaporte'),
('CC','Cedula de Ciudadania'),
('TI','Tarjeta de Identidad'),
}
nombre = models.CharField(max_length=128, unique=True, primary_key=True)
clave = models.CharField(max_length=128, editable=True)
fechaNacimiento = models.DateField()
direccion = models.CharField(max_length=32)
telefono_regex = RegexValidator(regex=r'^\+?1?\d{7,10}$', message="El telefono debe tener formato: '+7777777'. Up to 10 digits allowed.")
telefono = models.CharField(validators=[telefono_regex], max_length=12, blank=True) # validators should be a list
tipoDocumento = models.CharField(max_length=3, choices = TIPO_DOC)
numeroDocumento = models.IntegerField()
# super().save(*args, **kwargs) to save into this table
def save(self, *args, **kwargs):
self.clave = hashlib.md5(self.clave.encode('utf-8')).hexdigest()
super(Cliente, self).save(*args, **kwargs)
def autenticarCliente(self, *args, **kwargs):
auth = Cliente.objects.filter(nombre=self.nombre,
clave=hashlib.md5(self.clave.encode('utf-8')).hexdigest()).exists()
return auth
def buscarCliente(self, *args, **kwargs):
aux = Cliente.objects.filter(nombre=self.nombre,
clave=hashlib.md5(self.clave.encode('utf-8')).hexdigest())
return aux
class AdministradorDuenio (models.Model):
TIPO = {
('ADMIN','Administrador'),
('CEO','Duenio'),
}
pkAdministradorDuenio = models.AutoField(primary_key=True)
nombreUsuario = models.CharField(max_length=128, unique=True)
clave = models.CharField(max_length=128, editable=True)
tipo = models.CharField(max_length=5, choices=TIPO)
# super().save(*args, **kwargs) to save into this table
def save(self, *args, **kwargs):
self.clave = hashlib.md5(self.clave.encode('utf-8')).hexdigest()
super(AdministradorDuenio, self).save(*args, **kwargs)
def autenticarAdmin(self, *args, **kwargs):
auth = AdministradorDuenio.objects.filter(nombreUsuario=self.nombreUsuario, clave=hashlib.md5(self.clave.encode('utf-8')).hexdigest(), tipo='ADMIN').exists()
return auth
def autenticarDuenio(self, *args, **kwargs):
auth = AdministradorDuenio.objects.filter(nombreUsuario=self.nombreUsuario, clave=hashlib.md5(self.clave.encode('utf-8')).hexdigest(), tipo='CEO').exists()
return auth
# ProductosEnCarrito (products currently in a cart)
class Carrito(models.Model):
pkCarrito = models.AutoField(primary_key=True)
fkNombreCliente = models.ForeignKey(Cliente, on_delete=models.SET_NULL, null=True)
fkDetalleProducto = models.ForeignKey(DetallesProducto, on_delete=models.CASCADE)
cantidad = models.IntegerField()
precioActual = models.FloatField() | 2.1875 | 2 |
rlpyt/distributions/discrete.py | cambel/rlpyt | 17 | 12789465 |
import torch
from rlpyt.utils.tensor import to_onehot, from_onehot
class DiscreteMixin:
def __init__(self, dim, dtype=torch.long, onehot_dtype=torch.float):
self._dim = dim
self.dtype = dtype
self.onehot_dtype = onehot_dtype
@property
def dim(self):
return self._dim
def to_onehot(self, indexes, dtype=None):
return to_onehot(indexes, self._dim, dtype=dtype or self.onehot_dtype)
def from_onehot(self, onehot, dtype=None):
return from_onehot(onehot, dtype=dtype or self.dtype)
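# Minimal usage sketch (illustrative; DiscreteMixin is normally mixed into a concrete
# rlpyt distribution such as Categorical rather than instantiated on its own):
#
#     mixin = DiscreteMixin(dim=4)
#     onehot = mixin.to_onehot(torch.tensor([1, 3]))
#     # tensor([[0., 1., 0., 0.],
#     #         [0., 0., 0., 1.]])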
| 2.28125 | 2 |
results-tables/test/test_experiments.py | alepulver/my-thesis | 0 | 12789466 | import experiments
import stages
from nose.tools import assert_equals
import factory
rows = None
def setup_module():
global rows
data = factory.Stages()
rows = data.indexed_rows()
class TestExperiments:
@classmethod
def setup_class(cls):
stgs = [stages.stage_from(r) for r in rows]
exps = experiments.experiments_from(stgs)
cls.experiments = {}
for e in exps:
cls.experiments[e.experiment_id()] = e
def test_complete(self):
exp = self.experiments['a7e32988-9e4b-4a2d-bbb2-a35798e7e8f1']
assert_equals(exp.has_stage('timeline'), True)
assert_equals(type(exp.get_stage('timeline')), stages.Timeline)
assert_equals(exp.time_start(), 1410966188680)
assert_equals(exp.time_duration(), 556956)
assert_equals(exp.num_stages(), 8)
#assert_equals(exp.experiment_id(), 'a7e32988-9e4b-4a2d-bbb2-a35798e7e8f1')
assert_equals(exp.size_in_bytes(), 131271)
assert_equals(exp.is_complete(), True)
def test_incomplete(self):
exp = self.experiments['c16705a3-64c1-43e5-b70f-24a769dced32']
assert_equals(exp.has_stage('timeline'), False)
assert_equals(exp.num_stages(), 6)
#assert_equals(exp.experiment_id(), 'c16705a3-64c1-43e5-b70f-24a769dced32')
assert_equals(exp.is_complete(), False)
| 2.328125 | 2 |
gateway/hmsGateway.py | rauterRaphael/HomeMonitoringSystem | 0 | 12789467 | import regex as re
import requests
from time import sleep
from digi.xbee.devices import XBeeDevice, RemoteXBeeDevice, XBee64BitAddress
from digi.xbee.exception import TimeoutException
from datetime import datetime
class MSG_TYPES:
ACKN = 0
SYNC = 1
UPDA = 2
SYNACK = 3
class UpdatePayload:
lightIntensity = 0
temperature = 0
batteryLevel = 0
rssiToGateway = 0
motionDetected = 0
class AckPayload:
seqNumToAck = 0
class SynAckPayload:
nodeId = 0
utcSec = ""
defaultSleep = 0
class HMSFrame:
seqNum = 0
nodeId = 0
srcAddr = 0
dstAddr = 0
msgType = 0
payloadLen = 0
payload = ""
cksum = 0
class HMSGateway():
SENSOR_NODE_ID = "SENSOR_NODE"
SENSOR_NODE_ADDR = "0013A200416B4BA2"
#SENSOR_NODE_ADDR = "0000000000000001"
nodeUrl = "http://127.0.0.1:8000/rest/node/"
dataUrl = "http://127.0.0.1:8000/rest/data/"
defaultSleep = 30
ACKS = []
LAST_UPDA = []
lastSyncedAt = []
src_node = None
sequenceNum = 0
nodeID = 0
nodeAddr = 0
SYNC_IN_PROGRESS = False
NODE_ID_WITH_ADDRESS = []
def postNodeInfo(self, nodeID, rssi, motionDetected):
postData = {
"nodeId": nodeID,
"rssi": rssi,
"motionDetected": motionDetected,
"updated_at": "{}".format(datetime.now())
}
requests.post(self.nodeUrl, data = postData)
def postNodeData(self, nodeID, updatePayload):
postData = {
"fromNodeID": nodeID,
"lightIntensity": updatePayload.lightIntensity,
"temperature": updatePayload.temperature,
"batteryLevel": updatePayload.batteryLevel
}
requests.post(self.dataUrl, data = postData)
def encode_hms_frame(self, txFrame):
txFrame.payloadLen, txFrame.payload = self.encode_hmsframe_payload(txFrame)
frameAsStr = ''.join((
str(txFrame.seqNum) + ";",
str(txFrame.nodeId) + ";",
str(txFrame.srcAddr) + ";",
str(txFrame.dstAddr) + ";",
str(txFrame.msgType) + ";",
str(txFrame.payloadLen) + ";",
str(txFrame.payload) + ";",
str(txFrame.cksum) + ";",
))
print(frameAsStr)
return bytearray(frameAsStr, 'utf-8')
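# Illustrative wire format (hypothetical field values): an ACK of sequence number 7 sent
# from the gateway (addr 0) to node address 5 serializes to
#     b'3;0;0;5;0;2;7|;0;'
# i.e. seqNum;nodeId;srcAddr;dstAddr;msgType;payloadLen;payload;cksum; with the ACK
# payload itself encoded as "7|" (so payloadLen is 2) and cksum currently fixed at 0.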
def decode_hms_frame(self, rxMsg):
frameData = rxMsg.split(";")
if len(frameData) != 9:
return None
rxFrame = HMSFrame()
rxFrame.seqNum = int(frameData[0])
rxFrame.nodeId = int(frameData[1])
rxFrame.srcAddr = int(frameData[2])
rxFrame.dstAddr = int(frameData[3])
rxFrame.msgType = int(frameData[4])
rxFrame.payloadLen = int(frameData[5])
rxFrame.payload = frameData[6]
rxFrame.cksum = int(frameData[7])
# check cksum
rxFrame.payload = self.decode_hmsframe_payload(rxFrame)
return rxFrame
def encode_hmsframe_payload(self, txFrame):
if txFrame.payload == "":
print("No payload in frame")
return 0, ""
if txFrame.msgType == MSG_TYPES.ACKN:
print("ACK payload")
ackPayloadAsStr = str(txFrame.payload.seqNumToAck) + "|"
return len(ackPayloadAsStr), ackPayloadAsStr
elif txFrame.msgType == MSG_TYPES.SYNACK:
print("SYNACK payload")
synAckPayloadAsStr = ''.join((
str(txFrame.payload.nodeId) + "|",
str(txFrame.payload.utcSec) + "|",
str(txFrame.payload.defaultSleep) + "|",
))
return len(synAckPayloadAsStr), synAckPayloadAsStr
else:
print("Payload not known")
return 0, ""
def decode_hmsframe_payload(self, rxFrame):
if rxFrame.payloadLen == 0:
return ""
payload = rxFrame.payload.split("|")
if rxFrame.msgType == MSG_TYPES.ACKN:
if len(payload) != 2:
return ""
acknPayload = AckPayload()
acknPayload.seqNumToAck = int(payload[0])
return acknPayload
elif rxFrame.msgType == MSG_TYPES.UPDA:
if len(payload) != 6:
return ""
print("Updating")
updatePayload = UpdatePayload()
updatePayload.lightIntensity = int(payload[0])
updatePayload.temperature = int(payload[1])
updatePayload.batteryLevel = int(payload[2])
updatePayload.rssiToGateway = int(payload[3])
updatePayload.motionDetected = int(payload[4])
return updatePayload
elif rxFrame.msgType == MSG_TYPES.SYNC:
return ""
else:
print("Unknown msg type to decode")
return ""
def process_received_frame(self, rxFrame):
if rxFrame.dstAddr == 0:
if rxFrame.msgType == MSG_TYPES.ACKN and rxFrame.payload != "":
self.ACKS.append(rxFrame.payload.seqNumToAck)
print("ACK RECEVIED")
elif rxFrame.msgType == MSG_TYPES.SYNC:
print("SYNC RECEVIED")
self.handle_sync_request(rxFrame)
elif rxFrame.msgType == MSG_TYPES.UPDA:
print("UPDA RECEVIED")
if rxFrame.nodeId != self.getNextSensorIdOrSync(rxFrame)[1]:
self.NODE_ID_WITH_ADDRESS = [item for item in self.NODE_ID_WITH_ADDRESS if item[1] != rxFrame.srcAddr]
self.handle_sync_request(rxFrame)
else:
if self.store_node_sync_if_needed(rxFrame) == True:
self.handle_sync_request(rxFrame)
else:
txFrame = HMSFrame()
txFrame.msgType = MSG_TYPES.ACKN
txFrame.dstAddr = rxFrame.srcAddr
acknPayload = AckPayload()
acknPayload.seqNumToAck = rxFrame.seqNum
txFrame.payload = acknPayload
print("SENDING ACK")
self.send_HMS_Frame(txFrame)
sleep(0.2)
current = int((datetime.utcnow()-datetime(1970,1,1)).total_seconds())
nodeNotFound = True
for i in range(0, len(self.LAST_UPDA)):
if self.LAST_UPDA[i][0] == rxFrame.nodeId:
nodeNotFound = False
if self.LAST_UPDA[i][1] < current - self.defaultSleep:
self.LAST_UPDA[i] = (rxFrame.nodeId, current)
self.postNodeData(rxFrame.nodeId, rxFrame.payload)
self.postNodeInfo(rxFrame.nodeId, rxFrame.payload.rssiToGateway, rxFrame.payload.motionDetected)
if nodeNotFound == True:
self.LAST_UPDA.append((rxFrame.nodeId, current))
self.postNodeData(rxFrame.nodeId, rxFrame.payload)
self.postNodeInfo(rxFrame.nodeId, rxFrame.payload.rssiToGateway, rxFrame.payload.motionDetected)
elif rxFrame.msgType == MSG_TYPES.SYNACK:
print("SYNACK RECEVIED")
else:
print("Msg not for Gateway")
def store_node_sync_if_needed(self, rxFrame):
nodeNotFound = True
syncNode = False
current = int((datetime.utcnow()-datetime(1970,1,1)).total_seconds())
for i in range(0, len(self.lastSyncedAt)):
if self.lastSyncedAt[i][0] == rxFrame.nodeId and self.lastSyncedAt[i][1] < (current - 600):
self.lastSyncedAt[i] = (rxFrame.nodeId, current)
nodeNotFound = False
syncNode = True
if nodeNotFound == True:
self.lastSyncedAt.append((rxFrame.nodeId, current))
return syncNode
def send_HMS_Frame(self, txFrame):
txFrame.nodeId = self.nodeID
txFrame.seqNum = self.sequenceNum
txFrame.cksum = 0
txFrame.srcAddr = self.nodeAddr
encodedFrame = self.encode_hms_frame(txFrame)
self.src_node.set_sync_ops_timeout(0.8)
for i in range(0, 5):
try:
self.src_node.send_data_broadcast(encodedFrame)
except Exception as e:
pass
self.sequenceNum += 1
return txFrame.seqNum
def handle_sync_request(self, rxFrame):
self.SYNC_IN_PROGRESS = True
txFrame = HMSFrame()
txFrame.msgType = MSG_TYPES.SYNACK
txFrame.dstAddr = rxFrame.srcAddr
synAckPayload = SynAckPayload()
synAckPayload.nodeId = self.getNextSensorIdOrSync(rxFrame)[1]
now = datetime.now()
synAckPayload.utcSec = now.strftime("%y:%m:%d:0%w:%H:%M:%S")
synAckPayload.defaultSleep = self.defaultSleep
txFrame.payload = synAckPayload
self.send_frame_and_wait_for_ack(txFrame, synAckPayload)
def getNextSensorIdOrSync(self, rxFrame):
for item in self.NODE_ID_WITH_ADDRESS:
if item[1] == rxFrame.srcAddr:
return True, item[0]
maxNodeId = len(self.NODE_ID_WITH_ADDRESS) + 1
self.NODE_ID_WITH_ADDRESS.append((maxNodeId, rxFrame.srcAddr))
return False, maxNodeId
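    # XBee RX callback: strip the "STR:"/"#" framing, decode the HMS frame and hand
    # it to process_received_frame().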
def data_receive_callback(self, frame):
if frame is not None:
rx_data = frame.data.decode(errors='replace')
if rx_data != "":
rxMsg = rx_data.split("STR:")[1]
if rxMsg != "":
rxMsg = rxMsg.replace("#", "")
print(rxMsg)
hmsFrame = self.decode_hms_frame(rxMsg)
self.process_received_frame(hmsFrame)
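    # Send a frame and wait (up to 5 attempts, ~1 s each) for its sequence number to
    # appear in self.ACKS, restoring the payload before every retry.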
def send_frame_and_wait_for_ack(self, txFrame, payload, waitForAck=False):
max_retries = 5
num_retry = 0
while(num_retry < max_retries):
seqNumToAck = self.send_HMS_Frame(txFrame)
sleep(1)
if seqNumToAck in self.ACKS:
self.ACKS.remove(seqNumToAck)
break
num_retry += 1
txFrame.payload = payload
print("RETRYING - NO ACK RECEIVED")
def init_and_open_xbee_device(self):
serialPort = input("Serial Port [COM4]: ")
if serialPort == "":
serialPort = "COM4"
bdrate = input("Baudrate [115200]: ")
if bdrate == "":
bdrate = 115200
else:
bdrate = int(bdrate)
try:
self.src_node = XBeeDevice(serialPort, bdrate)
self.src_node.open()
return True
except Exception as e:
            print(e)
        return False
####################################
def runApp(self):
print("\n\n### HOME MONITORING SYSTEM - GATEWAY ###\n\n")
ret = self.init_and_open_xbee_device()
if not ret:
print("Initialization failed -> check log\n")
print("XBEE Device initialized\n")
self.src_node.add_data_received_callback(self.data_receive_callback)
print("# CALLBACK ADDED #\n")
while(1):
sleep(1)
| 2.609375 | 3 |
src/tests/unit/test_auth_hasher.py | DmitryBurnaev/podcast | 1 | 12789468 | import uuid
from modules.auth.hasher import PBKDF2PasswordHasher
def test_password_encode():
hasher = PBKDF2PasswordHasher()
    row_password = uuid.uuid4().hex
    encoded = hasher.encode(row_password, salt="test_salt")
algorithm, iterations, salt, hash_ = encoded.split("$", 3)
assert salt == "test_salt"
assert algorithm == "pbkdf2_sha256"
assert hash_ != row_password
def test_password_verify():
hasher = PBKDF2PasswordHasher()
    row_password = uuid.uuid4().hex
    encoded = hasher.encode(row_password, salt="test_salt")
assert hasher.verify(row_password, encoded)
| 3.140625 | 3 |
et_micc_build/__init__.py | etijskens/et-micc-build | 0 | 12789469 | # -*- coding: utf-8 -*-
"""
Package et_micc_build
=====================
Top-level package for et_micc_build.
"""
import et_micc_build.cli_micc_build
import et_micc
# et-micc and et-micc-build have identical version strings, although
# they are different packages.
__version__ = et_micc.__version__
| 1.265625 | 1 |
apps/profiles/test/test_admin.py | ecoo-app/ecoo-backend | 1 | 12789470 | # TODO:FIXME: add tests for the admin pages and for the admin actions
| 0.90625 | 1 |
packjobs/packjobs.py | migueldiascosta/packjobs | 0 | 12789471 | #!/usr/bin/env python
import os
import sys
import argparse
from glob import glob
from textwrap import dedent
from collections import namedtuple
__version__ = 20200612
__doc__ = """Pack multiple small jobs into large queue jobs
* How it works
* The script merely generates a queue job script and a (mpi-aware) python script
* An outer mpirun in the queue job script places job launchers in the correct nodes
* An Inner mpirun in the job launchers run the application inside each node
* The "trick" here is simply to make the queue treat the inner mpi processes
as if they were openmp threads of the outer mpi processes
* How to use
* Run ./packjobs.py -h to see all the command line options
* Test run with e.g. 2 nodes, 12 procs per job, 2*24/12=4 simultaneous jobs, 1 hour:
./packjobs.py -i jobs_folder -r vasp_std -m VASP --nodes 2 --cpn 24 --ppj 12 --time 1
* Production run with e.g. 50 nodes, 4 procs per job, 50*24/4=300 simultaneous jobs, 24 hours:
./packjobs.py -i jobs_folder -r vasp_std -m VASP --nodes 50 --cpn 24 --ppj 4 --time 24
* Limitations
* If subfolders are added to the job folder after the launchers start running,
the new subfolders will not be considered, although this may change in the future
* However, this script can be run multiple times on the same job folder,
without duplications (the script tags each subfolder as "running" or "done")
* After a queue job is killed or expires, you may need to clean any "running" tags
with "--clean running"
"""
def parse_arguments():
"""Use argparse to get parameters from the command line"""
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-V', '--version', action='version', version='%%(prog)s %s' % __version__)
parser.add_argument('-i', '--input', dest='folder', type=str,
help="folder containing job folders (mandatory)", required=True)
parser.add_argument('-r', '--run', dest='job_cmd', type=str,
help="job command (e.g. vasp_std) (mandatory)", required=True)
parser.add_argument('-m', '--mod', dest='job_mod', type=str,
help="app module (e.g. VASP) (mandatory)", required=True)
parser.add_argument('-p', '--python-mod', dest='python_mod', type=str,
help="python module (e.g. Python)", default='Python')
parser.add_argument('-n', '--nodes', dest='nodes', type=int,
help="number of nodes (mandatory)", required=True)
parser.add_argument('-t', '--time', dest='hours', type=int, default=1,
help="number of hours for qjob (default: 1)")
parser.add_argument('-q', '--queue', dest='queue', type=str, default='normal',
help="name of batch queue for qjob (default: normal)")
parser.add_argument('-b', '--batch', dest='batch', type=str, default='pbs',
help="name of batch system for qjob, currently pbs or lsf (default: pbs)")
parser.add_argument('--cpn', '--cores-per-node', dest='cores_per_node', type=int, default=24,
help="number of cores per node (default: 24)")
parser.add_argument('--mpn', '--memory-per-node', dest='memory_per_node', type=int, default=96,
help="memory per node, in GB (default: 96)")
parser.add_argument('--ppj', '--procs-per-job', dest='procs_per_job', type=int, default=1,
help="number of mpi processes per job (default: 1)")
parser.add_argument('-d', '--dry-run', dest='dry', action='store_true', default=False,
help="don't submit, only create scripts (default: false)")
parser.add_argument('-f', '--force', dest='force', action='store_true', default=False,
help="don't ask for confirmation when deleting files (default: false)")
parser.add_argument('-c', '--clean', action='append', default=[],
choices=['done', 'running', 'scripts', 'all'],
help='delete previously generated file (default: false)')
args = parser.parse_args()
if 'all' in args.clean:
args.clean.append('done')
args.clean.append('running')
args.clean.append('scripts')
if not os.path.isdir(args.folder):
print("\n Folder %s does not exist, exiting" % args.folder)
sys.exit(1)
if args.cores_per_node % args.procs_per_job != 0:
print("\n cores_per_node must be divisible by procs_per_job")
sys.exit(1)
args.jobs_per_node = int(args.cores_per_node/args.procs_per_job)
print("\n Requesting %s nodes, %s cores per node, using %s processes per job" %
(args.nodes, args.cores_per_node, args.procs_per_job))
print("\n This means %s jobs per node, %s simultaneous jobs at any given time\n" %
(args.jobs_per_node, args.jobs_per_node*args.nodes))
return args
class PackJobs:
__doc__ = __doc__
def __init__(self, **kwargs):
"""Takes keywords and maps them explicitly to class attributes"""
self.nodes = kwargs.pop('nodes')
self.folder = kwargs.pop('folder')
self.job_cmd = kwargs.pop('job_cmd')
self.job_mod = kwargs.pop('job_mod')
self.python_mod = kwargs.pop('python_mod')
self.hours = kwargs.pop('hours', 1)
self.queue = kwargs.pop('queue', 'normal')
self.batch = kwargs.pop('batch', 'pbs')
self.cores_per_node = kwargs.pop('cores_per_node', 24)
self.memory_per_node = kwargs.pop('memory_per_node', 96)
self.procs_per_job = kwargs.pop('procs_per_job', 1)
self.jobs_per_node = kwargs.pop('jobs_per_node', int(self.cores_per_node/self.procs_per_job))
self.dry = kwargs.pop('dry', False)
self.force = kwargs.pop('force', False)
self.clean = kwargs.pop('clean', False)
if len(kwargs.keys()) > 0:
self.log("don't know what to do with remaining arguments %s" % str(kwargs))
if self.batch == 'lsf':
self.qjob_script_template = self.qjob_lsf_template
self.qjob_sub_cmd = 'bsub <'
self.qjob_stat_cmd = 'bjobs'
else:
self.qjob_script_template = self.qjob_pbs_template
self.qjob_sub_cmd = 'qsub'
self.qjob_stat_cmd = 'qstat'
self.mpirun_job = ''
self.qjob_script_path = ''
def run(self):
"""Run all steps (clean, read_jobs, write_scripts, submit_jobs)"""
self.clean_files()
self.read_jobs()
self.write_scripts()
self.submit_jobs()
def clean_files(self):
"""Clean previously generated files if requested applicable"""
if 'all' in self.clean:
self.log("Warning: Deleting all files (but not subfolders) in %s" % self.folder)
if self.confirm():
for f in glob(os.path.join(self.folder, '*')):
if os.path.isfile(f):
os.remove(f)
else:
if 'scripts' in self.clean:
self.log("Warning: Deleting any previously generated qjob and worker scripts")
if self.confirm():
for qjob_script in glob(os.path.join(self.folder, 'qjob.script')):
os.remove(qjob_script)
for worker_py in glob(os.path.join(self.folder, 'worker*.py')):
os.remove(worker_py)
def read_jobs(self):
"""Look for jobs in job folder"""
self.log("Reading from folder %s" % self.folder)
Job = namedtuple('Job', ['folder', 'running', 'done'])
all_jobs = sorted([Job(subfolder,
os.path.isfile(os.path.join(self.folder, subfolder, 'running')),
os.path.isfile(os.path.join(self.folder, subfolder, 'done')))
for subfolder in os.listdir(self.folder)
if os.path.isdir(os.path.join(self.folder, subfolder))])
running_jobs = [job.folder for job in all_jobs if job.running]
finished_jobs = [job.folder for job in all_jobs if job.done]
unstarted_jobs = [job.folder for job in all_jobs if not job.running and not job.done]
self.log("Found %s jobs, %s of them currently running, %s of them done" %
(len(all_jobs), len(running_jobs), len(finished_jobs)))
jobs = unstarted_jobs
if 'running' in self.clean:
self.log("Warning: Forcing execution of jobs tagged as running")
if self.confirm():
for job in running_jobs:
os.remove(os.path.join(self.folder, job, 'running'))
jobs.extend(running_jobs)
if 'done' in self.clean:
self.log("Warning: Forcing execution of jobs tagged as done")
if self.confirm():
for job in finished_jobs:
os.remove(os.path.join(self.folder, job, 'done'))
jobs.extend(finished_jobs)
if len(jobs) > 0:
self.log("Adding %s jobs" % len(jobs))
if len(jobs) < self.jobs_per_node*self.nodes:
print("WARNING: with these jobs and parameters, some cores will be idle")
else:
self.log("No jobs left to run, exiting. You may want to use clean done and/or clean running")
sys.exit(1)
def write_scripts(self):
"""Write queue job and launcher scripts according to given parameters"""
self.mpirun_job = "mpirun -host $(hostname) -np %s %s > out 2> error" % \
(self.procs_per_job, self.job_cmd)
var_dict = {
'folder': self.folder,
'job_cmd': self.mpirun_job,
'job_mod': self.job_mod,
'python_mod': self.python_mod,
'nnodes': self.nodes,
'cpn': self.cores_per_node,
'mpn': self.memory_per_node,
'sjpn': self.jobs_per_node,
'ppj': self.procs_per_job,
'hours': self.hours,
'queue': self.queue,
'njobs': self.jobs_per_node*self.nodes,
'nslots': int(self.nodes*self.cores_per_node),
}
existing_workers = glob(os.path.join(self.folder, 'worker*.py'))
worker = 'worker%s' % (len(existing_workers)+1)
worker_py = worker + '.py'
var_dict['worker'] = worker
var_dict['worker_py'] = worker_py
worker_py_path = os.path.join(self.folder, worker_py)
if not self.dry:
self.log("Writing %s" % worker_py_path)
f = open(worker_py_path, 'w')
f.write(dedent(self.worker_script_template % var_dict))
f.close()
os.system("chmod +x %s" % worker_py_path)
existing_qjobs = glob(os.path.join(self.folder, 'qjob*.script'))
self.qjob_script_path = os.path.join(self.folder, 'qjob%s.script' % (len(existing_qjobs) + 1))
if not self.dry:
self.log("Writing %s" % self.qjob_script_path)
f = open(self.qjob_script_path, 'w')
f.write(dedent(self.qjob_script_template % var_dict))
f.close()
def submit_jobs(self):
"""Submit queue job"""
if not self.dry:
self.log("Submitting %s" % self.qjob_script_path)
folder, script = os.path.split(self.qjob_script_path)
os.system("cd %s; %s %s" % (folder, self.qjob_sub_cmd, script))
sys.stdout.write("\n")
os.system(self.qjob_stat_cmd)
def log(self, msg):
"""Print formatted log message"""
output = " "
if self.dry:
output += "(dry run) "
output += msg
output += "\n\n"
sys.stdout.write(output)
def confirm(self, prompt=None, default_yes=True, abort_no=False):
"""Prompt for confirmation, optionally aborting execution"""
if self.dry:
return False
if self.force:
return True
if prompt is None:
prompt = 'Proceed?'
if default_yes:
prompt = '%s [%s]|%s: ' % (prompt, 'y', 'n')
else:
prompt = '%s [%s]|%s: ' % (prompt, 'n', 'y')
while True:
ask = getattr(__builtins__, 'raw_input', input)
ans = ask(prompt)
if not ans:
return default_yes
if ans not in ['y', 'Y', 'n', 'N']:
print('please enter y or n.')
continue
if ans in ('Y', 'y'):
return True
if ans in ('N', 'n'):
if abort_no:
sys.exit(1)
else:
return False
qjob_pbs_template = """\
#!/bin/bash
#PBS -N %(worker)s
#PBS -l select=%(nnodes)s:ncpus=%(cpn)s:mpiprocs=%(sjpn)s:ompthreads=%(ppj)s:mem=%(mpn)sGB
#PBS -l walltime=%(hours)s:00:00
#PBS -j oe
#PBS -q %(queue)s
cd $PBS_O_WORKDIR
module purge
module load %(python_mod)s %(job_mod)s
# this mpirun, combined with mpiprocs and ompthreads queue settings,
# starts job launchers in the correct nodes
mpirun -np %(njobs)s ./%(worker_py)s
"""
qjob_lsf_template = """\
#!/bin/bash
#BSUB -J %(worker)s
#BSUB -n %(nslots)s
#BSUB -q %(queue)s
#BSUB -R \"span[ptile=%(sjpn)s]\"
#BSUB -R \"rusage[mem=%(mpn)s000]\"
#BSUB -W %(hours)s:00
#BSUB -eo
#BSUB -x
module purge
module load %(python_mod)s %(job_mod)s
# this mpirun, combined with the span[ptile] queue setting,
# starts job launchers in the correct nodes
mpirun -np %(njobs)s ./%(worker_py)s
"""
worker_script_template = """\
#!/usr/bin/env python
import os
import sys
import glob
import argparse
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
jobs = sorted([d for d in os.listdir(os.getcwd()) if os.path.isdir(d)])
j = rank
name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
status = open('status.' + name + '.rank' + str(rank), 'w')
while j < len(jobs):
running = os.path.isfile(os.path.join(jobs[j], 'running'))
done = os.path.isfile(os.path.join(jobs[j], 'done'))
if not running and not done:
status.write("running " + jobs[j] + "\\n")
status.flush()
os.chdir(jobs[j])
open('running', 'w').close()
error = os.system("%(job_cmd)s")
if not error:
os.remove('running')
open('done', 'w').close()
status.write(jobs[j] + " done\\n")
status.flush()
else:
status.write(jobs[j] + " failed\\n")
status.flush()
os.chdir('..')
else:
status.write(jobs[j] + " skipped\\n")
status.flush()
j += size
status.write("finished\\n")
status.close()
"""
if __name__ == "__main__":
args_dict = vars(parse_arguments())
p = PackJobs(**args_dict)
p.run()
| 2.5 | 2 |
op_search.py | Yale-LILY/QueryReformulator | 3 | 12789472 | # -*- coding: utf-8 -*-
'''
Custom theano class to query the search engine.
'''
import numpy as np
import theano
from theano import gof
from theano import tensor
import parameters as prm
import utils
import average_precision
import random
class Search(theano.Op):
__props__ = ()
def __init__(self,options):
self.options = options
self.options['reformulated_queries'] = {}
def make_node(self, x1, x2, x3, x4):
assert hasattr(self, '_props'), "Your version of theano is too old to support __props__."
x1 = tensor.as_tensor_variable(x1)
x2 = tensor.as_tensor_variable(x2)
x3 = tensor.as_tensor_variable(x3)
x4 = tensor.as_tensor_variable(x4)
out = [tensor.fmatrix().type(), tensor.itensor3().type(), tensor.imatrix().type(), tensor.fmatrix().type()]
return theano.Apply(self, [x1, x2, x3, x4], out)
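    # perform() does the actual work: it rebuilds the textual queries from the word
    # masks, asks the search engine for candidates, collects feedback terms for the
    # next iteration and fills the retrieval metrics (recall/precision/F1/MAP).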
def perform(self, node, inputs, output_storage):
q_m = inputs[0]
D_truth = inputs[1]
n_iter = int(inputs[2])
is_train = int(inputs[3])
#outputs
metrics = np.zeros((len(q_m), len(prm.metrics_map)), np.float32)
if is_train:
max_feedback_docs = prm.max_feedback_docs_train
else:
max_feedback_docs = prm.max_feedback_docs
D_i = -2 * np.ones((len(q_m), max_feedback_docs, prm.max_words_input), np.int32)
D_gt_m = np.zeros((len(q_m), prm.max_candidates), np.float32)
D_id = np.zeros((len(q_m), prm.max_candidates), np.int32)
# no need to retrieve extra terms in the last iteration
if n_iter == prm.n_iterations - 1:
extra_terms = False
else:
extra_terms = True
# allow the search engine to cache queries only in the first iteration.
if n_iter == 0:
save_cache = prm.use_cache
else:
save_cache = False
max_cand = prm.max_candidates
qs = []
for i, q_lst in enumerate(self.options['current_queries']):
q = []
for j, word in enumerate(q_lst):
if q_m[i,j] == 1:
q.append(str(word))
q = ' '.join(q)
if len(q) == 0:
q = 'dummy'
qs.append(q)
# only used to print the reformulated queries.
self.options['reformulated_queries'][n_iter] = qs
# always return one more candidate because one of them might be the input doc.
candss = self.options['engine'].get_candidates(qs, max_cand, prm.max_feedback_docs, save_cache, extra_terms)
for i, cands in enumerate(candss):
D_truth_dic = {}
for d_truth in D_truth[i]:
if d_truth > -1:
D_truth_dic[d_truth] = 0
D_id[i,:len(cands.keys())] = cands.keys()
j = 0
m = 0
cand_ids = []
selected_docs = np.arange(prm.max_feedback_docs)
if is_train:
selected_docs = np.random.choice(selected_docs, size=prm.max_feedback_docs_train, replace=False)
for k, (cand_id, (words_idx, words)) in enumerate(cands.items()):
cand_ids.append(cand_id)
# no need to add candidate words in the last iteration.
if n_iter < prm.n_iterations - 1:
# only add docs selected by sampling (if training).
if k in selected_docs:
words = words[:prm.max_terms_per_doc]
words_idx = words_idx[:prm.max_terms_per_doc]
D_i[i,m,:len(words_idx)] = words_idx
# append empty strings, so the list size becomes <dim>.
words = words + max(0, prm.max_words_input - len(words)) * ['']
# append new words to the list of current queries.
self.options['current_queries'][i] += words
m += 1
if cand_id in D_truth_dic:
D_gt_m[i,j] = 1.
j += 1
cands_set = set(cands.keys())
if qs[i].lower() in self.options['engine'].title_id_map:
input_doc_id = self.options['engine'].title_id_map[qs[i].lower()]
# Remove input doc from returned docs.
# This operation does not raise an error if the element is not there.
cands_set.discard(input_doc_id)
intersec = len(set(D_truth_dic.keys()) & cands_set)
recall = intersec / max(1., float(len(D_truth_dic)))
precision = intersec / max(1., float(prm.max_candidates))
metrics[i,prm.metrics_map['RECALL']] = recall
metrics[i,prm.metrics_map['PRECISION']] = precision
metrics[i,prm.metrics_map['F1']] = 2 * recall * precision / max(0.01, recall + precision)
avg_precision = average_precision.compute(D_truth_dic.keys(), cand_ids)
metrics[i,prm.metrics_map['MAP']] = avg_precision
metrics[i,prm.metrics_map['LOG-GMAP']] = np.log(avg_precision + 1e-5)
output_storage[0][0] = metrics
output_storage[1][0] = D_i
output_storage[2][0] = D_id
output_storage[3][0] = D_gt_m
def grad(self, inputs, output_grads):
return [tensor.zeros_like(ii, dtype=theano.config.floatX) for ii in inputs]
| 2.296875 | 2 |
moe/moe_project.py | cgruber/make-open-easy | 5 | 12789473 | <filename>moe/moe_project.py
#!/usr/bin/env python
#
# Copyright 2011 Google Inc. All Rights Reserved.
"""Objects to represent a MOE Project."""
__author__ = '<EMAIL> (<NAME>)'
from moe import base
from moe import config_utils
from moe.translators import python_translators
from moe.translators import translators
from moe.translators import undo_scrubbing_translator
class MoeProjectContext(object):
"""A MoeProjectContext comprises the objects to do stuff with a project.
Most MOE apps will only have one MoeProjectContext in their runtime.
But they might, and that's why MoeProjectContext is not a global but
MoeRun is.
"""
def __init__(self, config, db):
self.config = config
self.db = db
self.translators = MakeTranslators(
config.config_json.get('translators', []), self)
self.internal_repository, self.internal_codebase_creator = (
config.internal_repository_config.MakeRepository())
self.public_repository, self.public_codebase_creator = (
config.public_repository_config.MakeRepository())
_SCRUBBING_TRANSLATOR_CONFIG_KEYS = [
u'from_project_space',
u'to_project_space',
u'scrubber_config',
u'type',
]
_TRANSLATOR_CONFIG_KEYS = [
u'from_project_space',
u'to_project_space',
u'type',
]
def MakeTranslators(translators_config, project):
"""Construct the Translators from their config.
Args:
translators_config: array of dictionaries (of the sort that come from JSON)
project: MoeProjectContext
Returns:
list of translators.Translator
"""
result = []
dependent_scrubber_configs = []
for config_json in translators_config:
type_arg = config_json.get(u'type')
if not type_arg:
raise base.Error('Translator config requires a "type"')
if type_arg == u'scrubber':
config_utils.CheckJsonKeys('translator_config', config_json,
_SCRUBBING_TRANSLATOR_CONFIG_KEYS)
result.append(translators.ScrubberInvokingTranslator(
config_json.get('from_project_space'),
config_json.get('to_project_space'),
config_json.get('scrubber_config')))
continue
if type_arg == u'python_2to3':
config_utils.CheckJsonKeys('translator_config', config_json,
_TRANSLATOR_CONFIG_KEYS)
result.append(python_translators.TwoToThreeTranslator(
config_json.get('from_project_space'),
config_json.get('to_project_space')))
continue
if type_arg == u'python_3to2':
config_utils.CheckJsonKeys('translator_config', config_json,
_TRANSLATOR_CONFIG_KEYS)
result.append(python_translators.ThreeToTwoTranslator(
config_json.get('from_project_space'),
config_json.get('to_project_space')))
continue
if type_arg == u'identity':
config_utils.CheckJsonKeys('translator_config', config_json,
_TRANSLATOR_CONFIG_KEYS)
result.append(translators.IdentityTranslator(
config_json.get('from_project_space'),
config_json.get('to_project_space')))
continue
if type_arg in set([u'undo_scrubbing']):
dependent_scrubber_configs.append(config_json)
continue
raise base.Error('Translator type "%s" unknown' % type_arg)
# TODO(dbentley): new translator type that uses project arg
for config_json in dependent_scrubber_configs:
# These are scrubbers that are dependent on other translators.
# We can only process them once the first round of translators are
# constructed.
type_arg = config_json.get(u'type')
if type_arg == u'undo_scrubbing':
# The undo scrubber undoes a previous scrubbing translator. Which
# previous translator? The one that has the opposite from/to project
# spaces as this one.
config_utils.CheckJsonKeys('translator_config', config_json,
_TRANSLATOR_CONFIG_KEYS)
forward_translator = None
for t in result:
if (t.ToProjectSpace() == config_json.get('from_project_space') and
t.FromProjectSpace() == config_json.get('to_project_space')):
forward_translator = t
break
else:
raise base.Error(
'Could find no forward_translator from %s to %s to undo' %
(config_json.get('to_project_space'),
config_json.get('from_project_space')))
result.append(undo_scrubbing_translator.UndoScrubbingTranslator(
config_json.get('from_project_space'),
config_json.get('to_project_space'),
project,
forward_translator
))
return result
| 2.5 | 2 |
print_nightscout_profiles.py | viq/print_nightscout_profiles | 0 | 12789474 | #!/usr/bin/env python
"""
Fetch profile changes from nightscout and display their contents
"""
# Make it work on both python 2 and 3
# Probably a bit wide, but I'm still learning
from __future__ import absolute_import, with_statement, print_function, unicode_literals
# Built-in modules
import argparse
from datetime import datetime
import json
import logging
# External modules
import requests
from texttable import Texttable
logging.basicConfig(level=logging.INFO)
TIMED_ENTRIES = ['carbratio', 'sens', 'basal', 'target_low', 'target_high']
def normalize(profile, entry):
"""
Set entry to blank if it doesn't exist, thus avoiding KeyError
"""
try:
if profile[entry]:
pass
except KeyError:
profile[entry] = ''
def normalize_entry(entry):
"""
    Fill in whichever of "time" / "timeAsSeconds" is missing and derive the "start" and "minutes" fields
"""
logging.debug("Normalizing entry: %s", entry)
try:
if entry["timeAsSeconds"]:
pass
except KeyError:
entry_timeasseconds = datetime.strptime(entry["time"], "%H:%M")
entry[
"timeAsSeconds"] = 3600 * entry_timeasseconds.hour + 60 * entry_timeasseconds.minute
try:
if entry["time"]:
pass
except KeyError:
entry_hour = int(entry['timeAsSeconds'] / 3600)
        entry_minute = int((entry['timeAsSeconds'] % 3600) / 60)
entry["time"] = str(entry_hour).rjust(
2, '0') + ":" + str(entry_minute).rjust(2, '0')
entry["start"] = entry["time"] + ":00"
entry["minutes"] = int(entry["timeAsSeconds"]) / 60
def get_profile_switches(nightscout, token, date_from, count):
"""
Get list of profile switch events
"""
p_url = (
nightscout +
"/api/v1/treatments.json?find[eventType][$eq]=Profile%20Switch&count="
+ count + "&find[created_at][$gte]=" + date_from)
if token is not None:
p_url = p_url + "&token=" + token
p_switch = requests.get(p_url).json()
logging.debug("Profiles: %s", p_switch)
for profile in p_switch:
print("Profile named {} enabled at {} for duration {}".format(
profile['profile'], profile['created_at'], profile['duration']))
extracted_profile = json.loads(profile['profileJson'])
extracted_profile['name'] = profile['profile']
for key in ['timezone', 'delay', 'startDate']:
normalize(extracted_profile, key)
for entry_type in TIMED_ENTRIES:
for entry in extracted_profile[entry_type]:
normalize_entry(entry)
display_text(extracted_profile)
def display_text(p_data):
"""
Display profile in text format
"""
# p_data = profile_data[0]["store"][profile_name]
logging.debug("Data keys: %s", p_data.keys())
# Single value data
singletons = Texttable()
singletons.set_deco(Texttable.HEADER)
singletons.set_cols_align(["c", "c", "c", "c", "c", "c"])
singletons.add_rows([
["Profile name", "Timezone", "Units", "DIA", "Delay", "Start date"],
[
p_data["name"],
p_data["timezone"],
p_data["units"],
p_data["dia"],
p_data["delay"],
p_data["startDate"],
],
])
print(singletons.draw() + "\n")
times = {}
tgt_low = {v["time"]: v["value"] for v in p_data["target_low"]}
tgt_high = {v["time"]: v["value"] for v in p_data["target_high"]}
carb_ratio = {v["time"]: v["value"] for v in p_data["carbratio"]}
sens = {v["time"]: v["value"] for v in p_data["sens"]}
basal = {v["time"]: v["value"] for v in p_data["basal"]}
logging.debug(tgt_high, tgt_low, carb_ratio, sens, basal)
for (time, basal) in basal.items():
times.setdefault(time, {})
times[time]["basal"] = basal
for (time, sens) in sens.items():
times.setdefault(time, {})
times[time]["sens"] = sens
for (time, c_r) in carb_ratio.items():
times.setdefault(time, {})
times[time]["carbratio"] = c_r
for (time, tgt_h) in tgt_high.items():
times.setdefault(time, {})
times[time]["tgt_high"] = tgt_h
for (time, tgt_l) in tgt_low.items():
times.setdefault(time, {})
times[time]["tgt_low"] = tgt_l
logging.debug("Times: %s", times)
times_list = [["Time", "Basal", "ISF", "CR", "Target Low", "Target High"]]
for time in sorted(times.keys()):
times_list.append([
time,
times[time].get("basal", ""),
times[time].get("sens", ""),
times[time].get("carbratio", ""),
times[time].get("tgt_low", ""),
times[time].get("tgt_high", ""),
])
times_table = Texttable()
times_table.set_cols_align(["c", "c", "c", "c", "c", "c"])
times_table.add_rows(times_list)
print(times_table.draw() + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Get nightscout profile.")
parser.add_argument(
"--nightscout",
help="Nightscout URL",
required=True,
nargs="?",
const="http://127.0.0.1:1337",
default="http://127.0.0.1:1337",
)
parser.add_argument("--token", help="Authenticaton token")
parser.add_argument("--from",
help="Starting date to look for profile change events",
dest="date_from")
parser.add_argument("--count", help="Number of profiles to display")
logging.debug(vars(parser.parse_args()))
# https://stackoverflow.com/questions/4575747/get-selected-subcommand-with-argparse/44948406#44948406
# I have no idea what it does, but it seems to do the trick
kwargs = vars(parser.parse_args())
get_profile_switches(**kwargs)
| 2.546875 | 3 |
duct/sources/redis.py | geostarling/duct | 12 | 12789475 | <reponame>geostarling/duct
"""
.. module:: redis
:platform: Unix
:synopsis: A source module for redis stats
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from zope.interface import implementer
from twisted.internet import defer
from twisted.python import log
from duct.interfaces import IDuctSource
from duct.objects import Source
from duct.aggregators import Counter
@implementer(IDuctSource)
class Queues(Source):
"""Query llen from redis-cli
**Configuration arguments:**
:param queue: Queue name (defaults to 'celery', just because)
:type queue: str.
:param db: DB number
:type db: int.
:param clipath: Path to redis-cli (default: /usr/bin/redis-cli)
:type clipath: str.
**Metrics:**
:(service_name): Queue length
:(service_name): Queue rate
"""
ssh = True
def __init__(self, *a, **kw):
Source.__init__(self, *a, **kw)
self.queue = self.config.get('queue', 'celery')
self.db = int(self.config.get('db', 0))
self.clipath = self.config.get('clipath', '/usr/bin/redis-cli')
@defer.inlineCallbacks
def get(self):
out, err, code = yield self.fork(self.clipath, args=('-n',
str(self.db),
'llen',
self.queue,))
if code == 0:
val = int(out.strip('\n').split()[-1])
defer.returnValue([
self.createEvent('ok', '%s queue length' % self.queue, val),
self.createEvent('ok', 'Queue rate', val, prefix='rate',
aggregation=Counter)
])
else:
err = 'Error running %s: %s' % (self.clipath, repr(err))
log.msg(err)
defer.returnValue(self.createEvent('critical', err, None))
| 2.03125 | 2 |
baseframe/__init__.py | sauravsrijan/pydocstyle-test | 0 | 12789476 | <filename>baseframe/__init__.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import types
import json
import gettext
from pytz import timezone, UTC
from pytz.tzinfo import BaseTzInfo
from speaklater import is_lazy_string
import six
from furl import furl
import pycountry
from flask import Blueprint, request, current_app
from flask.json import JSONEncoder as JSONEncoderBase
from flask_assets import Environment, Bundle
from flask_caching import Cache
from flask_babelex import Babel, Domain
from coaster.assets import split_namespec
from coaster.auth import current_auth, request_has_auth
from coaster.sqlalchemy import RoleAccessProxy, MarkdownComposite
try:
from flask_debugtoolbar import DebugToolbarExtension
except ImportError:
DebugToolbarExtension = None
try:
from flask_debugtoolbar_lineprofilerpanel.profile import line_profile
except ImportError:
line_profile = None
from ._version import * # NOQA
from .assets import assets, Version
from . import translations
__all__ = ['baseframe', 'baseframe_js', 'baseframe_css', 'assets', 'Version', '_', '__'] # NOQA
networkbar_cache = Cache(with_jinja2_ext=False)
asset_cache = Cache(with_jinja2_ext=False)
cache = Cache()
babel = Babel()
if DebugToolbarExtension is not None: # pragma: no cover
toolbar = DebugToolbarExtension()
else: # pragma: no cover
toolbar = None
THEME_FILES = {
'bootstrap3': {
'ajaxform.html.jinja2': 'baseframe/bootstrap3/ajaxform.html.jinja2',
'autoform.html.jinja2': 'baseframe/bootstrap3/autoform.html.jinja2',
'delete.html.jinja2': 'baseframe/bootstrap3/delete.html.jinja2',
'message.html.jinja2': 'baseframe/bootstrap3/message.html.jinja2',
'redirect.html.jinja2': 'baseframe/bootstrap3/redirect.html.jinja2'
},
'mui': {
'ajaxform.html.jinja2': 'baseframe/mui/ajaxform.html.jinja2',
'autoform.html.jinja2': 'baseframe/mui/autoform.html.jinja2',
'delete.html.jinja2': 'baseframe/mui/delete.html.jinja2',
'message.html.jinja2': 'baseframe/mui/message.html.jinja2',
'redirect.html.jinja2': 'baseframe/mui/redirect.html.jinja2'
}
}
baseframe_translations = Domain(translations.__path__[0], domain='baseframe')
_ = baseframe_translations.gettext
__ = baseframe_translations.lazy_gettext
class JSONEncoder(JSONEncoderBase):
"""
Custom JSON encoder that adds support to types that are not supported
by Flask's JSON encoder. Eg: lazy_gettext
"""
def default(self, o):
if is_lazy_string(o):
return six.text_type(o)
if isinstance(o, BaseTzInfo):
return o.zone
if isinstance(o, RoleAccessProxy):
return dict(o)
if isinstance(o, furl):
return o.url
if isinstance(o, types.GeneratorType):
return list(o)
if isinstance(o, MarkdownComposite):
return {'text': o.text, 'html': o.html}
return super(JSONEncoder, self).default(o)
def _select_jinja_autoescape(filename):
"""
Returns `True` if autoescaping should be active for the given template name.
"""
if filename is None:
return False
return filename.endswith(('.html', '.htm', '.xml', '.xhtml',
'.html.jinja', '.html.jinja2', '.xml.jinja', '.xml.jinja2', '.xhtml.jinja', '.xhtml.jinja2'))
class BaseframeBlueprint(Blueprint):
def init_app(self, app, requires=[], ext_requires=[], bundle_js=None, bundle_css=None, assetenv=None, theme='bootstrap3'):
"""
Initialize an app and load necessary assets.
:param requires: List of required assets. If an asset has both .js
and .css components, both will be added to the requirement list.
Loaded assets will be minified and concatenated into the app's
``static/js`` and ``static/css`` folders. If an asset has problems
with either of these, it should be loaded pre-bundled via the
``bundle_js`` and ``bundle_css`` parameters.
:param ext_requires: Same as requires, but will be loaded from
an external cookiefree server if ``ASSET_SERVER`` is in config,
            before the regular requires list. Assets are loaded as part of
``requires`` if there is no asset server
:param bundle_js: Bundle of additional JavaScript
:param bundle_css: Bundle of additional CSS
:param assetenv: Environment for assets (in case your app needs a custom environment)
"""
# Since Flask 0.11, templates are no longer auto reloaded.
# Setting the config alone doesn't seem to work, so we explicitly
# set the jinja environment here.
if app.config.get('TEMPLATES_AUTO_RELOAD') or (
app.config.get('TEMPLATES_AUTO_RELOAD') is None and app.config.get('DEBUG')):
app.jinja_env.auto_reload = True
app.jinja_env.add_extension('jinja2.ext.do')
app.jinja_env.autoescape = _select_jinja_autoescape
if app.subdomain_matching:
# Does this app want a static subdomain? (Default: yes, 'static').
# Apps can disable this by setting STATIC_SUBDOMAIN = None.
# Since Werkzeug internally uses '' instead of None, but takes None
# as the default parameter, we remap '' to None in our config
subdomain = app.config.get('STATIC_SUBDOMAIN', 'static') or None
if subdomain:
for rule in app.url_map.iter_rules('static'):
# For safety, seek out and update the static route added by Flask.
# Do not touch additional static routes added by the app or other
# blueprints
if not rule.subdomain: # Will be '' not None
rule.subdomain = subdomain
rule.refresh()
break
else:
subdomain = None
ignore_js = ['!jquery.js']
ignore_css = []
ext_js = []
ext_css = []
if app.config.get('ASSET_SERVER'):
for itemgroup in ext_requires:
sub_js = []
sub_css = []
if not isinstance(itemgroup, (list, tuple)):
itemgroup = [itemgroup]
for item in itemgroup:
name, spec = split_namespec(item)
for alist, ilist, ext in [(sub_js, ignore_js, '.js'), (sub_css, ignore_css, '.css')]:
if name + ext in assets:
alist.append(name + ext + six.text_type(spec))
ilist.append('!' + name + ext)
if sub_js:
ext_js.append(sub_js)
if sub_css:
ext_css.append(sub_css)
else:
requires = [item for itemgroup in ext_requires
for item in (itemgroup if isinstance(itemgroup, (list, tuple)) else [itemgroup])] + requires
app.config['ext_js'] = ext_js
app.config['ext_css'] = ext_css
assets_js = []
assets_css = []
for item in requires:
name, spec = split_namespec(item)
for alist, ext in [(assets_js, '.js'), (assets_css, '.css')]:
if name + ext in assets:
alist.append(name + ext + six.text_type(spec))
js_all = Bundle(assets.require(*(ignore_js + assets_js)),
filters='uglipyjs', output='js/baseframe-packed.js')
css_all = Bundle(assets.require(*(ignore_css + assets_css)),
filters=['cssrewrite', 'cssmin'], output='css/baseframe-packed.css')
if bundle_js:
js_all = Bundle(js_all, bundle_js)
if bundle_css:
css_all = Bundle(css_all, bundle_css)
if assetenv is None:
app.assets = Environment(app)
else:
app.assets = assetenv
app.assets.register('js_jquery', assets.require('jquery.js'))
app.assets.register('js_all', js_all)
app.assets.register('css_all', css_all)
app.register_blueprint(self, static_subdomain=subdomain)
# Optional config for a client app to use a manifest file
# to load fingerprinted assets
# If used with webpack, the client app is expected to specify its own webpack.config.js
# Set `ASSETS_MANIFEST_PATH` in `app.config` to the path for `manifest.json`.
# Eg: "static/build/manifest.json"
# Set `ASSET_BASE_PATH` in `app.config` to the path in which the compiled assets are present.
# Eg: "static/build"
if app.config.get('ASSET_MANIFEST_PATH'):
# Load assets into config from a manifest file
with app.open_resource(app.config['ASSET_MANIFEST_PATH']) as f:
asset_bundles = json.loads(f.read())
if app.config.get('assets'):
raise ValueError("Loading assets via a manifest file needs the `ASSETS` config key to be unused")
app.config['assets'] = {}
for asset_key, asset_path in asset_bundles['assets'].items():
app.config['assets'][asset_key] = asset_path
app.config.setdefault('CACHE_KEY_PREFIX', 'flask_cache_' + app.name + '/')
nwcacheconfig = dict(app.config)
nwcacheconfig['CACHE_KEY_PREFIX'] = 'networkbar_'
if 'CACHE_TYPE' not in nwcacheconfig:
nwcacheconfig['CACHE_TYPE'] = 'simple'
acacheconfig = dict(app.config)
acacheconfig['CACHE_KEY_PREFIX'] = 'asset_'
if 'CACHE_TYPE' not in acacheconfig:
acacheconfig['CACHE_TYPE'] = 'simple'
networkbar_cache.init_app(app, config=nwcacheconfig)
asset_cache.init_app(app, config=acacheconfig)
cache.init_app(app)
babel.init_app(app)
if toolbar is not None:
if 'DEBUG_TB_PANELS' not in app.config:
app.config['DEBUG_TB_PANELS'] = [
'flask_debugtoolbar.panels.versions.VersionDebugPanel',
'flask_debugtoolbar.panels.timer.TimerDebugPanel',
'flask_debugtoolbar.panels.headers.HeaderDebugPanel',
'flask_debugtoolbar.panels.request_vars.RequestVarsDebugPanel',
'flask_debugtoolbar.panels.config_vars.ConfigVarsDebugPanel',
'flask_debugtoolbar.panels.template.TemplateDebugPanel',
'flask_debugtoolbar.panels.sqlalchemy.SQLAlchemyDebugPanel',
'flask_debugtoolbar.panels.logger.LoggingPanel',
'flask_debugtoolbar.panels.route_list.RouteListDebugPanel',
'flask_debugtoolbar.panels.profiler.ProfilerDebugPanel',
]
if line_profile is not None:
app.config['DEBUG_TB_PANELS'].append(
'flask_debugtoolbar_lineprofilerpanel.panels.LineProfilerPanel')
toolbar.init_app(app)
app.json_encoder = JSONEncoder
# If this app has a Lastuser extension registered, give it a cache
lastuser = getattr(app, 'extensions', {}).get('lastuser')
if lastuser and hasattr(lastuser, 'init_cache'):
lastuser.init_cache(app=app, cache=cache)
app.config['tz'] = timezone(app.config.get('TIMEZONE', 'UTC'))
if theme not in THEME_FILES:
raise ValueError("Unrecognised theme: %s" % theme)
app.config['theme'] = theme
if 'NETWORKBAR_DATA' not in app.config:
app.config['NETWORKBAR_DATA'] = 'https://api.hasgeek.com/1/networkbar/networkbar.json'
if isinstance(app.config.get('NETWORKBAR_DATA'), (list, tuple)):
app.config['NETWORKBAR_LINKS'] = app.config['NETWORKBAR_DATA']
app.config.setdefault('RECAPTCHA_DATA_ATTRS', {})
app.config['RECAPTCHA_DATA_ATTRS'].setdefault('callback', 'onInvisibleRecaptchaSubmit')
app.config['RECAPTCHA_DATA_ATTRS'].setdefault('size', 'invisible')
def register(self, app, options, first_registration=False):
"""
Called by :meth:`Flask.register_blueprint` to register all views
and callbacks registered on the blueprint with the application. Creates
a :class:`.BlueprintSetupState` and calls each :meth:`record` callback
with it.
:param app: The application this blueprint is being registered with.
:param options: Keyword arguments forwarded from
:meth:`~Flask.register_blueprint`.
:param first_registration: Whether this is the first time this
blueprint has been registered on the application.
"""
self._got_registered_once = True
state = self.make_setup_state(app, options, first_registration)
if self.has_static_folder:
state.add_url_rule(
self.static_url_path + '/<path:filename>',
view_func=self.send_static_file,
endpoint='static',
subdomain=options.get('static_subdomain'))
for deferred in self.deferred_functions:
deferred(state)
baseframe = BaseframeBlueprint('baseframe', __name__,
static_folder='static',
static_url_path='/_baseframe',
template_folder='templates')
@babel.localeselector
def get_locale():
# If this app and request have a user that specifies a locale, use it
user = current_auth.actor # Use 'actor' instead of 'user' to support anon users
if user is not None and hasattr(user, 'locale') and user.locale:
return user.locale
# Otherwise try to guess the language from the user accept
# header the browser transmits. We support a few in this
# example. The best match wins.
# FIXME: Do this properly. Don't use a random selection of languages
return request.accept_languages.best_match(['de', 'fr', 'es', 'hi', 'te', 'ta', 'kn', 'ml', 'en']) or 'en'
@babel.timezoneselector
def get_timezone():
# If this app and request have a user, return user's timezone,
# else return app default timezone
if current_auth.actor is not None: # Use 'actor' instead of 'user' to support anon users
user = current_auth.actor
if hasattr(user, 'tz'):
return user.tz
elif hasattr(user, 'timezone'):
return timezone(user.timezone)
return current_app.config.get('tz') or UTC
def localized_country_list():
"""
Returns a list of country codes (ISO3166-1 alpha-2) and country names,
localized to the user's locale as determined by :func:`get_locale`.
The localized list is cached for 24 hours.
"""
return _localized_country_list_inner(get_locale())
@cache.memoize(timeout=86400)
def _localized_country_list_inner(locale):
"""
Inner function supporting :func:`localized_country_list`.
"""
if locale == 'en':
countries = [(country.name, country.alpha_2) for country in pycountry.countries]
else:
pycountry_locale = gettext.translation('iso3166-1', pycountry.LOCALES_DIR, languages=[locale])
if six.PY2:
countries = [(pycountry_locale.gettext(country.name).decode('utf-8'), country.alpha_2) for country in pycountry.countries]
else:
countries = [(pycountry_locale.gettext(country.name), country.alpha_2) for country in pycountry.countries]
countries.sort()
return [(code, name) for (name, code) in countries]
def localize_timezone(datetime, tz=None):
"""
Convert a datetime into the user's timezone, or into the specified
timezone. Naive datetimes are assumed to be in UTC.
"""
if not datetime.tzinfo:
datetime = UTC.localize(datetime)
if not tz:
tz = get_timezone()
if isinstance(tz, six.string_types):
tz = timezone(tz)
return datetime.astimezone(tz)
@baseframe.after_app_request
def process_response(response):
if request.endpoint in ('static', 'baseframe.static'):
if 'Access-Control-Allow-Origin' not in response.headers:
# This is required for webfont resources
# Note: We do not serve static assets in production, nginx does.
# That means this piece of code will never be called in production.
response.headers['Access-Control-Allow-Origin'] = '*'
if 'Vary' in response.headers:
vary_values = [item.strip() for item in response.headers['Vary'].split(',')]
if 'Accept-Language' not in vary_values:
vary_values.append('Accept-Language')
if 'Cookie' not in vary_values:
vary_values.append('Cookie')
response.headers['Vary'] = ', '.join(vary_values)
else:
response.headers['Vary'] = 'Accept-Language, Cookie'
# Prevent pages from being placed in an iframe. If the response already
# set has a value for this option, let it pass through
if 'X-Frame-Options' in response.headers:
frameoptions = response.headers.get('X-Frame-Options')
if not frameoptions or frameoptions == 'ALLOW':
# 'ALLOW' is an unofficial signal from the app to Baseframe.
# It signals us to remove the header and not set a default
response.headers.pop('X-Frame-Options')
else:
if request_has_auth() and getattr(current_auth, 'login_required', False):
# Protect only login_required pages from appearing in frames
response.headers['X-Frame-Options'] = 'SAMEORIGIN'
# In memoriam. http://www.gnuterrypratchett.com/
response.headers['X-Clacks-Overhead'] = 'GNU Terry Pratchett'
return response
# Replace gettext handlers for imports
b_ = _
b__ = __
from flask_babelex import gettext as _, lazy_gettext as __
from .utils import * # NOQA
from .views import * # NOQA
from .errors import * # NOQA
from .filters import * # NOQA
# Deprecated imports
from .deprecated import * # NOQA
| 1.710938 | 2 |
checksum_utils.py | piyushkumar2903/python-utils | 0 | 12789477 | import hashlib
import logging
import os
import shutil
import sys
from functools import partial
LOGGER = logging.getLogger()
_MISSING_FILE_PATH_MSG = "Missing parameter file_path: %s"
def generate_sha256_checksum(file_path):
'''
Purpose: Calculate the SHA256 checksum of a given file
Parameters: Path to file
Returns: The SHA256 checksum or None if something went wrong during calculation
'''
if not file_path \
or not os.path.isfile(file_path):
LOGGER.debug(_MISSING_FILE_PATH_MSG, str(file_path))
return None
sha_hash = hashlib.sha256()
buffer = 4096
with open(file_path, 'rb') as sha_file:
while True:
data = sha_file.read(buffer)
if not data:
break
sha_hash.update(data)
return sha_hash.hexdigest()
def generate_md5_checksum(file_path):
'''
Purpose: Will calculate a file's checksum (md5)
    Returns: either None or the MD5 checksum of the file
Parameters: path to file
'''
if not file_path:
LOGGER.debug(_MISSING_FILE_PATH_MSG, str(file_path))
return None
checksum_readable = ""
if os.path.isfile(file_path):
with open(file_path, mode='rb') as checksum_file:
checksum = hashlib.md5()
for buf in iter(partial(checksum_file.read, 128), b''):
checksum.update(buf)
checksum_readable = checksum.hexdigest()
else:
return None
return checksum_readable
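# --- Illustrative usage sketch (not part of the original module) ---
# Hashing this very file is just a stand-in example; any readable path works.
if __name__ == "__main__":
    demo_path = __file__
    print("sha256:", generate_sha256_checksum(demo_path))
    print("md5:", generate_md5_checksum(demo_path))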
| 3.671875 | 4 |
evalutils/evalutils.py | HolyGuacamole/evalutils | 0 | 12789478 | # -*- coding: utf-8 -*-
import json
import logging
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Tuple, Dict, Set, Callable
from warnings import warn
from pandas import DataFrame, merge, Series, concat
from .exceptions import FileLoaderError, ValidationError, ConfigurationError
from .io import first_int_in_filename_key, FileLoader, CSVLoader
from .validators import DataFrameValidator
logger = logging.getLogger(__name__)
class BaseEvaluation(ABC):
def __init__(
self,
*,
ground_truth_path: Path = Path("/usr/src/evaluation/ground-truth/"),
predictions_path: Path = Path("/input/"),
file_sorter_key: Callable = first_int_in_filename_key,
file_loader: FileLoader,
validators: Tuple[DataFrameValidator, ...],
join_key: str = None,
aggregates: Set[str] = {
"mean",
"std",
"min",
"max",
"25%",
"50%",
"75%",
"count",
"uniq",
"freq",
},
output_file: Path = Path("/output/metrics.json"),
):
self._ground_truth_path = ground_truth_path
self._predictions_path = predictions_path
self._file_sorter_key = file_sorter_key
self._file_loader = file_loader
self._validators = validators
self._join_key = join_key
self._aggregates = aggregates
self._output_file = output_file
self._ground_truth_cases = DataFrame()
self._predictions_cases = DataFrame()
self._cases = DataFrame()
self._case_results = DataFrame()
self._aggregate_results = {}
super().__init__()
if isinstance(self._file_loader, CSVLoader) and self._join_key is None:
raise ConfigurationError(
f"You must set a `join_key` when using {self._file_loader}."
)
@property
def _metrics(self):
return {
"case": self._case_results.to_dict(),
"aggregates": self._aggregate_results,
}
def evaluate(self):
self.load()
self.validate()
self.merge_ground_truth_and_predictions()
self.cross_validate()
self.score()
self.save()
def load(self):
self._ground_truth_cases = self._load_cases(
folder=self._ground_truth_path
)
self._predictions_cases = self._load_cases(
folder=self._predictions_path
)
def _load_cases(self, *, folder: Path) -> DataFrame:
cases = None
for f in sorted(folder.glob("**/*"), key=self._file_sorter_key):
try:
new_cases = self._file_loader.load(fname=f)
except FileLoaderError:
logger.warning(
f"Could not load {f.name} using {self._file_loader}."
)
else:
if cases is None:
cases = [new_cases]
else:
cases.append(new_cases)
if cases is None:
raise FileLoaderError(
f"Could not load and files in {folder} with "
f"{self._file_loader}."
)
return DataFrame(cases)
def validate(self):
self._validate_data_frame(df=self._ground_truth_cases)
self._validate_data_frame(df=self._predictions_cases)
def _validate_data_frame(self, *, df: DataFrame):
for validator in self._validators:
validator.validate(df=df)
@abstractmethod
def merge_ground_truth_and_predictions(self):
pass
@abstractmethod
def cross_validate(self):
pass
def _raise_missing_predictions_error(self, *, missing=None):
if missing is not None:
message = (
"Predictions missing: you did not submit predictions for "
f"{missing}. Please try again."
)
else:
message = (
"Predictions missing: you did not submit enough predictions, "
"please try again."
)
raise ValidationError(message)
def _raise_extra_predictions_error(self, *, extra=None):
if extra is not None:
message = (
"Too many predictions: we do not have the ground truth data "
f"for {extra}. Please try again."
)
else:
message = (
"Too many predictions: you submitted too many predictions, "
"please try again."
)
raise ValidationError(message)
@abstractmethod
def score(self):
pass
# noinspection PyUnusedLocal
@staticmethod
def score_case(*, idx: int, case: DataFrame) -> Dict:
return {}
def score_aggregates(self) -> Dict:
aggregate_results = {}
for col in self._case_results.columns:
aggregate_results[col] = self.aggregate_series(
series=self._case_results[col]
)
return aggregate_results
def aggregate_series(self, *, series: Series) -> Dict:
summary = series.describe()
valid_keys = [a for a in self._aggregates if a in summary]
series_summary = {}
for k in valid_keys:
value = summary[k]
# % in keys could cause problems when looking up values later
key = k.replace("%", "pc")
try:
json.dumps(value)
except TypeError:
logger.warning(
f"Could not serialize {key}: {value} as json, "
f"so converting {value} to int."
)
value = int(value)
series_summary[key] = value
return series_summary
def save(self):
self.write_metrics_json()
def write_metrics_json(self):
with open(self._output_file, "w") as f:
f.write(json.dumps(self._metrics))
class ClassificationEvaluation(BaseEvaluation):
"""
ClassificationEvaluations have the same number of predictions as the
    number of ground truth cases. Typical tasks are predicting the stage
    of a case, or segmenting structures within a case.
"""
def merge_ground_truth_and_predictions(self):
if self._join_key:
kwargs = {"on": self._join_key}
else:
kwargs = {"left_index": True, "right_index": True}
self._cases = merge(
left=self._ground_truth_cases,
right=self._predictions_cases,
indicator=True,
how="outer",
suffixes=("_ground_truth", "_prediction"),
**kwargs,
)
def cross_validate(self):
missing = [p for _, p in self._cases.iterrows() if
p["_merge"] == "left_only"]
if missing:
if self._join_key:
missing = [p[self._join_key] for p in missing]
self._raise_missing_predictions_error(missing=missing)
extra = [p for _, p in self._cases.iterrows() if
p["_merge"] == "right_only"]
if extra:
if self._join_key:
extra = [p[self._join_key] for p in extra]
self._raise_extra_predictions_error(extra=extra)
def score(self):
self._case_results = DataFrame()
for idx, case in self._cases.iterrows():
self._case_results = self._case_results.append(
self.score_case(idx=idx, case=case), ignore_index=True
)
self._aggregate_results = self.score_aggregates()
class Evaluation(ClassificationEvaluation):
"""
Legacy class, you should use ClassificationEvaluation instead.
"""
def __init__(self, *args, **kwargs):
warn(
(
"The Evaluation class is deprecated, "
"please use ClassificationEvaluation instead"
),
DeprecationWarning
)
super().__init__(*args, **kwargs)
class DetectionEvaluation(BaseEvaluation):
"""
DetectionEvaluations have a different number of predictions from the
number of ground truth annotations. An example would be detecting lung
nodules in a CT volume, or malignant cells in a pathology slide.
"""
def merge_ground_truth_and_predictions(self):
self._cases = concat(
[self._ground_truth_cases, self._predictions_cases],
keys=["ground_truth", "predictions"]
)
def cross_validate(self):
expected_keys = set(self._ground_truth_cases[self._join_key])
submitted_keys = set(self._predictions_cases[self._join_key])
missing = expected_keys - submitted_keys
if missing:
self._raise_missing_predictions_error(missing=missing)
extra = submitted_keys - expected_keys
if extra:
self._raise_extra_predictions_error(extra=extra)
def score(self):
cases = set(self._ground_truth_cases[self._join_key])
self._case_results = DataFrame()
for idx, case in enumerate(cases):
self._case_results = self._case_results.append(
self.score_case(
idx=idx,
case=self._cases.loc[self._cases[self._join_key] == case],
), ignore_index=True
)
self._aggregate_results = self.score_aggregates()
def score_aggregates(self):
aggregate_results = super().score_aggregates()
totals = self._case_results.sum()
for s in totals.index:
aggregate_results[s]["sum"] = totals[s]
tp = aggregate_results["true_positives"]["sum"]
fp = aggregate_results["false_positives"]["sum"]
fn = aggregate_results["false_negatives"]["sum"]
aggregate_results["precision"] = tp / (tp + fp)
aggregate_results["recall"] = tp / (tp + fn)
aggregate_results["f1_score"] = 2 * tp / ((2 * tp) + fp + fn)
return aggregate_results
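# --- Illustrative sketch (not part of the original module) ---
# Minimal example of how ClassificationEvaluation is meant to be subclassed; the
# "class" column name and the CSVLoader()/empty-validators arguments are assumptions.
#
#   class ExampleEvaluation(ClassificationEvaluation):
#       def __init__(self):
#           super().__init__(file_loader=CSVLoader(), validators=(), join_key="case")
#
#       def score_case(self, *, idx, case):
#           return {
#               "correct": float(case["class_ground_truth"] == case["class_prediction"])
#           }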
| 2.09375 | 2 |
snakewm/apps/system/exit snakewm/__init__.py | Admicos/snakeware | 0 | 12789479 | import pygame
def load(manager, params):
return pygame.quit()
| 1.679688 | 2 |
hackerrank/4. sets/4.py | Eurydia/Xian-assignment | 0 | 12789480 | <reponame>Eurydia/Xian-assignment
n = int(input().rstrip())
s = set()
for _ in range(n):
country = input().rstrip()
s.add(country)
print(len(s))
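# Equivalent one-liner with the same logic, shown for comparison:
# print(len({input().rstrip() for _ in range(int(input()))}))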
| 3.375 | 3 |
provarme_dashboard/migrations/0011_merge_20190626_1024.py | arferreira/dropazul_app | 0 | 12789481 | # Generated by Django 2.0.5 on 2019-06-26 10:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('provarme_dashboard', '0009_order'),
('provarme_dashboard', '0010_auto_20190625_2058'),
]
operations = [
]
| 1.296875 | 1 |
bot/botmain.py | TagesenergieBot/Tagesenergie-Twitterbot | 0 | 12789482 | <reponame>TagesenergieBot/Tagesenergie-Twitterbot<gh_stars>0
import tweepy
from bot import timetool, loggingservice, grabber
from secret import keys
bot_username = 'Tagesenergie-Twitterbot'
logfile_name = bot_username + ".log"
def create_tweet():
"""Creates the text of the tweet."""
try:
text = "Die Tagesenergie-Werte vom " + timetool.get_date()
text = text + "\nMagie-O-Meter: " + grabber.get_magicvalue()
text = text + "\nEnergie Impulswert: " + grabber.get_energyimpulsvalue()
text = text + "\nBewusstwerdungsindex: " + grabber.get_consiousvalue()
except AttributeError as ae:
loggingservice.log(repr(ae), logfile_name)
text = grabber.get_errortext()
return text
def tweet(text):
"""Send out the text as a tweet."""
# Twitter authentication
auth = tweepy.OAuthHandler(keys.CONSUMER_KEY, keys.CONSUMER_SECRET)
auth.set_access_token(keys.ACCESS_TOKEN, keys.ACCESS_SECRET)
api = tweepy.API(auth)
# Send the tweet and log success or failure
try:
api.update_status(text)
except tweepy.error.TweepError as e:
loggingservice.log(repr(e), logfile_name)
else:
loggingservice.log("Tweeted:\n" + text + "\n", logfile_name)
if __name__ == "__main__":
tweet_text = create_tweet()
tweet(tweet_text)
| 2.578125 | 3 |
deepforest/_version.py | ethanwhite/DeepForest-pytorch | 0 | 12789483 | __version__ = '0.1.43'
| 1.0625 | 1 |
Plots.py | airanmehr/Utils | 0 | 12789484 | <filename>Plots.py
'''
Copyleft May 11, 2016 <NAME>, PhD Student, Bafna Lab, UC San Diego, Email: <EMAIL>
'''
from __future__ import print_function
import matplotlib as mpl
import numpy as np
import pandas as pd
import pylab as plt
import seaborn as sns
import UTILS.Util as utl
import UTILS.Hyperoxia as htl
from UTILS import *
def setStyle(style="darkgrid", lw=2, fontscale=1, fontsize=10):
sns.axes_style(style)
mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': fontsize});
mpl.rc('text', usetex=True)
sns.set_context(font_scale=fontscale, rc={"lines.linewidth": lw})
class PLOS:
max_width = 7.5
min_width = 2.6
max_height = 8.75
dpi = 300
extention = 'tiff'
@staticmethod
def get_figsize(width=None, height=None):
if width is not None:
width = min(width, PLOS.max_width)
return (width, 2. / 3 * width)
else:
return (6, 4)
def get_axis_limits(ax, upper=True):
return ax.get_xlim()[(0, 1)[upper]], ax.get_ylim()[(0, 1)[upper]]
def annotate(comment, loc=1, fontsize=26, xpad=0.05, ypad=0.05, ax=None, axtoplot=None):
"""
Args:
comment: text
"""
if ax is None: ax = plt.gca()
if axtoplot is None: axtoplot = ax
xrang = getAxRange(ax, 0)
yrang = getAxRange(ax, 1)
xy = get_axis_limits(ax, upper=False)[0] + xpad * xrang, get_axis_limits(ax)[1] - ypad * yrang
axtoplot.annotate(comment, xy=xy, xycoords='data', size=fontsize, horizontalalignment='left',
verticalalignment='top')
def getAxRange(ax, axi=0):
return get_axis_limits(ax, upper=True)[axi] - get_axis_limits(ax, upper=False)[axi]
def getColorMap(n):
colors = ['darkblue', 'r', 'green', 'darkviolet', 'k', 'darkorange', 'olive', 'darkgrey', 'chocolate', 'rosybrown',
'gold', 'aqua']
if n == 1: return [colors[0]]
if n <= len(colors):
return colors[:n]
return [mpl.cm.jet(1. * i / n) for i in range(n)]
def getMarker(n, addDashed=True):
markers = np.array(['o', '^', 's', 'D', 'd', 'h', '*', 'p','v', '3', 'H', '8','<','2', '4'])[:n]# '<', '>'
if addDashed: markers = map(lambda x: '--' + x, markers)
return markers
# mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size':30}) ;
# mpl.rc('text', usetex=True)
def addGlobalPOSIndex(df,chroms):
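    # Convert per-chromosome POS values to genome-wide coordinates by adding each
    # chromosome's cumulative offset, then index and sort the frame by that coordinate.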
if df is not None:
df['gpos'] = df.POS + chroms.offset.loc[df.CHROM].values
df.set_index('gpos', inplace=True);
df.sort_index(inplace=True)
def GenomeChromosomewise(df, candSNPs=None, genes=None, axes=None,outliers=None):
markerSize = 6
fontsize = 6
chrsize = df.reset_index().groupby('CHROM').POS.max()
if axes is None:
if chrsize.shape[0]>1:
_, axes = plt.subplots(int(np.ceil(chrsize.shape[0] / 2.)), 2, sharey= True, dpi=200, figsize=(12, 6));
ax = axes.reshape(-1)
else:
ax = [plt.subplots(1,1, sharey=True, dpi=200, figsize=(10, 6))[1]]
for j, (chrom, a) in enumerate(df.groupby(level=0)):
if candSNPs is not None:
try:
candSNPs.loc[chrom]
for pos in candSNPs.loc[chrom].index.values:
ax[j].axvline(pos, color='r', linewidth=0.5, alpha=0.5)
ax[j].annotate(
'{:.0f},{:.2f}'.format(candSNPs['rank'].loc[(chrom, pos)], candSNPs.nu0.loc[(chrom, pos)]),
xy=(pos, a.max()), xytext=(pos, a.max()), fontsize=fontsize - 2)
except:
pass
if genes is not None:
try:
X = genes.loc[chrom]
if len(genes.loc[chrom].shape) == 1:
X = pd.DataFrame(X).T
for _, row in X.iterrows():
ax[j].fill_between([row.start, row.end], a.min(), a.max(), color='r')
ax[j].annotate(row['name'], xy=(row.start, a.max()), xytext=(row.start, a.max()),
fontsize=fontsize - 2)
except:
pass
ax[j].scatter(a.loc[chrom].index, a.loc[chrom], s=markerSize, alpha=0.8, edgecolors='none')
if outliers is not None:
try:
ax[j].scatter(outliers.loc[chrom].index, outliers.loc[chrom], s=markerSize, c='r', alpha=0.8, edgecolors='none')
except:
pass
setSize(ax[j], fontsize)
# ax[j].set_xlim([-1000, chrsize[chrom] + 1000])
# ax[j].set_title(chrom, fontsize=fontsize+2)
ax[j].set_xlabel(chrom , fontsize=fontsize + 6)
# annotate(chrom, ax=ax[j],fontsize=fontsize+4)
ax[j].locator_params(axis='x', nbins=10)
plt.tight_layout(pad=0.1)
plt.gcf().subplots_adjust(bottom=0.1)
def Manhattan(data, columns=None, names=None, fname=None, colors=['black', 'gray'], markerSize=20, ylim=None, show=True,
std_th=None, top_k=None, cutoff=None, common=None, Outliers=None, shade=None, fig=None, ticksize=16,
sortedAlready=False,lw=1,axes=None,shareY=False,color=None,CHROMLen=None,alpha=0.4,shade2=None):
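    # Example usage (sketch; assumes a genome-wide score Series indexed by ('CHROM', 'POS');
    # `scores` and the output file name are hypothetical):
    #   Manhattan(scores, std_th=3, fname='scan.png', show=False)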
def reset_index(x):
if x is None: return None
if 'CHROM' not in x.columns.values:
return x.reset_index()
else:
return x
if type(data) == pd.Series:
DF = pd.DataFrame(data)
else:
DF = data
if columns is None: columns=DF.columns
if names is None:names=columns
df = reset_index(DF)
Outliers = reset_index(Outliers)
if not sortedAlready: df = df.sort_index()
if not show:
plt.ioff()
from itertools import cycle
def plotOne(b, d, name, chroms,common,shade,shade2,ax):
a = b.dropna()
c = d.loc[a.index]
if ax is None:
ax=plt.gca()
def plotShade(shade,c):
for _ , row in shade.iterrows():
if shareY:
MAX = DF.replace({np.inf: None}).max().max()
MIN = DF.replace({-np.inf: None}).min().min()
else:
MAX = a.replace({np.inf: None}).max()
MIN = a.replace({-np.inf: None}).min()
ax.fill_between([row.gstart, row.gend], MIN,MAX, color=c, alpha=alpha)
if 'name' in row.index:
if row['name'] == 1: row.gstart -= 1e6
if row['name']== 8: row.gstart=row.gend+1e6
if row['name'] == 'LR2.1': row.gstart -= 2e6
if row['name'] == 'LR2.2': row.gstart += 1e6
xy=(row.gstart, (MAX*1.1))
try:shadename=row['name']
except:shadename=row['gene']
ax.text(xy[0],xy[1],shadename,fontsize=ticksize+2,rotation=0,ha= 'center', va= 'bottom')
if shade is not None: plotShade(shade,c='b')
if shade2 is not None: plotShade(shade2,c='r')
# ax.annotate(' '+shadename,
# # bbox=dict(boxstyle='round,pad=1.2', fc='yellow', alpha=0.3),
# xy=xy, xytext=xy, xycoords='data',horizontalalignment='center',fontsize=ticksize,rotation=90,verticalalignment='bottom')
ax.scatter(a.index, a, s=markerSize, c=c, alpha=0.8, edgecolors='none')
outliers=None
if Outliers is not None:
outliers=Outliers[name].dropna()
if cutoff is not None:
outliers = a[a >= cutoff[name]]
elif top_k is not None:
outliers = a.sort_values(ascending=False).iloc[:top_k]
elif std_th is not None:
outliers = a[a > a.mean() + std_th * a.std()]
if outliers is not None:
if len(outliers):
ax.scatter(outliers.index, outliers, s=markerSize, c='r', alpha=0.8, edgecolors='none')
# ax.axhline(outliers.min(), color='k', ls='--',lw=lw)
if common is not None:
for ii in common.index: plt.axvline(ii,c='g',alpha=0.5)
ax.axis('tight');
if CHROMLen is not None:
ax.set_xlim(0, CHROMLen.sum());
else:
ax.set_xlim(max(0,a.index[0]-10000), a.index[-1]);
setSize(ax,ticksize)
ax.set_ylabel(name, fontsize=ticksize * 1.5)
if chroms.shape[0]>1:
plt.xticks([x for x in chroms.mid], [str(x) for x in chroms.index], rotation=-90, fontsize=ticksize * 1.5)
# plt.setp(plt.gca().get_xticklabels(), visible=False)
plt.locator_params(axis='y', nbins=4)
mpl.rc('ytick', labelsize=ticksize)
if ylim is not None: plt.ylim(ymin=ylim)
    chroms = pd.DataFrame(df.groupby('CHROM').POS.apply(lambda x:x.max()).rename('len').loc[df.reset_index().CHROM.unique()] + 1000)
if CHROMLen is not None:
chroms=pd.DataFrame(CHROMLen)
chroms['offset'] = np.append([0], chroms.len.cumsum().iloc[:-1].values)
chroms['color'] = [c for (_, c) in zip(range(chroms.shape[0]), cycle(colors))]
if color is not None: chroms['color']=color
chroms['start']=df.groupby('CHROM').POS.min()
if CHROMLen is not None:
chroms['start']=0
    chroms['mid'] = [x + y / 2 for x, y in zip(chroms.offset+chroms.start, chroms.len)]
    df['color'] = chroms.color.loc[df.CHROM].values
    df['gpos'] = df.POS + chroms.offset.loc[df.CHROM].values
df.set_index('gpos', inplace=True);
def fff(shade):
shade['gstart'] = shade.start #
shade['gend'] = shade.end #
if chroms.shape[0] > 1:
shade['gstart'] += chroms.offset.loc[shade.CHROM].values
shade['gend'] += + chroms.offset.loc[shade.CHROM].values
if 'name' in shade.columns:
shade.sort_values('gstart', ascending=False, inplace=True)
shade['ID'] = range(1, shade.shape[0] + 1)
return shade
if shade is not None: shade=fff(shade)
if shade2 is not None: shade2 = fff(shade2)
addGlobalPOSIndex(common, chroms);
addGlobalPOSIndex(Outliers, chroms)
if fig is None and axes is None:
fig,axes=plt.subplots(columns.size, 1, sharex=True,sharey=shareY,figsize=(20, columns.size * 4));
if columns.size==1:
axes=[axes]
elif axes is None:
axes=fig.axes
for i in range(columns.size):
if not i:
sh=shade
else:
if shade is not None and 'name' in shade.columns:
sh= shade.drop('name', 1)
plotOne(df[columns[i]], df.color, names[i], chroms,common, sh,shade2,axes[i])
# plt.setp(plt.gca().get_xticklabels(), visible=True)
xlabel='Chromosome'
if chroms.shape[0]==1:
xlabel+=' {}'.format(chroms.index[0])
axes[-1].set_xlabel(xlabel, size=ticksize * 1.5)
plt.gcf().subplots_adjust(bottom=0.2,hspace=0.05)
if fname is not None:
print ('saving ', fname)
plt.savefig(fname)
if not show:
plt.ion()
return fig
def TimeSeries(data, methodColumn=None, ax=None, fname=None, color='r', ci=1,shade=[0,50],samplingTimes=None):
"""
Args:
data: a dataframe containing mu and st fields,
methodColumn: when method column is given, it plots together
ax:
fname:
Returns:
"""
if ax is None: fig=plt.figure(figsize=(12,4), dpi=200)
if methodColumn is None:
dfs=[('aa',data)]
else:
dfs=data.groupby(methodColumn)
for name,df in dfs:
if 'color' in df.columns:color=df.color.unique()[0]
df.mu.plot(linewidth=1, color=color, label=name, ax=ax)
# plt.gca().fill_between(df.index, (df.mu+df.st).apply(lambda x:min(x,1)), (df.mu-df.st).apply(lambda x:max(x,0)), color=color, alpha=0.25)
ax.fill_between(df.index.values.astype(int), (df.mu + ci * df.st), (df.mu - ci * df.st), color=color,
alpha=0.25)
if shade is not None:
ax.axvspan(shade[0], shade[1], alpha=0.25, color='black')
ax.set_xticks(np.append([50], plt.xticks()[0]))
if samplingTimes is not None:
for t in samplingTimes:ax.axvline(t,color='k',ls='--',lw=1,alpha=0.35)
mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size':26}) ;
# mpl.rc('text', usetex=True)
if fname is not None:
plt.savefig(fname)
def QQPval(a,z,nq=20, s=40, alpha=0.8, fname=None):
"""pplt.QQPval(exp10(logpa),exp10(logpz))"""
def getQuantilesLog2():
q=[1]
for i in range(nq):q+=[q[-1]/2.]
q=pd.Series(q,index=q).iloc[1:]
return q
q=getQuantilesLog2()
qq=pd.DataFrame(q.apply(lambda x: [abs((x)),z.quantile(x),a.quantile(x)]).sort_index().tolist(),index=q,columns=['expected','null','data']).applymap(lambda x: -np.log10(x))
plt.figure(figsize=(8,6),dpi=200)
qq.plot.scatter(x='expected',y='null',color='k',s=s,alpha=alpha,ax=plt.gca())
qq.plot.scatter(x='expected',y='data',ax=plt.gca(),s=s,alpha=alpha,color='r',lw = 0);
plt.ylim([-1, plt.ylim()[1]]);
xmax = plt.xlim()[1]
plt.plot([0, xmax], [0, xmax],ls='--', c="k",alpha=0.3)
plt.xlim([0,xmax])
plt.xlabel('Expected -log$_{10}$($p$-value)');
plt.ylabel('Observed -log$_{10}$($p$-value)')
mpl.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size':26}) ;
mpl.rc('text', usetex=True)
if fname is not None: plt.savefig(fname)
def plotSiteReal(site, ax=None, fontsize=8, legend=False, title=None):
if ax is None:
dpi = 300
_, ax = plt.subplots(1, 1, figsize=(3, 2), dpi=dpi, sharex=True, sharey=True)
sns.set_context("notebook", font_scale=1, rc={"lines.linewidth": 1.2})
pos = site.name
site = site.sort_index().groupby(level=[0, 1]).apply(lambda x: (x.iloc[0], x.iloc[1]))
df = site.apply(lambda x: pd.Series(np.random.binomial(x[1], x[0] / x[1], 10000)) / x[1]).T
df = df.stack(['REP', 'GEN']).reset_index(['REP', 'GEN'])
idx = pd.Series(range(site.index.get_level_values('GEN').unique().shape[0]), index=np.sort(site.index.get_level_values('GEN').unique()))
ax = sns.boxplot(data=df, x='GEN', y=0, hue='REP', width=0.3, ax=ax);
for i, mybox in enumerate(ax.artists):
# Change the appearance of that box
c = mybox.get_facecolor()
mybox.set_facecolor('None')
mybox.set_edgecolor(c)
for j in range(i * 6, i * 6 + 6):
line = ax.lines[j]
line.set_color(c)
line.set_mfc('None')
for nnn,(_, xxx) in enumerate(site.apply(lambda x: x[0] / x[1]).unstack('REP').iteritems()):
# print idx.loc[xxx.dropna().index] + (nnn - 1) * (0.1)
try:
pd.Series(xxx.dropna().values, index=idx.loc[xxx.dropna().index] + (nnn - 1) * (0.1)).plot(style='-o',
color=
sns.color_palette()[
nnn], ax=ax,
markersize=3,
grid=False,
linewidth=0.5)
except:
pass
handles, labels = ax.get_legend_handles_labels();
ax.set_xlim([ax.get_xlim()[0] - ax.get_xlim()[1] * 0.03, ax.get_xlim()[1] + ax.get_xlim()[1] * 0.03])
ax.set_ylim([ax.get_ylim()[0] - ax.get_ylim()[1] * 0.03, ax.get_ylim()[1] + ax.get_ylim()[1] * 0.03])
if legend:
ax.legend(handles[3:], map(lambda x: 'Replicate {}'.format(int(x) + 1), labels[3:]), loc='best', title='',
fontsize=fontsize - 2)
else:
ax.legend_.remove()
ax.set_ylabel('')
ax.set_xlabel('Generation')
setSize(ax, fontsize=fontsize - 2)
if title is not None:
ax.set_title('{}:{}'.format(pos[0], pos[1]), fontsize=fontsize)
ax.xaxis.grid(True, linewidth=6)
def getNameColorMarker(df):
    f = lambda x: x.method.replace('HMM', r'$\mathcal{H}$').replace('MarkovChain', r'$\mathcal{M}$')
# + '$,\pi=$' + str(int(x.q * 100))
# f = lambda x: x.method.replace('HMM', r'$\mathcal{H}$')
cols = ['method']
if 'q' in df.index.names:
cols = ['q'] + cols
names = df.unstack('S').reset_index()[cols].drop_duplicates()
names['name'] = names.apply(f, axis=1)
names = names.set_index(cols).sort_index(level='method')
names['marker'] = getMarker(names.shape[0])
names['color'] = getColorMap(names.shape[0])
return names
def plotOnePower(df, info, axes, legendSubplot=-1, fontsize=7, markersize=5, ylabel='Hard', panel=list('ABC')):
for j, (name, dff) in enumerate(df.groupby(level='coverage')):
dff = dff.unstack('S')
dff = dff.sortlevel(['method'], ascending=True)
names = info.loc[dff.reset_index('coverage').index]
dff.index = names.name
dff.T.plot(ax=axes[j], legend=False, color=names.color.tolist(), style=names.marker.tolist(),
markersize=markersize)
axes[j].axhline(y=5, color='k');
setTicks(dff)
if j == legendSubplot:
handles, labels = axes[j].get_legend_handles_labels()
axes[j].legend(handles[::-1], labels[::-1], loc='center left', fontsize=fontsize)
if name == np.inf:
name = r'$\infty$'
else:
name = '{:.0f}'.format(name)
if ylabel == 'Hard': axes[j].set_title(r'$\lambda=$' + name, fontsize=fontsize)
axes[j].set_xlabel(r'$s$')
axes[j].set_ylabel(r'Power ({} Sweep)'.format(ylabel))
setSize(axes[j], fontsize=fontsize)
def setSize(ax, fontsize=5):
for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()):
item.set_fontsize(fontsize)
try:
for item in ([ax.zaxis.label] + ax.get_zticklabels()):
item.set_fontsize(fontsize)
except:
pass
def setLegendSize(ax, fontsize=5):
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels, prop={'size': fontsize}, loc='best')
def setTicks(df):
plt.xticks(df.columns.values);
plt.xlim([0.018, 0.105]);
plt.ylim([-2.0, 105]);
plt.yticks(np.sort(np.append(np.arange(20, 101, 20), [5])))
plt.xlabel('')
def savefig(name, dpi,path=PATH.paperFigures,extensions=['pdf','tiff']):
import os
for e in extensions:
os.system('rm -f '+ path+ name + '.'+e)
plt.savefig(path+ name + '.'+e, dpi=dpi)
def plotQuantile(df, kde):
from UTILS import Util as utl
quantiles = np.sort(np.append(np.linspace(0.0, 1, 1000)[:-1], np.linspace(0.999, 1, 10)))
qq = pd.concat([utl.getQantilePvalues(df.COMALE, kde, quantiles=quantiles),
utl.getQantilePvalues(df.COMALENC, kde, quantiles=quantiles)], axis=1);
qq.columns = ['data', 'null'];
QQPval(qq, fname=utl.paperFiguresPath + 'qq.pdf')
def plotGeneTrack(a,ax=None,ntracks=4,minx=None,maxx=None,genesToColor=None):
d=0.01
if ax is None:
plt.figure(); ax=plt.gca()
for i ,(n,row) in enumerate(a.set_index('name')[['start', 'end']].iterrows()):
y=(i%ntracks) *d
if minx is not None:
row['start']=max(minx,row['start'])
if maxx is not None:
row['end'] = min(maxx, row['end'])
c=None;alpha=1
if genesToColor is not None:
alpha=0.75
c='k'
if n in genesToColor:
c='r'
ax.plot([row['start'],row['end']],[y,y],lw=5,label=n,c=c,alpha=alpha)
# xy=row.min()-5000,y+d/3
xy = row.min() , y + d / 4
text = ax.annotate(n, xy=xy, horizontalalignment='left', verticalalignment='bottom', size=5)
# text=ax.annotate(n, xy=xy, xytext=xy,horizontalalignment='left',verticalalignment='bottom',size=5)
ax.legend()
ax.set_ylim([-d/3,(ntracks-1)*d+2*d -d])
ax.yaxis.set_major_formatter(plt.NullFormatter())
# ax.legend(frameon=True, loc='upper left', bbox_to_anchor=(-0.0, 1.2), ncol=5);
try:ax.legend_.remove()
except:pass
ax.set_ylabel('Genes \n')
# ax.xaxis.label.set_size(2)
def overlap(I, a,start=None,end=None):
if start is not None:return I[(I.start <= end) & (I.end >= start)]
return I[(I.start <= a.index[-1]) & (I.end >= a.index[0])]
def plotTracks(a,i=None,dosharey=True,marker='o',ls='',alpha=0.3,CHROM=None,xmin=None,xmax=None,
ymax=None,ymin=None,hline=None,dmel=False,ntracks=-1,dpi=200,figWidth=7,colors=None,
cjet=None,markersize=4,CMAP=None,genesToColor=None,oneColorCMAP=False,plotColorBar=True,
hue=None,ax=None,DROP_CR_Genes=False,subsample=None,fontsize=12,fig=None,alpha_shade=0.2):
if subsample is not None:
if subsample<a.shape[0] and subsample>0:
np.random.seed(0)
a=a.sample(subsample).sort_index()
GENECOORDS=None
if ntracks>0:
if CHROM is None:
if 'CHROM' in a.index.names:
CHROM = a.index.get_level_values('CHROM').unique()[0]
a = a.loc[CHROM]
else:
CHROM = i.CHROM
if dmel is not None:
f=utl.home+'storage/Data/Dmelanogaster/geneCoordinates/dmel{}.df'.format(dmel)
GENECOORDS=pd.read_pickle(f).set_index('CHROM').loc[[CHROM]]
GENECOORDS['len']=GENECOORDS.end - GENECOORDS.start
else:
from Scripts.KyrgysHAPH import Util as kutl
GENECOORDS=kutl.GENECOORS.loc[[CHROM]]
if len(a.index.names)==1: a.index.name='POS'
if len(a.shape)==1: a=pd.DataFrame(a)
def sharey():
ma = max(map(lambda ax: ax.get_ylim()[1], plt.gcf().axes))
mi = min(map(lambda ax: ax.get_ylim()[0], plt.gcf().axes))
for ax in plt.gcf().axes[:-1]: ax.set_ylim([mi, ma])
if ntracks<0:plt.gcf().axes[-1].set_ylim([mi, ma])
n=a.shape[1]
if ntracks>0:n+=1
if ax is None and fig is None:
fig, ax = plt.subplots(n, 1, sharex=True,figsize=(figWidth,n*1.5),dpi=dpi)
if ntracks>0:
axgene=ax[-1]
if n>1:ax=ax.reshape(-1)
else: ax=[ax]
# print a.shape
for ii in range(a.shape[1]):
color='darkblue'
if colors is not None: color=colors[ii]
if CMAP is not None:
col=a.columns[ii]
df=a[[col]].join(CMAP.rename('c'),how='inner').reset_index()
# cmap=sns.cubehelix_palette("coolwarm", as_cmap=True)
# cmap=sns.dark_palette("purple",input="xkcd", as_cmap=True,reverse=True,light=1)
# cmap=sns.diverging_palette(240, 10, as_cmap=True, center = "dark")
# cmap=sns.color_palette("coolwarm", as_cmap=True)
# cmap=sns.choose_colorbrewer_palette("coolwarm", as_cmap=True)
if oneColorCMAP:
cmap = sns.cubehelix_palette(dark=0, light=.85, as_cmap=True)
else:
cmap = 'jet'
dfc=df[['POS',col,'c']].dropna()
sc=ax[ii].scatter(dfc.POS,dfc[col], c=dfc.c.values,cmap=cmap,alpha=alpha,s=markersize)
if plotColorBar:
if fig is None:
fig=plt.gcf()
cbaxes = fig.add_axes([0.905, 0.537, 0.02, 0.412]) #[ lower left corner X, Y, width, height]
tl=np.linspace(0,1,5)
cb = plt.colorbar(sc, cax=cbaxes, ticks=tl, orientation='vertical')
cb.ax.set_yticklabels(["%.2f" % x for x in tl],size=8)
cb.set_label(label='Final Frequency', size=10)
dosharey=False
else:
if cjet is not None:
for jj,_ in a.iterrows():
a.loc[[jj]].iloc[:, ii].plot(legend=False, ax=ax[ii], marker=marker, ls=ls, alpha=alpha, c=cjet.loc[jj],markersize=4)
elif hue is not None:
for name in hue.groupby(0).size().sort_values().index[::-1]:
# if name!=2:continue
jj = hue[hue[0] == name]['index'].sort_values().tolist()
color=hue[hue[0] == name].c.iloc[0]
a.iloc[:, ii].loc[jj].plot(legend=False, ax=ax[ii], marker=marker, ls=ls, c=color, alpha=alpha,markersize=markersize)
else:
a.iloc[:, ii].plot(legend=False, ax=ax[ii],marker=marker,ls=ls,c=color,alpha=alpha,markersize=markersize)
if hline is not None:ax[ii].axhline(hline, c='r', alpha=0.6)
ax[ii].set_ylabel(a.columns[ii])
setSize(ax[ii],fontsize)
# plotGeneTrack(overlap(kutl.GENECOORS.loc[[i.CHROM]], 0,xmin, xmax), ax[-1], minx=a.index.min(), maxx=a.index.max())
if xmin is None:
xmin, xmax=a.index[0],a.index[-1]
if ymax is not None:
axylim=(ax,ax[:-1])[ntracks>0]
for axi in axylim:
if ymin is not None:
axi.set_ylim([ymin, ymax])
else:axi.set_ylim([axi.get_ylim()[0],ymax])
if ntracks>0:
if DROP_CR_Genes:
GENECOORDS=GENECOORDS[GENECOORDS.name.apply(lambda x: x[:2] != 'CR').values]
plotGeneTrack(overlap(GENECOORDS, a, xmin, xmax), axgene, minx=xmin, maxx=xmax,ntracks=ntracks,genesToColor=genesToColor)
if dosharey:sharey()
if i is not None:
for ii in range(a.shape[1]):
ax[ii].fill_between([i.start, i.end], ax[ii].get_ylim()[0], ax[ii].get_ylim()[1], color = 'k', alpha = alpha_shade)
# ax[-1].fill_between([i.start, i.end], ax[ii].get_ylim()[0], ax[ii].get_ylim()[1], color='k', alpha=alpha_shade)
if CHROM is not None:
jj=-1
if plotColorBar: jj-=1
ax[jj].set_xlabel('Chromosome {}'.format(CHROM))
# plt.tight_layout()
plt.gcf().subplots_adjust(top=0.95, left=0.09)
if CMAP is not None:
plt.gcf().subplots_adjust(hspace=0.0)
return GENECOORDS
def plotTracksList(a,i,marker=None, ls='-',shade=True, alpha=0.3,height=1.5):
from Scripts.KyrgysHAPH import Util as kutl
n=len(a)+1
fig, ax = plt.subplots(n, 1, sharex=True,figsize=(10,n*height),dpi=200)
for ii in range(len(a)):
if marker is None:a.iloc[ ii].plot(legend=True, ax=ax[ii])
else:
color = getColorMap(a.iloc[ ii].shape[1])
for jj in range(a.iloc[ ii].shape[1]):
a.iloc[ii].iloc[:,jj].plot( ax=ax[ii], marker=marker, ls=ls, color=color[jj],alpha=alpha, markersize=4, label=a.iloc[ii].columns[jj])
if shade:
ax[ii].fill_between([i.start, i.end], ax[ii].get_ylim()[0], ax[ii].get_ylim()[1], color='k', alpha=0.2)
ax[ii].set_ylabel(a.index[ii])
plotGeneTrack(overlap(kutl.GENECOORS.loc[[i.CHROM]], None,start=i.start,end=i.end), ax[-1],minx=a.iloc[0].index.min(),maxx=a.iloc[0].index.max())
if shade:
ax[-1].fill_between([i.start, i.end], ax[-1].get_ylim()[0], ax[-1].get_ylim()[1], color='k', alpha=0.1)
plt.xlabel('Chromosome {}'.format(i.CHROM))
plt.gcf().subplots_adjust(top=0.95, left=0.09,hspace=0.03)
def plotDAF(i,pairs,AA=False,pad=500000,lite=0.2,lites=None,delta=True,ABS=False,pos=None,compact=False):
try:
if delta:
ylim = [-1.05, 1.05]
diff=lambda x: x.iloc[:,0]-x.iloc[:,1]
# for pair in pairs:print utl.loadFreqs(pair, i,pad=pad,AA=AA)
a= utl.quickMergeGenome(map(lambda pair: diff(utl.loadFreqs(pair, i,pad=pad,AA=AA)).rename(pair[0]+'\nvs\n'+pair[1]),pairs)).loc[i.CHROM]
else:
pairs=list(set(pairs))
print(pairs)
ylim = [-.05, 1.05]
a = utl.loadFreqs(list(set(pairs)), i, pad=pad, AA=AA).loc[i.CHROM]
xmin,xmax=[utl.BED.expand(i, pad).start, utl.BED.expand(i, pad).end]
if a is None:
print('AA',i.CHROM,i.start,i.end,AA)
return
if lite>0:
a = a[a.iloc[:, 0] >= lite]
if lites is not None:
for ii , lite in enumerate(lites):
a = a[a.iloc[:, ii].abs() >= lite]
if ABS is True:
a=a.abs();
ylim[0] +=1
a=a[a.iloc[:,0].abs()>0]
# a = a.apply(lambda x: x.dropna().rolling(50, center=True).mean())
if pos is not None: a=a.loc[pos]
if compact:
plotTracksList(pd.Series([a]), i, ls='', marker='o', shade=False);
plt.gcf().axes[0].set_ylabel('Derived Allele Freq.');
plt.gcf().axes[0].legend(borderaxespad=0.4, fontsize=12, frameon=True, shadow=not True)
else:
plotTracks(a, i, xmin=xmin, xmax=xmax)
plt.xlim(xmin,xmax)
if ylim is not None:
for x in plt.gcf().axes[:-1]:x.set_ylim(ylim)
return a
except:pass
def plotDAFpops(i,pop=None,HA=False,additionalPairs=[],against=None,pairs=None,lite=0,AA=False,delta=True,ABS=False,pos=None,pad=500000,compact=False):
if pairs is None:
if not HA:
if against is None:
# pairs=[[pop, 'JPT'], [pop, 'SAS'], [pop, 'AMR'], [pop, 'EUR'], [pop, 'YRI']]
pairs = [[pop, 'EAS'], [pop, 'SAS'], [pop, 'EUR']]
else:
pairs=[[pop,x] for x in against]
else:
pairs = [[pop, 'TIB'], [pop, 'AND'], [pop, 'ETH'], [pop, 'BGI'],[pop, 'EAS'],[pop, 'AFR'],[pop, 'EUR']]
pairs = additionalPairs+ pairs
if not delta: pairs=[pop]+ [pair[1] for pair in pairs]
else:
pairs=[ pair for pair in pairs if pair[0]!=pair[1]]
return plotDAF(i,pairs,lite=lite, AA= AA, delta=delta,ABS=ABS,pos=pos,pad=pad,compact=compact)
def plotPBS(i,pops,additionalPairs=[],lite=0,AA=False,delta=True,pad=500000,pos=None):
try:
ie=utl.BED.expand(i,pad=pad)
a=utl.pbsi(ie,pops)
a = a[a.iloc[:, 0] > lite]
a = a[a.iloc[:, -1] > 0]
# print utl.BED.expand(i, pad).start
xmin, xmax = [utl.BED.expand(i, pad).start, utl.BED.expand(i, pad).end]
if pos is not None: a = a.loc[pos]
plotTracks(a, i,xmax=xmax,xmin=xmin)
plt.xlim(xmin,xmax)
for x in plt.gcf().axes[:-2]:x.set_ylim([-0.05,1.05])
plt.gcf().axes[-2].set_ylim([0,1])
# plt.figure()
# b = pd.concat([a.PBS], keys=[i.CHROM])
# b.index.names=['CHROM','POS']
# Manhattan(utl.scanGenome(b))
# plt.figure()
# Manhattan(utl.scanGenome(b),np.sum)
# plt.show()
except:pass
class Trajectory:
@staticmethod
def Fly(xx,reps=[1],title='',pop='H',ax=None,hue=None,color=None,sumFreqCutoff=0,subsample=-1,titles=None,
foldOn=None,fname=None,alpha=None,suptitle=None,logscale=True,fontsize=14,ticksSize=10,noticks=[7, 15]):
if len(xx.columns.names)<2:
xx=htl.aug(xx)
x=xx[xx.sort_index(1)[pop].loc[:,pd.IndexSlice[:,reps]].sum(1)>sumFreqCutoff]
if subsample>0 and subsample<x.shape[0]:
ii=np.random.choice(x.shape[0], subsample, replace=False)
x=x.iloc[ii]
if hue is not None: hue=hue.iloc[ii]
if color is not None:
hue = pd.Series([color for _ in range(x.shape[0])], index=x.index.tolist(),name='c').reset_index()
hue[0] = True
if ax is not None:
try:
len(ax)
axes=ax
except:
axes = [ax]
else:
fig, axes = plt.subplots(1, len(reps), sharey=True, figsize=(3*len(reps)+2, 3), dpi=100)
if len(reps)==1:axes=[axes]
for i,rep in enumerate(reps):
if not i:
if title!='':title='('+title+')'
ax=axes[i]
xrep=x.xs(rep, 1, 2)[pop]
if len(noticks):
for t in noticks:
if t in xrep.columns:
xrep=xrep.drop(t,1)
Trajectory.FlyRep(xrep, suff=' Rep. {}'.format(rep),ax=ax,hue=hue,title=title,foldOn=foldOn,alpha=alpha,logscale=logscale)
if fname is not None:plt.savefig(fname)
for ax in axes:
setSize(ax,ticksSize)
if logscale:
ax.set_xlim(99,300)
if titles is not None:
for ax,t in zip(axes,titles):
ax.set_title(t,fontsize=fontsize)
axes[0].set_ylabel('Allele Frequency', fontsize=fontsize)
axes[(0,1)[len(axes)==3]].set_xlabel('Generation', fontsize=fontsize)
if len(reps)==3:
plt.gcf().tight_layout(pad=0.1)
if suptitle:
# plt.suptitle(suptitle,fontsize=14)
# plt.gcf().tight_layout(pad=0.1, rect=[0.0, 0, 0.9, .9])
axt=axes[-1].twinx()
axt.set_ylabel(suptitle,fontsize=fontsize)
axt.set_yticks([])
# plt.setp(axes[-1].get_yticklabels(), visible=False)
# axes[-1].set_yticks([])
@staticmethod
def FlyRep(zz, suff='', ax=None, hue=None, title='', hueDenovo=False, foldOn=None,alpha=None,logscale=True):
if not zz.shape[0]: return
if alpha is None: alpha = 0.12
def one(y, ax, pref, hue,alpha):
x = y.copy(True)
if not (foldOn is None):
x = x.T
if foldOn>0:
fold = x[foldOn] < 0.5
else:
fold = x[-foldOn] > 0.5
x.loc[fold, :] = 1 - x.loc[fold, :]
x = x.T
g = list(map(str, x.index.values.astype(int)))
if logscale:x.index = x.index.values + 100
x=x.rename({280:290})
# print x
title = pref + suff
# title = ''
if hue is None:
x.plot(legend=False, c='k', alpha=alpha, ax=ax, title=title)
else:
for name in hue.groupby(0).size().sort_values().index[::-1]:
group = hue[hue[0] == name]
xx = x.loc[:, group['index']]
# print xx
lab = str(group[0].iloc[0])
try:
lab += ' {}'.format(xx.shape[1])
except:
pass
c = group['c'].iloc[0]
# alpha = (0.2, 1)[name == 'BA']
for iii, (_, y) in enumerate(xx.T.iterrows()):
if not iii:
y.plot(legend=False, c=c, label=lab, alpha=alpha, ax=ax, title=title)
else:
y.plot(legend=False, c=c, label='_nolegend_', alpha=alpha, ax=ax, title=title)
# plt.tight_layout(rect=[0, 0, 0.5, 1])
# ax.legend(bbox_to_anchor=(1., 0.9))
# ax.legend()
if logscale:
ax.set_xscale("log", basex=2);
ax.get_xaxis().set_major_formatter(mpl.ticker.ScalarFormatter())
# print(x.index.tolist())
ax.set_xticks(x.index)
ax.set_xticklabels(g)
# ax.tick_params(axis='both', which='both', bottom='on', top='on', labelbottom='on', right='on', left='on',
# labelleft='on', labelright='on')
if ax is None: fig, ax = plt.subplots(1, 1, sharey=True, figsize=(6, 3), dpi=100)
one(zz.T, ax, title + '', hue,alpha)
ax.set_title(ax.get_title() + ' ({} SNPs {})'.format(zz.shape[0], ''))
@staticmethod
def clusters(a,seq,i=None,reps=[1,2,3],ss=300,pop=['H'],allpops=False):
if allpops: pop=list('HCL')
if len(a.columns.names)<2:
a=htl.aug(a)
reps=[1];pop=['H']
if i is not None:
seq=map(lambda x: utl.mask(x, i), seq)
SNPS=[]
def snps(V):
v=V.copy(True)
v.iloc[1:-1] = v.iloc[1:-1].replace({False: None});
v = v.dropna();
v = v.astype(int)
return v
for p in pop:
axes = plt.subplots(len(seq), len(reps), figsize=(3.5*len(reps)+2, len(seq)*3), sharex=True, sharey=True)[1]
if len(seq) ==1: axes=[axes]
for xx, ax in zip(seq, axes):
if i is not None or 'CHROM' in a.index.names:
aa=a.loc[pd.IndexSlice[i.CHROM, utl.TI(xx), :]]
else:
aa=a.loc[utl.TI(xx)]
SNPS+=[ snps(xx)]
Trajectory.Fly(aa, pop=p, subsample=ss,reps=reps, ax=ax);
if len(pop)>1:
plt.suptitle(p)
@staticmethod
def Win(j, X, ax, rep=1, subsample=-1, verbose=False, foldOn=None):
x = utl.mask(X, j.loc[j.name], full=True)
if not x.shape[0]: return
ss = lambda x: x
if subsample > 0: ss = lambda x: x.iloc[np.random.choice(x.shape[0], min(x.shape[0], subsample), replace=False)]
x = ss(x)
Trajectory.Fly(x, reps=[rep], ax=ax, foldOn=foldOn)
ax.set_title('({}) {}. n={}'.format(j.name + 1, Trajectory.title(j.iloc[0]), x.shape[0]), fontsize=12)
if verbose: print('{},'.format(j.name),)
@staticmethod
def title(istr):
CHROM, start = istr.split('-')[0].split(':');
end = istr.split('-')[1]
return '{}:{:,d}K-{:,d}k'.format(CHROM, int(start) / 1000, int(end) / 1000)
@staticmethod
def slidingWindowRep(batch, X, rep, subsample=-1, verbose=False, foldOn=None, name='',shape=None):
if shape is None:
rows, cols = 10, 10
rows = batch.shape[0] / 10
else:
rows,cols=shape
ax = plt.subplots(rows, cols, dpi=100, figsize=(4*cols, 3 * rows), sharex=True, sharey=True)[1].reshape(-1)
batch.groupby(level=0).apply(
lambda i: Trajectory.Win(i, X, ax[i.name], subsample=subsample, rep=rep, verbose=verbose, foldOn=foldOn));
title = 'Interval {}. {}:{}K-{}K Rep {}. '.format(name, batch.iloc[0].split('-')[0].split(':')[0],
int(batch.iloc[0].split('-')[0].split(':')[1]) / 1000,
int(batch.iloc[-1].split('-')[1]) / 1000, rep)
title += ('ALT allele', 'MAF at Gen {}'.format(foldOn))[foldOn != None]
plt.suptitle(title, fontsize=12, y=1.08)
@staticmethod
def slidingWindowReps(batch, X, subsample=-1, verbose=False, foldOn=None):
for i in batch:
Trajectory.Fly(utl.mask(X,i),reps=[1,2,3],foldOn=foldOn)
plt.suptitle(Trajectory.title(i), fontsize=16, y=1.04)
@staticmethod
def getBatch(i = 'chr2R:7700000-7730000',N=100,step = 10000):
return pd.Series(np.arange(0, step * N, step)).apply(lambda x: utl.BED.shift(i, x))
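    # Example (sketch): Trajectory.getBatch('chr2R:7700000-7730000', N=3) yields three window
    # identifiers offset by `step` bp each, assuming utl.BED.shift moves the whole interval
    # by the given offset; used by slidingWindowRep/slidingWindowReps above.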
def FlySFSRep(a,title=None,subplots=True,includeOther=True):
if includeOther:
b = utl.renameColumns(a.H, 'H', pre=True)
b=pd.concat([b,a.C[180].rename('C180'),a.L[180].rename('L180')],1)
fs = (10, 6)
else:
b = utl.renameColumns(a.H, 'F', pre=True)
b=b.drop('F7',1)
fs = (6, 3)
# b=b.round(1).apply(lambda x: x.value_counts()).fillna(0)
b=b.apply(utl.sfs).fillna(0)
b.plot.bar(width=0.9,subplots=subplots,layout=(-1, 3), figsize=fs,sharex=True,sharey=True,color='k',alpha=0.75);
plt.gcf().axes[0].set_ylabel('Num. of SNPs')
plt.gcf().axes[3].set_ylabel('Num. of SNPs')
plt.gcf().axes[4].set_xlabel('Frequency Bin')
for ax in plt.gcf().axes: ax.set_title('')
# if title!=None:
# plt.suptitle(title)
# annotate(title,loc=0,ax=plt.gcf().axes[0])
# plt.gcf().tight_layout(pad=0.2, rect=[0.0, 0.2, 1, 1])
plt.subplots_adjust(left=.0, bottom=.0, right=1, top=.9,wspace=0.05, hspace=0.05)
# plt.tight_layout()
def FlySFS(a,reps=[1,2,3]):
for rep in reps:
FlySFSRep(a.xs(rep,1,2),includeOther=False)
plt.suptitle('Rep. {}'.format( rep),fontsize=14)
#
def getColorBlindHex():
return sns.color_palette('colorblind').as_hex()
def visualizeRealFeatures(X,hue=None,transform=None):
# pplt.visualizeRealFeatures(X.join(a.CHD),hue='CHD')
XX=X.copy(True)
cols=utl.TI(X.apply(lambda x: x.unique().size)>2).tolist()
if hue is not None: cols+=[hue]
XX=XX[list(set(cols))]
scale = lambda x: x / (x.max() - x.min())
if transform=='scale':
XX = scale(XX)
XX=XX-XX.min()
elif transform == 'z':
XX=XX.apply(utl.pval.zscore)
ax = plt.subplots(3, 1, figsize=(XX.shape[1]/1.5, 8), dpi=150, sharex= True)[1]
if hue is not None:
XX=XX.melt(id_vars=[hue])
palette = {0: 'gray', 1: 'r'}
color=None
else:
XX=XX.melt()
palette=None
color='gray'
palette=None
sns.stripplot(data=XX, x='variable', y='value', jitter=0.15, ax=ax[0], alpha=0.2, hue=hue,palette=palette)
try:
sns.violinplot(data=XX, x='variable', y='value', ax=ax[1], hue=hue, palette=palette, color=color, split=True)
except:
sns.violinplot(data=XX, x='variable', y='value', ax=ax[1], hue=hue, palette=palette, color=color, split=False)
sns.boxplot(data=XX, x='variable', y='value', ax=ax[2])
# ax[1].set_xticks(ax[1].get_xticks(), rotation='vertical')
plt.xticks(rotation=90)
ax[0].set_xlabel('');ax[1].set_xlabel('')
try:ax[1].legend_.remove()
except:pass
def visualizeOneRealFeat(data,x,y):
ax = plt.subplots(1, 3, sharey=True, dpi=100, figsize=(12, 3.5))[1]
sns.stripplot(data=data, x=x, y=y, jitter=0.1, alpha=0.1, ax=ax[0])
sns.boxplot(data=data, x=x, y=y, ax=ax[1])
sns.violinplot(data=data, x=x, y=y, ax=ax[2])
def visualizeCatFeatures(X,hue):
XX = X.copy(True)
cols = utl.TI(XX.apply(lambda x: x.unique().size) < 10).tolist()
XX = XX[cols].astype(int)
norm = lambda x: x / x.sum()
r=[]
rows=max(1,int(np.ceil(len(cols)/3.)))
ax=plt.subplots(rows,3,dpi=150,figsize=(8,rows*2))[1].reshape(-1)
j=0
for c in XX.columns:
if c==hue:continue
norm(pd.crosstab(XX[hue], XX[c])).plot.bar(ax=ax[j])
leg=ax[j].legend(loc='best', prop={'size': 6})
leg.set_title(c, prop={'size': 6})
j+=1
ax = plt.subplots(rows, 3, dpi=150, figsize=(8, rows * 2))[1].reshape(-1)
j = 0
for c in XX.columns:
if c == hue: continue
norm(pd.crosstab(XX[hue], XX[c])).T.plot.bar(ax=ax[j])
leg = ax[j].legend(loc='best', prop={'size': 6})
leg.set_title(hue, prop={'size': 6})
j += 1
def visualizeFeatures(X,hue=None):
visualizeCatFeatures(X, hue)
visualizeRealFeatures(X, hue)
def puComment(fig, comment):
if comment is not None:
fig.text(.05, .05, 'Comment: ' + comment, fontsize=26, color='red')
| 2.3125 | 2 |
tests/test_parser.py | 1st1/httptools | 1 | 12789485 | import httptools
import unittest
from unittest import mock
RESPONSE1_HEAD = b'''HTTP/1.1 200 OK
Date: Mon, 23 May 2005 22:38:34 GMT
Server: Apache/1.3.3.7
(Unix) (Red-Hat/Linux)
Last-Modified: Wed, 08 Jan 2003 23:11:55 GMT
ETag: "3f80f-1b6-3e1cb03b"
Content-Type: text/html;
charset=UTF-8
Content-Length: 130
Accept-Ranges: bytes
Connection: close
'''
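# RESPONSE1_HEAD deliberately contains folded (continuation) header lines for Server and
# Content-Type; test_parser_response_1 below expects the parser to join them, e.g. into
# b'text/html; charset=UTF-8'.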
RESPONSE1_BODY = b'''
<html>
<head>
<title>An Example Page</title>
</head>
<body>
Hello World, this is a very simple HTML document.
</body>
</html>'''
CHUNKED_REQUEST1_1 = b'''POST /test.php?a=b+c HTTP/1.2
User-Agent: Fooo
Host: bar
Transfer-Encoding: chunked
5\r\nhello\r\n6\r\n world\r\n'''
CHUNKED_REQUEST1_2 = b'''0\r\nVary: *\r\nUser-Agent: spam\r\n\r\n'''
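# The chunked POST is split in two so the tests can feed it incrementally:
# CHUNKED_REQUEST1_1 holds the headers plus the "hello" and " world" chunks, and
# CHUNKED_REQUEST1_2 holds the terminating zero-size chunk plus trailer headers.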
class TestResponseParser(unittest.TestCase):
def test_parser_response_1(self):
m = mock.Mock()
headers = {}
m.on_header.side_effect = headers.__setitem__
p = httptools.HttpResponseParser(m)
p.feed_data(RESPONSE1_HEAD)
self.assertEqual(p.get_http_version(), '1.1')
self.assertEqual(p.get_status_code(), 200)
m.on_status.assert_called_once_with(b'OK')
m.on_headers_complete.assert_called_once_with()
self.assertEqual(m.on_header.call_count, 8)
self.assertEqual(len(headers), 8)
self.assertEqual(headers.get(b'Connection'), b'close')
self.assertEqual(headers.get(b'Content-Type'),
b'text/html; charset=UTF-8')
self.assertFalse(m.on_body.called)
p.feed_data(bytearray(RESPONSE1_BODY))
m.on_body.assert_called_once_with(RESPONSE1_BODY)
m.on_message_complete.assert_called_once_with()
self.assertFalse(m.on_url.called)
self.assertFalse(m.on_chunk_header.called)
self.assertFalse(m.on_chunk_complete.called)
with self.assertRaisesRegex(
httptools.HttpParserError,
'data received after completed connection'):
p.feed_data(b'12123123')
def test_parser_response_2(self):
with self.assertRaisesRegex(TypeError, 'expected bytes'):
httptools.HttpResponseParser(None).feed_data('')
def test_parser_response_3(self):
callbacks = {'on_header', 'on_headers_complete', 'on_body',
'on_message_complete'}
for cbname in callbacks:
with self.subTest('{} callback fails correctly'.format(cbname)):
with self.assertRaisesRegex(httptools.HttpParserCallbackError,
'callback failed'):
m = mock.Mock()
getattr(m, cbname).side_effect = Exception()
p = httptools.HttpResponseParser(m)
p.feed_data(RESPONSE1_HEAD + RESPONSE1_BODY)
def test_parser_response_4(self):
p = httptools.HttpResponseParser(None)
with self.assertRaises(httptools.HttpParserInvalidStatusError):
p.feed_data(b'HTTP/1.1 1299 FOOSPAM\r\n')
def test_parser_response_5(self):
m = mock.Mock()
m.on_status = None
m.on_header = None
m.on_body = None
m.on_headers_complete = None
m.on_chunk_header = None
m.on_chunk_complete = None
p = httptools.HttpResponseParser(m)
p.feed_data(RESPONSE1_HEAD)
p.feed_data(RESPONSE1_BODY)
m.on_message_complete.assert_called_once_with()
class TestRequestParser(unittest.TestCase):
def test_parser_request_chunked_1(self):
m = mock.Mock()
p = httptools.HttpRequestParser(m)
p.feed_data(CHUNKED_REQUEST1_1)
self.assertEqual(p.get_method(), b'POST')
m.on_url.assert_called_once_with(b'/test.php?a=b+c')
self.assertEqual(p.get_http_version(), '1.2')
m.on_header.assert_called_with(b'Transfer-Encoding', b'chunked')
m.on_chunk_header.assert_called_with()
m.on_chunk_complete.assert_called_with()
self.assertFalse(m.on_message_complete.called)
m.reset_mock()
p.feed_data(CHUNKED_REQUEST1_2)
m.on_chunk_header.assert_called_with()
m.on_chunk_complete.assert_called_with()
m.on_header.assert_called_with(b'User-Agent', b'spam')
self.assertEqual(m.on_header.call_count, 2)
m.on_message_complete.assert_called_once_with()
def test_parser_request_chunked_2(self):
m = mock.Mock()
headers = {}
m.on_header.side_effect = headers.__setitem__
m.on_url = None
m.on_body = None
m.on_headers_complete = None
m.on_chunk_header = None
m.on_chunk_complete = None
p = httptools.HttpRequestParser(m)
p.feed_data(CHUNKED_REQUEST1_1)
p.feed_data(CHUNKED_REQUEST1_2)
self.assertEqual(
headers,
{b'User-Agent': b'spam',
b'Transfer-Encoding': b'chunked',
b'Host': b'bar',
b'Vary': b'*'})
def test_parser_request_2(self):
p = httptools.HttpRequestParser(None)
with self.assertRaises(httptools.HttpParserInvalidMethodError):
p.feed_data(b'SPAM /test.php?a=b+c HTTP/1.2')
def test_parser_request_3(self):
p = httptools.HttpRequestParser(None)
with self.assertRaises(httptools.HttpParserInvalidURLError):
p.feed_data(b'POST HTTP/1.2')
class TestUrlParser(unittest.TestCase):
def parse(self, url:bytes):
parsed = httptools.parse_url(url)
return (parsed.schema, parsed.host, parsed.port, parsed.path,
parsed.query, parsed.fragment, parsed.userinfo)
def test_parser_url_1(self):
self.assertEqual(
self.parse(b'dsf://aaa/b/c?aa#123'),
(b'dsf', b'aaa', None, b'/b/c', b'aa', b'123', None))
self.assertEqual(
self.parse(b'dsf://i:n@aaa:88/b/c?aa#123'),
(b'dsf', b'aaa', 88, b'/b/c', b'aa', b'123', b'i:n'))
self.assertEqual(
self.parse(b'////'),
(None, None, None, b'////', None, None, None))
self.assertEqual(
self.parse(b'////1/1?a=b&c[]=d&c[]=z'),
(None, None, None, b'////1/1', b'a=b&c[]=d&c[]=z', None, None))
self.assertEqual(
self.parse(b'/////?#123'),
(None, None, None, b'/////', None, b'123', None))
self.assertEqual(
self.parse(b'/a/b/c?b=1&'),
(None, None, None, b'/a/b/c', b'b=1&', None, None))
def test_parser_url_2(self):
self.assertEqual(
self.parse(b''),
(None, None, None, None, None, None, None))
def test_parser_url_3(self):
with self.assertRaises(httptools.HttpParserInvalidURLError):
self.parse(b' ')
def test_parser_url_4(self):
with self.assertRaises(httptools.HttpParserInvalidURLError):
self.parse(b':///1')
def test_parser_url_5(self):
self.assertEqual(
self.parse(b'http://[fdf8:f53e:61e4::18:4]:67/'),
(b'http', b'fdf8:f53e:61e4::18:4', 67, b'/', None, None, None))
def test_parser_url_6(self):
self.assertEqual(
self.parse(bytearray(b'/')),
(None, None, None, b'/', None, None, None))
def test_parser_url_7(self):
url = httptools.parse_url(b'/')
with self.assertRaisesRegex(AttributeError, 'not writable'):
url.port = 0
def test_parser_url_8(self):
with self.assertRaises(TypeError):
httptools.parse_url(None)
| 2.984375 | 3 |
integration_test/test_heartbeat_checker.py | lynix94/nbase-arc | 176 | 12789486 | <gh_stars>100-1000
#
# Copyright 2015 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import testbase
import util
import time
import gateway_mgmt
import redis_mgmt
import smr_mgmt
import default_cluster
import config
import load_generator
import telnet
import json
import constant as c
class TestHeartbeatChecker( unittest.TestCase ):
cluster = config.clusters[0]
leader_cm = config.clusters[0]['servers'][0]
max_load_generator = 1
load_gen_thrd_list = {}
key_base = 'key_thbc'
@classmethod
def setUpClass( cls ):
return 0
@classmethod
def tearDownClass( cls ):
return 0
def setUp( self ):
util.set_process_logfile_prefix( 'TestHeartbeatChecker_%s' % self._testMethodName )
self.conf_checker = default_cluster.initialize_starting_up_smr_before_redis( self.cluster )
self.assertIsNotNone(self.conf_checker, 'failed to initialize cluster')
def tearDown( self ):
testbase.defaultTearDown(self)
def getseq_log(self, s):
smr = smr_mgmt.SMR( s['id'] )
try:
ret = smr.connect( s['ip'], s['smr_mgmt_port'] )
if ret != 0:
return
smr.write( 'getseq log\r\n' )
response = smr.read_until( '\r\n', 1 )
util.log('getseq log (pgs%d) = %s' % (s['id'], response[:-2]))
smr.disconnect()
except IOError:
pass
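    # Poll the SMR state through the leader confmaster once per second, up to max_try times,
    # and return the last state observed (which may still differ from `expected`).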
def get_expected_smr_state( self, server, expected, max_try=60 ):
for i in range( 0, max_try ):
state = util.get_smr_state( server, self.leader_cm )
if state == expected:
break;
time.sleep( 1 )
return state
def state_transition( self ):
server = util.get_server_by_role( self.cluster['servers'], 'slave' )
self.assertNotEquals( server, None, 'failed to get_server_by_role-slave' )
# get gateway info
ip, port = util.get_rand_gateway( self.cluster )
gw = gateway_mgmt.Gateway( self.cluster['servers'][0]['id'] )
# check initial state
state = self.get_expected_smr_state( server, 'N' )
role = util.get_role_of_server( server )
self.assertEquals( 'N', state,
'server%d - state:%s, role:%s, expected:N' % (server['id'], state, role) )
# shutdown
ret = testbase.request_to_shutdown_smr( server )
self.assertEquals( ret, 0, 'failed to shutdown smr' )
ret = testbase.request_to_shutdown_redis( server )
self.assertEquals( ret, 0, 'failed to shutdown redis' )
time.sleep( 3 )
# check state F
expected = 'F'
state = self.get_expected_smr_state( server, expected )
self.assertEquals( expected , state,
'server%d - state:%s, but expected:%s' % (server['id'], state, expected) )
# set value
ret = gw.connect( ip, port )
self.assertEquals( ret, 0, 'failed to connect to gateway, %s:%d' % (ip, port) )
timestamp = 0.0
for i in range( 0, 100 ):
timestamp = time.time()
key = 'new_key_haha'
cmd = 'set %s %f\r\n' % (key, timestamp)
gw.write( cmd )
res = gw.read_until( '\r\n' )
self.assertEquals( res, '+OK\r\n' )
gw.disconnect()
# recovery
ret = testbase.request_to_start_smr( server )
self.assertEquals( ret, 0, 'failed to start smr' )
ret = testbase.request_to_start_redis( server )
self.assertEquals( ret, 0, 'failed to start redis' )
ret = testbase.wait_until_finished_to_set_up_role( server, 10 )
self.assertEquals( ret, 0, 'failed to role change. smr_id:%d' % (server['id']) )
time.sleep( 5 )
redis = redis_mgmt.Redis( server['id'] )
ret = redis.connect( server['ip'], server['redis_port'] )
self.assertEquals( ret, 0, 'failed to connect to redis' )
# check state N
expected = 'N'
max_try = 20
for i in range( 0, max_try ):
state = self.get_expected_smr_state( server, expected )
if state == expected:
break
time.sleep( 1 )
role = util.get_role_of_server( server )
self.assertEquals( expected , state,
'server%d - state:%s, role:%s, but expected:%s' % (server['id'], state, role, expected) )
def test_1_state_transition( self ):
util.print_frame()
self.state_transition()
def get_mss( self ):
# get master, slave1, and slave2
master = util.get_server_by_role( self.cluster['servers'], 'master' )
self.assertNotEquals( master, None, 'failed to get master' )
slave1 = util.get_server_by_role( self.cluster['servers'], 'slave' )
self.assertNotEquals( slave1, None, 'failed to get slave1' )
slave2 = None
for server in self.cluster['servers']:
id = server['id']
if id != master['id'] and id != slave1['id']:
slave2 = server
break
self.assertNotEquals( slave2, None, 'failed to get slave2' )
return master, slave1, slave2
def test_2_consistent_after_failover( self ):
util.print_frame()
for i in range(3):
util.log('loop %d' % i)
self.consistent_after_failover()
def consistent_after_failover( self ):
max = 10000
wait_count = 15
key = 'caf'
# get master, slave1, and slave2
master, slave1, slave2 = self.get_mss()
# set value
ip, port = util.get_rand_gateway( self.cluster )
gw = gateway_mgmt.Gateway( ip )
gw.connect( ip, port )
for i in range( 0, max ):
cmd = 'set %s%d %d\r\n' % (key, i, i)
gw.write( cmd )
res = gw.read_until( '\r\n' )
self.assertEquals( res, '+OK\r\n' )
time.sleep( 5 )
# shutdown
servers = [master, slave1, slave2]
for server in servers:
util.log('before shutdown pgs%d' % server['id'])
for s in servers:
self.getseq_log(s)
ret = testbase.request_to_shutdown_smr( server )
self.assertEqual( ret, 0, 'failed to shutdown smr, server:%d' % server['id'] )
ret = testbase.request_to_shutdown_redis( server )
self.assertEquals( ret, 0, 'failed to shutdown redis' )
time.sleep( 5 )
# check state F
for server in servers:
state = self.get_expected_smr_state( server, 'F' )
self.assertEquals( 'F', state,
'server%d - state:%s' % (server['id'], state) )
# recovery
for server in servers:
ret = testbase.request_to_start_smr( server )
self.assertEqual( ret, 0, 'failed to start smr, server:%d' % server['id'] )
ret = testbase.request_to_start_redis( server, False )
self.assertEqual( ret, 0, 'failed to start redis, server:%d' % server['id'] )
util.log('after restart pgs%d' % server['id'])
for s in servers:
self.getseq_log(s)
time.sleep( 5 )
# wait for master election
for i in xrange(10):
ret = util.check_cluster( self.cluster['cluster_name'], self.leader_cm['ip'], self.leader_cm['cm_port'] )
if ret:
break
time.sleep(1)
# check state
for server in servers:
ret = testbase.wait_until_finished_to_set_up_role( server, wait_count )
self.assertEquals( ret, 0, 'failed to role change. server:%d' % (server['id']) )
state = self.get_expected_smr_state( server, 'N' )
role = util.get_role_of_server( server )
self.assertEquals( 'N', state,
'server%d - state:%s, role:%s' % (server['id'], state, role) )
the_number_of_master = 0
the_number_of_slave = 0
for server in servers:
role = util.get_role_of_server( server )
if role == c.ROLE_MASTER:
the_number_of_master = the_number_of_master + 1
elif role == c.ROLE_SLAVE:
the_number_of_slave = the_number_of_slave + 1
self.assertTrue( 1 == the_number_of_master and 2 == the_number_of_slave,
'failed to set roles, the number of master:%d, the number of slave:%d' %
(the_number_of_master, the_number_of_slave) )
# get master, slave1, and slave2
master, slave1, slave2 = self.get_mss()
# connect to a master`s redis and set data
redis = redis_mgmt.Redis( master['id'] )
ret = redis.connect( master['ip'], master['redis_port'] )
self.assertEquals( ret, 0, 'failed to connect to redis, server:%d' % master['id'] )
for i in range( max, max*2 ):
cmd = 'set %s%d %d\r\n' % (key, i, i)
redis.write( cmd )
res = redis.read_until( '\r\n' )
self.assertEquals( res, '+OK\r\n',
'failed to get response, server:%d' % master['id'] )
redis.disconnect()
# check slaves`s data
slaves = [slave1, slave2]
for slave in slaves:
slave_redis = redis_mgmt.Redis( slave['id'] )
ret = slave_redis .connect( slave['ip'], slave['redis_port'] )
self.assertEquals( ret, 0, 'failed to connect to redis, server:%d' % slave['id'] )
for i in range( 0, max*2 ):
cmd = 'get %s%d\r\n' % (key, i)
slave_redis.write( cmd )
trash = slave_redis.read_until( '\r\n' )
res = slave_redis.read_until( '\r\n' )
self.assertEquals( res, '%d\r\n' % i,
'inconsistent, server:%d, expected %d but %s' % (slave['id'], i, res) )
slave_redis.disconnect()
def test_3_heartbeat_target_connection_count( self ):
util.print_frame()
util.log( 'wait until all connections are established' )
for i in range(1, 8):
time.sleep(1)
util.log( '%d sec' % i )
# check pgs
for server in self.cluster['servers']:
before_cnt_redis = util.get_clients_count_of_redis(server['ip'], server['redis_port'])
before_cnt_smr = util.get_clients_count_of_smr(server['smr_mgmt_port'])
cmd = 'pgs_leave %s %d forced' % (self.cluster['cluster_name'], server['id'])
ret = util.cm_command(self.leader_cm['ip'], self.leader_cm['cm_port'], cmd)
jobj = json.loads(ret)
self.assertEqual( jobj['state'], 'success', 'failed : cmd="%s", reply="%s"' % (cmd, ret[:-2]) )
util.log( 'succeeded : cmd="%s", reply="%s"' % (cmd, ret[:-2]) )
# check redis
success = False
for i in range(5):
after_cnt = util.get_clients_count_of_redis(server['ip'], server['redis_port'])
if after_cnt <= 2:
success = True
break
time.sleep(1)
            self.assertEquals( success, True, 'failed : the number of connections to redis%d(%s:%d) is %d, expected:n<=2, before=%d' % (server['id'], server['ip'], server['redis_port'], after_cnt, before_cnt_redis) )
            util.log( 'succeeded : the number of connections to redis%d(%s:%d) is %d, expected=n<=2, before=%d' % (server['id'], server['ip'], server['redis_port'], after_cnt, before_cnt_redis) )
# check smr
success = False
expected = 1
for i in range(5):
after_cnt = util.get_clients_count_of_smr(server['smr_mgmt_port'])
if after_cnt == expected:
success = True
break
time.sleep(1)
            self.assertEquals( success, True, 'failed : the number of connections to smr%d(%s:%d) is %d, expected=%d, before=%d' % (server['id'], server['ip'], server['smr_mgmt_port'], after_cnt, expected, before_cnt_smr) )
            util.log( 'succeeded : the number of connections to smr%d(%s:%d) is %d, expected=%d, before=%d' % (server['id'], server['ip'], server['smr_mgmt_port'], after_cnt, expected, before_cnt_smr) )
# Go back to initial configuration
self.assertTrue(util.pgs_join(self.leader_cm['ip'], self.leader_cm['cm_port'], server['cluster_name'], server['id']),
'failed to join pgs %d' % server['id'])
# check gateway
for server in self.cluster['servers']:
before_cnt = util.get_clients_count_of_gw(server['ip'], server['gateway_port'])
cmd = 'gw_del %s %d' % (self.cluster['cluster_name'], server['id'])
ret = util.cm_command(self.leader_cm['ip'], self.leader_cm['cm_port'], cmd)
jobj = json.loads(ret)
self.assertEqual( jobj['state'], 'success', 'failed : cmd="%s", reply="%s"' % (cmd, ret[:-2]) )
util.log( 'succeeded : cmd="%s", reply="%s"' % (cmd, ret[:-2]) )
success = False
expected = 1
for i in range(5):
after_cnt = util.get_clients_count_of_gw(server['ip'], server['gateway_port'])
if after_cnt == expected:
success = True
break
time.sleep(1)
            self.assertEquals( success, True, 'failed : the number of connections to gateway%d(%s:%d) is %d, expected=%d.' % (server['id'], server['ip'], server['gateway_port'], after_cnt, expected) )
            util.log( 'succeeded : the number of connections to gateway%d(%s:%d) is %d, expected=%d.' % (server['id'], server['ip'], server['gateway_port'], after_cnt, expected) )
# Go back to initial configuration
self.assertTrue(util.gw_add(server['cluster_name'], server['id'], server['pm_name'], server['ip'], server['gateway_port'], self.leader_cm['ip'], self.leader_cm['cm_port']),
'failed to add gw %d' % server['id'])
def test_4_elect_master_randomly( self ):
util.print_frame()
for i in range(1):
self.elect_master_randomly()
def elect_master_randomly( self ):
# set data
ip, port = util.get_rand_gateway(self.cluster)
gw = gateway_mgmt.Gateway( '0' )
gw.connect( ip, port )
for i in range( 0, 1000 ):
cmd = 'set %s%d %d\r\n' % (self.key_base, i, i)
gw.write( cmd )
res = gw.read_until( '\r\n' )
self.assertEqual( res, '+OK\r\n', 'failed to set values to gw(%s:%d). cmd:%s, res:%s' % (ip, port, cmd[:-2], res[:-2]) )
server_ids = []
for server in self.cluster['servers']:
server_ids.append( server['id'] )
for try_cnt in range( 30 ):
# get master, slave1, slave2
m, s1, s2 = util.get_mss( self.cluster )
self.assertNotEqual( m, None, 'master is None.' )
self.assertNotEqual( s1, None, 'slave1 is None.' )
self.assertNotEqual( s2, None, 'slave2 is None.' )
util.log( 'master id : %d' % m['id'] )
if try_cnt != 0:
if m['id'] in server_ids:
server_ids.remove( m['id'] )
smr = smr_mgmt.SMR( m['id'] )
ret = smr.connect( m['ip'], m['smr_mgmt_port'] )
self.assertEqual( ret, 0, 'failed to connect to master. %s:%d' % (m['ip'], m['smr_mgmt_port']) )
cmd = 'role lconn\r\n'
smr.write( cmd )
reply = smr.read_until( '\r\n' )
self.assertEqual( reply, '+OK\r\n', 'failed : cmd="%s", reply="%s"' % (cmd[:-2], reply[:-2]) )
util.log( 'succeeded : cmd="%s", reply="%s"' % (cmd[:-2], reply[:-2]) )
# wait until role-change is finished
for role_change_try_cnt in range( 5 ):
count_master = 0
count_slave = 0
for server in self.cluster['servers']:
real_role = util.get_role_of_server( server )
real_role = util.roleNumberToChar( real_role )
if real_role == 'M':
count_master = count_master + 1
elif real_role == 'S':
count_slave = count_slave + 1
if count_master == 1 and count_slave == 2:
break;
time.sleep( 1 )
# check the number of master and slave
self.assertEqual( count_master, 1, 'failed : the number of master is not 1, count_master=%d, count_slave=%d' % (count_master, count_slave) )
self.assertEqual( count_slave, 2, 'failed : the number of slave is not 2, count_master=%d, count_slave=%d' % (count_master, count_slave) )
util.log( 'succeeded : the number of master is 1 and the number of slave is 2' )
# check states of all pgs in pg
for try_cnt in range( 3 ):
ok = True
for s in self.cluster['servers']:
real_role = util.get_role_of_server( s )
real_role = util.roleNumberToChar( real_role )
smr_info = util.get_smr_info( s, self.leader_cm )
cc_role = smr_info['smr_Role']
cc_hb = smr_info['hb']
if cc_hb != 'Y':
ok = False
if real_role != cc_role:
ok = False
if ok:
util.log( 'succeeded : a role of real pgs is the same with a role in cc, id=%d, real=%s, cc=%s, hb=%s' % (s['id'], real_role, cc_role, cc_hb) )
else:
util.log( '\n\n**********************************************************\n\nretry: a role of real pgs is not the same with a role in cc, id=%d, real=%s, cc=%s, hb=%s' % (s['id'], real_role, cc_role, cc_hb) )
if ok == False:
time.sleep( 0.5 )
else:
break
self.assertTrue( ok, 'failed : role check' )
if len( server_ids ) == 0:
util.log( 'succeeded : all smrs have been as a master' )
return 0
self.assertEqual( 0, len( server_ids ) , 'failed : remains server ids=[%s]' % (','.join('%d' % id for id in server_ids)) )
return 0
def test_5_from_n_to_1_heartbeat_checkers( self ):
util.print_frame()
for i in range( 0, len( self.cluster['servers'] ) - 1 ):
util.log( 'loop %d' % i )
server = self.cluster['servers'][i]
self.assertEquals( 0, testbase.request_to_shutdown_cm( server ),
'failed to request_to_shutdown_cm, server:%d' % server['id'] )
time.sleep( 20 )
self.leader_cm = self.cluster['servers'][i+1]
self.match_cluster_info(self.leader_cm['ip'], self.leader_cm['cm_port'], self.cluster)
self.state_transition()
# Go back to initial configuration
self.assertTrue(util.recover_confmaster(self.cluster, [0,1], 0),
'failed to recover confmaster.')
def test_6_from_3_to_6_heartbeat_checkers( self ):
util.print_frame()
hbc_svr_list = []
i = 5000 + len( self.cluster['servers'] )
for server in self.cluster['servers']:
i = i + 1
hbc_svr = {}
hbc_svr['id'] = i
hbc_svr['ip'] = server['ip']
hbc_svr['zk_port'] = server['zk_port']
hbc_svr_list.append(hbc_svr)
ret = testbase.setup_cm( i )
self.assertEquals( 0, ret, 'failed to copy heartbeat checker, server:%d' % hbc_svr['id'] )
ret = testbase.request_to_start_cm( i, i )
self.assertEquals( 0, ret,
'failed to request_to_start_cm, server:%d' % hbc_svr['id'] )
self.state_transition()
# Go back to initial configuration
for hbc_svr in hbc_svr_list:
self.assertEqual(0, testbase.request_to_shutdown_cm(hbc_svr),
'failed to shutdown confmaster')
def test_7_remaining_hbc_connection( self ):
util.print_frame()
# check pgs
for server in self.cluster['servers']:
before_cnt_redis = util.get_clients_count_of_redis(server['ip'], server['redis_port'])
before_cnt_smr = util.get_clients_count_of_smr(server['smr_mgmt_port'])
cmd = 'pgs_leave %s %d forced\r\npgs_del %s %d' % (self.cluster['cluster_name'], server['id'], self.cluster['cluster_name'], server['id'])
util.cm_command(self.leader_cm['ip'], self.leader_cm['cm_port'], cmd)
for server in self.cluster['servers']:
# check redis
success = False
for i in range(5):
after_cnt = util.get_clients_count_of_redis(server['ip'], server['redis_port'])
if after_cnt <= 2:
success = True
break
time.sleep(1)
            self.assertEquals( success, True, 'failed : the number of connections to redis%d(%s:%d) is %d, expected=n<=2, before=%d' % (server['id'], server['ip'], server['redis_port'], after_cnt, before_cnt_redis) )
            util.log( 'succeeded : the number of connections to redis%d(%s:%d) is %d, expected=n<=2, before=%d' % (server['id'], server['ip'], server['redis_port'], after_cnt, before_cnt_redis) )
# check smr
success = False
expected = 0
for i in range(5):
after_cnt = util.get_clients_count_of_smr(server['smr_mgmt_port'])
if after_cnt == expected:
success = True
break
time.sleep(1)
            self.assertEquals( success, True, 'failed : the number of connections to smr%d(%s:%d) is %d, expected=%d, before=%d' % (server['id'], server['ip'], server['smr_mgmt_port'], after_cnt, expected, before_cnt_smr) )
            util.log( 'succeeded : the number of connections to smr%d(%s:%d) is %d, expected=%d, before=%d' % (server['id'], server['ip'], server['smr_mgmt_port'], after_cnt, expected, before_cnt_smr) )
# check gateway
for server in self.cluster['servers']:
before_cnt = util.get_clients_count_of_gw(server['ip'], server['gateway_port'])
cmd = 'gw_del %s %d' % (self.cluster['cluster_name'], server['id'])
util.cm_command(self.leader_cm['ip'], self.leader_cm['cm_port'], cmd)
for server in self.cluster['servers']:
success = False
expected = 1
for i in range(5):
after_cnt = util.get_clients_count_of_gw(server['ip'], server['gateway_port'])
if after_cnt == expected:
success = True
break
time.sleep(1)
            self.assertEquals( success, True, 'failed : the number of connections to gateway%d(%s:%d) is %d, expected=%d.' % (server['id'], server['ip'], server['gateway_port'], after_cnt, expected) )
            util.log( 'succeeded : the number of connections to gateway%d(%s:%d) is %d, expected=%d.' % (server['id'], server['ip'], server['gateway_port'], after_cnt, expected) )
# Go back to initial configuration
# Cleanup PG
self.assertTrue(util.cm_success(util.cm_command(
self.leader_cm['ip'], self.leader_cm['cm_port'],
'pg_del %s %d' % (self.cluster['cluster_name'], self.cluster['servers'][0]['pg_id'])))[0])
# Cleanup processes of PGS and GW
for s in self.cluster['servers']:
self.assertEqual(0, util.shutdown_redis(s['id'], s['redis_port']),
'failed to kill redis %d process' % s['id'])
self.assertEqual(0, util.shutdown_smr(s['id'], s['ip'], s['smr_base_port']),
'failed to kill smr %d process' % s['id'])
self.assertEqual(0, util.shutdown_gateway(s['id'], s['gateway_port']),
'failed to kill gw %d process' % s['id'])
# Recover PG
self.assertTrue(
util.install_pg(self.cluster, self.cluster['servers'], self.cluster['servers'][0], start_gw=True),
'failed to recover PGS and GW in a PM')
def match_cluster_info(self, cm_ip, cm_port, cluster):
# Cluster
cluster_info = util.cluster_info(cm_ip, cm_port, cluster['cluster_name'])['cluster_info']
self.assertEquals(cluster_info['PN_PG_Map'], '0 8192')
self.assertEquals(cluster_info['Key_Space_Size'], 8192)
# PG
for pg_id in cluster['pg_id_list']:
pg = util.pg_info(cm_ip, cm_port, cluster['cluster_name'], pg_id)
self.assertIsNotNone(pg)
for s in self.cluster['servers']:
# GW
gw_info = util.get_gw_info(cm_ip, cm_port, cluster['cluster_name'], s['id'])
self.assertEquals(gw_info['port'], s['gateway_port'])
self.assertEquals(gw_info['state'], 'N')
self.assertEquals(gw_info['hb'], 'Y')
self.assertEquals(gw_info['pm_Name'], s['pm_name'])
self.assertEquals(gw_info['pm_IP'], s['ip'])
# PGS
pgs_info = util.get_pgs_info(cm_ip, cm_port, cluster['cluster_name'], s['id'])
self.assertEquals(pgs_info['pg_ID'], s['pg_id'])
self.assertEquals(pgs_info['pm_Name'], s['pm_name'])
self.assertEquals(pgs_info['pm_IP'], s['ip'])
self.assertEquals(pgs_info['backend_Port_Of_Redis'], s['redis_port'])
self.assertEquals(pgs_info['replicator_Port_Of_SMR'], s['smr_base_port'])
self.assertEquals(pgs_info['management_Port_Of_SMR'], s['smr_mgmt_port'])
self.assertEquals(pgs_info['state'], 'N')
self.assertEquals(pgs_info['hb'], 'Y')
self.assertEquals(pgs_info['color'], 'GREEN')
self.assertTrue(pgs_info['smr_Role'] == 'M' or pgs_info['smr_Role'] == 'S')
self.assertEquals(pgs_info['old_master_version'], '201')
| 1.710938 | 2 |
examples/nonogram/tests/cases.py | notechats/notegame | 17 | 12789487 | <reponame>notechats/notegame
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from pynogram.core.common import (
BOX, SPACE,
)
# TODO: more solved rows
CASES = [
([], '???', [SPACE, SPACE, SPACE]),
([1, 1, 5], '---#-- - # ', [
SPACE, SPACE, SPACE, BOX, SPACE, SPACE, None, None,
None, None, None, None, None, None, None, SPACE,
None, None, None, BOX, BOX, BOX, BOX, None]),
([9, 1, 1, 1], ' --#########------- #- - ', [
SPACE, SPACE, SPACE, SPACE, SPACE, BOX, BOX, BOX,
BOX, BOX, BOX, BOX, BOX, BOX, SPACE, SPACE,
SPACE, SPACE, SPACE, SPACE, SPACE, None, None, SPACE,
BOX, SPACE, None, SPACE, None]),
([5, 6, 3, 1, 1], ' #- ----- ##- --- #-', [
None, None, None, None, None, None, None, None,
None, SPACE, None, BOX, BOX, BOX, BOX, BOX,
SPACE, SPACE, SPACE, SPACE, SPACE, SPACE, SPACE, SPACE,
SPACE, None, None, None, BOX, BOX, BOX, SPACE,
None, None, None, None, None, None, SPACE, SPACE,
SPACE, None, None, SPACE, BOX, SPACE]),
([4, 2], ' # . ', [
None, BOX, BOX, BOX, None, SPACE, BOX, BOX]),
([4, 2], ' # . ', [
BOX, BOX, BOX, BOX, SPACE, None, BOX, None]),
((1, 1, 2, 1, 1, 3, 1),
[
BOX, SPACE, SPACE, None, None, SPACE, None, BOX,
None, SPACE, SPACE, BOX, None, None, None, None,
None, BOX, None, None, None, None], [
BOX, SPACE, SPACE, None, None, SPACE, None, BOX,
None, SPACE, SPACE, BOX, SPACE, None, None, None,
None, BOX, None, None, None, None]),
]
BAD_CASES = [
([4, 2], ' # . '),
([4, 2], ' # .# #'),
((5, 3, 2, 2, 4, 2, 2),
'-#####----###-----------##- ### '),
]
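# Illustrative sketch (not part of the original test data): one way a test could consume
# these fixtures. The `solve_row` callable is a placeholder for whatever line solver the
# project exposes; it is passed in as an argument because the real solver name is not
# shown in this file. BAD_CASES rows are presumably expected to be rejected by the solver.
def _example_check_cases(solve_row):
    # Each CASES entry is (clues, partially solved row, expected row after solving).
    for clues, initial_row, expected_row in CASES:
        assert list(solve_row(clues, initial_row)) == list(expected_row)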
| 1.914063 | 2 |
weight_comparisons.py | bartulem/KISN-pancortical-kinematics | 2 | 12789488 | <gh_stars>1-10
"""
Compares tuning-curve rate differences in weight/no-weight sessions.
@author: bartulem
"""
import os
import sys
import json
import scipy.stats
import numpy as np
from mpl_toolkits.mplot3d.art3d import Line3DCollection
import matplotlib.patches as patches
import matplotlib.colorbar as cbar
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter1d, uniform_filter1d
from tqdm import tqdm
from random import gauss
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import sessions2load
import make_ratemaps
import neural_activity
def extract_json_data(json_file='', features=None,
peak_min=True, der='1st', rate_stability_bound=True,
ref_dict=None, test_session='weight', composite_score=True):
"""
Description
----------
This function extracts json data (weight/tuning peaks/stability)
and returns it packed into a dictionary.
----------
Parameters
----------
**kwargs (dictionary)
json_file (str)
Absolute path to the .json file of interest.
composite_score (bool)
To use the composite score for rate maps, instead of something else; defaults to True.
features (list)
List of features you're interested in (der not necessary!); defaults to ['Speeds'].
        peak_min (int / bool)
            The minimum peak rate in the light1 session for a cluster to be considered; defaults to True.
        rate_stability_bound (int / bool)
            The maximum allowed deviation of the OF2 peak rate from the OF1 peak rate (as a percentage of the OF1 rate); defaults to True.
der (str)
Derivative of choice; defaults to '1st'.
ref_dict (dict)
The reference and second light session: defaults to {'ref_session': 'light1', 'other_session': 'light2'}
test_session (str)
The test session of interest; defaults to 'weight'.
----------
Returns
----------
        weight_dict (dict)
A dictionary which contains the desired data.
----------
"""
if ref_dict is None:
ref_dict = {'ref_session': 'light1', 'other_session': 'light2'}
if features is None:
features = ['Speeds']
with open(json_file) as j_file:
json_data = json.load(j_file)
weight_dict = {}
for feature in features:
for feature_type in [feature, f'{feature}_{der}_der']:
if der == '2nd' and feature == 'Speeds':
continue
else:
if composite_score:
weight_dict[feature_type] = {'auc': {ref_dict['ref_session']: [], test_session: [], ref_dict['other_session']: []},
'peaks': {ref_dict['ref_session']: [], test_session: [], ref_dict['other_session']: []},
'information_rates': {ref_dict['ref_session']: [], test_session: [], ref_dict['other_session']: []},
'stability': {'{}-{}'.format(ref_dict['ref_session'], test_session): [],
'{}-{}'.format(ref_dict['other_session'], test_session): [],
'{}-{}'.format(ref_dict['ref_session'], ref_dict['other_session']): []}}
for cl_num in json_data.keys():
if composite_score:
for key in json_data[cl_num]['features'].keys():
if key in weight_dict.keys() and len(json_data[cl_num]['features'][key][ref_dict['ref_session']]['rm']) > 5:
ref_session_data = json_data[cl_num]['features'][key][ref_dict['ref_session']]['rm']
test_session_data = json_data[cl_num]['features'][key][test_session]['rm']
other_session_data = json_data[cl_num]['features'][key][ref_dict['other_session']]['rm']
                        if (peak_min is True or np.max(ref_session_data) > peak_min) \
                                and (peak_min is True or np.max(test_session_data) > peak_min) \
                                and (peak_min is True or np.max(other_session_data) > peak_min) \
                                and (rate_stability_bound is True
                                     or abs(json_data[cl_num]['baseline_firing_rates'][ref_dict['ref_session']]
                                            - json_data[cl_num]['baseline_firing_rates'][test_session])
                                     < (json_data[cl_num]['baseline_firing_rates'][ref_dict['ref_session']] * (rate_stability_bound / 100))):
weight_dict[key]['auc'][ref_dict['ref_session']].append(np.sum(ref_session_data))
weight_dict[key]['auc'][test_session].append(np.sum(test_session_data))
weight_dict[key]['auc'][ref_dict['other_session']].append(np.sum(other_session_data))
weight_dict[key]['peaks'][ref_dict['ref_session']].append(json_data[cl_num]['features'][key][ref_dict['ref_session']]['x'][np.argmax(ref_session_data)])
weight_dict[key]['peaks'][test_session].append(json_data[cl_num]['features'][key][test_session]['x'][np.argmax(test_session_data)])
weight_dict[key]['peaks'][ref_dict['other_session']].append(json_data[cl_num]['features'][key][ref_dict['other_session']]['x'][np.argmax(other_session_data)])
weight_dict[key]['information_rates'][ref_dict['ref_session']].append(json_data[cl_num]['features'][key]['ICr-{}'.format(ref_dict['ref_session'])])
weight_dict[key]['information_rates'][test_session].append(json_data[cl_num]['features'][key]['ICr-{}'.format(test_session)])
weight_dict[key]['information_rates'][ref_dict['other_session']].append(json_data[cl_num]['features'][key]['ICr-{}'.format(ref_dict['other_session'])])
weight_dict[key]['stability']['{}-{}'.format(ref_dict['ref_session'], test_session)].append(scipy.stats.spearmanr(ref_session_data, test_session_data)[0])
weight_dict[key]['stability']['{}-{}'.format(ref_dict['other_session'], test_session)].append(scipy.stats.spearmanr(other_session_data, test_session_data)[0])
weight_dict[key]['stability']['{}-{}'.format(ref_dict['ref_session'], ref_dict['other_session'])].append(scipy.stats.spearmanr(ref_session_data, other_session_data)[0])
return weight_dict
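# Illustrative sketch (not part of the original module): a typical call into
# extract_json_data(). The JSON path below is a placeholder rather than a file that is
# guaranteed to exist for this project; 'Speeds' is the documented default feature.
def _example_extract_json_data_usage():
    weight_dict = extract_json_data(json_file='/path/to/tuning_peaks.json',
                                    features=['Speeds'],
                                    peak_min=True,
                                    rate_stability_bound=True,
                                    der='1st',
                                    ref_dict={'ref_session': 'light1', 'other_session': 'light2'},
                                    test_session='weight')
    # weight_dict[feature]['auc' / 'peaks' / 'information_rates'] map session names to lists of
    # per-cluster values; weight_dict[feature]['stability'] maps session pairs to correlations.
    return weight_dict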
def make_shuffled_distributions(weight_dict, ref_dict, test_session, n_shuffles=1000):
shuffled_dict = {}
for feature in tqdm(weight_dict.keys()):
shuffled_dict[feature] = {'auc': {'null_differences': np.zeros(n_shuffles), 'true_difference': 0, 'z-value': 1, 'p-value': 1},
'peaks': {'null_differences': np.zeros(n_shuffles), 'true_difference': 0, 'z-value': 1, 'p-value': 1},
'stability': {'null_differences': np.zeros(n_shuffles), 'true_difference': 0, 'z-value': 1, 'p-value': 1},
'information_rates': {'null_differences': np.zeros(n_shuffles), 'true_difference': 0, 'z-value': 1, 'p-value': 1}}
for attribute in ['auc', 'information_rates']:
shuffled_dict[feature][attribute]['true_difference'] = np.mean(np.diff(np.array([weight_dict[feature][attribute][test_session],
weight_dict[feature][attribute][ref_dict['ref_session']]]), axis=0))
corr_arr = np.array([weight_dict[feature]['stability']['{}-{}'.format(ref_dict['ref_session'], test_session)],
weight_dict[feature]['stability']['{}-{}'.format(ref_dict['ref_session'], ref_dict['other_session'])]])
corr_arr[corr_arr > .99] = .99
true_difference_corr = np.mean(np.diff(np.arctanh(corr_arr), axis=0))
for sh in range(n_shuffles):
joint_arr = np.array([weight_dict[feature]['auc'][test_session],
weight_dict[feature]['auc'][ref_dict['ref_session']]])
joint_arr_peaks = np.array([weight_dict[feature]['peaks'][test_session],
weight_dict[feature]['peaks'][ref_dict['ref_session']]])
joint_arr_ir = np.array([weight_dict[feature]['information_rates'][test_session],
weight_dict[feature]['information_rates'][ref_dict['ref_session']]])
joint_arr_corr = np.arctanh(corr_arr.copy())
            for col in range(joint_arr.shape[1]):
                np.random.shuffle(joint_arr[:, col])
                np.random.shuffle(joint_arr_peaks[:, col])
                np.random.shuffle(joint_arr_ir[:, col])
                np.random.shuffle(joint_arr_corr[:, col])
            shuffled_dict[feature]['auc']['null_differences'][sh] = np.mean(np.diff(joint_arr, axis=0))
            shuffled_dict[feature]['peaks']['null_differences'][sh] = np.mean(np.diff(joint_arr_peaks, axis=0))
            shuffled_dict[feature]['information_rates']['null_differences'][sh] = np.mean(np.diff(joint_arr_ir, axis=0))
            shuffled_dict[feature]['stability']['null_differences'][sh] = np.mean(np.diff(joint_arr_corr, axis=0))
        for attribute in ['auc', 'peaks', 'information_rates']:
            shuffled_dict[feature][attribute]['z-value'] = (shuffled_dict[feature][attribute]['true_difference'] - shuffled_dict[feature][attribute]['null_differences'].mean()) \
                                                           / shuffled_dict[feature][attribute]['null_differences'].std()
            p_val_attribute = 1 - scipy.stats.norm.cdf(shuffled_dict[feature][attribute]['z-value'])
            if p_val_attribute < .5:
                shuffled_dict[feature][attribute]['p-value'] = p_val_attribute
            else:
                shuffled_dict[feature][attribute]['p-value'] = 1 - p_val_attribute
        shuffled_dict[feature]['stability']['z-value'] = (true_difference_corr - shuffled_dict[feature]['stability']['null_differences'].mean()) \
                                                         / shuffled_dict[feature]['stability']['null_differences'].std()
        p_val_correlations = 1 - scipy.stats.norm.cdf(shuffled_dict[feature]['stability']['z-value'])
        if p_val_correlations < .5:
            shuffled_dict[feature]['stability']['p-value'] = p_val_correlations
        else:
            shuffled_dict[feature]['stability']['p-value'] = 1 - p_val_correlations
        shuffled_dict[feature]['stability']['true_difference'] = np.tanh(true_difference_corr)
        shuffled_dict[feature]['stability']['null_differences'] = np.tanh(shuffled_dict[feature]['stability']['null_differences'])
return shuffled_dict
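# Illustrative sketch (not part of the original module): the column-wise permutation scheme
# used above, reduced to a single paired sample. For every unit, the two session values are
# randomly swapped, which builds a null distribution of mean differences under the hypothesis
# that session identity is exchangeable.
def _example_paired_permutation(values_a, values_b, n_shuffles=1000):
    null_differences = np.zeros(n_shuffles)
    for sh in range(n_shuffles):
        joint_arr = np.array([values_a, values_b], dtype=float)
        for col in range(joint_arr.shape[1]):
            np.random.shuffle(joint_arr[:, col])
        null_differences[sh] = np.mean(np.diff(joint_arr, axis=0))
    true_difference = np.mean(np.diff(np.array([values_a, values_b], dtype=float), axis=0))
    z_value = (true_difference - null_differences.mean()) / null_differences.std()
    return true_difference, null_differences, z_value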
class WeightComparer:
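    """
    Compares tuning-curve statistics (AUC, peak positions, information rates and stability)
    between the reference light sessions and the weight (test) session.
    """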
def __init__(self, weight_json_file='', chosen_features=None,
rate_stability_bound=True, peak_min=True,
save_dir='', save_fig=False, fig_format='png',
der='1st', baseline_dict={},
ref_dict=None, test_session_type='weight',
beh_plot_sessions={}):
if chosen_features is None:
chosen_features = ['Speeds']
if ref_dict is None:
ref_dict = {'ref_session': 'light1', 'other_session': 'light2'}
self.weight_json_file = weight_json_file
self.chosen_features = chosen_features
self.peak_min = peak_min
self.rate_stability_bound = rate_stability_bound
self.save_dir = save_dir
self.save_fig = save_fig
self.fig_format = fig_format
self.der = der
self.baseline_dict = baseline_dict
self.ref_dict = ref_dict
self.test_session_type = test_session_type
self.beh_plot_sessions = beh_plot_sessions
def baseline_rate_change_over_time(self, **kwargs):
"""
Description
----------
This method plots how the baseline firing rate changes over
time for all clusters that were significant for at least one
1-D feature. [1] The first plot considers six example clusters
from all animals that specific brain area was recorded from. In
each session (light1, weight, light2), spikes are allocated into
10 second bins and smoothed with a Gaussian (sigma=1). They are
then concatenated into a single array and a rolling mean (size=50) is
calculated over the whole window.
----------
Parameters
----------
**kwargs (dictionary)
condensing_bin (int)
The size of the bin for calculating rates (in seconds); defaults to 10.
smooth_sd (int)
The SD of the smoothing window; defaults to 1 (bin).
rolling_average_window (int)
The size of the rolling mean window; defaults to 50 (bins).
----------
Returns
----------
baseline_change_examples (plot)
A plot with cluster examples for firing rate changes over sessions.
----------
"""
condensing_bin = kwargs['condensing_bin'] if 'condensing_bin' in kwargs.keys() and type(kwargs['condensing_bin']) == int else 10
smooth_sd = kwargs['smooth_sd'] if 'smooth_sd' in kwargs.keys() and type(kwargs['smooth_sd']) == int else 1
rolling_average_window = kwargs['rolling_average_window'] if 'rolling_average_window' in kwargs.keys() and type(kwargs['rolling_average_window']) == int else 50
# activity_dict = {}
# for animal in self.baseline_dict.keys():
# activity_dict[animal] = {}
# for cl_id in self.baseline_dict[animal]['cl_ids']:
# activity_dict[animal][cl_id] = {'light1': 0, self.test_session_type: 0, 'light2': 0}
# for idx, session_name in enumerate(['light1', self.test_session_type, 'light2']):
# file_id, \
# activity_dictionary, \
# purged_spikes_dictionary = neural_activity.Spikes(input_file=self.baseline_dict[animal]['files'][idx]).convert_activity_to_frames_with_shuffles(get_clusters=cl_id,
# to_shuffle=False,
# condense_arr=True,
# condense_bin_ms=int(1000*condensing_bin))
# activity_dict[animal][cl_id][session_name] = gaussian_filter1d(input=activity_dictionary[cl_id]['activity'].todense(), sigma=smooth_sd) / condensing_bin
#
# plot_dict = {}
# labels = {}
# borders = {}
# for animal in activity_dict.keys():
# plot_dict[animal] = {}
# labels[animal] = {}
# borders[animal] = [0, 0, 0]
# for cl_idx, cl_id in enumerate(activity_dict[animal].keys()):
# concatenated_activity = np.concatenate((activity_dict[animal][cl_id]['light1'],
# activity_dict[animal][cl_id][self.test_session_type],
# activity_dict[animal][cl_id]['light2']))
# labels[animal][cl_id] = concatenated_activity.max()
# smoothed_activity = uniform_filter1d(concatenated_activity, size=rolling_average_window)
# plot_dict[animal][cl_id] = smoothed_activity / smoothed_activity.max()
# if cl_idx == 0:
# borders[animal][0] = activity_dict[animal][cl_id]['light1'].shape[0]
# borders[animal][1] = activity_dict[animal][cl_id]['light1'].shape[0] \
# + activity_dict[animal][cl_id][self.test_session_type].shape[0]
# borders[animal][2] = activity_dict[animal][cl_id]['light1'].shape[0] \
# + activity_dict[animal][cl_id][self.test_session_type].shape[0]\
# + activity_dict[animal][cl_id]['light2'].shape[0]
#
# row_num = len(self.baseline_dict.keys())
# fig, ax = plt.subplots(nrows=row_num, ncols=1, figsize=(6.4, row_num*4.8))
# for animal_idx, animal in enumerate(self.baseline_dict.keys()):
# ax = plt.subplot(row_num, 1, animal_idx+1)
# for cl_id in sorted(labels[animal], key=labels[animal].get, reverse=True):
# ax.plot(plot_dict[animal][cl_id], '-',
# color='#000000',
# alpha=labels[animal][cl_id] / 100 + .05,
# label=f'{labels[animal][cl_id]} spikes/s')
# ax.legend(frameon=False)
# ax.set_title(f'Rat #{animal}')
# for bo_idx, border in enumerate(borders[animal]):
# if bo_idx < 2:
# ax.axvline(x=border, ls='-.', color='#000000', alpha=.25)
# ax.set_xticks([borders[animal][0] // 2,
# (borders[animal][1]+borders[animal][0]) // 2,
# (borders[animal][2]+borders[animal][1]) // 2])
# ax.set_xticklabels(['light1', self.test_session_type, 'light2'])
# ax.tick_params(axis='both', which='both', length=0)
# ax.set_ylabel('Order of recordings')
# ax.set_ylabel('Peak normalized activity')
# ax.spines['top'].set_visible(False)
# ax.spines['right'].set_visible(False)
# ax.spines['bottom'].set_visible(False)
# if self.save_fig:
# if os.path.exists(self.save_dir):
# fig.savefig(f'{self.save_dir}{os.sep}baseline_change_examples.{self.fig_format}', dpi=300)
# else:
# print("Specified save directory doesn't exist. Try again.")
# sys.exit()
# plt.show()
# statistics on baseline rate change
with open(self.weight_json_file, 'r') as json_file:
data = json.load(json_file)
baseline_rates = {'light1': [], self.test_session_type: [], 'light2': []}
for cl_num in data.keys():
baseline_rates['light1'].append(data[cl_num]['baseline_firing_rates']['light1'])
baseline_rates['light2'].append(data[cl_num]['baseline_firing_rates']['light2'])
baseline_rates[self.test_session_type].append(data[cl_num]['baseline_firing_rates'][self.test_session_type])
# fig2, ax2 = plt.subplots(nrows=1, ncols=1)
# ax2 = plt.subplot(1, 1, 1)
# hist_l1, edges = np.histogram(a=baseline_rates['light1'], bins=np.linspace(0, 40, 50))
# hist_l2, edges2 = np.histogram(a=baseline_rates['light2'], bins=np.linspace(0, 40, 50))
# test_session, edges2 = np.histogram(a=baseline_rates[self.test_session_type], bins=np.linspace(0, 40, 50))
# bin_centers = 0.5*(edges[1:]+edges[:-1])
# plt.plot(bin_centers, hist_l1, c='#000000', alpha=.35, label='light1')
# plt.plot(bin_centers, hist_l2, c='#000000', alpha=.45, label='light2')
# plt.plot(bin_centers, test_session, c='#000000', alpha=1, label=self.test_session_type)
# plt.legend()
# plt.title('Baseline firing rate distributions')
# plt.xlabel('Baseline firing rate (spikes/s)')
# plt.ylabel('Unit count')
# if self.save_fig:
# if os.path.exists(self.save_dir):
# fig2.savefig(f'{self.save_dir}{os.sep}firing_rate_distributions.{self.fig_format}', dpi=300)
# else:
# print("Specified save directory doesn't exist. Try again.")
# sys.exit()
# plt.show()
# fig2, ax2 = plt.subplots(nrows=1, ncols=1)
# ax2 = plt.subplot(1, 1, 1)
# ax2.scatter(x=[gauss(.2, .025) for x in range(len(baseline_rates['light1']))], y=baseline_rates['light1'],
# color='#000000', alpha=.15, s=10)
# ax2.boxplot(x=baseline_rates['light1'], positions=[.35], notch=True, sym='', widths=.1)
# ax2.scatter(x=[gauss(.6, .025) for x in range(len(baseline_rates[self.test_session_type]))], y=baseline_rates[self.test_session_type],
# color='#000000', alpha=.75, s=10)
# ax2.boxplot(x=baseline_rates[self.test_session_type], positions=[.75], notch=True, sym='', widths=.1)
# ax2.scatter(x=[gauss(1., .025) for x in range(len(baseline_rates['light2']))], y=baseline_rates['light2'],
# color='#000000', alpha=.15, s=10)
# ax2.boxplot(x=baseline_rates['light2'], positions=[1.15], notch=True, sym='', widths=.1)
# ax2.set_xticks([.275, .675, 1.075])
# ax2.set_xticklabels(['light1', self.test_session_type, 'light2'])
# ax2.set_ylabel('Firing rate (spikes/s)')
# ax2.set_yscale('log')
# ax2.spines['top'].set_visible(False)
# ax2.spines['right'].set_visible(False)
# if self.save_fig:
# if os.path.exists(self.save_dir):
# fig2.savefig(f'{self.save_dir}{os.sep}baseline_change_statistics1.{self.fig_format}', dpi=300)
# else:
# print("Specified save directory doesn't exist. Try again.")
# sys.exit()
# plt.show()
diff_light1_weight = np.diff(np.array([baseline_rates[self.test_session_type], baseline_rates['light1']]), axis=0).ravel()
diff_weight_light2 = np.diff(np.array([baseline_rates['light2'], baseline_rates[self.test_session_type]]), axis=0).ravel()
diff_light1_light2 = np.diff(np.array([baseline_rates['light2'], baseline_rates['light1']]), axis=0).ravel()
shuffled = np.zeros((3, 1000))
for sh in tqdm(range(1000)):
for idx, n in enumerate([(self.test_session_type, 'light1'), ('light2', self.test_session_type), ('light2', 'light1')]):
joint_arr = np.array([baseline_rates[n[0]], baseline_rates[n[1]]])
for col in range(joint_arr.shape[1]):
np.random.shuffle(joint_arr[:, col])
shuffled[idx, sh] = np.mean(np.diff(joint_arr, axis=0))
fig3, ax3 = plt.subplots(nrows=1, ncols=3, figsize=(6.4 * 3, 4.8))
ax31 = plt.subplot(1, 3, 1)
hist_n, hist_bins, hist_patches = ax31.hist(diff_light1_weight, bins=np.linspace(-7.5, 7.5, 50), histtype='stepfilled', color='#FFFFFF', edgecolor='#000000')
ax31.axvline(x=0, ls='-.', color='#000000', alpha=.25)
ax31.plot(diff_light1_weight.mean(), 12, marker='o', color='#000000')
ax31.set_xlabel(f'light1 - {self.test_session_type} (spikes/s)')
ax31.set_ylabel('Number of units')
p_value = 1 - scipy.stats.norm.cdf((diff_light1_weight.mean() - shuffled[0, :].mean()) / shuffled[0, :].std())
ax31.text(x=4, y=480, s=f'p={p_value:.2e}')
axins1 = inset_axes(ax31, width='40%', height='30%', loc=2)
axins1.hist(shuffled[0, :], bins=np.linspace(-.15, .15, 20), histtype='stepfilled', color='#000000', alpha=.25)
axins1.axvline(x=np.nanpercentile(shuffled[0, :], 99.5), color='#000000', ls='-.', alpha=.5)
axins1.plot(diff_light1_weight.mean(), 10, marker='o', color='#000000')
axins1.set_xticks([-.1, 0, .1, .2])
axins1.set_yticks([0, 50, 100, 150])
ax32 = plt.subplot(1, 3, 2)
ax32.hist(diff_weight_light2, bins=hist_bins, histtype='stepfilled', color='#FFFFFF', edgecolor='#000000')
ax32.axvline(x=0, ls='-.', color='#000000', alpha=.25)
ax32.plot(diff_weight_light2.mean(), 12, marker='o', color='#000000')
ax32.set_xlabel(f'{self.test_session_type} - light2 (spikes/s)')
p_value_2 = 1 - scipy.stats.norm.cdf((diff_weight_light2.mean() - shuffled[1, :].mean()) / shuffled[1, :].std())
ax32.text(x=6, y=490, s=f'p={p_value_2:.2f}')
axins2 = inset_axes(ax32, width='40%', height='30%', loc=2)
axins2.hist(shuffled[1, :], bins=np.linspace(-.15, .15, 20), histtype='stepfilled', color='#000000', alpha=.25)
axins2.plot(diff_weight_light2.mean(), 10, marker='o', color='#000000')
axins2.set_xticks([-.1, 0, .1])
axins2.set_yticks([0, 50, 100, 150])
ax33 = plt.subplot(1, 3, 3)
ax33.hist(diff_light1_light2, bins=hist_bins, histtype='stepfilled', color='#FFFFFF', edgecolor='#000000')
ax33.axvline(x=0, ls='-.', color='#000000', alpha=.25)
ax33.plot(diff_light1_light2.mean(), 10, marker='o', color='#000000')
ax33.set_xlabel('light1 - light2 (spikes/s)')
p_value_3 = 1 - scipy.stats.norm.cdf((diff_light1_light2.mean() - shuffled[2, :].mean()) / shuffled[2, :].std())
ax33.text(x=4, y=420, s=f'p={p_value_3:.2e}')
axins3 = inset_axes(ax33, width='40%', height='30%', loc=2)
axins3.hist(shuffled[2, :], bins=np.linspace(-.15, .15, 20), histtype='stepfilled', color='#000000', alpha=.25)
axins3.axvline(x=np.nanpercentile(shuffled[2, :], 99.5), color='#000000', ls='-.', alpha=.5)
axins3.plot(diff_light1_light2.mean(), 7, marker='o', color='#000000')
axins3.set_xticks([-.1, 0, .1, .2])
axins3.set_yticks([0, 50, 100])
if self.save_fig:
if os.path.exists(self.save_dir):
fig3.savefig(f'{self.save_dir}{os.sep}baseline_change_statistics2.{self.fig_format}', dpi=300)
else:
print("Specified save directory doesn't exist. Try again.")
sys.exit()
plt.show()
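    # Illustrative sketch (not used by the class): the binning/smoothing pipeline described in
    # the docstring above, written out for one cluster. `spike_counts_per_session` is assumed
    # to be a list of 1-D arrays of spike counts per `condensing_bin`-second bin, one per session.
    @staticmethod
    def _example_rate_timecourse(spike_counts_per_session, condensing_bin=10,
                                 smooth_sd=1, rolling_average_window=50):
        # convert binned counts to rates, smooth each session, concatenate and take a rolling mean
        smoothed_sessions = [gaussian_filter1d(input=counts, sigma=smooth_sd) / condensing_bin
                             for counts in spike_counts_per_session]
        concatenated_rates = np.concatenate(smoothed_sessions)
        return uniform_filter1d(concatenated_rates, size=rolling_average_window)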
def plot_weight_features(self, **kwargs):
"""
Description
----------
This method plots the raw distribution of light1-light2 and
        light1-weight peak rate differences, also in the form of a
        scatter plot and a probability density distribution.
----------
Parameters
----------
**kwargs (dictionary)
            peak_min (int / bool)
                The minimum peak rate in the light1 session for a cluster to be considered; defaults to True.
            rate_stability_bound (int / float / bool)
                The maximum allowed deviation of the OF2 peak rate from the OF1 peak rate (as a percentage of the OF1 rate); defaults to True.
            hist_max (int)
                The ymax value for the shuffled histograms; defaults to 301.
            hist_step (int)
                The step for y-values on the shuffled histograms; defaults to 50.
----------
Returns
----------
weight_feature_distributions (plot)
A plot with weight peak rate differences distributions.
----------
"""
hist_max = kwargs['hist_max'] if 'hist_max' in kwargs.keys() and type(kwargs['hist_max']) == int else 301
hist_step = kwargs['hist_step'] if 'hist_step' in kwargs.keys() and type(kwargs['hist_step']) == int else 50
weight_dict = extract_json_data(json_file=self.weight_json_file,
features=self.chosen_features,
peak_min=self.peak_min,
rate_stability_bound=self.rate_stability_bound,
der=self.der,
ref_dict=self.ref_dict,
test_session=self.test_session_type)
shuffled_dict = make_shuffled_distributions(weight_dict=weight_dict, ref_dict=self.ref_dict,
test_session=self.test_session_type)
fig = plt.figure(figsize=(16, 20))
gs_left = [.075, .325, .575, .825]
gs_right = [.25, .5, .75, .98]
for gs_idx, gs in enumerate(['auc', 'peaks', 'information_rates', 'stability']):
gs1 = fig.add_gridspec(nrows=12, ncols=3, left=gs_left[gs_idx],
right=gs_right[gs_idx], wspace=.1, hspace=.5)
if gs != 'peaks':
ax1 = fig.add_subplot(gs1[:2, :])
ax2 = fig.add_subplot(gs1[6:8, :])
ax3 = fig.add_subplot(gs1[3:5, :])
ax4 = fig.add_subplot(gs1[9:11, :])
for chosen_feature in self.chosen_features:
if 'head' in chosen_feature or 'Head' in chosen_feature:
axes_list = [ax1, ax2]
else:
axes_list = [ax3, ax4]
chosen_feature_der = f'{chosen_feature}_{self.der}_der'
feature_colors = [[val for key, val in make_ratemaps.Ratemap.feature_colors.items() if key == chosen_feature][0],
[val for key, val in make_ratemaps.Ratemap.feature_colors.items() if key == chosen_feature_der][0]]
for color_idx, (ax, specific_feature) in enumerate(zip(axes_list, [chosen_feature, chosen_feature_der])):
if gs != 'stability':
ax.scatter(x=np.array(weight_dict[specific_feature][gs][self.ref_dict['ref_session']]),
y=np.array(weight_dict[specific_feature][gs][self.test_session_type]),
color=feature_colors[color_idx], alpha=1, s=10)
else:
ax.scatter(x=np.array(weight_dict[chosen_feature][gs]['{}-{}'.format(self.ref_dict['ref_session'], self.ref_dict['other_session'])]),
y=np.array(weight_dict[chosen_feature][gs]['{}-{}'.format(self.ref_dict['ref_session'], self.test_session_type)]),
color=feature_colors[color_idx], alpha=1, s=10)
for ax_idx, ax in enumerate([ax1, ax2, ax3, ax4]):
if ax_idx == 0:
ax.set_title(gs.replace('_', ' '))
if gs == 'auc':
ax.plot([1, 3e3], [1, 3e3], ls='-.', lw=.5, color='#000000')
ax.set_xlim(1, 3e3)
ax.set_xscale('log')
ax.set_ylim(1, 3e3)
ax.set_yscale('log')
ax.tick_params(axis='both', which='major', length=0, labelsize=8, pad=.5)
ax.set_xlabel('{} AUC (spikes/s)'.format(self.ref_dict['ref_session']), labelpad=.1)
ax.set_ylabel('{} AUC (spikes/s)'.format(self.test_session_type), labelpad=.1)
elif gs == 'information_rates':
ax.plot([.001, 1], [.001, 1], ls='-.', lw=.5, color='#000000')
ax.tick_params(axis='both', which='major', length=0, labelsize=8, pad=.75)
ax.set_xlim(.001, 1)
ax.set_xscale('log')
ax.set_ylim(.001, 1)
ax.set_yscale('log')
ax.set_xlabel('{} info rate (bits/spike)'.format(self.ref_dict['ref_session']), labelpad=.1)
ax.set_ylabel(f'{self.test_session_type} info rate (bits/spike)', labelpad=.1)
else:
ax.plot([-0.1, 1.1], [-0.1, 1.1], ls='-.', lw=.5, color='#000000')
ax.set_xlim(-0.1, 1.1)
ax.set_ylim(-0.1, 1.1)
ax.set_xticks([0, .25, .5, .75, 1])
ax.set_yticks([0, .25, .5, .75, 1])
ax.tick_params(axis='both', which='major', length=1, labelsize=8, pad=.75)
ax.set_xlabel('{}-{} correlation'.format(self.ref_dict['ref_session'], self.ref_dict['other_session']), labelpad=.1)
ax.set_ylabel('{}-{} correlation'.format(self.test_session_type, self.ref_dict['ref_session']), labelpad=.1)
else:
head_count = 0
back_count = 0
gs_x = {'head': [0, 1, 1], 'head_der': [6, 7, 7],
'back': [3, 4, 4], 'back_der': [9, 10, 10]}
for chosen_feature in self.chosen_features:
chosen_feature_der = f'{chosen_feature}_{self.der}_der'
feature_colors = [[val for key, val in make_ratemaps.Ratemap.feature_colors.items() if key == chosen_feature][0],
[val for key, val in make_ratemaps.Ratemap.feature_colors.items() if key == chosen_feature_der][0]]
if 'head' in chosen_feature or 'Head' in chosen_feature:
ax2 = fig.add_subplot(gs1[gs_x['head'][head_count], head_count])
ax3 = fig.add_subplot(gs1[gs_x['head_der'][head_count], head_count])
else:
ax2 = fig.add_subplot(gs1[gs_x['back'][back_count], back_count])
ax3 = fig.add_subplot(gs1[gs_x['back_der'][back_count], back_count])
for color_idx, (ax, specific_feature) in enumerate(zip([ax2, ax3], [chosen_feature, chosen_feature_der])):
ax.scatter(x=np.array(weight_dict[specific_feature][gs][self.ref_dict['ref_session']]),
y=np.array(weight_dict[specific_feature][gs][self.test_session_type]),
color=feature_colors[color_idx], alpha=1, s=10)
ax.tick_params(axis='both', which='major', length=1, labelsize=6, pad=.75)
if 'head' in chosen_feature or 'Head' in chosen_feature:
if 'der' not in specific_feature:
if 'pitch' not in specific_feature:
ax.plot([-180, 180], [-180, 180], ls='-.', lw=.5, color='#000000')
ax.set_xlim(-180, 180)
ax.set_ylim(-180, 180)
ax.set_xticks([-180, -90, 0, 90, 180])
ax.set_yticks([-180, -90, 0, 90, 180])
ax.set_xlabel('{} (deg)'.format(self.ref_dict['ref_session']), labelpad=.1)
ax.set_ylabel('{} (deg)'.format(self.test_session_type), labelpad=.1)
else:
ax.plot([-90, 90], [-90, 90], ls='-.', lw=.5, color='#000000')
ax.set_xlim(-90, 90)
ax.set_ylim(-90, 90)
ax.set_xticks([-90, -45, 0, 45, 90])
ax.set_yticks([-90, -45, 0, 45, 90])
ax.set_xlabel('{} (deg)'.format(self.ref_dict['ref_session']), labelpad=.1)
ax.set_ylabel('{} (deg)'.format(self.test_session_type), labelpad=.1)
else:
ax.plot([-400, 400], [-400, 400], ls='-.', lw=.5, color='#000000')
ax.set_xlim(-400, 400)
ax.set_ylim(-400, 400)
ax.set_xticks([-400, -200, 0, 200, 400])
ax.set_yticks([-400, -200, 0, 200, 400])
ax.set_xlabel('{} (deg/s)'.format(self.ref_dict['ref_session']), labelpad=.1)
ax.set_ylabel('{} (deg/s)'.format(self.test_session_type), labelpad=.1)
else:
if back_count < 2:
if 'der' not in specific_feature:
ax.plot([-60, 60], [-60, 60], ls='-.', lw=.5, color='#000000')
ax.set_xlim(-60, 60)
ax.set_ylim(-60, 60)
ax.set_xticks([-60, -30, 0, 30, 60])
ax.set_yticks([-60, -30, 0, 30, 60])
ax.set_xlabel('{} (deg)'.format(self.ref_dict['ref_session']), labelpad=.1)
ax.set_ylabel('{} (deg)'.format(self.test_session_type), labelpad=.1)
else:
ax.plot([-110, 110], [-110, 110], ls='-.', lw=.5, color='#000000')
ax.set_xlim(-110, 110)
ax.set_ylim(-110, 110)
ax.set_xticks([-100, -50, 0, 50, 100])
ax.set_yticks([-100, -50, 0, 50, 100])
ax.set_xlabel('{} (deg/s)'.format(self.ref_dict['ref_session']), labelpad=.1)
ax.set_ylabel('{} (deg/s)'.format(self.test_session_type), labelpad=.1)
else:
if 'der' not in specific_feature:
ax.plot([0, 18], [0, 18], ls='-.', lw=.5, color='#000000')
ax.set_xlim(0, 18)
ax.set_ylim(0, 18)
ax.set_xticks([0, 4.5, 9, 13.5, 18])
ax.set_yticks([0, 4.5, 9, 13.5, 18])
ax.set_xlabel('{} (cm)'.format(self.ref_dict['ref_session']), labelpad=.1)
ax.set_ylabel('{} (cm)'.format(self.test_session_type), labelpad=.1)
else:
ax.plot([-10, 10], [-10, 10], ls='-.', lw=.5, color='#000000')
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
ax.set_xticks([-10, -5, 0, 5, 10])
ax.set_yticks([-10, -5, 0, 5, 10])
ax.set_xlabel('{} (cm/s)'.format(self.ref_dict['ref_session']), labelpad=.1)
ax.set_ylabel('{} (cm/s)'.format(self.test_session_type), labelpad=.1)
if 'head' in chosen_feature or 'Head' in chosen_feature:
head_count += 1
else:
back_count += 1
head_col = 0
back_col = 0
for chosen_feature in self.chosen_features:
if 'head' in chosen_feature or 'Head' in chosen_feature:
row_list = [2, 8]
col = head_col
else:
row_list = [5, 11]
col = back_col
chosen_feature_der = f'{chosen_feature}_{self.der}_der'
feature_colors = [[val for key, val in make_ratemaps.Ratemap.feature_colors.items() if key == chosen_feature][0],
[val for key, val in make_ratemaps.Ratemap.feature_colors.items() if key == chosen_feature_der][0]]
for color_idx, (row, specific_feature) in enumerate(zip(row_list, [chosen_feature, chosen_feature_der])):
ax = fig.add_subplot(gs1[row, col])
ax.hist(shuffled_dict[specific_feature][gs]['null_differences'], bins=10,
histtype='stepfilled', color='#808080', edgecolor='#000000', alpha=.5)
ax.axvline(x=np.nanpercentile(shuffled_dict[specific_feature][gs]['null_differences'], 0.5), color='#000000', ls='-.', lw=.5)
ax.axvline(x=np.nanpercentile(shuffled_dict[specific_feature][gs]['null_differences'], 99.5), color='#000000', ls='-.', lw=.5)
ax.set_ylim(ymax=hist_max)
ax.plot(shuffled_dict[specific_feature][gs]['true_difference'], 0 + .05 * hist_max, marker='o', color=feature_colors[color_idx], markersize=5)
if col < 1:
ax.set_ylabel('Shuffled count', labelpad=.1, fontsize=6)
ax.set_yticks(np.arange(0, hist_max, hist_step))
elif col == 1:
if gs == 'auc':
ax.set_xlabel('{} - {} AUC difference (spikes/s)'.format(self.ref_dict['ref_session'], self.test_session_type), labelpad=.5, fontsize=6)
elif gs == 'peaks':
ax.set_xlabel('{} - {} peak position difference'.format(self.ref_dict['ref_session'], self.test_session_type), labelpad=.5, fontsize=6)
elif gs == 'information_rates':
ax.set_xlabel('{} - {} difference (bits/spike)'.format(self.ref_dict['ref_session'], self.test_session_type), labelpad=.5, fontsize=6)
else:
ax.set_xlabel('{}-{} to {}-{} difference (rho)'.format(self.ref_dict['ref_session'],
self.ref_dict['other_session'],
self.ref_dict['ref_session'],
self.test_session_type), labelpad=.5, fontsize=6)
ax.set_yticklabels([])
else:
ax.set_yticklabels([])
ax.tick_params(axis='both', which='both', labelsize=5)
if 'head' in chosen_feature or 'Head' in chosen_feature:
head_col += 1
else:
back_col += 1
if self.save_fig:
if os.path.exists(self.save_dir):
fig.savefig(f'{self.save_dir}{os.sep}weight_features_comparisons.{self.fig_format}', dpi=300)
else:
print("Specified save directory doesn't exist. Try again.")
sys.exit()
plt.show()
def plot_weight_statistics(self, **kwargs):
"""
Description
----------
This method plots the raw statistics of light1-light2 and
light1-weight peak rate differences: their means and dependent
samples t-test for each feature (and first derivative).
----------
Parameters
----------
**kwargs (dictionary)
----------
Returns
----------
weight_statistics (plot)
A plot with weight peak rate differences statistics.
----------
"""
weight_dict = extract_json_data(json_file=self.weight_json_file,
features=self.chosen_features,
peak_min=self.peak_min,
rate_stability_bound=self.rate_stability_bound,
der=self.der,
ref_dict=self.ref_dict,
test_session=self.test_session_type)
shuffled_dict = make_shuffled_distributions(weight_dict=weight_dict, ref_dict=self.ref_dict,
test_session=self.test_session_type)
weight_stats_dict = {'features': {'auc': [], 'information_rates': [], 'stability': [],
'z_auc': [], 'z_information_rates': [], 'z_stability': [],
'cl_n': [], 'names': [], 'feature_colors': []},
'features_der': {'auc': [], 'information_rates': [], 'stability': [],
'z_auc': [], 'z_information_rates': [], 'z_stability': [],
'cl_n': [], 'names': [], 'feature_colors': []}}
for feature in weight_dict.keys():
if 'der' not in feature:
weight_stats_dict['features']['auc'].append(shuffled_dict[feature]['auc']['p-value'])
weight_stats_dict['features']['information_rates'].append(shuffled_dict[feature]['information_rates']['p-value'])
weight_stats_dict['features']['stability'].append(shuffled_dict[feature]['stability']['p-value'])
weight_stats_dict['features']['z_auc'].append(shuffled_dict[feature]['auc']['z-value'])
weight_stats_dict['features']['z_information_rates'].append(shuffled_dict[feature]['information_rates']['z-value'])
weight_stats_dict['features']['z_stability'].append(shuffled_dict[feature]['stability']['z-value'])
weight_stats_dict['features']['cl_n'].append(len(weight_dict[feature]['auc'][self.test_session_type]))
weight_stats_dict['features']['names'].append(feature)
weight_stats_dict['features']['feature_colors'].append([val for key, val in make_ratemaps.Ratemap.feature_colors.items() if key in feature][0])
else:
weight_stats_dict['features_der']['auc'].append(shuffled_dict[feature]['auc']['p-value'])
weight_stats_dict['features_der']['information_rates'].append(shuffled_dict[feature]['information_rates']['p-value'])
weight_stats_dict['features_der']['stability'].append(shuffled_dict[feature]['stability']['p-value'])
weight_stats_dict['features_der']['z_auc'].append(shuffled_dict[feature]['auc']['z-value'])
weight_stats_dict['features_der']['z_information_rates'].append(shuffled_dict[feature]['information_rates']['z-value'])
weight_stats_dict['features_der']['z_stability'].append(shuffled_dict[feature]['stability']['z-value'])
weight_stats_dict['features_der']['cl_n'].append(len(weight_dict[feature]['auc'][self.test_session_type]))
weight_stats_dict['features_der']['names'].append(feature)
weight_stats_dict['features_der']['feature_colors'].append([val for key, val in make_ratemaps.Ratemap.feature_colors.items() if key in feature][0])
print(weight_stats_dict)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(30, 5))
ax1 = plt.subplot(111)
left_point, middle_point, right_point = 2, 3, 4
cmap = plt.cm.seismic_r
normal_range = plt.Normalize(vmin=-4, vmax=0)
for idx, feature in enumerate(weight_stats_dict['features']['names']):
probabilities = np.log10([weight_stats_dict['features']['stability'][idx],
weight_stats_dict['features']['auc'][idx],
weight_stats_dict['features']['information_rates'][idx],
weight_stats_dict['features_der']['stability'][idx],
weight_stats_dict['features_der']['auc'][idx],
weight_stats_dict['features_der']['information_rates'][idx]])
colors = plt.cm.seismic_r(normal_range(probabilities))
for y_idx, y_point in enumerate([1, 3]):
if y_idx == 0:
co = colors[3:]
else:
co = colors[:3]
p = patches.Polygon(xy=np.array([[left_point, y_point], [right_point, y_point], [middle_point, (2 * np.sqrt(3) / 6) + y_point]]),
closed=True,
ec='#000000',
fc=co[0])
p2 = patches.Polygon(xy=np.array([[left_point, y_point], [middle_point, (2 * np.sqrt(3) / 2) + y_point], [middle_point, (2 * np.sqrt(3) / 6) + y_point]]),
closed=True,
ec='#000000',
fc=co[1])
p3 = patches.Polygon(xy=np.array([[right_point, y_point], [middle_point, (2 * np.sqrt(3) / 2) + y_point], [middle_point, (2 * np.sqrt(3) / 6) + y_point]]),
closed=True,
ec='#000000',
fc=co[2])
ax1.add_patch(p)
ax1.add_patch(p2)
ax1.add_patch(p3)
ax1.text(x=left_point + .3, y=5.5, s=feature.replace('_', ' '), fontsize=8)
ax1.text(x=left_point + .5, y=5, s='n_clusters={}'.format(weight_stats_dict['features']['cl_n'][idx]), fontsize=8)
ax1.text(x=left_point + .5, y=.5, s='n_clusters={}'.format(weight_stats_dict['features_der']['cl_n'][idx]), fontsize=8)
left_point += 3
middle_point += 3
right_point += 3
ax1.text(x=.6, y=4, s='Feature', fontsize=8)
ax1.text(x=.6, y=2, s='Derivative', fontsize=8)
ax1.set_xlim(0, 35)
ax1.set_ylim(0, 6)
ax1.spines['bottom'].set_visible(False)
ax1.spines['left'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.set_xticks([])
ax1.set_yticks([])
cax, _ = cbar.make_axes(ax1)
color_bar = cbar.ColorbarBase(cax, cmap=cmap, norm=normal_range)
color_bar.set_label('log$_{10}$(p-value)')
color_bar.ax.tick_params(size=0)
if self.save_fig:
if os.path.exists(self.save_dir):
fig.savefig(f'{self.save_dir}{os.sep}weight_features_statistics.{self.fig_format}', dpi=300)
else:
print("Specified save directory doesn't exist. Try again.")
sys.exit()
plt.show()
def plot_behavioral_comparisons(self, **kwargs):
"""
Description
----------
This method plots behavioral differences in two sets of sessions:
weight and no-weight.
----------
Parameters
----------
        **kwargs (dictionary)
            z_axis_bounds (list)
                The min and max of the z-axis; defaults to [5, 20].
----------
Returns
----------
weight_behavioral_comparisons (plot)
A plot comparing behavior in weight vs. no-weight sessions.
----------
"""
z_axis_bounds = kwargs['z_axis_bounds'] if 'z_axis_bounds' in kwargs.keys() and type(kwargs['z_axis_bounds']) == list else [5, 20]
three_d_occ = {'baseline': {}, 'test': {}}
for session_type in self.beh_plot_sessions.keys():
for file_loc in self.beh_plot_sessions[session_type]:
file_name, point_data = sessions2load.Session(session=file_loc).data_loader(extract_variables=['sorted_point_data'])
temp_neck_data = point_data['sorted_point_data'][:, 4, :]
for i in range(2):
temp_neck_data[:, i] += abs(min(temp_neck_data[:, i]))
print(min(temp_neck_data[:, 2]), max(temp_neck_data[:, 2]))
three_d_occ[session_type][file_name] = temp_neck_data * 100
fig = plt.figure(figsize=(10, 5))
fig.subplots_adjust(right=.8, wspace=.5)
normal_range = plt.Normalize(vmin=z_axis_bounds[0], vmax=z_axis_bounds[1])
for idx, session_type in enumerate(three_d_occ.keys()):
ax = fig.add_subplot(1, 2, idx + 1, projection='3d')
for session_data in three_d_occ[session_type].keys():
color_array = three_d_occ[session_type][session_data][:, 2].copy()
neck_point = three_d_occ[session_type][session_data].reshape(-1, 1, 3)
segments = np.concatenate([neck_point[:-1], neck_point[1:]], axis=1)
lc = Line3DCollection(segments,
cmap=plt.get_cmap('cividis'),
norm=normal_range)
lc.set_array(color_array)
ax.add_collection3d(lc)
ax.set_title(session_type)
ax.set_xlim(0, 200)
ax.set_ylim(0, 200)
ax.set_zlim(z_axis_bounds[0], z_axis_bounds[1])
ax.set_zticks(range(z_axis_bounds[0], z_axis_bounds[1] + 1, 5))
ax.set_xlabel('X (cm)')
ax.set_ylabel('Y (cm)')
ax.set_zlabel('Z (cm)')
ax.grid(False)
ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.view_init(elev=None, azim=30)
if idx > 0:
cbar_ax = fig.add_axes([0.85, 0.25, 0.01, 0.4])
color_bar = cbar.ColorbarBase(cbar_ax, cmap=plt.cm.cividis, norm=normal_range)
color_bar.set_label('Neck elevation (cm)')
color_bar.ax.locator_params(nbins=4)
color_bar.ax.tick_params(size=0)
if self.save_fig:
if os.path.exists(self.save_dir):
fig.savefig(f'{self.save_dir}{os.sep}weight_behavioral_comparisons.{self.fig_format}', dpi=300)
else:
print("Specified save directory doesn't exist. Try again.")
sys.exit()
plt.show()
| 2.140625 | 2 |
neuronpp_gym/agents/agent1d.py | ziemowit-s/neuronpp_gym | 1 | 12789489 | from neuronpp_gym.core.agent_core import AgentCore
class Agent1D(AgentCore):
def build(self, input_size: int):
"""
Build Agent for 1 Dimensional input.
Before Agent step() you need to call:
1. agent.build()
2. agent.init()
"""
if self._built:
raise RuntimeError("The Agent have been already built.")
if not isinstance(input_size, int):
raise ValueError("input_size can only be of type int.")
self._build(input_shape=(input_size,), input_size=input_size, input_cell_num=input_size)
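    # Illustrative call order (a sketch based only on the docstring above; constructor
    # arguments, init() parameters and the observation source are placeholders, not a
    # verified API):
    #
    #     agent = Agent1D(...)           # construct with whatever AgentCore expects
    #     agent.build(input_size=16)     # 1. build the 1-D input layer
    #     agent.init(...)                # 2. initialise before stepping
    #     agent.step(observation=obs)    # obs: 1-D array with 16 elements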
def _make_observation(self, observation, poisson=False, stepsize=None):
"""
Make 1D input observation
:param observation:
1 dim array of numbers
:return:
"""
if self.input_size != observation.size:
raise RuntimeError("Observation must be of same size as self.input_size, which is "
"a product of input_shape.")
input_syns = [s for c in self.input_cells for s in c.syns]
self._make_single_observation(observation=observation, syns=input_syns, poisson=poisson,
stepsize=stepsize) | 3.34375 | 3 |
tethysapp/modflow/app.py | Aquaveo/tethysapp-modflow | 0 | 12789490 | <filename>tethysapp/modflow/app.py
from tethys_sdk.base import TethysAppBase, url_map_maker
from tethys_sdk.app_settings import PersistentStoreDatabaseSetting, PersistentStoreConnectionSetting, \
SpatialDatasetServiceSetting, CustomSetting
from tethys_sdk.permissions import Permission
import tethysapp.modflow.signals # noqa
class Modflow(TethysAppBase):
"""
Tethys app class for Modflow.
"""
name = 'Groundwater Model Simulations'
index = 'modflow:home'
icon = 'modflow/images/icon-gms-tethys-170.png'
package = 'modflow'
root_url = 'modflow'
color = '#BA0C2F'
    description = 'Application for evaluating the effects of pumping location and pumping rate on aquifers'
tags = ''
enable_feedback = False
feedback_emails = []
# Services
SCHEDULER_NAME = 'remote_cluster'
GEOSERVER_NAME = 'primary_geoserver'
DATABASE_NAME = 'primary_db'
MODEL_DB_CON_01 = 'model_db_1'
BASE_SCENARIO_ID = 1
def custom_settings(self):
"""
Define custom settings.
"""
custom_settings = (
CustomSetting(
name='bing_api_key',
type=CustomSetting.TYPE_STRING,
description='API key for BING basemap service.',
required=False
),
CustomSetting(
name='minx_extent',
type=CustomSetting.TYPE_STRING,
description='Min X Extent for Model Selection Map View',
required=True
),
CustomSetting(
name='miny_extent',
type=CustomSetting.TYPE_STRING,
description='Min Y Extent for Model Selection Map View',
required=True
),
CustomSetting(
name='maxx_extent',
type=CustomSetting.TYPE_STRING,
description='Max X Extent for Model Selection Map View',
required=True
),
CustomSetting(
name='maxy_extent',
type=CustomSetting.TYPE_STRING,
description='Max Y Extent for Model Selection Map View',
required=True
),
CustomSetting(
name='custom_base_maps',
type=CustomSetting.TYPE_STRING,
description='Comma separated urls for basemap services (ex. name1:url1, name2:url2)',
required=False
),
)
return custom_settings
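    # Note (illustrative, assuming the standard Tethys accessor): controllers can read these
    # values with e.g. Modflow.get_custom_setting('minx_extent'); the comma-separated
    # custom_base_maps string would then be parsed by the app itself.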
def url_maps(self):
"""
Add controllers
"""
from modflow_adapter.models.app_users.modflow_model_resource import ModflowModelResource
from modflow_adapter.services.modflow_spatial_manager import ModflowSpatialManager
from tethysext.atcore.urls import app_users, spatial_reference
from tethysapp.modflow.models.app_users import ModflowOrganization
from tethysapp.modflow.services.map_manager import ModflowMapManager
from tethysapp.modflow.controllers.resources import ModifyModflowModel
from tethysapp.modflow.controllers.map_view.modflow_model_map_view import ModflowModelMapView
UrlMap = url_map_maker(self.root_url)
# Get the urls for the app_users extension
url_maps = []
app_users_urls = app_users.urls(
url_map_maker=UrlMap,
app=self,
persistent_store_name=self.DATABASE_NAME,
base_template='modflow/base.html',
custom_models=(
ModflowModelResource,
ModflowOrganization
),
custom_controllers=(
ModifyModflowModel,
),
)
url_maps.extend(app_users_urls)
spatial_reference_urls = spatial_reference.urls(
url_map_maker=UrlMap,
app=self,
persistent_store_name=self.DATABASE_NAME
)
url_maps.extend(spatial_reference_urls)
url_maps.extend((
UrlMap(
name='home',
url='modflow',
controller=ModflowModelMapView.as_controller(
_app=self,
_persistent_store_name=self.DATABASE_NAME,
geoserver_name=self.GEOSERVER_NAME,
_Organization=ModflowOrganization,
_Resource=ModflowModelResource,
_SpatialManager=ModflowSpatialManager,
_MapManager=ModflowMapManager
),
regex=['[0-9A-Za-z-_.]+', '[0-9A-Za-z-_.{}]+']
),
UrlMap(
name='model_view',
url='modflow/{resource_id}/map',
controller=ModflowModelMapView.as_controller(
_app=self,
_persistent_store_name=self.DATABASE_NAME,
geoserver_name=self.GEOSERVER_NAME,
_Organization=ModflowOrganization,
_Resource=ModflowModelResource,
_SpatialManager=ModflowSpatialManager,
_MapManager=ModflowMapManager
),
regex=['[0-9A-Za-z-_.]+', '[0-9A-Za-z-_.{}]+']
),
UrlMap(
name='modpath',
url='modflow/modpath',
controller='modflow.controllers.executables.modpath.modpath'
),
UrlMap(
name='get-flow-path-json',
url='modflow/get-flow-path-json',
controller='modflow.controllers.executables.modpath.get_flow_path_json'
),
UrlMap(
name='pump-impact',
url='modflow/pump-impact',
controller='modflow.controllers.executables.pump_impact.pump_impact'
),
UrlMap(
name='get-pump-impact-json',
url='modflow/get-pump-impact-json',
controller='modflow.controllers.executables.pump_impact.get_pump_impact_json'
),
UrlMap(
name='check-job-status',
url='modflow/check-job-status',
controller='modflow.controllers.executables.job_status.check_status'
),
UrlMap(
name='check-point-status',
url='modflow/check-point-status',
controller='modflow.controllers.rest.map.check_point_status'
),
UrlMap(
name='check-well-depth',
url='modflow/check-well-depth',
controller='modflow.controllers.rest.map.check_well_depth'
),
UrlMap(
name='pump-schedule-data',
url='modflow/pump-schedule-data',
controller='modflow.controllers.rest.map.pump_schedule_data'
),
UrlMap(
name='update-help-modal-status',
url='modflow/update-help-modal-status',
controller='modflow.controllers.rest.map.update_help_modal_status'
),
UrlMap(
name='settings',
url='settings',
controller='modflow.controllers.manage.modflow_settings.modflow_settings'
),
))
return url_maps
def persistent_store_settings(self):
"""
Define Persistent Store Settings.
"""
ps_settings = (
PersistentStoreDatabaseSetting(
name=self.DATABASE_NAME,
description='Primary database for Modflow.',
initializer='modflow.models.init_primary_db',
required=True,
spatial=True
),
PersistentStoreConnectionSetting(
name=self.MODEL_DB_CON_01,
description='First node of model database server pool.',
required=True
),
)
return ps_settings
def spatial_dataset_service_settings(self):
"""
Example spatial_dataset_service_settings method.
"""
sds_settings = (
SpatialDatasetServiceSetting(
name=self.GEOSERVER_NAME,
description='spatial dataset service for app to use',
engine=SpatialDatasetServiceSetting.GEOSERVER,
required=True,
),
)
return sds_settings
def permissions(self):
"""
Example permissions method.
"""
# Viewer Permissions
admin_user = Permission(
name='admin_user',
description='Admin User'
)
from tethysext.atcore.services.app_users.permissions_manager import AppPermissionsManager
from tethysext.atcore.permissions.app_users import PermissionsGenerator
# Generate permissions for App Users
permissions_manager = AppPermissionsManager(self.namespace)
pg = PermissionsGenerator(permissions_manager)
permissions = pg.generate()
permissions.append(admin_user)
return permissions
| 2.015625 | 2 |
mul.py | JohnBernad/Final-project--calculator | 0 | 12789491 | def mul():
num1 = float(input("Enter first number : "))
num2 = float(input("Enter second number : "))
    p = num1 * num2
    print("The product is", p)
| 4 | 4 |
cernatschool/test_pixel.py | CERNatschool/beta-attenuation | 1 | 12789492 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#...the usual suspects.
import os, inspect
#...for the unit testing.
import unittest
#...for the logging.
import logging as lg
#...for the pixel wrapper class.
from pixel import Pixel
class PixelTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_create_pixel(self):
p = Pixel(100, 200, 1234, -1, 256, 256)
# The tests
#-----------
self.assertEqual(p.get_x(), 100)
self.assertEqual(p.get_y(), 200)
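        # 51300 is presumably the linearised pixel index, y * n_cols + x = 200 * 256 + 100
        # (inferred from the constructor arguments above rather than from the Pixel API docs).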
self.assertEqual(p.getX(), 51300)
self.assertEqual(p.getC(), 1234)
self.assertEqual(p.get_mask(), -1)
self.assertEqual(p.get_neighbours(), {})
self.assertEqual(p.pixel_entry(), "{\"x\":100, \"y\":200, \"c\":1234},\n")
if __name__ == "__main__":
lg.basicConfig(filename='log_test_pixel.txt', filemode='w', level=lg.DEBUG)
lg.info("")
lg.info("===============================================")
lg.info(" Logger output from cernatschool/test_pixel.py ")
lg.info("===============================================")
lg.info("")
unittest.main()
| 2.734375 | 3 |
module_2/utils.py | JaRo123/ams-ml-python-course | 3 | 12789493 | """Helper methods for Module 2."""
import errno
import glob
import os.path
import pickle
import time
import calendar
import numpy
import pandas
import matplotlib.colors
import matplotlib.pyplot as pyplot
import sklearn.metrics
import sklearn.linear_model
import sklearn.tree
import sklearn.ensemble
from module_4 import roc_curves
from module_4 import performance_diagrams as perf_diagrams
from module_4 import attributes_diagrams as attr_diagrams
# Directories.
MODULE2_DIR_NAME = '.'
SHORT_COURSE_DIR_NAME = '..'
DEFAULT_FEATURE_DIR_NAME = (
'{0:s}/data/track_data_ncar_ams_3km_csv_small'
).format(SHORT_COURSE_DIR_NAME)
# Variable names.
METADATA_COLUMNS = [
'Step_ID', 'Track_ID', 'Ensemble_Name', 'Ensemble_Member', 'Run_Date',
'Valid_Date', 'Forecast_Hour', 'Valid_Hour_UTC'
]
EXTRANEOUS_COLUMNS = [
'Duration', 'Centroid_Lon', 'Centroid_Lat', 'Centroid_X', 'Centroid_Y',
'Storm_Motion_U', 'Storm_Motion_V', 'Matched', 'Max_Hail_Size',
'Num_Matches', 'Shape', 'Location', 'Scale'
]
TARGET_NAME = 'RVORT1_MAX-future_max'
BINARIZED_TARGET_NAME = 'strong_future_rotation_flag'
NUM_VALUES_KEY = 'num_values'
MEAN_VALUE_KEY = 'mean_value'
MEAN_OF_SQUARES_KEY = 'mean_of_squares'
MAE_KEY = 'mean_absolute_error'
MSE_KEY = 'mean_squared_error'
MEAN_BIAS_KEY = 'mean_bias'
MAE_SKILL_SCORE_KEY = 'mae_skill_score'
MSE_SKILL_SCORE_KEY = 'mse_skill_score'
MAX_PEIRCE_SCORE_KEY = 'max_peirce_score'
AUC_KEY = 'area_under_roc_curve'
MAX_CSI_KEY = 'max_csi'
BRIER_SCORE_KEY = 'brier_score'
BRIER_SKILL_SCORE_KEY = 'brier_skill_score'
# Plotting constants.
DEFAULT_FIG_WIDTH_INCHES = 10
DEFAULT_FIG_HEIGHT_INCHES = 10
SMALL_FIG_WIDTH_INCHES = 10
SMALL_FIG_HEIGHT_INCHES = 10
FIGURE_RESOLUTION_DPI = 300
BAR_GRAPH_COLOUR = numpy.array([27, 158, 119], dtype=float) / 255
BAR_GRAPH_EDGE_WIDTH = 2
BAR_GRAPH_FONT_SIZE = 14
BAR_GRAPH_FONT_COLOUR = numpy.array([217, 95, 2], dtype=float) / 255
FONT_SIZE = 20
pyplot.rc('font', size=FONT_SIZE)
pyplot.rc('axes', titlesize=FONT_SIZE)
pyplot.rc('axes', labelsize=FONT_SIZE)
pyplot.rc('xtick', labelsize=FONT_SIZE)
pyplot.rc('ytick', labelsize=FONT_SIZE)
pyplot.rc('legend', fontsize=FONT_SIZE)
pyplot.rc('figure', titlesize=FONT_SIZE)
# Misc constants.
DATE_FORMAT = '%Y%m%d'
DATE_FORMAT_REGEX = '[0-9][0-9][0-9][0-9][0-1][0-9][0-3][0-9]'
RANDOM_SEED = 6695
LAMBDA_TOLERANCE = 1e-10
def time_string_to_unix(time_string, time_format):
"""Converts time from string to Unix format.
Unix format = seconds since 0000 UTC 1 Jan 1970.
:param time_string: Time string.
:param time_format: Format of time string (example: "%Y%m%d" or
"%Y-%m-%d-%H%M%S").
:return: unix_time_sec: Time in Unix format.
"""
return calendar.timegm(time.strptime(time_string, time_format))
def time_unix_to_string(unix_time_sec, time_format):
"""Converts time from Unix format to string.
Unix format = seconds since 0000 UTC 1 Jan 1970.
:param unix_time_sec: Time in Unix format.
:param time_format: Desired format of time string (example: "%Y%m%d" or
"%Y-%m-%d-%H%M%S").
:return: time_string: Time string.
"""
return time.strftime(time_format, time.gmtime(unix_time_sec))
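# Quick illustrative check of the two converters above (the timestamp is the
# well-known value for 2017-01-01 00:00:00 UTC):
#
#     >>> time_string_to_unix('20170101', DATE_FORMAT)
#     1483228800
#     >>> time_unix_to_string(1483228800, '%Y-%m-%d-%H%M%S')
#     '2017-01-01-000000'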
def _remove_future_data(predictor_table):
"""Removes future data from predictors.
:param predictor_table: pandas DataFrame with predictor values. Each row is
one storm object.
:return: predictor_table: Same but with fewer columns.
"""
predictor_names = list(predictor_table)
columns_to_remove = [p for p in predictor_names if 'future' in p]
return predictor_table.drop(columns_to_remove, axis=1, inplace=False)
def _feature_file_name_to_date(csv_file_name):
"""Parses date from name of feature (CSV) file.
:param csv_file_name: Path to input file.
:return: date_string: Date (format "yyyymmdd").
"""
pathless_file_name = os.path.split(csv_file_name)[-1]
date_string = pathless_file_name.replace(
'track_step_NCARSTORM_d01_', ''
).replace('-0000.csv', '')
# Verify.
time_string_to_unix(time_string=date_string, time_format=DATE_FORMAT)
return date_string
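# Illustrative example (the path below is made up but format-conforming):
#
#     >>> _feature_file_name_to_date(
#     ...     'some_dir/track_step_NCARSTORM_d01_20170323-0000.csv')
#     '20170323'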
def find_many_feature_files(first_date_string, last_date_string,
feature_dir_name=DEFAULT_FEATURE_DIR_NAME):
"""Finds feature files in the given date range.
:param first_date_string: First date ("yyyymmdd") in range.
:param last_date_string: Last date ("yyyymmdd") in range.
:param feature_dir_name: Name of directory with feature (CSV) files.
:return: csv_file_names: 1-D list of paths to feature files.
"""
first_time_unix_sec = time_string_to_unix(
time_string=first_date_string, time_format=DATE_FORMAT)
last_time_unix_sec = time_string_to_unix(
time_string=last_date_string, time_format=DATE_FORMAT)
csv_file_pattern = '{0:s}/track_step_NCARSTORM_d01_{1:s}-0000.csv'.format(
feature_dir_name, DATE_FORMAT_REGEX)
csv_file_names = glob.glob(csv_file_pattern)
csv_file_names.sort()
file_date_strings = [_feature_file_name_to_date(f) for f in csv_file_names]
file_times_unix_sec = numpy.array([
time_string_to_unix(time_string=d, time_format=DATE_FORMAT)
for d in file_date_strings
], dtype=int)
good_indices = numpy.where(numpy.logical_and(
file_times_unix_sec >= first_time_unix_sec,
file_times_unix_sec <= last_time_unix_sec
))[0]
return [csv_file_names[k] for k in good_indices]
def read_feature_file(csv_file_name):
"""Reads features from CSV file.
:param csv_file_name: Path to input file.
:return: metadata_table: pandas DataFrame with metadata. Each row is one
storm object.
:return: predictor_table: pandas DataFrame with predictor values. Each row
is one storm object.
:return: target_table: pandas DataFrame with target values. Each row is one
storm object.
"""
predictor_table = pandas.read_csv(csv_file_name, header=0, sep=',')
predictor_table.drop(EXTRANEOUS_COLUMNS, axis=1, inplace=True)
metadata_table = predictor_table[METADATA_COLUMNS]
predictor_table.drop(METADATA_COLUMNS, axis=1, inplace=True)
target_table = predictor_table[[TARGET_NAME]]
predictor_table.drop([TARGET_NAME], axis=1, inplace=True)
predictor_table = _remove_future_data(predictor_table)
return metadata_table, predictor_table, target_table
def read_many_feature_files(csv_file_names):
"""Reads features from many CSV files.
:param csv_file_names: 1-D list of paths to input files.
:return: metadata_table: See doc for `read_feature_file`.
:return: predictor_table: Same.
:return: target_table: Same.
"""
num_files = len(csv_file_names)
list_of_metadata_tables = [pandas.DataFrame()] * num_files
list_of_predictor_tables = [pandas.DataFrame()] * num_files
list_of_target_tables = [pandas.DataFrame()] * num_files
for i in range(num_files):
print('Reading data from: "{0:s}"...'.format(csv_file_names[i]))
(list_of_metadata_tables[i], list_of_predictor_tables[i],
list_of_target_tables[i]
) = read_feature_file(csv_file_names[i])
if i == 0:
continue
list_of_metadata_tables[i] = list_of_metadata_tables[i].align(
list_of_metadata_tables[0], axis=1
)[0]
list_of_predictor_tables[i] = list_of_predictor_tables[i].align(
list_of_predictor_tables[0], axis=1
)[0]
list_of_target_tables[i] = list_of_target_tables[i].align(
list_of_target_tables[0], axis=1
)[0]
metadata_table = pandas.concat(
list_of_metadata_tables, axis=0, ignore_index=True)
predictor_table = pandas.concat(
list_of_predictor_tables, axis=0, ignore_index=True)
target_table = pandas.concat(
list_of_target_tables, axis=0, ignore_index=True)
return metadata_table, predictor_table, target_table
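# Hedged usage sketch combining the two readers above (the date range is a
# placeholder):
#
#     >>> csv_file_names = find_many_feature_files(
#     ...     first_date_string='20101024', last_date_string='20101030')
#     >>> metadata_table, predictor_table, target_table = (
#     ...     read_many_feature_files(csv_file_names))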
def _update_normalization_params(intermediate_normalization_dict, new_values):
"""Updates normalization params for one predictor.
:param intermediate_normalization_dict: Dictionary with the following keys.
intermediate_normalization_dict['num_values']: Number of values on which
current estimates are based.
intermediate_normalization_dict['mean_value']: Current estimate for mean.
intermediate_normalization_dict['mean_of_squares']: Current mean of squared
values.
:param new_values: numpy array of new values (will be used to update
`intermediate_normalization_dict`).
:return: intermediate_normalization_dict: Same as input but with updated
values.
"""
if MEAN_VALUE_KEY not in intermediate_normalization_dict:
intermediate_normalization_dict = {
NUM_VALUES_KEY: 0,
MEAN_VALUE_KEY: 0.,
MEAN_OF_SQUARES_KEY: 0.
}
these_means = numpy.array([
intermediate_normalization_dict[MEAN_VALUE_KEY], numpy.mean(new_values)
])
these_weights = numpy.array([
intermediate_normalization_dict[NUM_VALUES_KEY], new_values.size
])
intermediate_normalization_dict[MEAN_VALUE_KEY] = numpy.average(
these_means, weights=these_weights)
these_means = numpy.array([
intermediate_normalization_dict[MEAN_OF_SQUARES_KEY],
numpy.mean(new_values ** 2)
])
intermediate_normalization_dict[MEAN_OF_SQUARES_KEY] = numpy.average(
these_means, weights=these_weights)
intermediate_normalization_dict[NUM_VALUES_KEY] += new_values.size
return intermediate_normalization_dict
def _get_standard_deviation(intermediate_normalization_dict):
"""Computes stdev from intermediate normalization params.
:param intermediate_normalization_dict: See doc for
`_update_normalization_params`.
:return: standard_deviation: Standard deviation.
"""
num_values = float(intermediate_normalization_dict[NUM_VALUES_KEY])
multiplier = num_values / (num_values - 1)
return numpy.sqrt(multiplier * (
intermediate_normalization_dict[MEAN_OF_SQUARES_KEY] -
intermediate_normalization_dict[MEAN_VALUE_KEY] ** 2
))
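# Worked example for the two helpers above (arbitrary values, processed in two
# batches exactly as `get_normalization_params` does per file):
#
#     >>> d = _update_normalization_params({}, numpy.array([1., 2., 3.]))
#     >>> d = _update_normalization_params(d, numpy.array([4., 5.]))
#     >>> d[MEAN_VALUE_KEY]                     # (1 + 2 + 3 + 4 + 5) / 5
#     3.0
#     >>> round(_get_standard_deviation(d), 4)  # sample stdev of 1..5
#     1.5811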
def get_normalization_params(csv_file_names):
"""Computes normalization params (mean and stdev) for each predictor.
:param csv_file_names: 1-D list of paths to input files.
    :return: normalization_dict: See input doc for `normalize_predictors`.
"""
predictor_names = None
norm_dict_by_predictor = None
for this_file_name in csv_file_names:
print('Reading data from: "{0:s}"...'.format(this_file_name))
this_predictor_table = read_feature_file(this_file_name)[1]
if predictor_names is None:
predictor_names = list(this_predictor_table)
norm_dict_by_predictor = [{}] * len(predictor_names)
for m in range(len(predictor_names)):
norm_dict_by_predictor[m] = _update_normalization_params(
intermediate_normalization_dict=norm_dict_by_predictor[m],
new_values=this_predictor_table[predictor_names[m]].values
)
print('\n')
normalization_dict = {}
for m in range(len(predictor_names)):
this_mean = norm_dict_by_predictor[m][MEAN_VALUE_KEY]
this_stdev = _get_standard_deviation(norm_dict_by_predictor[m])
normalization_dict[predictor_names[m]] = numpy.array(
[this_mean, this_stdev]
)
message_string = (
'Mean and standard deviation for "{0:s}" = {1:.4f}, {2:.4f}'
).format(predictor_names[m], this_mean, this_stdev)
print(message_string)
return normalization_dict
def normalize_predictors(predictor_table, normalization_dict=None):
"""Normalizes predictors to z-scores.
:param predictor_table: See doc for `read_feature_file`.
:param normalization_dict: Dictionary. Each key is the name of a predictor
value, and the corresponding value is a length-2 numpy array with
[mean, standard deviation]. If `normalization_dict is None`, mean and
standard deviation will be computed for each predictor.
:return: predictor_table: Normalized version of input.
:return: normalization_dict: See doc for input variable. If input was None,
this will be a newly created dictionary. Otherwise, this will be the
same dictionary passed as input.
"""
predictor_names = list(predictor_table)
num_predictors = len(predictor_names)
if normalization_dict is None:
normalization_dict = {}
for m in range(num_predictors):
this_mean = numpy.mean(predictor_table[predictor_names[m]].values)
this_stdev = numpy.std(
predictor_table[predictor_names[m]].values, ddof=1
)
normalization_dict[predictor_names[m]] = numpy.array(
[this_mean, this_stdev]
)
for m in range(num_predictors):
this_mean = normalization_dict[predictor_names[m]][0]
this_stdev = normalization_dict[predictor_names[m]][1]
these_norm_values = (
predictor_table[predictor_names[m]].values - this_mean
) / this_stdev
predictor_table = predictor_table.assign(**{
predictor_names[m]: these_norm_values
})
return predictor_table, normalization_dict
def denormalize_predictors(predictor_table, normalization_dict):
"""Denormalizes predictors from z-scores back to original scales.
:param predictor_table: See doc for `normalize_predictors`.
:param normalization_dict: Same.
:return: predictor_table: Denormalized version of input.
"""
predictor_names = list(predictor_table)
num_predictors = len(predictor_names)
for m in range(num_predictors):
this_mean = normalization_dict[predictor_names[m]][0]
this_stdev = normalization_dict[predictor_names[m]][1]
these_denorm_values = (
this_mean + this_stdev * predictor_table[predictor_names[m]].values
)
predictor_table = predictor_table.assign(**{
predictor_names[m]: these_denorm_values
})
return predictor_table
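# Hedged round-trip sketch ('REFL_COM_mean' is a made-up predictor name):
#
#     >>> raw_table = pandas.DataFrame({'REFL_COM_mean': [10., 20., 30.]})
#     >>> norm_table, norm_dict = normalize_predictors(raw_table.copy())
#     >>> denorm_table = denormalize_predictors(norm_table, norm_dict)
#     >>> numpy.allclose(denorm_table.values, raw_table.values)
#     True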
def get_binarization_threshold(csv_file_names, percentile_level):
"""Computes binarization threshold for target variable.
Binarization threshold will be [q]th percentile of all target values, where
q = `percentile_level`.
:param csv_file_names: 1-D list of paths to input files.
:param percentile_level: q in the above discussion.
:return: binarization_threshold: Binarization threshold (used to turn each
target value into a yes-or-no label).
"""
max_target_values = numpy.array([])
for this_file_name in csv_file_names:
print('Reading data from: "{0:s}"...'.format(this_file_name))
this_target_table = read_feature_file(this_file_name)[-1]
max_target_values = numpy.concatenate((
max_target_values, this_target_table[TARGET_NAME].values
))
binarization_threshold = numpy.percentile(
max_target_values, percentile_level)
print('\nBinarization threshold for "{0:s}" = {1:.4e}'.format(
TARGET_NAME, binarization_threshold
))
return binarization_threshold
def binarize_target_values(target_values, binarization_threshold):
"""Binarizes target values.
E = number of examples (storm objects)
:param target_values: length-E numpy array of real-number target values.
:param binarization_threshold: Binarization threshold.
:return: target_values: length-E numpy array of binarized target values
(integers in 0...1).
"""
return (target_values >= binarization_threshold).astype(int)
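# Illustrative example (threshold and target values are arbitrary; the
# comparison is inclusive, so a value equal to the threshold becomes 1):
#
#     >>> binarize_target_values(
#     ...     target_values=numpy.array([1e-4, 3e-3, 5e-3]),
#     ...     binarization_threshold=3e-3)
#     array([0, 1, 1])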
def _lambdas_to_sklearn_inputs(lambda1, lambda2):
"""Converts lambdas to input arguments for scikit-learn.
:param lambda1: L1-regularization weight.
:param lambda2: L2-regularization weight.
:return: alpha: Input arg for scikit-learn model.
:return: l1_ratio: Input arg for scikit-learn model.
"""
return lambda1 + lambda2, lambda1 / (lambda1 + lambda2)
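# Worked example: lambda1 = 0.001 and lambda2 = 0.003 map to
# alpha = lambda1 + lambda2 = 0.004 and
# l1_ratio = lambda1 / (lambda1 + lambda2) = 0.25 (up to floating-point error),
# which is how the elastic-net branches of `setup_linear_regression` and
# `setup_logistic_regression` below build their scikit-learn models.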
def setup_linear_regression(lambda1=0., lambda2=0.):
"""Sets up (but does not train) linear-regression model.
:param lambda1: L1-regularization weight.
:param lambda2: L2-regularization weight.
:return: model_object: Instance of `sklearn.linear_model`.
"""
assert lambda1 >= 0
assert lambda2 >= 0
if lambda1 < LAMBDA_TOLERANCE and lambda2 < LAMBDA_TOLERANCE:
return sklearn.linear_model.LinearRegression(
fit_intercept=True, normalize=False)
if lambda1 < LAMBDA_TOLERANCE:
return sklearn.linear_model.Ridge(
alpha=lambda2, fit_intercept=True, normalize=False,
random_state=RANDOM_SEED)
if lambda2 < LAMBDA_TOLERANCE:
return sklearn.linear_model.Lasso(
alpha=lambda1, fit_intercept=True, normalize=False,
random_state=RANDOM_SEED)
alpha, l1_ratio = _lambdas_to_sklearn_inputs(
lambda1=lambda1, lambda2=lambda2)
return sklearn.linear_model.ElasticNet(
alpha=alpha, l1_ratio=l1_ratio, fit_intercept=True, normalize=False,
random_state=RANDOM_SEED)
def train_linear_regression(model_object, training_predictor_table,
training_target_table):
"""Trains linear-regression model.
:param model_object: Untrained model created by `setup_linear_regression`.
:param training_predictor_table: See doc for `read_feature_file`.
:param training_target_table: Same.
:return: model_object: Trained version of input.
"""
model_object.fit(
X=training_predictor_table.as_matrix(),
y=training_target_table[TARGET_NAME].values
)
return model_object
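# Hedged usage sketch (assumes training/validation tables already produced by
# `read_many_feature_files` and normalized; the regularization weight is a
# placeholder):
#
#     >>> model_object = setup_linear_regression(lambda1=0., lambda2=1e-3)
#     >>> model_object = train_linear_regression(
#     ...     model_object=model_object,
#     ...     training_predictor_table=training_predictor_table,
#     ...     training_target_table=training_target_table)
#     >>> predicted_target_values = model_object.predict(
#     ...     validation_predictor_table.values)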
def _create_directory(directory_name=None, file_name=None):
"""Creates directory (along with parents if necessary).
This method creates directories only when necessary, so you don't have to
worry about it overwriting anything.
:param directory_name: Name of desired directory.
:param file_name: [used only if `directory_name is None`]
Path to desired file. All directories in path will be created.
"""
if directory_name is None:
directory_name = os.path.split(file_name)[0]
try:
os.makedirs(directory_name)
except OSError as this_error:
if this_error.errno == errno.EEXIST and os.path.isdir(directory_name):
pass
else:
raise
def write_model(model_object, pickle_file_name):
"""Writes model to Pickle file.
:param model_object: Trained model (instance of `sklearn.linear_model`, for
example).
:param pickle_file_name: Path to output file.
"""
print('Writing model to: "{0:s}"...'.format(pickle_file_name))
_create_directory(file_name=pickle_file_name)
file_handle = open(pickle_file_name, 'wb')
pickle.dump(model_object, file_handle)
file_handle.close()
def evaluate_regression(
target_values, predicted_target_values, mean_training_target_value,
verbose=True, create_plots=True, dataset_name=None):
"""Evaluates regression model.
E = number of examples
:param target_values: length-E numpy array of actual target values.
:param predicted_target_values: length-E numpy array of predictions.
:param mean_training_target_value: Mean target value in training data.
:param verbose: Boolean flag. If True, will print results to command
window.
:param create_plots: Boolean flag. If True, will create plots.
:param dataset_name: Dataset name (e.g., "validation"). Used only if
`create_plots == True or verbose == True`.
:return: evaluation_dict: Dictionary with the following keys.
evaluation_dict['mean_absolute_error']: Mean absolute error (MAE).
evaluation_dict['mean_squared_error']: Mean squared error (MSE).
evaluation_dict['mean_bias']: Mean bias (signed error).
    evaluation_dict['mae_skill_score']: MAE skill score (fractional improvement
        over climatology; 1 is perfect, 0 is no better than climatology, and
        negative values are worse than climatology).
    evaluation_dict['mse_skill_score']: MSE skill score (same interpretation as
        the MAE skill score).
"""
signed_errors = predicted_target_values - target_values
mean_bias = numpy.mean(signed_errors)
mean_absolute_error = numpy.mean(numpy.absolute(signed_errors))
mean_squared_error = numpy.mean(signed_errors ** 2)
climo_signed_errors = mean_training_target_value - target_values
climo_mae = numpy.mean(numpy.absolute(climo_signed_errors))
climo_mse = numpy.mean(climo_signed_errors ** 2)
mae_skill_score = (climo_mae - mean_absolute_error) / climo_mae
mse_skill_score = (climo_mse - mean_squared_error) / climo_mse
evaluation_dict = {
MAE_KEY: mean_absolute_error,
MSE_KEY: mean_squared_error,
MEAN_BIAS_KEY: mean_bias,
MAE_SKILL_SCORE_KEY: mae_skill_score,
MSE_SKILL_SCORE_KEY: mse_skill_score
}
if verbose or create_plots:
dataset_name = dataset_name[0].upper() + dataset_name[1:]
if verbose:
print('{0:s} MAE (mean absolute error) = {1:.3e} s^-1'.format(
dataset_name, evaluation_dict[MAE_KEY]
))
print('{0:s} MSE (mean squared error) = {1:.3e} s^-2'.format(
dataset_name, evaluation_dict[MSE_KEY]
))
print('{0:s} bias (mean signed error) = {1:.3e} s^-1'.format(
dataset_name, evaluation_dict[MEAN_BIAS_KEY]
))
message_string = (
'{0:s} MAE skill score (improvement over climatology) = {1:.3f}'
).format(dataset_name, evaluation_dict[MAE_SKILL_SCORE_KEY])
print(message_string)
message_string = (
'{0:s} MSE skill score (improvement over climatology) = {1:.3f}'
).format(dataset_name, evaluation_dict[MSE_SKILL_SCORE_KEY])
print(message_string)
if not create_plots:
return evaluation_dict
figure_object, axes_object = pyplot.subplots(
1, 1, figsize=(SMALL_FIG_WIDTH_INCHES, SMALL_FIG_HEIGHT_INCHES)
)
attr_diagrams.plot_regression_relia_curve(
observed_values=target_values, forecast_values=predicted_target_values,
num_bins=20, figure_object=figure_object, axes_object=axes_object)
axes_object.set_xlabel(r'Forecast value (s$^{-1}$)')
axes_object.set_ylabel(r'Conditional mean observation (s$^{-1}$)')
title_string = '{0:s} reliability curve for max future vorticity'.format(
dataset_name)
axes_object.set_title(title_string)
pyplot.show()
return evaluation_dict
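# Worked example of the skill scores above (arbitrary numbers): with a
# climatological MSE of 4.0e-6 s^-2 and a model MSE of 3.0e-6 s^-2, the MSE
# skill score is (4.0e-6 - 3.0e-6) / 4.0e-6 = 0.25, i.e. a 25% improvement over
# always predicting the training-mean target value.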
def plot_model_coefficients(model_object, predictor_names):
"""Plots coefficients for linear- or logistic-regression model.
:param model_object: Trained instance of `sklearn.linear_model`.
:param predictor_names: 1-D list of predictor names, in the same order used
to train the model.
"""
coefficients = model_object.coef_
num_dimensions = len(coefficients.shape)
if num_dimensions > 1:
coefficients = coefficients[0, ...]
num_predictors = len(predictor_names)
y_coords = numpy.linspace(
0, num_predictors - 1, num=num_predictors, dtype=float)
_, axes_object = pyplot.subplots(
1, 1, figsize=(DEFAULT_FIG_WIDTH_INCHES, DEFAULT_FIG_HEIGHT_INCHES)
)
axes_object.barh(
y_coords, coefficients, color=BAR_GRAPH_COLOUR,
edgecolor=BAR_GRAPH_COLOUR, linewidth=BAR_GRAPH_EDGE_WIDTH)
pyplot.xlabel('Coefficient')
pyplot.ylabel('Predictor variable')
pyplot.yticks([], [])
x_tick_values, _ = pyplot.xticks()
pyplot.xticks(x_tick_values, rotation=90)
x_min = numpy.percentile(coefficients, 1.)
x_max = numpy.percentile(coefficients, 99.)
pyplot.xlim([x_min, x_max])
for j in range(num_predictors):
axes_object.text(
0, y_coords[j], predictor_names[j], color=BAR_GRAPH_FONT_COLOUR,
horizontalalignment='center', verticalalignment='center',
fontsize=BAR_GRAPH_FONT_SIZE)
def _add_colour_bar(
axes_object, colour_map_object, values_to_colour, min_colour_value,
max_colour_value, colour_norm_object=None,
orientation_string='vertical', extend_min=True, extend_max=True):
"""Adds colour bar to existing axes.
:param axes_object: Existing axes (instance of
`matplotlib.axes._subplots.AxesSubplot`).
:param colour_map_object: Colour scheme (instance of
`matplotlib.pyplot.cm`).
:param values_to_colour: numpy array of values to colour.
:param min_colour_value: Minimum value in colour scheme.
:param max_colour_value: Max value in colour scheme.
:param colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`,
defining the scale of the colour map. If `colour_norm_object is None`,
will assume that scale is linear.
:param orientation_string: Orientation of colour bar ("vertical" or
"horizontal").
:param extend_min: Boolean flag. If True, the bottom of the colour bar will
have an arrow. If False, it will be a flat line, suggesting that lower
values are not possible.
:param extend_max: Same but for top of colour bar.
:return: colour_bar_object: Colour bar (instance of
`matplotlib.pyplot.colorbar`) created by this method.
"""
if colour_norm_object is None:
colour_norm_object = matplotlib.colors.Normalize(
vmin=min_colour_value, vmax=max_colour_value, clip=False)
scalar_mappable_object = pyplot.cm.ScalarMappable(
cmap=colour_map_object, norm=colour_norm_object)
scalar_mappable_object.set_array(values_to_colour)
if extend_min and extend_max:
extend_string = 'both'
elif extend_min:
extend_string = 'min'
elif extend_max:
extend_string = 'max'
else:
extend_string = 'neither'
if orientation_string == 'horizontal':
padding = 0.075
else:
padding = 0.05
colour_bar_object = pyplot.colorbar(
ax=axes_object, mappable=scalar_mappable_object,
orientation=orientation_string, pad=padding, extend=extend_string,
shrink=0.8)
colour_bar_object.ax.tick_params(labelsize=FONT_SIZE)
return colour_bar_object
def plot_scores_2d(
score_matrix, min_colour_value, max_colour_value, x_tick_labels,
y_tick_labels, colour_map_object=pyplot.cm.plasma):
"""Plots scores on 2-D grid.
M = number of rows in grid
N = number of columns in grid
:param score_matrix: M-by-N numpy array of scores.
:param min_colour_value: Minimum value in colour scheme.
:param max_colour_value: Max value in colour scheme.
:param x_tick_labels: length-N numpy array of tick values.
:param y_tick_labels: length-M numpy array of tick values.
:param colour_map_object: Colour scheme (instance of
`matplotlib.pyplot.cm`).
"""
_, axes_object = pyplot.subplots(
1, 1, figsize=(DEFAULT_FIG_WIDTH_INCHES, DEFAULT_FIG_HEIGHT_INCHES)
)
pyplot.imshow(
score_matrix, cmap=colour_map_object, origin='lower',
vmin=min_colour_value, vmax=max_colour_value)
x_tick_values = numpy.linspace(
0, score_matrix.shape[1] - 1, num=score_matrix.shape[1], dtype=float
)
y_tick_values = numpy.linspace(
0, score_matrix.shape[0] - 1, num=score_matrix.shape[0], dtype=float
)
pyplot.xticks(x_tick_values, x_tick_labels)
pyplot.yticks(y_tick_values, y_tick_labels)
_add_colour_bar(
axes_object=axes_object, colour_map_object=colour_map_object,
values_to_colour=score_matrix, min_colour_value=min_colour_value,
max_colour_value=max_colour_value)
def setup_logistic_regression(lambda1=0., lambda2=0.):
"""Sets up (but does not train) logistic-regression model.
:param lambda1: L1-regularization weight.
:param lambda2: L2-regularization weight.
:return: model_object: Instance of `sklearn.linear_model.SGDClassifier`.
"""
assert lambda1 >= 0
assert lambda2 >= 0
if lambda1 < LAMBDA_TOLERANCE and lambda2 < LAMBDA_TOLERANCE:
return sklearn.linear_model.SGDClassifier(
loss='log', penalty='none', fit_intercept=True, verbose=0,
random_state=RANDOM_SEED)
if lambda1 < LAMBDA_TOLERANCE:
return sklearn.linear_model.SGDClassifier(
loss='log', penalty='l2', alpha=lambda2, fit_intercept=True,
verbose=0, random_state=RANDOM_SEED)
if lambda2 < LAMBDA_TOLERANCE:
return sklearn.linear_model.SGDClassifier(
loss='log', penalty='l1', alpha=lambda1, fit_intercept=True,
verbose=0, random_state=RANDOM_SEED)
alpha, l1_ratio = _lambdas_to_sklearn_inputs(
lambda1=lambda1, lambda2=lambda2)
return sklearn.linear_model.SGDClassifier(
loss='log', penalty='elasticnet', alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=True, verbose=0, random_state=RANDOM_SEED)
def train_logistic_regression(model_object, training_predictor_table,
training_target_table):
"""Trains logistic-regression model.
:param model_object: Untrained model created by `setup_logistic_regression`.
:param training_predictor_table: See doc for `read_feature_file`.
:param training_target_table: Same.
:return: model_object: Trained version of input.
"""
model_object.fit(
X=training_predictor_table.as_matrix(),
y=training_target_table[BINARIZED_TARGET_NAME].values
)
return model_object
def eval_binary_classifn(
observed_labels, forecast_probabilities, training_event_frequency,
verbose=True, create_plots=True, dataset_name=None):
"""Evaluates binary-classification model.
E = number of examples
:param observed_labels: length-E numpy array of observed labels (integers in
0...1, where 1 means that event occurred).
:param forecast_probabilities: length-E numpy array with forecast
probabilities of event (positive class).
:param training_event_frequency: Frequency of event in training data.
:param verbose: Boolean flag. If True, will print results to command
window.
:param create_plots: Boolean flag. If True, will create plots.
:param dataset_name: Dataset name (e.g., "validation"). Used only if
`create_plots == True or verbose == True`.
"""
pofd_by_threshold, pod_by_threshold = roc_curves.get_points_in_roc_curve(
observed_labels=observed_labels,
forecast_probabilities=forecast_probabilities)
max_peirce_score = numpy.nanmax(pod_by_threshold - pofd_by_threshold)
area_under_roc_curve = sklearn.metrics.auc(
x=pofd_by_threshold, y=pod_by_threshold)
pod_by_threshold, success_ratio_by_threshold = (
perf_diagrams.get_points_in_perf_diagram(
observed_labels=observed_labels,
forecast_probabilities=forecast_probabilities)
)
csi_by_threshold = (
(pod_by_threshold ** -1 + success_ratio_by_threshold ** -1 - 1) ** -1
)
max_csi = numpy.nanmax(csi_by_threshold)
mean_forecast_by_bin, event_freq_by_bin, num_examples_by_bin = (
attr_diagrams.get_points_in_relia_curve(
observed_labels=observed_labels,
forecast_probabilities=forecast_probabilities, num_bins=20)
)
uncertainty = training_event_frequency * (1. - training_event_frequency)
this_numerator = numpy.nansum(
num_examples_by_bin *
(mean_forecast_by_bin - event_freq_by_bin) ** 2
)
reliability = this_numerator / numpy.sum(num_examples_by_bin)
this_numerator = numpy.nansum(
num_examples_by_bin *
(event_freq_by_bin - training_event_frequency) ** 2
)
resolution = this_numerator / numpy.sum(num_examples_by_bin)
brier_score = uncertainty + reliability - resolution
brier_skill_score = (resolution - reliability) / uncertainty
evaluation_dict = {
MAX_PEIRCE_SCORE_KEY: max_peirce_score,
AUC_KEY: area_under_roc_curve,
MAX_CSI_KEY: max_csi,
BRIER_SCORE_KEY: brier_score,
BRIER_SKILL_SCORE_KEY: brier_skill_score
}
if verbose or create_plots:
dataset_name = dataset_name[0].upper() + dataset_name[1:]
if verbose:
print('{0:s} Max Peirce score (POD - POFD) = {1:.3f}'.format(
dataset_name, evaluation_dict[MAX_PEIRCE_SCORE_KEY]
))
print('{0:s} AUC (area under ROC curve) = {1:.3f}'.format(
dataset_name, evaluation_dict[AUC_KEY]
))
print('{0:s} Max CSI (critical success index) = {1:.3f}'.format(
dataset_name, evaluation_dict[MAX_CSI_KEY]
))
print('{0:s} Brier score = {1:.3f}'.format(
dataset_name, evaluation_dict[BRIER_SCORE_KEY]
))
message_string = (
'{0:s} Brier skill score (improvement over climatology) = {1:.3f}'
).format(dataset_name, evaluation_dict[BRIER_SKILL_SCORE_KEY])
print(message_string)
if not create_plots:
return evaluation_dict
_, axes_object = pyplot.subplots(
1, 1, figsize=(SMALL_FIG_WIDTH_INCHES, SMALL_FIG_HEIGHT_INCHES)
)
roc_curves.plot_roc_curve(
observed_labels=observed_labels,
forecast_probabilities=forecast_probabilities,
axes_object=axes_object)
title_string = '{0:s} ROC curve (AUC = {1:.3f})'.format(
dataset_name, evaluation_dict[AUC_KEY]
)
pyplot.title(title_string)
pyplot.show()
_, axes_object = pyplot.subplots(
1, 1, figsize=(SMALL_FIG_WIDTH_INCHES, SMALL_FIG_HEIGHT_INCHES)
)
perf_diagrams.plot_performance_diagram(
observed_labels=observed_labels,
forecast_probabilities=forecast_probabilities,
axes_object=axes_object)
title_string = '{0:s} performance diagram (max CSI = {1:.3f})'.format(
dataset_name, evaluation_dict[MAX_CSI_KEY]
)
pyplot.title(title_string)
pyplot.show()
figure_object, axes_object = pyplot.subplots(
1, 1, figsize=(SMALL_FIG_WIDTH_INCHES, SMALL_FIG_HEIGHT_INCHES)
)
attr_diagrams.plot_attributes_diagram(
observed_labels=observed_labels,
forecast_probabilities=forecast_probabilities, num_bins=20,
figure_object=figure_object, axes_object=axes_object)
title_string = (
'{0:s} attributes diagram (Brier skill score = {1:.3f})'
).format(dataset_name, evaluation_dict[BRIER_SKILL_SCORE_KEY])
axes_object.set_title(title_string)
pyplot.show()
return evaluation_dict
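# Worked example of the Brier decomposition used above (arbitrary numbers):
# with a training event frequency of 0.1, uncertainty = 0.1 * 0.9 = 0.09; if
# reliability = 0.01 and resolution = 0.04, then
# Brier score = 0.09 + 0.01 - 0.04 = 0.06 and
# Brier skill score = (0.04 - 0.01) / 0.09 ~= 0.33.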
def setup_classification_tree(min_examples_at_split=30,
min_examples_at_leaf=30):
"""Sets up (but does not train) decision tree for classification.
:param min_examples_at_split: Minimum number of examples at split node.
:param min_examples_at_leaf: Minimum number of examples at leaf node.
:return: model_object: Instance of `sklearn.tree.DecisionTreeClassifier`.
"""
return sklearn.tree.DecisionTreeClassifier(
criterion='entropy', min_samples_split=min_examples_at_split,
min_samples_leaf=min_examples_at_leaf, random_state=RANDOM_SEED)
def train_classification_tree(model_object, training_predictor_table,
training_target_table):
"""Trains decision tree for classification.
:param model_object: Untrained model created by `setup_classification_tree`.
:param training_predictor_table: See doc for `read_feature_file`.
:param training_target_table: Same.
:return: model_object: Trained version of input.
"""
model_object.fit(
X=training_predictor_table.as_matrix(),
y=training_target_table[BINARIZED_TARGET_NAME].values
)
return model_object
def setup_classification_forest(
max_predictors_per_split, num_trees=100, min_examples_at_split=30,
min_examples_at_leaf=30):
"""Sets up (but does not train) random forest for classification.
:param max_predictors_per_split: Max number of predictors to try at each
split.
:param num_trees: Number of trees.
:param min_examples_at_split: Minimum number of examples at split node.
:param min_examples_at_leaf: Minimum number of examples at leaf node.
:return: model_object: Instance of
`sklearn.ensemble.RandomForestClassifier`.
"""
return sklearn.ensemble.RandomForestClassifier(
n_estimators=num_trees, min_samples_split=min_examples_at_split,
min_samples_leaf=min_examples_at_leaf,
max_features=max_predictors_per_split, bootstrap=True,
random_state=RANDOM_SEED, verbose=2)
def train_classification_forest(model_object, training_predictor_table,
training_target_table):
"""Trains random forest for classification.
:param model_object: Untrained model created by
`setup_classification_forest`.
:param training_predictor_table: See doc for `read_feature_file`.
:param training_target_table: Same.
:return: model_object: Trained version of input.
"""
model_object.fit(
X=training_predictor_table.as_matrix(),
y=training_target_table[BINARIZED_TARGET_NAME].values
)
return model_object
def setup_classification_gbt(
max_predictors_per_split, num_trees=100, learning_rate=0.1,
min_examples_at_split=30, min_examples_at_leaf=30):
"""Sets up (but does not train) gradient-boosted trees for classification.
:param max_predictors_per_split: Max number of predictors to try at each
split.
:param num_trees: Number of trees.
:param learning_rate: Learning rate.
:param min_examples_at_split: Minimum number of examples at split node.
:param min_examples_at_leaf: Minimum number of examples at leaf node.
:return: model_object: Instance of
`sklearn.ensemble.GradientBoostingClassifier`.
"""
return sklearn.ensemble.GradientBoostingClassifier(
loss='exponential', learning_rate=learning_rate, n_estimators=num_trees,
min_samples_split=min_examples_at_split,
min_samples_leaf=min_examples_at_leaf,
max_features=max_predictors_per_split, random_state=RANDOM_SEED,
verbose=2)
def train_classification_gbt(model_object, training_predictor_table,
training_target_table):
"""Trains gradient-boosted trees for classification.
:param model_object: Untrained model created by
`setup_classification_gbt`.
:param training_predictor_table: See doc for `read_feature_file`.
:param training_target_table: Same.
:return: model_object: Trained version of input.
"""
model_object.fit(
X=training_predictor_table.as_matrix(),
y=training_target_table[BINARIZED_TARGET_NAME].values
)
return model_object
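# Hedged usage sketch, common to the tree, forest, and GBT setups above (the
# tables and the 0.1 event frequency are placeholders; the target tables are
# assumed to already contain the binarized target column):
#
#     >>> model_object = setup_classification_forest(max_predictors_per_split=10)
#     >>> model_object = train_classification_forest(
#     ...     model_object=model_object,
#     ...     training_predictor_table=training_predictor_table,
#     ...     training_target_table=training_target_table)
#     >>> forecast_probabilities = model_object.predict_proba(
#     ...     validation_predictor_table.values)[:, 1]
#     >>> eval_binary_classifn(
#     ...     observed_labels=validation_target_table[
#     ...         BINARIZED_TARGET_NAME].values,
#     ...     forecast_probabilities=forecast_probabilities,
#     ...     training_event_frequency=0.1, dataset_name='validation')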
| 1.890625 | 2 |
api/resources_portal/views/organization_invitations.py | AlexsLemonade/resources-portal | 0 | 12789494 | <gh_stars>0
from rest_framework import serializers, status, viewsets
from rest_framework.permissions import BasePermission, IsAuthenticated
from rest_framework.response import Response
from resources_portal.models import (
Organization,
OrganizationInvitation,
OrganizationUserSetting,
User,
)
from resources_portal.notifier import send_notifications
class OrganizationInvitationSerializer(serializers.ModelSerializer):
requester = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
request_receiver = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
organization = serializers.PrimaryKeyRelatedField(queryset=Organization.objects.all())
invite_or_request = serializers.CharField()
class Meta:
model = OrganizationInvitation
fields = (
"id",
"created_at",
"updated_at",
"status",
"invite_or_request",
"organization",
"request_receiver",
"requester",
)
read_only_fields = (
"id",
"created_at",
"updated_at",
"organization",
"request_receiver",
"requester",
)
class IsMemberAndOrganizationIsntPersonal(BasePermission):
def has_permission(self, request, view):
if "organization" not in request.data:
return False
organization = Organization.objects.get(pk=request.data["organization"])
return (
not organization.is_personal_organization and request.user in organization.members.all()
)
class OrganizationInvitationViewSet(viewsets.ModelViewSet):
queryset = OrganizationInvitation.objects.all()
serializer_class = OrganizationInvitationSerializer
permission_classes = [IsAuthenticated, IsMemberAndOrganizationIsntPersonal]
http_method_names = ["post", "options"]
def update_organizations(self, new_status, invitation, old_member):
if new_status == "ACCEPTED":
if invitation.invite_or_request == "INVITE":
new_member = invitation.request_receiver
# associated_user = invitation.requester
else:
new_member = invitation.requester
# Do we want to send to this user instead of new_member?
# associated_user = invitation.request_receiver
invitation.organization.members.add(new_member)
invitation.organization.assign_member_perms(new_member)
OrganizationUserSetting.objects.get_or_create(
user=new_member, organization=invitation.organization
)
# This is the logic we'll want for the invitation flow, but
# for now they're always being added.
# notification_type = f"ORG_{invitation.invite_or_request}_{new_status}"
send_notifications(
"ORGANIZATION_NEW_MEMBER", new_member, new_member, invitation.organization
)
send_notifications("ORGANIZATION_INVITE", new_member, old_member, invitation.organization)
def create(self, request, *args, **kwargs):
if not request.user.is_authenticated:
return Response(status=status.HTTP_401_UNAUTHORIZED)
serializer = OrganizationInvitationSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
requester = serializer.validated_data["requester"]
organization = serializer.validated_data["organization"]
invite_or_request = serializer.validated_data["invite_or_request"]
if invite_or_request == "INVITE" and not requester.has_perm("add_members", organization):
return Response(
data={"detail": f"{requester} does not have permission to add members"},
status=status.HTTP_403_FORBIDDEN,
)
# For now invitations are auto-accepted so we don't need these notifications.
# if request.data["invite_or_request"] == "INVITE":
# notification = Notification(
# notification_type="ORG_INVITE_CREATED",
# notified_user=request.user,
# associated_user=request_receiver,
# organization=organization,
# )
# notification.save()
# else:
# notification = Notification(
# notification_type="ORG_REQUEST_CREATED",
# notified_user=request_receiver,
# associated_user=request.user,
# organization=organization,
# )
# notification.save()
# Facilitate adding without confirmation by making the
# invitation accepted automatically.
request.data["status"] = "ACCEPTED"
response = super(OrganizationInvitationViewSet, self).create(request, *args, **kwargs)
# questionable
invitation = OrganizationInvitation.objects.get(id=response.data["id"])
self.update_organizations(request.data["status"], invitation, request.user)
return response
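    # Hedged usage sketch (ids are placeholders and the route name depends on
    # the URL conf): a member of a non-personal organization adds another user
    # by POSTing to this viewset with a body such as
    #     {"organization": 1, "requester": 2, "request_receiver": 3,
    #      "invite_or_request": "INVITE"}
    # and create() above forces the status to ACCEPTED, so
    # update_organizations() immediately adds the new member and assigns
    # member permissions.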
# For now, we don't need to update or destroy because we allow users to
# be added to organizatons without confirmation.
# def update(self, request, *args, **kwargs):
# if not request.user.is_authenticated:
# return Response(status=status.HTTP_401_UNAUTHORIZED)
# invitation = OrganizationInvitation.objects.get(pk=kwargs["pk"])
# requester_accepting = (
# request.user == invitation.requester and invitation.invite_or_request == "INVITE"
# )
# request_receiver_approving = (
# request.user == invitation.request_receiver
# and invitation.invite_or_request == "REQUEST"
# )
# if not (requester_accepting or request_receiver_approving):
# return Response(
# data={
# "detail": f"The current user, {request.user}, is not the correct user to handle invitation id {invitation.id}"
# },
# status=status.HTTP_403_FORBIDDEN,
# )
# if not invitation.status == "PENDING":
# return Response(
# data={
# "detail": f"Invitation id {invitation.id} has already been resolved with a status of {invitation.status}"
# },
# status=status.HTTP_400_BAD_REQUEST,
# )
# new_status = request.data["status"]
# response_status = super(OrganizationInvitationViewSet, self).update(
# request, *args, **kwargs
# )
# self.update_organizations(new_status, invitation)
# return response_status
# def destroy(self, request, *args, **kwargs):
# if not request.user.is_authenticated:
# return Response(status=status.HTTP_401_UNAUTHORIZED)
# invitation = OrganizationInvitation.objects.get(pk=kwargs["pk"])
# if not request.user == invitation.requester:
# return Response(
# data={
# "detail": f"The current user, {request.user}, is not the requester of invitation id {invitation.id}"
# },
# status=status.HTTP_403_FORBIDDEN,
# )
    #     return super(OrganizationInvitationViewSet, self).destroy(request, *args, **kwargs)
| 2 | 2 |
code/scripts/normalize_to_cpm.py | YimengYang/wol | 0 | 12789495 | #!/usr/bin/env python3
"""Normalize a BIOM table of counts to copies per million sequences (cpm).
Usage:
normalize_to_cpm.py input.biom output.biom
Notes:
This is a pure BIOM solution, in contrast to the more complicated and
slower Pandas solution.
"""
from sys import argv
from biom import load_table
from biom.util import biom_open
n = 1000000
table = load_table(argv[1])
table.transform(
lambda data, id_, md: (data / data.sum() * n).round(), axis='sample')
with biom_open(argv[2], 'w') as f:
table.to_hdf5(f, table.generated_by)
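# Worked example of the transform above (arbitrary counts): a sample with
# counts [2, 3, 5] sums to 10, so the normalized values are
# [2, 3, 5] / 10 * 1e6 = [200000., 300000., 500000.] copies per million.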
| 2.53125 | 3 |
pmutt/tests/reaction/test_pmutt_reaction.py | wittregr/pMuTT | 28 | 12789496 | <gh_stars>10-100
# -*- coding: utf-8 -*-
"""
pmutt.test_pmutt_model_reaction
Tests for pmutt module
"""
import unittest
import numpy as np
from ase.build import molecule
from pmutt import constants as c
from pmutt import reaction as rxn
from pmutt.empirical.nasa import Nasa
from pmutt.statmech import StatMech, presets
class TestReaction(unittest.TestCase):
def setUp(self):
'''Reactions using Nasa polynomial'''
self.H2O_nasa = Nasa(name='H2O',
T_low=200.,
T_mid=1000.,
T_high=3500.,
elements={
'H': 2,
'O': 1
},
a_low=[
4.19864056E+00, -2.03643410E-03,
6.52040211E-06, -5.48797062E-09,
1.77197817E-12, -3.02937267E+04,
-8.49032208E-01
],
a_high=[
3.03399249E+00, 2.17691804E-03,
-1.64072518E-07, -9.70419870E-11,
1.68200992E-14, -3.00042971E+04,
4.96677010E+00
])
self.H2_nasa = Nasa(name='H2',
T_low=200.,
T_mid=1000.,
T_high=3500.,
elements={'H': 2},
a_low=[
2.34433112E+00, 7.98052075E-03,
-1.94781510E-05, 2.01572094E-08,
-7.37611761E-12, -9.17935173E+02,
6.83010238E-01
],
a_high=[
3.33727920E+00, -4.94024731E-05,
4.99456778E-07, -1.79566394E-10,
2.00255376E-14, -9.50158922E+02,
-3.20502331E+00
])
self.O2_nasa = Nasa(name='O2',
T_low=200.,
T_mid=1000.,
T_high=3500.,
elements={'O': 2},
a_low=[
3.78245636E+00, -2.99673416E-03,
9.84730201E-06, -9.68129509E-09,
3.24372837E-12, -1.06394356E+03, 3.65767573E+00
],
a_high=[
3.28253784E+00, 1.48308754E-03,
-7.57966669E-07, 2.09470555E-10,
-2.16717794E-14, -1.08845772E+03,
5.45323129E+00
])
self.rxn_nasa = rxn.Reaction(reactants=[self.H2_nasa, self.O2_nasa],
reactants_stoich=[1., 0.5],
products=[self.H2O_nasa],
products_stoich=[1.])
self.rxn_nasa_dict = {
'class':
"<class 'pmutt.reaction.Reaction'>",
'products': [{
'T_high':
3500.0,
'T_low':
200.0,
'T_mid':
1000.0,
'a_high': [
3.03399249, 0.00217691804, -1.64072518e-07, -9.7041987e-11,
1.68200992e-14, -30004.2971, 4.9667701
],
'a_low': [
4.19864056, -0.0020364341, 6.52040211e-06, -5.48797062e-09,
1.77197817e-12, -30293.7267, -0.849032208
],
'class':
"<class 'pmutt.empirical.nasa.Nasa'>",
'elements': {
'H': 2,
'O': 1
},
'name':
'H2O',
'notes':
None,
'phase':
None,
'model':
None,
'misc_models':
None,
'cat_site':
None,
'n_sites':
None,
'smiles':
None,
'type':
'nasa'
}],
'products_stoich': [1.0],
'reactants': [{
'T_high':
3500.0,
'T_low':
200.0,
'T_mid':
1000.0,
'a_high': [
3.3372792, -4.94024731e-05, 4.99456778e-07,
-1.79566394e-10, 2.00255376e-14, -950.158922, -3.20502331
],
'a_low': [
2.34433112, 0.00798052075, -1.9478151e-05, 2.01572094e-08,
-7.37611761e-12, -917.935173, 0.683010238
],
'class':
"<class 'pmutt.empirical.nasa.Nasa'>",
'elements': {
'H': 2
},
'name':
'H2',
'notes':
None,
'phase':
None,
'model':
None,
'misc_models':
None,
'cat_site':
None,
'n_sites':
None,
'smiles':
None,
'type':
'nasa'
}, {
'T_high':
3500.0,
'T_low':
200.0,
'T_mid':
1000.0,
'a_high': [
3.28253784, 0.00148308754, -7.57966669e-07, 2.09470555e-10,
-2.16717794e-14, -1088.45772, 5.45323129
],
'a_low': [
3.78245636, -0.00299673416, 9.84730201e-06,
-9.68129509e-09, 3.24372837e-12, -1063.94356, 3.65767573
],
'class':
"<class 'pmutt.empirical.nasa.Nasa'>",
'elements': {
'O': 2
},
'name':
'O2',
'notes':
None,
'phase':
None,
'model':
None,
'misc_models':
None,
'cat_site':
None,
'n_sites':
None,
'smiles':
None,
'type':
'nasa'
}],
'reactants_stoich': [1.0, 0.5],
'transition_state':
None,
'transition_state_stoich':
None,
'reaction_str': 'H2+0.50O2=H2O',
}
'''Reactions using StatMech'''
ideal_gas_param = presets['idealgas']
self.H2O_sm = StatMech(name='H2O',
atoms=molecule('H2O'),
symmetrynumber=2,
vib_wavenumbers=[3825.434, 3710.2642, 1582.432],
potentialenergy=-6.7598,
spin=0.,
**ideal_gas_param)
self.H2_sm = StatMech(name='H2',
atoms=molecule('H2'),
symmetrynumber=2,
vib_wavenumbers=[4306.1793],
potentialenergy=-14.2209,
spin=0.,
**ideal_gas_param)
self.O2_sm = StatMech(name='O2',
atoms=molecule('O2'),
symmetrynumber=2,
vib_wavenumbers=[1556.],
potentialenergy=-9.862407,
spin=1.,
**ideal_gas_param)
# This is an arbitrary transition state for testing
self.H2O_TS_sm = StatMech(name='H2O_TS',
atoms=molecule('H2O'),
symmetrynumber=1.,
vib_wavenumbers=[4000., 3900., 1600.],
potentialenergy=-5.7598,
spin=0.,
**ideal_gas_param)
self.rxn_sm = rxn.Reaction(reactants=[self.H2_sm, self.O2_sm],
reactants_stoich=[1., 0.5],
products=[self.H2O_sm],
products_stoich=[1.],
transition_state=[self.H2O_TS_sm],
transition_state_stoich=[1.])
self.species_dict = {
'H2O': self.H2O_sm,
'H2': self.H2_sm,
'O2': self.O2_sm,
'H2O_TS': self.H2O_TS_sm
}
self.maxDiff = None
def test_compare_element_balance(self):
self.assertIsNone(self.rxn_nasa.check_element_balance())
def test_get_species(self):
self.assertDictEqual(self.rxn_sm.get_species(key='name'),
self.species_dict)
def test_get_q_state(self):
exp_q_react = self.H2_sm.get_q(T=c.T0('K')) \
* self.O2_sm.get_q(T=c.T0('K'))**0.5
exp_q_prod = self.H2O_sm.get_q(T=c.T0('K'))
exp_q_TS = self.H2O_TS_sm.get_q(T=c.T0('K'))
self.assertAlmostEqual(
self.rxn_sm.get_q_state(state='reactants', T=c.T0('K')),
exp_q_react)
self.assertAlmostEqual(
self.rxn_sm.get_q_state(state='products', T=c.T0('K')), exp_q_prod)
self.assertAlmostEqual(
self.rxn_sm.get_q_state(state='transition state', T=c.T0('K')),
exp_q_TS)
def test_get_CvoR_state(self):
exp_CvoR_react = self.H2_sm.get_CvoR(T=c.T0('K')) \
+ self.O2_sm.get_CvoR(T=c.T0('K'))*0.5
exp_CvoR_prod = self.H2O_sm.get_CvoR(T=c.T0('K'))
exp_CvoR_TS = self.H2O_TS_sm.get_CvoR(T=c.T0('K'))
self.assertAlmostEqual(
self.rxn_sm.get_CvoR_state(state='reactants', T=c.T0('K')),
exp_CvoR_react)
self.assertAlmostEqual(
self.rxn_sm.get_CvoR_state(state='products', T=c.T0('K')),
exp_CvoR_prod)
self.assertAlmostEqual(
self.rxn_sm.get_CvoR_state(state='transition state', T=c.T0('K')),
exp_CvoR_TS)
def test_get_Cv_state(self):
units = 'J/mol/K'
exp_Cv_react = self.H2_sm.get_Cv(T=c.T0('K'), units=units) \
+ self.O2_sm.get_Cv(T=c.T0('K'), units=units)*0.5
exp_Cv_prod = self.H2O_sm.get_Cv(T=c.T0('K'), units=units)
exp_Cv_TS = self.H2O_TS_sm.get_Cv(T=c.T0('K'), units=units)
self.assertAlmostEqual(
self.rxn_sm.get_Cv_state(state='reactants',
T=c.T0('K'),
units=units), exp_Cv_react)
self.assertAlmostEqual(
self.rxn_sm.get_Cv_state(state='products',
T=c.T0('K'),
units=units), exp_Cv_prod)
self.assertAlmostEqual(
self.rxn_sm.get_Cv_state(state='transition state',
T=c.T0('K'),
units=units), exp_Cv_TS)
def test_get_CpoR_state(self):
exp_CpoR_react = self.H2_sm.get_CpoR(T=c.T0('K')) \
+ self.O2_sm.get_CpoR(T=c.T0('K'))*0.5
exp_CpoR_prod = self.H2O_sm.get_CpoR(T=c.T0('K'))
exp_CpoR_TS = self.H2O_TS_sm.get_CpoR(T=c.T0('K'))
self.assertAlmostEqual(
self.rxn_sm.get_CpoR_state(state='reactants', T=c.T0('K')),
exp_CpoR_react)
self.assertAlmostEqual(
self.rxn_sm.get_CpoR_state(state='products', T=c.T0('K')),
exp_CpoR_prod)
self.assertAlmostEqual(
self.rxn_sm.get_CpoR_state(state='transition state', T=c.T0('K')),
exp_CpoR_TS)
def test_get_Cp_state(self):
units = 'J/mol/K'
exp_Cp_react = self.H2_sm.get_Cp(T=c.T0('K'), units=units) \
+ self.O2_sm.get_Cp(T=c.T0('K'), units=units)*0.5
exp_Cp_prod = self.H2O_sm.get_Cp(T=c.T0('K'), units=units)
exp_Cp_TS = self.H2O_TS_sm.get_Cp(T=c.T0('K'), units=units)
self.assertAlmostEqual(
self.rxn_sm.get_Cp_state(state='reactants',
T=c.T0('K'),
units=units), exp_Cp_react)
self.assertAlmostEqual(
self.rxn_sm.get_Cp_state(state='products',
T=c.T0('K'),
units=units), exp_Cp_prod)
self.assertAlmostEqual(
self.rxn_sm.get_Cp_state(state='transition state',
T=c.T0('K'),
units=units), exp_Cp_TS)
def test_get_EoRT_state(self):
exp_EoRT_react = self.H2_sm.get_EoRT(T=c.T0('K')) \
+ self.O2_sm.get_EoRT(T=c.T0('K'))*0.5
exp_EoRT_prod = self.H2O_sm.get_EoRT(T=c.T0('K'))
exp_EoRT_TS = self.H2O_TS_sm.get_EoRT(T=c.T0('K'))
self.assertAlmostEqual(
self.rxn_sm.get_EoRT_state(state='reactants', T=c.T0('K')),
exp_EoRT_react)
self.assertAlmostEqual(
self.rxn_sm.get_EoRT_state(state='products', T=c.T0('K')),
exp_EoRT_prod)
self.assertAlmostEqual(
self.rxn_sm.get_EoRT_state(state='transition state', T=c.T0('K')),
exp_EoRT_TS)
def test_get_E_state(self):
units = 'J/mol'
exp_E_react = self.H2_sm.get_E(T=c.T0('K'), units=units) \
+ self.O2_sm.get_E(T=c.T0('K'), units=units)*0.5
exp_E_prod = self.H2O_sm.get_E(T=c.T0('K'), units=units)
exp_E_TS = self.H2O_TS_sm.get_E(T=c.T0('K'), units=units)
self.assertAlmostEqual(
self.rxn_sm.get_E_state(state='reactants',
T=c.T0('K'),
units=units), exp_E_react)
self.assertAlmostEqual(
self.rxn_sm.get_E_state(state='products', T=c.T0('K'),
units=units), exp_E_prod)
self.assertAlmostEqual(
self.rxn_sm.get_E_state(state='transition state',
T=c.T0('K'),
units=units), exp_E_TS)
def test_get_UoRT_state(self):
exp_UoRT_react = self.H2_sm.get_UoRT(T=c.T0('K')) \
+ self.O2_sm.get_UoRT(T=c.T0('K'))*0.5
exp_UoRT_prod = self.H2O_sm.get_UoRT(T=c.T0('K'))
exp_UoRT_TS = self.H2O_TS_sm.get_UoRT(T=c.T0('K'))
self.assertAlmostEqual(
self.rxn_sm.get_UoRT_state(state='reactants', T=c.T0('K')),
exp_UoRT_react)
self.assertAlmostEqual(
self.rxn_sm.get_UoRT_state(state='products', T=c.T0('K')),
exp_UoRT_prod)
self.assertAlmostEqual(
self.rxn_sm.get_UoRT_state(state='transition state', T=c.T0('K')),
exp_UoRT_TS)
def test_get_U_state(self):
units = 'J/mol'
exp_U_react = self.H2_sm.get_U(T=c.T0('K'), units=units) \
+ self.O2_sm.get_U(T=c.T0('K'), units=units)*0.5
exp_U_prod = self.H2O_sm.get_U(T=c.T0('K'), units=units)
exp_U_TS = self.H2O_TS_sm.get_U(T=c.T0('K'), units=units)
self.assertAlmostEqual(
self.rxn_sm.get_U_state(state='reactants',
T=c.T0('K'),
units=units), exp_U_react)
self.assertAlmostEqual(
self.rxn_sm.get_U_state(state='products', T=c.T0('K'),
units=units), exp_U_prod)
self.assertAlmostEqual(
self.rxn_sm.get_U_state(state='transition state',
T=c.T0('K'),
units=units), exp_U_TS)
def test_get_HoRT_state(self):
exp_HoRT_react = self.H2_sm.get_HoRT(T=c.T0('K')) \
+ self.O2_sm.get_HoRT(T=c.T0('K'))*0.5
exp_HoRT_prod = self.H2O_sm.get_HoRT(T=c.T0('K'))
exp_HoRT_TS = self.H2O_TS_sm.get_HoRT(T=c.T0('K'))
self.assertAlmostEqual(
self.rxn_sm.get_HoRT_state(state='reactants', T=c.T0('K')),
exp_HoRT_react)
self.assertAlmostEqual(
self.rxn_sm.get_HoRT_state(state='products', T=c.T0('K')),
exp_HoRT_prod)
self.assertAlmostEqual(
self.rxn_sm.get_HoRT_state(state='transition state', T=c.T0('K')),
exp_HoRT_TS)
def test_get_H_state(self):
units = 'J/mol'
exp_H_react = self.H2_sm.get_H(T=c.T0('K'), units=units) \
+ self.O2_sm.get_H(T=c.T0('K'), units=units)*0.5
exp_H_prod = self.H2O_sm.get_H(T=c.T0('K'), units=units)
exp_H_TS = self.H2O_TS_sm.get_H(T=c.T0('K'), units=units)
self.assertAlmostEqual(
self.rxn_sm.get_H_state(state='reactants',
T=c.T0('K'),
units=units), exp_H_react)
self.assertAlmostEqual(
self.rxn_sm.get_H_state(state='products', T=c.T0('K'),
units=units), exp_H_prod)
self.assertAlmostEqual(
self.rxn_sm.get_H_state(state='transition state',
T=c.T0('K'),
units=units), exp_H_TS)
def test_get_SoR_state(self):
exp_SoR_react = self.H2_sm.get_SoR(T=c.T0('K')) \
+ self.O2_sm.get_SoR(T=c.T0('K'))*0.5
exp_SoR_prod = self.H2O_sm.get_SoR(T=c.T0('K'))
exp_SoR_TS = self.H2O_TS_sm.get_SoR(T=c.T0('K'))
self.assertAlmostEqual(
self.rxn_sm.get_SoR_state(state='reactants', T=c.T0('K')),
exp_SoR_react)
self.assertAlmostEqual(
self.rxn_sm.get_SoR_state(state='products', T=c.T0('K')),
exp_SoR_prod)
self.assertAlmostEqual(
self.rxn_sm.get_SoR_state(state='transition state', T=c.T0('K')),
exp_SoR_TS)
def test_get_S_state(self):
units = 'J/mol/K'
exp_S_react = self.H2_sm.get_S(T=c.T0('K'), units=units) \
+ self.O2_sm.get_S(T=c.T0('K'), units=units)*0.5
exp_S_prod = self.H2O_sm.get_S(T=c.T0('K'), units=units)
exp_S_TS = self.H2O_TS_sm.get_S(T=c.T0('K'), units=units)
self.assertAlmostEqual(
self.rxn_sm.get_S_state(state='reactants',
T=c.T0('K'),
units=units), exp_S_react)
self.assertAlmostEqual(
self.rxn_sm.get_S_state(state='products', T=c.T0('K'),
units=units), exp_S_prod)
self.assertAlmostEqual(
self.rxn_sm.get_S_state(state='transition state',
T=c.T0('K'),
units=units), exp_S_TS)
def test_get_FoRT_state(self):
exp_FoRT_react = self.H2_sm.get_FoRT(T=c.T0('K')) \
+ self.O2_sm.get_FoRT(T=c.T0('K'))*0.5
exp_FoRT_prod = self.H2O_sm.get_FoRT(T=c.T0('K'))
exp_FoRT_TS = self.H2O_TS_sm.get_FoRT(T=c.T0('K'))
self.assertAlmostEqual(
self.rxn_sm.get_FoRT_state(state='reactants', T=c.T0('K')),
exp_FoRT_react)
self.assertAlmostEqual(
self.rxn_sm.get_FoRT_state(state='products', T=c.T0('K')),
exp_FoRT_prod)
self.assertAlmostEqual(
self.rxn_sm.get_FoRT_state(state='transition state', T=c.T0('K')),
exp_FoRT_TS)
def test_get_F_state(self):
units = 'J/mol'
exp_F_react = self.H2_sm.get_F(T=c.T0('K'), units=units) \
+ self.O2_sm.get_F(T=c.T0('K'), units=units)*0.5
exp_F_prod = self.H2O_sm.get_F(T=c.T0('K'), units=units)
exp_F_TS = self.H2O_TS_sm.get_F(T=c.T0('K'), units=units)
self.assertAlmostEqual(
self.rxn_sm.get_F_state(state='reactants',
T=c.T0('K'),
units=units), exp_F_react)
self.assertAlmostEqual(
self.rxn_sm.get_F_state(state='products', T=c.T0('K'),
units=units), exp_F_prod)
self.assertAlmostEqual(
self.rxn_sm.get_F_state(state='transition state',
T=c.T0('K'),
units=units), exp_F_TS)
def test_get_GoRT_state(self):
exp_GoRT_react = self.H2_sm.get_GoRT(T=c.T0('K')) \
+ self.O2_sm.get_GoRT(T=c.T0('K'))*0.5
exp_GoRT_prod = self.H2O_sm.get_GoRT(T=c.T0('K'))
exp_GoRT_TS = self.H2O_TS_sm.get_GoRT(T=c.T0('K'))
self.assertAlmostEqual(
self.rxn_sm.get_GoRT_state(state='reactants', T=c.T0('K')),
exp_GoRT_react)
self.assertAlmostEqual(
self.rxn_sm.get_GoRT_state(state='products', T=c.T0('K')),
exp_GoRT_prod)
self.assertAlmostEqual(
self.rxn_sm.get_GoRT_state(state='transition state', T=c.T0('K')),
exp_GoRT_TS)
def test_get_G_state(self):
units = 'J/mol'
exp_G_react = self.H2_sm.get_G(T=c.T0('K'), units=units) \
+ self.O2_sm.get_G(T=c.T0('K'), units=units)*0.5
exp_G_prod = self.H2O_sm.get_G(T=c.T0('K'), units=units)
exp_G_TS = self.H2O_TS_sm.get_G(T=c.T0('K'), units=units)
self.assertAlmostEqual(
self.rxn_sm.get_G_state(state='reactants',
T=c.T0('K'),
units=units), exp_G_react)
self.assertAlmostEqual(
self.rxn_sm.get_G_state(state='products', T=c.T0('K'),
units=units), exp_G_prod)
self.assertAlmostEqual(
self.rxn_sm.get_G_state(state='transition state',
T=c.T0('K'),
units=units), exp_G_TS)
def test_get_delta_CvoR(self):
exp_sm_CvoR = self.H2O_sm.get_CvoR(T=c.T0('K')) \
- self.H2_sm.get_CvoR(T=c.T0('K')) \
- self.O2_sm.get_CvoR(T=c.T0('K'))*0.5
self.assertAlmostEqual(self.rxn_sm.get_delta_CvoR(T=c.T0('K')),
exp_sm_CvoR)
self.assertAlmostEqual(
self.rxn_sm.get_delta_CvoR(T=c.T0('K'), rev=True), -exp_sm_CvoR)
exp_sm_CvoR_TS = self.H2O_TS_sm.get_CvoR(T=c.T0('K')) \
- self.H2_sm.get_CvoR(T=c.T0('K')) \
- self.O2_sm.get_CvoR(T=c.T0('K'))*0.5
exp_sm_CvoR_rev_TS = self.H2O_TS_sm.get_CvoR(T=c.T0('K')) \
- self.H2O_sm.get_CvoR(T=c.T0('K'))
self.assertAlmostEqual(
self.rxn_sm.get_delta_CvoR(T=c.T0('K'), act=True), exp_sm_CvoR_TS)
self.assertAlmostEqual(
self.rxn_sm.get_delta_CvoR(T=c.T0('K'), rev=True, act=True),
exp_sm_CvoR_rev_TS)
self.assertAlmostEqual(self.rxn_sm.get_CvoR_act(T=c.T0('K'), rev=True),
exp_sm_CvoR_rev_TS)
def test_get_delta_Cv(self):
units = 'J/mol/K'
exp_sm_Cv = self.H2O_sm.get_Cv(T=c.T0('K'), units=units) \
- self.H2_sm.get_Cv(T=c.T0('K'), units=units) \
- self.O2_sm.get_Cv(T=c.T0('K'), units=units)*0.5
self.assertAlmostEqual(
self.rxn_sm.get_delta_Cv(T=c.T0('K'), units=units), exp_sm_Cv)
self.assertAlmostEqual(
self.rxn_sm.get_delta_Cv(T=c.T0('K'), units=units, rev=True),
-exp_sm_Cv)
exp_sm_Cv_TS = self.H2O_TS_sm.get_Cv(T=c.T0('K'), units=units) \
- self.H2_sm.get_Cv(T=c.T0('K'), units=units) \
- self.O2_sm.get_Cv(T=c.T0('K'), units=units)*0.5
exp_sm_Cv_rev_TS = self.H2O_TS_sm.get_Cv(T=c.T0('K'), units=units) \
- self.H2O_sm.get_Cv(T=c.T0('K'), units=units)
self.assertAlmostEqual(
self.rxn_sm.get_delta_Cv(T=c.T0('K'), act=True, units=units),
exp_sm_Cv_TS)
self.assertAlmostEqual(
self.rxn_sm.get_delta_Cv(T=c.T0('K'),
rev=True,
units=units,
act=True), exp_sm_Cv_rev_TS)
self.assertAlmostEqual(
self.rxn_sm.get_Cv_act(T=c.T0('K'), rev=True, units=units),
exp_sm_Cv_rev_TS)
def test_get_delta_CpoR(self):
exp_nasa_CpoR = self.H2O_nasa.get_CpoR(T=c.T0('K')) \
- self.H2_nasa.get_CpoR(T=c.T0('K')) \
- self.O2_nasa.get_CpoR(T=c.T0('K'))*0.5
exp_sm_CpoR = self.H2O_sm.get_CpoR(T=c.T0('K')) \
- self.H2_sm.get_CpoR(T=c.T0('K')) \
- self.O2_sm.get_CpoR(T=c.T0('K'))*0.5
self.assertAlmostEqual(self.rxn_nasa.get_delta_CpoR(T=c.T0('K')),
exp_nasa_CpoR)
self.assertAlmostEqual(
self.rxn_nasa.get_delta_CpoR(T=c.T0('K'), rev=True),
-exp_nasa_CpoR)
self.assertAlmostEqual(self.rxn_sm.get_delta_CpoR(T=c.T0('K')),
exp_sm_CpoR)
self.assertAlmostEqual(
self.rxn_sm.get_delta_CpoR(T=c.T0('K'), rev=True), -exp_sm_CpoR)
exp_sm_CpoR_TS = self.H2O_TS_sm.get_CpoR(T=c.T0('K')) \
- self.H2_sm.get_CpoR(T=c.T0('K')) \
- self.O2_sm.get_CpoR(T=c.T0('K'))*0.5
exp_sm_CpoR_rev_TS = self.H2O_TS_sm.get_CpoR(T=c.T0('K')) \
- self.H2O_sm.get_CpoR(T=c.T0('K'))
self.assertAlmostEqual(
self.rxn_sm.get_delta_CpoR(T=c.T0('K'), act=True), exp_sm_CpoR_TS)
self.assertAlmostEqual(
self.rxn_sm.get_delta_CpoR(T=c.T0('K'), rev=True, act=True),
exp_sm_CpoR_rev_TS)
self.assertAlmostEqual(self.rxn_sm.get_CpoR_act(T=c.T0('K'), rev=True),
exp_sm_CpoR_rev_TS)
def test_get_delta_Cp(self):
units = 'J/mol/K'
exp_nasa_Cp = self.H2O_nasa.get_Cp(T=c.T0('K'), units=units) \
- self.H2_nasa.get_Cp(T=c.T0('K'), units=units) \
- self.O2_nasa.get_Cp(T=c.T0('K'), units=units)*0.5
exp_sm_Cp = self.H2O_sm.get_Cp(T=c.T0('K'), units=units) \
- self.H2_sm.get_Cp(T=c.T0('K'), units=units) \
- self.O2_sm.get_Cp(T=c.T0('K'), units=units)*0.5
self.assertAlmostEqual(
self.rxn_nasa.get_delta_Cp(T=c.T0('K'), units=units), exp_nasa_Cp)
self.assertAlmostEqual(
self.rxn_nasa.get_delta_Cp(T=c.T0('K'), units=units, rev=True),
-exp_nasa_Cp)
self.assertAlmostEqual(
self.rxn_sm.get_delta_Cp(T=c.T0('K'), units=units), exp_sm_Cp)
self.assertAlmostEqual(
self.rxn_sm.get_delta_Cp(T=c.T0('K'), units=units, rev=True),
-exp_sm_Cp)
exp_sm_Cp_TS = self.H2O_TS_sm.get_Cp(T=c.T0('K'), units=units) \
- self.H2_sm.get_Cp(T=c.T0('K'), units=units) \
- self.O2_sm.get_Cp(T=c.T0('K'), units=units)*0.5
exp_sm_Cp_rev_TS = self.H2O_TS_sm.get_Cp(T=c.T0('K'), units=units) \
- self.H2O_sm.get_Cp(T=c.T0('K'), units=units)
self.assertAlmostEqual(
self.rxn_sm.get_delta_Cp(T=c.T0('K'), act=True, units=units),
exp_sm_Cp_TS)
self.assertAlmostEqual(
self.rxn_sm.get_delta_Cp(T=c.T0('K'),
rev=True,
act=True,
units=units), exp_sm_Cp_rev_TS)
self.assertAlmostEqual(
self.rxn_sm.get_Cp_act(T=c.T0('K'), rev=True, units=units),
exp_sm_Cp_rev_TS)
def test_get_delta_EoRT(self):
exp_sm_EoRT = self.H2O_sm.get_EoRT(T=c.T0('K')) \
- self.H2_sm.get_EoRT(T=c.T0('K')) \
- self.O2_sm.get_EoRT(T=c.T0('K'))*0.5
self.assertAlmostEqual(self.rxn_sm.get_delta_EoRT(T=c.T0('K')),
exp_sm_EoRT)
self.assertAlmostEqual(
self.rxn_sm.get_delta_EoRT(T=c.T0('K'), rev=True), -exp_sm_EoRT)
exp_sm_EoRT_TS = self.H2O_TS_sm.get_EoRT(T=c.T0('K')) \
- self.H2_sm.get_EoRT(T=c.T0('K')) \
- self.O2_sm.get_EoRT(T=c.T0('K'))*0.5
exp_sm_EoRT_rev_TS = self.H2O_TS_sm.get_EoRT(T=c.T0('K')) \
- self.H2O_sm.get_EoRT(T=c.T0('K'))
self.assertAlmostEqual(
self.rxn_sm.get_delta_EoRT(T=c.T0('K'), act=True), exp_sm_EoRT_TS)
self.assertAlmostEqual(
self.rxn_sm.get_delta_EoRT(T=c.T0('K'), rev=True, act=True),
exp_sm_EoRT_rev_TS)
def test_get_delta_E(self):
units = 'J/mol'
exp_sm_E = self.H2O_sm.get_E(T=c.T0('K'), units=units) \
- self.H2_sm.get_E(T=c.T0('K'), units=units) \
- self.O2_sm.get_E(T=c.T0('K'), units=units)*0.5
self.assertAlmostEqual(
self.rxn_sm.get_delta_E(T=c.T0('K'), units=units), exp_sm_E)
self.assertAlmostEqual(
self.rxn_sm.get_delta_E(T=c.T0('K'), rev=True, units=units),
-exp_sm_E)
exp_sm_E_TS = self.H2O_TS_sm.get_E(T=c.T0('K'), units=units) \
- self.H2_sm.get_E(T=c.T0('K'), units=units) \
- self.O2_sm.get_E(T=c.T0('K'), units=units)*0.5
exp_sm_E_rev_TS = self.H2O_TS_sm.get_E(T=c.T0('K'), units=units) \
- self.H2O_sm.get_E(T=c.T0('K'), units=units)
self.assertAlmostEqual(
self.rxn_sm.get_delta_E(T=c.T0('K'), act=True, units=units),
exp_sm_E_TS)
self.assertAlmostEqual(
self.rxn_sm.get_delta_E(T=c.T0('K'),
rev=True,
act=True,
units=units), exp_sm_E_rev_TS)
def test_get_delta_UoRT(self):
exp_sm_UoRT = self.H2O_sm.get_UoRT(T=c.T0('K')) \
- self.H2_sm.get_UoRT(T=c.T0('K')) \
- self.O2_sm.get_UoRT(T=c.T0('K'))*0.5
self.assertAlmostEqual(self.rxn_sm.get_delta_UoRT(T=c.T0('K')),
exp_sm_UoRT)
self.assertAlmostEqual(
self.rxn_sm.get_delta_UoRT(T=c.T0('K'), rev=True), -exp_sm_UoRT)
exp_sm_UoRT_TS = self.H2O_TS_sm.get_UoRT(T=c.T0('K')) \
- self.H2_sm.get_UoRT(T=c.T0('K')) \
- self.O2_sm.get_UoRT(T=c.T0('K'))*0.5
exp_sm_UoRT_rev_TS = self.H2O_TS_sm.get_UoRT(T=c.T0('K')) \
- self.H2O_sm.get_UoRT(T=c.T0('K'))
self.assertAlmostEqual(
self.rxn_sm.get_delta_UoRT(T=c.T0('K'), act=True), exp_sm_UoRT_TS)
self.assertAlmostEqual(
self.rxn_sm.get_delta_UoRT(T=c.T0('K'), rev=True, act=True),
exp_sm_UoRT_rev_TS)
self.assertAlmostEqual(self.rxn_sm.get_UoRT_act(T=c.T0('K'), rev=True),
exp_sm_UoRT_rev_TS)
def test_get_delta_U(self):
units = 'J/mol'
exp_sm_U = self.H2O_sm.get_U(T=c.T0('K'), units=units) \
- self.H2_sm.get_U(T=c.T0('K'), units=units) \
- self.O2_sm.get_U(T=c.T0('K'), units=units)*0.5
self.assertAlmostEqual(
self.rxn_sm.get_delta_U(T=c.T0('K'), units=units), exp_sm_U)
self.assertAlmostEqual(
self.rxn_sm.get_delta_U(T=c.T0('K'), rev=True, units=units),
-exp_sm_U)
exp_sm_U_TS = self.H2O_TS_sm.get_U(T=c.T0('K'), units=units) \
- self.H2_sm.get_U(T=c.T0('K'), units=units) \
- self.O2_sm.get_U(T=c.T0('K'), units=units)*0.5
exp_sm_U_rev_TS = self.H2O_TS_sm.get_U(T=c.T0('K'), units=units) \
- self.H2O_sm.get_U(T=c.T0('K'), units=units)
self.assertAlmostEqual(
self.rxn_sm.get_delta_U(T=c.T0('K'), act=True, units=units),
exp_sm_U_TS)
self.assertAlmostEqual(
self.rxn_sm.get_delta_U(T=c.T0('K'),
rev=True,
act=True,
units=units), exp_sm_U_rev_TS)
self.assertAlmostEqual(
self.rxn_sm.get_U_act(T=c.T0('K'), rev=True, units=units),
exp_sm_U_rev_TS)
def test_get_delta_HoRT(self):
exp_nasa_HoRT = self.H2O_nasa.get_HoRT(T=c.T0('K')) \
- self.H2_nasa.get_HoRT(T=c.T0('K')) \
- self.O2_nasa.get_HoRT(T=c.T0('K'))*0.5
exp_sm_HoRT = self.H2O_sm.get_HoRT(T=c.T0('K')) \
- self.H2_sm.get_HoRT(T=c.T0('K')) \
- self.O2_sm.get_HoRT(T=c.T0('K'))*0.5
self.assertAlmostEqual(self.rxn_nasa.get_delta_HoRT(T=c.T0('K')),
exp_nasa_HoRT)
self.assertAlmostEqual(
self.rxn_nasa.get_delta_HoRT(T=c.T0('K'), rev=True),
-exp_nasa_HoRT)
self.assertAlmostEqual(self.rxn_sm.get_delta_HoRT(T=c.T0('K')),
exp_sm_HoRT)
self.assertAlmostEqual(
self.rxn_sm.get_delta_HoRT(T=c.T0('K'), rev=True), -exp_sm_HoRT)
exp_sm_HoRT_TS = self.H2O_TS_sm.get_HoRT(T=c.T0('K')) \
- self.H2_sm.get_HoRT(T=c.T0('K')) \
- self.O2_sm.get_HoRT(T=c.T0('K'))*0.5
exp_sm_HoRT_rev_TS = self.H2O_TS_sm.get_HoRT(T=c.T0('K')) \
- self.H2O_sm.get_HoRT(T=c.T0('K'))
self.assertAlmostEqual(
self.rxn_sm.get_delta_HoRT(T=c.T0('K'), act=True), exp_sm_HoRT_TS)
self.assertAlmostEqual(
self.rxn_sm.get_delta_HoRT(T=c.T0('K'), rev=True, act=True),
exp_sm_HoRT_rev_TS)
self.assertAlmostEqual(self.rxn_sm.get_HoRT_act(T=c.T0('K'), rev=True),
exp_sm_HoRT_rev_TS)
def test_get_delta_H(self):
units = 'J/mol'
exp_nasa_H = self.H2O_nasa.get_H(T=c.T0('K'), units=units) \
- self.H2_nasa.get_H(T=c.T0('K'), units=units) \
- self.O2_nasa.get_H(T=c.T0('K'), units=units)*0.5
exp_sm_H = self.H2O_sm.get_H(T=c.T0('K'), units=units) \
- self.H2_sm.get_H(T=c.T0('K'), units=units) \
- self.O2_sm.get_H(T=c.T0('K'), units=units)*0.5
self.assertAlmostEqual(
self.rxn_nasa.get_delta_H(T=c.T0('K'), units=units), exp_nasa_H)
self.assertAlmostEqual(
self.rxn_nasa.get_delta_H(T=c.T0('K'), units=units, rev=True),
-exp_nasa_H)
self.assertAlmostEqual(
self.rxn_sm.get_delta_H(T=c.T0('K'), units=units), exp_sm_H)
self.assertAlmostEqual(
self.rxn_sm.get_delta_H(T=c.T0('K'), units=units, rev=True),
-exp_sm_H)
exp_sm_H_TS = self.H2O_TS_sm.get_H(T=c.T0('K'), units=units) \
- self.H2_sm.get_H(T=c.T0('K'), units=units) \
- self.O2_sm.get_H(T=c.T0('K'), units=units)*0.5
exp_sm_H_rev_TS = self.H2O_TS_sm.get_H(T=c.T0('K'), units=units) \
- self.H2O_sm.get_H(T=c.T0('K'), units=units)
self.assertAlmostEqual(
self.rxn_sm.get_delta_H(T=c.T0('K'), act=True, units=units),
exp_sm_H_TS)
self.assertAlmostEqual(
self.rxn_sm.get_delta_H(T=c.T0('K'),
rev=True,
act=True,
units=units), exp_sm_H_rev_TS)
self.assertAlmostEqual(
self.rxn_sm.get_H_act(T=c.T0('K'), rev=True, units=units),
exp_sm_H_rev_TS)
def test_get_delta_SoR(self):
exp_nasa_SoR = self.H2O_nasa.get_SoR(T=c.T0('K')) \
- self.H2_nasa.get_SoR(T=c.T0('K')) \
- self.O2_nasa.get_SoR(T=c.T0('K'))*0.5
exp_sm_SoR = self.H2O_sm.get_SoR(T=c.T0('K')) \
- self.H2_sm.get_SoR(T=c.T0('K')) \
- self.O2_sm.get_SoR(T=c.T0('K'))*0.5
self.assertAlmostEqual(self.rxn_nasa.get_delta_SoR(T=c.T0('K')),
exp_nasa_SoR)
self.assertAlmostEqual(
self.rxn_nasa.get_delta_SoR(T=c.T0('K'), rev=True), -exp_nasa_SoR)
self.assertAlmostEqual(self.rxn_sm.get_delta_SoR(T=c.T0('K')),
exp_sm_SoR)
self.assertAlmostEqual(
self.rxn_sm.get_delta_SoR(T=c.T0('K'), rev=True), -exp_sm_SoR)
exp_sm_SoR_TS = self.H2O_TS_sm.get_SoR(T=c.T0('K')) \
- self.H2_sm.get_SoR(T=c.T0('K')) \
- self.O2_sm.get_SoR(T=c.T0('K'))*0.5
exp_sm_SoR_rev_TS = self.H2O_TS_sm.get_SoR(T=c.T0('K')) \
- self.H2O_sm.get_SoR(T=c.T0('K'))
self.assertAlmostEqual(
self.rxn_sm.get_delta_SoR(T=c.T0('K'), act=True), exp_sm_SoR_TS)
self.assertAlmostEqual(
self.rxn_sm.get_delta_SoR(T=c.T0('K'), rev=True, act=True),
exp_sm_SoR_rev_TS)
self.assertAlmostEqual(self.rxn_sm.get_SoR_act(T=c.T0('K'), rev=True),
exp_sm_SoR_rev_TS)
def test_get_delta_S(self):
units = 'J/mol/K'
exp_nasa_S = self.H2O_nasa.get_S(T=c.T0('K'), units=units) \
- self.H2_nasa.get_S(T=c.T0('K'), units=units) \
- self.O2_nasa.get_S(T=c.T0('K'), units=units)*0.5
exp_sm_S = self.H2O_sm.get_S(T=c.T0('K'), units=units) \
- self.H2_sm.get_S(T=c.T0('K'), units=units) \
- self.O2_sm.get_S(T=c.T0('K'), units=units)*0.5
self.assertAlmostEqual(
self.rxn_nasa.get_delta_S(T=c.T0('K'), units=units), exp_nasa_S)
self.assertAlmostEqual(
self.rxn_nasa.get_delta_S(T=c.T0('K'), units=units, rev=True),
-exp_nasa_S)
self.assertAlmostEqual(
self.rxn_sm.get_delta_S(T=c.T0('K'), units=units), exp_sm_S)
self.assertAlmostEqual(
self.rxn_sm.get_delta_S(T=c.T0('K'), rev=True, units=units),
-exp_sm_S)
exp_sm_S_TS = self.H2O_TS_sm.get_S(T=c.T0('K'), units=units) \
- self.H2_sm.get_S(T=c.T0('K'), units=units) \
- self.O2_sm.get_S(T=c.T0('K'), units=units)*0.5
exp_sm_S_rev_TS = self.H2O_TS_sm.get_S(T=c.T0('K'), units=units) \
- self.H2O_sm.get_S(T=c.T0('K'), units=units)
self.assertAlmostEqual(
self.rxn_sm.get_delta_S(T=c.T0('K'), act=True, units=units),
exp_sm_S_TS)
self.assertAlmostEqual(
self.rxn_sm.get_delta_S(T=c.T0('K'),
rev=True,
act=True,
units=units), exp_sm_S_rev_TS)
self.assertAlmostEqual(
self.rxn_sm.get_S_act(T=c.T0('K'), rev=True, units=units),
exp_sm_S_rev_TS)
def test_get_delta_FoRT(self):
exp_sm_FoRT = self.H2O_sm.get_FoRT(T=c.T0('K')) \
- self.H2_sm.get_FoRT(T=c.T0('K')) \
- self.O2_sm.get_FoRT(T=c.T0('K'))*0.5
self.assertAlmostEqual(self.rxn_sm.get_delta_FoRT(T=c.T0('K')),
exp_sm_FoRT)
self.assertAlmostEqual(
self.rxn_sm.get_delta_FoRT(T=c.T0('K'), rev=True), -exp_sm_FoRT)
exp_sm_FoRT_TS = self.H2O_TS_sm.get_FoRT(T=c.T0('K')) \
- self.H2_sm.get_FoRT(T=c.T0('K')) \
- self.O2_sm.get_FoRT(T=c.T0('K'))*0.5
exp_sm_FoRT_rev_TS = self.H2O_TS_sm.get_FoRT(T=c.T0('K')) \
- self.H2O_sm.get_FoRT(T=c.T0('K'))
self.assertAlmostEqual(
self.rxn_sm.get_delta_FoRT(T=c.T0('K'), act=True), exp_sm_FoRT_TS)
self.assertAlmostEqual(
self.rxn_sm.get_delta_FoRT(T=c.T0('K'), rev=True, act=True),
exp_sm_FoRT_rev_TS)
self.assertAlmostEqual(self.rxn_sm.get_FoRT_act(T=c.T0('K'), rev=True),
exp_sm_FoRT_rev_TS)
def test_get_delta_F(self):
units = 'J/mol'
exp_sm_F = self.H2O_sm.get_F(T=c.T0('K'), units=units) \
- self.H2_sm.get_F(T=c.T0('K'), units=units) \
- self.O2_sm.get_F(T=c.T0('K'), units=units)*0.5
self.assertAlmostEqual(
self.rxn_sm.get_delta_F(T=c.T0('K'), units=units), exp_sm_F)
self.assertAlmostEqual(
self.rxn_sm.get_delta_F(T=c.T0('K'), units=units, rev=True),
-exp_sm_F)
exp_sm_F_TS = self.H2O_TS_sm.get_F(T=c.T0('K'), units=units) \
- self.H2_sm.get_F(T=c.T0('K'), units=units) \
- self.O2_sm.get_F(T=c.T0('K'), units=units)*0.5
exp_sm_F_rev_TS = self.H2O_TS_sm.get_F(T=c.T0('K'), units=units) \
- self.H2O_sm.get_F(T=c.T0('K'), units=units)
self.assertAlmostEqual(
self.rxn_sm.get_delta_F(T=c.T0('K'), act=True, units=units),
exp_sm_F_TS)
self.assertAlmostEqual(
self.rxn_sm.get_delta_F(T=c.T0('K'),
rev=True,
act=True,
units=units), exp_sm_F_rev_TS)
self.assertAlmostEqual(
self.rxn_sm.get_F_act(T=c.T0('K'), rev=True, units=units),
exp_sm_F_rev_TS)
def test_get_delta_GoRT(self):
exp_nasa_GoRT = self.H2O_nasa.get_GoRT(T=c.T0('K')) \
- self.H2_nasa.get_GoRT(T=c.T0('K')) \
- self.O2_nasa.get_GoRT(T=c.T0('K'))*0.5
exp_sm_GoRT = self.H2O_sm.get_GoRT(T=c.T0('K')) \
- self.H2_sm.get_GoRT(T=c.T0('K')) \
- self.O2_sm.get_GoRT(T=c.T0('K'))*0.5
self.assertAlmostEqual(self.rxn_nasa.get_delta_GoRT(T=c.T0('K')),
exp_nasa_GoRT)
self.assertAlmostEqual(
self.rxn_nasa.get_delta_GoRT(T=c.T0('K'), rev=True),
-exp_nasa_GoRT)
self.assertAlmostEqual(self.rxn_sm.get_delta_GoRT(T=c.T0('K')),
exp_sm_GoRT)
self.assertAlmostEqual(
self.rxn_sm.get_delta_GoRT(T=c.T0('K'), rev=True), -exp_sm_GoRT)
exp_sm_GoRT_TS = self.H2O_TS_sm.get_GoRT(T=c.T0('K')) \
- self.H2_sm.get_GoRT(T=c.T0('K')) \
- self.O2_sm.get_GoRT(T=c.T0('K'))*0.5
exp_sm_GoRT_rev_TS = self.H2O_TS_sm.get_GoRT(T=c.T0('K')) \
- self.H2O_sm.get_GoRT(T=c.T0('K'))
self.assertAlmostEqual(
self.rxn_sm.get_delta_GoRT(T=c.T0('K'), act=True), exp_sm_GoRT_TS)
self.assertAlmostEqual(
self.rxn_sm.get_delta_GoRT(T=c.T0('K'), rev=True, act=True),
exp_sm_GoRT_rev_TS)
self.assertAlmostEqual(self.rxn_sm.get_GoRT_act(T=c.T0('K'), rev=True),
exp_sm_GoRT_rev_TS)
def test_get_delta_G(self):
units = 'J/mol'
exp_nasa_G = self.H2O_nasa.get_G(T=c.T0('K'), units=units) \
- self.H2_nasa.get_G(T=c.T0('K'), units=units) \
- self.O2_nasa.get_G(T=c.T0('K'), units=units)*0.5
exp_sm_G = self.H2O_sm.get_G(T=c.T0('K'), units=units) \
- self.H2_sm.get_G(T=c.T0('K'), units=units) \
- self.O2_sm.get_G(T=c.T0('K'), units=units)*0.5
self.assertAlmostEqual(
self.rxn_nasa.get_delta_G(T=c.T0('K'), units=units), exp_nasa_G)
self.assertAlmostEqual(
self.rxn_nasa.get_delta_G(T=c.T0('K'), rev=True, units=units),
-exp_nasa_G)
self.assertAlmostEqual(
self.rxn_sm.get_delta_G(T=c.T0('K'), units=units), exp_sm_G)
self.assertAlmostEqual(
self.rxn_sm.get_delta_G(T=c.T0('K'), rev=True, units=units),
-exp_sm_G)
exp_sm_G_TS = self.H2O_TS_sm.get_G(T=c.T0('K'), units=units) \
- self.H2_sm.get_G(T=c.T0('K'), units=units) \
- self.O2_sm.get_G(T=c.T0('K'), units=units)*0.5
exp_sm_G_rev_TS = self.H2O_TS_sm.get_G(T=c.T0('K'), units=units) \
- self.H2O_sm.get_G(T=c.T0('K'), units=units)
self.assertAlmostEqual(
self.rxn_sm.get_delta_G(T=c.T0('K'), act=True, units=units),
exp_sm_G_TS)
self.assertAlmostEqual(
self.rxn_sm.get_delta_G(T=c.T0('K'),
rev=True,
act=True,
units=units), exp_sm_G_rev_TS)
self.assertAlmostEqual(
self.rxn_sm.get_G_act(T=c.T0('K'), rev=True, units=units),
exp_sm_G_rev_TS)
def test_get_EoRT_act(self):
exp_sm_EoRT = self.H2O_TS_sm.get_HoRT(T=c.T0('K')) \
- self.H2_sm.get_HoRT(T=c.T0('K')) \
- self.O2_sm.get_HoRT(T=c.T0('K'))*0.5
exp_sm_EoRT_rev = self.H2O_TS_sm.get_HoRT(T=c.T0('K')) \
- self.H2O_sm.get_HoRT(T=c.T0('K'))
self.assertAlmostEqual(self.rxn_sm.get_EoRT_act(T=c.T0('K')),
exp_sm_EoRT)
self.assertAlmostEqual(self.rxn_sm.get_EoRT_act(T=c.T0('K'), rev=True),
exp_sm_EoRT_rev)
def test_get_E_act(self):
units = 'J/mol'
exp_sm_E = self.H2O_TS_sm.get_H(T=c.T0('K'), units=units) \
- self.H2_sm.get_H(T=c.T0('K'), units=units) \
- self.O2_sm.get_H(T=c.T0('K'), units=units)*0.5
exp_sm_E_rev = self.H2O_TS_sm.get_H(T=c.T0('K'), units=units) \
- self.H2O_sm.get_H(T=c.T0('K'), units=units)
self.assertAlmostEqual(self.rxn_sm.get_E_act(T=c.T0('K'), units=units),
exp_sm_E)
self.assertAlmostEqual(
self.rxn_sm.get_E_act(T=c.T0('K'), rev=True, units=units),
exp_sm_E_rev)
def test_get_A(self):
# Testing partition function method
exp_sm_q = self.H2O_TS_sm.get_q(T=c.T0('K'), include_ZPE=False) \
/ self.H2_sm.get_q(T=c.T0('K'), include_ZPE=False) \
/ self.O2_sm.get_q(T=c.T0('K'), include_ZPE=False)**0.5
exp_sm_A = c.kb('J/K') * c.T0('K') / c.h('J s') * exp_sm_q
exp_sm_q_rev = self.H2O_TS_sm.get_q(T=c.T0('K'), include_ZPE=False) \
/ self.H2O_sm.get_q(T=c.T0('K'), include_ZPE=False)
exp_sm_A_rev = c.kb('J/K') * c.T0('K') / c.h('J s') * exp_sm_q_rev
np.testing.assert_almost_equal(self.rxn_sm.get_A(T=c.T0('K')),
exp_sm_A,
decimal=0)
np.testing.assert_almost_equal(self.rxn_sm.get_A(T=c.T0('K'),
rev=True),
exp_sm_A_rev,
decimal=0)
# Testing entropy method
exp_sm_SoR = self.H2O_TS_sm.get_SoR(T=c.T0('K')) \
- self.H2_sm.get_SoR(T=c.T0('K')) \
- self.O2_sm.get_SoR(T=c.T0('K'))*0.5
exp_sm_A = c.kb('J/K') * c.T0('K') / c.h('J s') * np.exp(exp_sm_SoR)
exp_sm_SoR_rev = self.H2O_TS_sm.get_SoR(T=c.T0('K')) \
- self.H2O_sm.get_SoR(T=c.T0('K'))
exp_sm_A_rev = c.kb('J/K')*c.T0('K')/c.h('J s') \
* np.exp(exp_sm_SoR_rev)
np.testing.assert_almost_equal(self.rxn_sm.get_A(T=c.T0('K'),
use_q=False),
exp_sm_A,
decimal=0)
np.testing.assert_almost_equal(self.rxn_sm.get_A(T=c.T0('K'),
rev=True,
use_q=False),
exp_sm_A_rev,
decimal=0)
def test_from_string(self):
reaction_str = 'H2+0.5O2=H2O_TS=H2O'
self.assertEqual(
rxn.Reaction.from_string(reaction_str=reaction_str,
species=self.species_dict), self.rxn_sm)
def test_to_dict(self):
self.assertEqual(self.rxn_nasa.to_dict(), self.rxn_nasa_dict)
def test_from_dict(self):
self.assertEqual(rxn.Reaction.from_dict(self.rxn_nasa_dict),
self.rxn_nasa)
class TestHelperReaction(unittest.TestCase):
def test__parse_reaction(self):
reaction_str = 'H2+0.5O2=H2O'
expected_output = (['H2', 'O2'], [1., 0.5], ['H2O'], [1.], None, None)
self.assertTupleEqual(rxn._parse_reaction(reaction_str=reaction_str),
expected_output)
reaction_str = ' H2 + 0.5 O2 = H2O '
expected_output = (['H2', 'O2'], [1., 0.5], ['H2O'], [1.], None, None)
self.assertTupleEqual(rxn._parse_reaction(reaction_str=reaction_str),
expected_output)
reaction_str = ' H2 + 0.5 O2 = H2O_TS = H2O '
expected_output = (['H2',
'O2'], [1., 0.5], ['H2O'], [1.], ['H2O_TS'], [1.])
self.assertTupleEqual(rxn._parse_reaction(reaction_str=reaction_str),
expected_output)
def test__parse_reaction_state(self):
reaction_str = 'H2+0.5O2'
expected_output = (['H2', 'O2'], [1., 0.5])
self.assertTupleEqual(
rxn._parse_reaction_state(reaction_str=reaction_str),
expected_output)
if __name__ == '__main__':
unittest.main()
| 2.3125 | 2 |
src/pybel_tools/summary/visualization.py | cthoyt/pybel-tools | 6 | 12789497 | # -*- coding: utf-8 -*-
"""Functions for summarizing graphs.
This module contains functions that provide aggregate summaries of graphs including visualization with matplotlib,
printing summary information, and exporting summarized graphs
"""
import logging
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from pybel import BELGraph
__all__ = [
'plot_summary_axes',
'plot_summary',
]
logger = logging.getLogger(__name__)
def plot_summary_axes(graph: BELGraph, lax, rax, logx: bool = True):
"""Plot the graph summary statistics on the given axes.
After, you should run :func:`plt.tight_layout` and you must run :func:`plt.show` to view.
Shows:
1. Count of nodes, grouped by function type
2. Count of edges, grouped by relation type
:param pybel.BELGraph graph: A BEL graph
:param lax: An axis object from matplotlib
:param rax: An axis object from matplotlib
Example usage:
>>> import matplotlib.pyplot as plt
>>> from pybel import from_pickle
>>> from pybel_tools.summary import plot_summary_axes
>>> graph = from_pickle('~/dev/bms/aetionomy/parkinsons.gpickle')
>>> fig, axes = plt.subplots(1, 2, figsize=(10, 4))
>>> plot_summary_axes(graph, axes[0], axes[1])
>>> plt.tight_layout()
>>> plt.show()
"""
function_counter = graph.count.functions()
relation_counter = graph.count.relations()
function_df = pd.DataFrame.from_dict(dict(function_counter), orient='index').reset_index()
function_df.columns = ['Function', 'Count']
function_df.sort_values('Count', ascending=False, inplace=True)
relation_df = pd.DataFrame.from_dict(dict(relation_counter), orient='index').reset_index()
relation_df.columns = ['Relation', 'Count']
relation_df.sort_values('Count', ascending=False, inplace=True)
sns.barplot(x='Count', y='Function', data=function_df, ax=lax, orient='h')
lax.set_title('Number of nodes: {}'.format(graph.number_of_nodes()))
sns.barplot(x='Count', y='Relation', data=relation_df, ax=rax, orient='h')
rax.set_title('Number of edges: {}'.format(graph.number_of_edges()))
if logx:
lax.set_xscale('log')
rax.set_xscale('log')
def plot_summary(graph: BELGraph, logx: bool = True, **kwargs):
"""Plot your graph summary statistics.
    This function is a thin wrapper around :func:`plot_summary_axes`. It
    automatically takes care of building the figure and axes; afterwards you need
    to run :func:`plt.show`.
Shows:
1. Count of nodes, grouped by function type
2. Count of edges, grouped by relation type
:param kwargs: keyword arguments to give to :func:`plt.subplots`
Example usage:
>>> import matplotlib.pyplot as plt
>>> from pybel import from_pickle
>>> from pybel_tools.summary import plot_summary
>>> graph = from_pickle('~/dev/bms/aetionomy/parkinsons.gpickle')
>>> plot_summary(graph, figsize=(10, 4))
>>> plt.show()
"""
fig, (lax, rax) = plt.subplots(1, 2, **kwargs)
plot_summary_axes(graph, lax, rax, logx=logx)
plt.tight_layout()
return fig, (lax, rax)
| 3.640625 | 4 |
setup.py | noisyboiler/flask-wamp | 4 | 12789498 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst')) as f:
long_description = f.read()
setup(
name='Flask-WAMP',
version='0.1.0',
description='WAMP RPC and Pub/Sub for Flask',
long_description=long_description,
long_description_content_type='text/x-rst',
url='https://github.com/noisyboiler/flask-wamp',
author='<NAME>',
author_email='<EMAIL>',
license='BSD',
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='WAMP RPC Flask',
packages=find_packages(),
install_requires=[
"Flask==1.0.2",
"wampy==0.9.20",
],
extras_require={
'dev': [
"crossbar==0.15.0",
"autobahn==0.17.2",
"Twisted==17.9.0",
"pytest==4.0.2",
"mock==1.3.0",
"pytest-capturelog==0.7",
"colorlog",
"flake8==3.5.0",
"gevent-websocket==0.10.1",
"coverage>=3.7.1",
"Twisted==17.9.0",
],
},
)
| 1.664063 | 2 |
loadCal.py | weizy0219/loadBank | 1 | 12789499 | <reponame>weizy0219/loadBank<filename>loadCal.py
from openpyxl import Workbook,load_workbook
from itertools import product,chain
'''
Load the impedance list from an Excel file, then process and format it.
'''
def loadfromexcel ( excelfilename ):
"""
    Read all populated rows from the specified Excel file and return them as a tuple.
:param excelfilename
:return:allrows
"""
wb=load_workbook( filename= excelfilename )
ws=wb.active
    allrows = tuple(ws.rows)  # read all populated cells, row by row
return allrows
def writetoexcel(excelfilename, rows=None):
    """
    Write rows of data into an Excel workbook and save it.
    Minimal sketch completing the original stub; ``rows`` is an assumed optional parameter.
    """
    wb = Workbook()
    ws = wb.active
    for row in rows or ():
        ws.append(list(row))
    wb.save(excelfilename)
    return wb
def decoderows(allrows):
"""
    Parse the impedance values out of the 2D row list read from Excel.
:param allrows:
:return rowhead,rowcontent:
"""
    colname = [cell.value for cell in allrows[0][1:]]  # header labels (currently unused/not returned)
    rowhead = []
    rowcontent = []
    for onerow in allrows[1:]:
        rowhead.append(onerow[0])       # first cell of each row (cell object; use .value for the label)
        rowcontent.append(onerow[1:])   # remaining cells of the row
    return rowhead, rowcontent
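# Example usage sketch chaining the two helpers; the workbook name below is an
# assumption, substitute the real impedance spreadsheet.
if __name__ == '__main__':
    rows = loadfromexcel('impedance.xlsx')
    head, content = decoderows(rows)
    print(head)
    print(content)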
| 2.75 | 3 |
tests/test_categories_api.py | lv10/bestbuyapi | 10 | 12789500 | <gh_stars>1-10
import json
from bestbuyapi import BASE_URL, BestBuyAPI
api_name = "categories"
def test_build_url(bbapi):
sample_url = f"{BASE_URL}{api_name}(sku=43900)"
payload = {"query": "sku=43900", "params": {"format": "json"}}
url, thePayload = bbapi.category._build_url(payload)
assert sample_url == url, "Sample url is different built url"
assert thePayload["format"] == "json", "Response format isn't JSON"
assert thePayload.get("apiKey") is not None, "Response doesn't have API Key"
def test_search_category_by_id(bbapi):
cat_id = "cat00000"
query = f"id={cat_id}"
resp = bbapi.category.search(query=query, show="id", format="json")
assert resp["categories"][0]["id"] == cat_id, "Returned category id is different"
def test_search_category_by_name(bbapi):
cat_name = "Sony"
query = f"name={cat_name}"
resp = bbapi.category.search(query=query, format="json")
assert (
resp["categories"][0]["name"] == cat_name
), "Response category name is different"
| 2.765625 | 3 |
wildlifecompliance/migrations/0255_inspection_inspection_type.py | preranaandure/wildlifecompliance | 1 | 12789501 | <reponame>preranaandure/wildlifecompliance<filename>wildlifecompliance/migrations/0255_inspection_inspection_type.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-07-17 02:20
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wildlifecompliance', '0254_auto_20190716_1506'),
]
operations = [
migrations.AddField(
model_name='inspection',
name='inspection_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='inspection_type', to='wildlifecompliance.InspectionType'),
),
]
| 1.203125 | 1 |
indico_chat_bot/bot.py | NRodriguezcuellar/indico-chat-bot | 2 | 12789502 | import atexit
import hashlib
import hmac
import re
import os
import sys
import time
from datetime import datetime, timedelta
import click
import requests
from pytz import timezone, utc
from urllib.parse import urlencode, urljoin
from . import notifiers
from .util import read_config
from .storage import Storage
from .exceptions import InvalidTimeDeltaFormat, InvalidTime, UnknownNotifier
def _info(message):
print(message)
sys.stdout.flush()
def _parse_time_delta(time_delta):
"""
Parse string and return a timedelta.
Accepted formats:
* days in the future/past: '[+/-]DdHHhMMm'
"""
m = re.match(r'^([+-])?(?:(\d{1,3})d)?(?:(\d{1,2})h)?(?:(\d{1,2})m)?$', time_delta)
if m:
mod = -1 if m.group(1) == '-' else 1
atoms = list(0 if a is None else int(a) * mod for a in m.groups()[1:])
if atoms[1] > 23 or atoms[2] > 59:
raise InvalidTime()
return timedelta(days=atoms[0], hours=atoms[1], minutes=atoms[2])
else:
raise InvalidTimeDeltaFormat(time_delta)
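# Illustrative inputs (not from the original source):
#   _parse_time_delta('2d')     -> timedelta(days=2)
#   _parse_time_delta('45m')    -> timedelta(minutes=45)
#   _parse_time_delta('-1d12h') -> timedelta(days=-1, hours=-12)
#   _parse_time_delta('25h') raises InvalidTime; 'abc' raises InvalidTimeDeltaFormat.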
def _dt(dt_dict):
dt = datetime.combine(datetime.strptime(dt_dict['date'], '%Y-%m-%d'),
datetime.strptime(dt_dict['time'], '%H:%M:%S').time())
return timezone(dt_dict['tz']).localize(dt)
def _is_fetching_past_events(bot):
return bot['timedelta'].startswith('-')
def notify(event, bot, channels):
for channel_id in bot['channels']:
channel = channels[channel_id]
data = {
'title': event['title'],
'url': event['url'],
'start_time': event['startDate']['time'][:5],
'start_date': event['startDate']['date'],
'start_tz': event['startDate']['tz'],
'room': event['room'] if event['room'] else 'no room'
}
text = channel['text'].format(**data)
channel_type = channel.get('type')
if channel_type not in notifiers.ALL_NOTIFIERS:
raise UnknownNotifier(channel_type)
getattr(notifiers, channel_type).notify(bot, channel, text)
def check_upcoming(config, storage, verbose, debug):
now = datetime.now(utc)
bots, channels = config['bots'], config['channels']
for bot_id, bot in bots.items():
url_path = 'export/categ/{}.json'.format('-'.join(bot['categories']))
time_delta = _parse_time_delta(bot['timedelta'])
params = {
'from': 'now',
'to': bot['timedelta'],
'limit': '100'
}
if debug:
verbose = True
params['nc'] = 'yes'
if _is_fetching_past_events(bot):
from_date = now + time_delta
params['from'] = (from_date - timedelta(hours=1)).strftime("%Y-%m-%dT%H:%M")
params['to'] = from_date.strftime("%Y-%m-%dT%H:%M")
params['tz'] = 'UTC'
if config['api_key']:
params['apikey'] = config['api_key']
if config['secret']:
params['timestamp'] = str(int(time.time()))
items = sorted(params.items(), key=lambda x: x[0].lower())
param_url = '/{}?{}'.format(url_path, urlencode(items)).encode('utf-8')
params['signature'] = hmac.new(config['secret'].encode('utf-8'), param_url, hashlib.sha1).hexdigest()
qstring = urlencode(params)
url = '{}?{}'.format(urljoin(config['server_url'], url_path), qstring)
if verbose:
_info('[d] URL: {}'.format(url))
req = requests.get(url, verify=(not debug))
results = req.json()['results']
if verbose:
_info('[i] {} events found'.format(len(results)))
for event in results:
evt_id = event['id']
start_dt = _dt(event['startDate'])
event_time_delta_minutes = (start_dt - now).total_seconds() / 60
bot_time_delta_minutes = time_delta.total_seconds() / 60
time_delta_satisfied = 0 < event_time_delta_minutes <= bot_time_delta_minutes
if (_is_fetching_past_events(bot) or time_delta_satisfied) and not storage.has(evt_id, bot_id):
notify(event, bot, channels)
if verbose:
_info('[>] Notified {} about {}'.format(bot['channels'], event['id']))
storage.add(evt_id, bot_id)
@click.group()
def cli():
pass
def _save_storage(storage):
print(f"Saving storage... ")
storage.save()
print("Done!")
@cli.command()
@click.argument('config_file', type=click.Path(exists=True))
@click.option('--verbose', default=False, is_flag=True)
@click.option('--debug', default=False, is_flag=True)
def run(config_file, verbose, debug):
config = read_config(config_file)
storage = Storage.get_instance(config['storage_path'])
atexit.register(lambda: _save_storage(storage))
env_debug = os.environ.get('DEBUG')
if env_debug:
debug = env_debug == '1'
while True:
if verbose:
_info('[i] Checking upcoming events')
check_upcoming(config, storage, verbose, debug)
time.sleep(config['polling_time'])
if __name__ == '__main__':
cli()
| 2.421875 | 2 |
identity/ecrfs/setup/projects/dream.py | LCBRU/identity | 0 | 12789503 | from identity.ecrfs.setup.standard import SEX_MAP_1M2F3T_SEX
from identity.setup.participant_identifier_types import ParticipantIdentifierTypeName
from identity.setup.redcap_instances import REDCapInstanceDetail
from identity.setup.studies import StudyName
from identity.ecrfs.setup import crfs, RedCapEcrfDefinition
crfs.extend([
RedCapEcrfDefinition({
'crfs': [
{
'instance': REDCapInstanceDetail.UHL_LIVE,
'study': StudyName.DREAM,
'projects': [8, 22],
},
{
'instance': REDCapInstanceDetail.UHL_HSCN,
'study': StudyName.DREAM,
'projects': [20, 21, 24],
},
],
'recruitment_date_column_name': 'date_enrolled',
**SEX_MAP_1M2F3T_SEX,
'withdrawn_from_study_column_name': 'reason_for_participant_rem',
'withdrawn_from_study_values': ['6'],
'excluded_from_analysis_column_name': 'inc_in_eos_analysis',
'excluded_from_analysis_values': ['<isnull>', '0'],
'identity_map': {
ParticipantIdentifierTypeName.DREAM_ID: 'record',
}
})])
| 2.015625 | 2 |
CarRegistration/__init__.py | infiniteloopltd/PyCarRegistrationAPI | 0 | 12789504 | #!/usr/bin/env python
# Copyright 2012 Locu <<EMAIL>> <<EMAIL>>
from api import *
| 1.015625 | 1 |
src/product/urls.py | aqifcse/django-coding-task | 0 | 12789505 | from django.urls import path
from django.views.generic import TemplateView
from product.views.product import CreateProductView
from product.views.variant import VariantView, VariantCreateView, VariantEditView
app_name = "product"
urlpatterns = [
# Variants URLs
path('variants/', VariantView.as_view(), name='variants'),
path('variant/create', VariantCreateView.as_view(), name='create.variant'),
path('variant/<int:id>/edit', VariantEditView.as_view(), name='update.variant'),
# Products URLs
path('create/', CreateProductView.as_view(), name='create.product'),
path('list/', TemplateView.as_view(template_name='products/list.html', extra_context={
'product': True
}), name='list.product'),
]
| 1.828125 | 2 |
src/graph_gui/animations/bfs_animation.py | 3ddelano/graph-visualizer-python | 0 | 12789506 | """
File: bfs_animation.py
Author: <NAME>
Repo: https://github.com/3ddelano/graph-visualizer-python
License: MIT
"""
from ..constants import (
CURRENT_EDGE_COLOR,
CURRENT_NODE_COLOR,
SEEN_EDGE_COLOR,
SEEN_NODE_COLOR,
START_NODE_COLOR,
)
from ..interfaces.animation_interface import AnimationInterface
class BFSAnimation(AnimationInterface):
def __init__(self, graph):
self.graph = graph
self.start_node = None
self.queue = []
self.visited = [] # used in the algorithm and for drawing the visited nodes
self.edges = []
self.prev_nodes = []
def set_start_node(self, node):
self.start_node = node
self.queue = [node]
def is_ended(self):
return len(self.queue) == 0
def one_step(self):
# One step in the BFS algorithm
if self.start_node is None:
return
if len(self.queue) > 0:
node = self.queue.pop(0)
self.visited.append(node)
# Get the path from startnode to node
for visited_node in self.visited:
edge = self.graph.get_edge_between_nodes(visited_node, node)
if edge:
self.edges.append(edge)
self.prev_nodes.append(node)
break
for adj_node in self.graph.get_adjacent_nodes(node):
if not adj_node in self.visited:
self.queue.append(adj_node)
self.visited.append(adj_node)
else:
# BFS Ended
self.start_node = None
def get_drawn_nodes(self):
ret = []
if self.start_node:
ret.append({"node": self.start_node, "color": START_NODE_COLOR})
length = len(self.prev_nodes)
if length > 1:
for i in range(length - 1):
ret.append({"node": self.prev_nodes[i], "color": SEEN_NODE_COLOR})
if length > 0:
ret.append({"node": self.prev_nodes[-1], "color": CURRENT_NODE_COLOR})
return ret
def get_drawn_edges(self):
ret = []
length = len(self.edges)
if length > 1:
for i in range(length - 1):
ret.append({"edge": self.edges[i], "color": SEEN_EDGE_COLOR})
if length > 0:
ret.append({"edge": self.edges[-1], "color": CURRENT_EDGE_COLOR})
return ret
def get_result_string(self):
ret = []
queue = [self.start_node]
visited = []
while len(queue) > 0:
node = queue.pop(0)
visited.append(node.id)
ret.append(node.id)
for adj_node in self.graph.get_adjacent_nodes(node):
if not adj_node.id in visited:
queue.append(adj_node)
visited.append(adj_node.id)
result_str = " -> ".join([str(i) for i in ret])
print("BFS result:" + result_str)
return result_str
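# Usage sketch (assumes `graph` provides the adjacency/edge lookups used above and
# `start` is one of its nodes); typically driven one step per animation frame:
#   anim = BFSAnimation(graph)
#   anim.set_start_node(start)
#   while not anim.is_ended():
#       anim.one_step()
#       nodes, edges = anim.get_drawn_nodes(), anim.get_drawn_edges()
#   print(anim.get_result_string())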
| 2.984375 | 3 |
pyxtal/miscellaneous/bugs/bug.py | ubikpt/PyXtal | 127 | 12789507 | from pyxtal import pyxtal
from ase.io import read
from ase.spacegroup.symmetrize import prep_symmetry
from spglib import get_symmetry_dataset
#ans1 = get_symmetry_dataset(s, symprec=1e-2)
#print(ans1)
s = pyxtal()
s.from_seed('bug.vasp', tol=1e-2)
print(s)
#s1=s.subgroup(eps=0.1, group_type='t+k', max_cell=4)
#for a in s1:
# print(a)
#s1=s.subgroup(eps=0.1, group_type='k', max_cell=4)
#for a in s1:
# print(a)
#permutation = {"C":"Si", "Si":"C"}
#for i in range(100):
# struc = s.subgroup_once(0.01, None, permutation, max_cell=1)
# print(struc.group.number, struc.formula)
for i in range(100):
struc = s.subgroup_once(0.2, None, None, 't+k', max_cell=2)
print(struc.group.number, struc.formula)
#for i in range(1000):
# struc = s.subgroup_with_substitution(permutation, once=True, max_cell=4)
# print(struc)
| 2.328125 | 2 |
app/shared_code/queries.py | KubaTaba1uga/spam_recycler | 2 | 12789508 | import logging
from mailboxes.models import MailboxModel, MailboxGuestModel
from reports.models import ReportModel, MessageModel, MessageEvaluationModel
def get_user_guest_mailboxes(user):
"""
Returns guest mailboxes of a user.
"""
return (guest_mailbox.mailbox for guest_mailbox in MailboxGuestModel.objects.filter(guest=user).all())
def get_user_owner_mailboxes_query(user):
return MailboxModel.objects.filter(owner=user).all()
def get_user_owner_mailboxes(user):
"""
Return owned mailboxes of a user.
"""
return (owned_mailbox for owned_mailbox in get_user_owner_mailboxes_query(user))
def get_user_owner_mailboxes_tuples(user):
"""
Return owned mailboxes of a user as tuple
"""
return ((owned_mailbox.id, owned_mailbox.email_address) for owned_mailbox in get_user_owner_mailboxes_query(user))
def get_mailbox_query(mailbox_id):
return MailboxModel.objects.filter(pk=mailbox_id).first()
def get_mailbox_owner(mailbox_id):
"""
Return owner of a mailbox or None if mailbox does not exist
"""
mailbox = get_mailbox_query(mailbox_id)
if mailbox:
return mailbox.owner
def get_mailbox_guests_query(mailbox_id):
return MailboxGuestModel.objects.filter(mailbox_id=mailbox_id).all()
def get_mailbox_guests(mailbox_id):
""" Return all guests of mailbox with provided id
"""
return (guest_mailbox.guest for guest_mailbox in get_mailbox_guests_query(mailbox_id))
def get_guest_mailbox(mailbox_id):
"""
Return all guest mailboxes of mailbox with provided id
"""
return (guest_mailbox for guest_mailbox in get_mailbox_guests_query(mailbox_id))
def get_guest(guest_id):
"""
Return all guest mailboxes of mailbox with provided id
"""
return MailboxGuestModel.objects.filter(pk=guest_id).first()
def get_user_owner_reports(user):
"""
Return all reports of user owned mailboxes
"""
for mailbox in get_user_owner_mailboxes(user):
for report in mailbox.report.all():
yield report
def get_user_guest_reports(user):
"""
Return all reports of user guest mailboxes
"""
for mailbox in get_user_guest_mailboxes(user):
for report in mailbox.report.all():
yield report
def get_mailbox_by_owner(email_address, user):
return MailboxModel.objects.filter(email_address=email_address, owner=user).first()
def create_report(name, mailbox_id, start_at, end_at):
return ReportModel.objects.create(
name=name,
mailbox_id=mailbox_id,
start_at=start_at,
end_at=end_at,
messages_counter=0)
def create_message(subject, sender, to_recipients,
received_at, body, orginal_message, folder, report_id):
return MessageModel.objects.create(
subject=subject,
sender=sender,
to_recipients=to_recipients,
received_at=received_at,
body=body,
folder=folder,
report_id=report_id,
orginal_message=orginal_message)
def get_report_by_mailbox_and_name(name, mailbox):
return ReportModel.objects.filter(name=name, mailbox=mailbox).first()
def get_report_messages_by_id(report_id):
return MessageModel.objects.filter(report_id=report_id).all()
def get_report_messages_evaluations_by_id_query(report_id):
return MessageEvaluationModel.objects.filter(message__report_id=report_id)
def get_report_details_template_data(report_id):
""" Select required fields to to avoid long
template rendering time
"""
return get_report_messages_evaluations_by_id_query(
report_id).values('spam_score', 'pk', 'message__pk', 'message__sender', 'message__subject', 'message__received_at', 'message__folder').all()
def count_messages_in_report(report):
return MessageModel.objects.filter(
report=report).count()
def count_messages_evaluations_in_report(report):
return MessageEvaluationModel.objects.filter(
message__report=report).count()
def get_report_by_id_and_owner(report_id, user_id):
return ReportModel.objects.filter(pk=report_id, mailbox__owner_id=user_id).first()
def get_report_by_id(report_id):
return ReportModel.objects.filter(pk=report_id).first()
def get_message_by_id(message_id):
return MessageModel.objects.filter(pk=message_id).first()
def get_message_evaluation_by_id(message_evaluation_id):
return MessageEvaluationModel.objects.filter(pk=message_evaluation_id).first()
def validate_report_owner(report_id, user_id):
return get_report_by_id_and_owner(report_id, user_id)
def validate_report_guest_or_owner(report_id, user_id):
return get_report_by_id_and_owner(report_id, user_id)
def create_message_evaluation(spam_score, spam_description, message_id):
return MessageEvaluationModel.objects.create(
spam_score=spam_score,
spam_description=spam_description,
message_id=message_id)
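# Typical flow (sketch; the literal values are illustrative, not from the original source):
#   report = create_report('Weekly scan', mailbox_id=1, start_at=start, end_at=end)
#   message = create_message(subject, sender, to_recipients, received_at, body,
#                            orginal_message, folder='INBOX', report_id=report.pk)
#   create_message_evaluation(spam_score=7.5, spam_description='SPF fail', message_id=message.pk)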
| 2.421875 | 2 |
Code(Temp Identity)/Extra Code for testing cases/Barcode.py | ayushcv/TemperatureIdentity | 2 | 12789509 | import cv2
import numpy as np
from pyzbar.pyzbar import decode
#This is the barcode testing phase. This allowed me to create the validation of the accounts.
img = cv2.imread('/home/pi/Resources12/frame (1).png')
cap = cv2.VideoCapture(0)
cap.set(3, 640)
cap.set(4, 480)
with open('/home/pi/Resources12/myDataFile.text') as f:
myDataList = f.read().splitlines()
while True:
success, img = cap.read()
for barcode in decode(img):
myData = barcode.data.decode('utf-8')
print(myData)
if myData in myDataList:
myOutput = 'Authorized'
myColor = (0, 255, 0)
else:
myOutput = 'Un-Authorized'
myColor = (0, 0, 255)
pts = np.array([barcode.polygon], np.int32)
pts = pts.reshape((-1, 1, 2))
cv2.polylines(img, [pts], True, myColor, 5)
pts2 = barcode.rect
cv2.putText(img, myOutput, (pts2[0], pts2[1]), cv2.FONT_HERSHEY_SIMPLEX,
0.9, myColor, 2)
cv2.imshow('Result', img)
cv2.waitKey(1) | 2.921875 | 3 |
config.py | tomoyan/streamlit-main | 0 | 12789510 | <gh_stars>0
import streamlit as st
import random
from beem import Steem
from beem.nodelist import NodeList
from beem.instance import set_shared_blockchain_instance
# Streamlit app settings
def app_config():
st.set_page_config(
page_title='Steemit Club',
page_icon="🐟",
initial_sidebar_state="auto",
layout="centered",
menu_items={
'Report a bug': "https://tinyurl.com/steemit-tomoyan",
'About': 'Steemit club tag power up check by @tomoyan.'
}
)
# Set up steem node
def setup_steem():
# Setup Steem nodes
nodelist = NodeList()
nodelist.update_nodes()
# nodes = nodelist.get_steem_nodes()
nodes = [
'https://steemd.steemworld.org',
'https://api.steemzzang.com',
'https://api.justyy.com',
'https://api.steemitdev.com',
'https://steem.justyy.workers.dev',
'https://api.steem.fans',
'https://api.steemit.com',
'https://api.steem.buzz',
'https://steem.61bts.com',
# 'https://cn.steems.top',
]
random.shuffle(nodes)
STEEM = Steem(node=nodes)
set_shared_blockchain_instance(STEEM)
return STEEM
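# Usage sketch: because the instance is registered as the shared blockchain instance,
# later beem objects pick it up automatically (account name below is illustrative):
#   setup_steem()
#   from beem.account import Account
#   account = Account('tomoyan')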
| 2.1875 | 2 |
tests/test_reanimation.py | aybugealtay/expert-meme | 0 | 12789511 | import unittest
from expert_meme import reanimate
class TestReanimation(unittest.TestCase):
def test_reanimate(self):
"""Tests reanimation works as planned, with no unintended consequences, such as
evil monster barnacles"""
x = reanimate.Reanimator('Chet')
self.assertFalse(x.alive)
x.reanimate()
self.assertTrue(x.alive)
def test_rereanimate_failure(self):
"""Tests that reanimated objects can't be reanimated more"""
x = reanimate.Reanimator('Chet')
x.reanimate()
with self.assertRaises(ValueError):
x.reanimate() | 3.203125 | 3 |
text-to-speech.py | mshaheerz/text-to-speech | 1 | 12789512 | import subprocess
def execute_unix(inputcommand):
p = subprocess.Popen(inputcommand, stdout=subprocess.PIPE, shell=True)
(output, err) = p.communicate()
return output
a = input("enter the text :")
# create wav file
# w = 'espeak -w temp.wav "%s" 2>>/dev/null' % a
# execute_unix(w)
# tts using espeak
c = 'espeak -ven+f3 -k6 -s150 --punct="<characters>" "%s" 3>>/dev/null' % a
execute_unix(c)
#trying to google tts using termux........
#just try
#by shaheerez
| 2.9375 | 3 |
figures/pipeline/site_monthly_metrics.py | Groove-eLearning/figures | 43 | 12789513 | """Populate Figures site monthly metrics data
"""
from __future__ import absolute_import
from datetime import datetime
from django.db import connection
from django.db.models import IntegerField
from django.utils.timezone import utc
from dateutil.relativedelta import relativedelta
from figures.compat import RELEASE_LINE
from figures.models import SiteMonthlyMetrics
from figures.sites import get_student_modules_for_site
def _get_fill_month_raw_sql_for_month(site_ids, month_for):
"""Return a string for the raw SQL statement to get distinct student_id counts.
"""
# this is just a separate function so it can be patched in test to acccommodate sqlite
return """\
SELECT COUNT(DISTINCT student_id) from courseware_studentmodule
where id in {}
and MONTH(modified) = {}
and YEAR(modified) = {}
""".format(site_ids, month_for.month, month_for.year)
def fill_month(site, month_for, student_modules=None, overwrite=False, use_raw=False):
"""Fill a month's site monthly metrics for the specified site
"""
if not student_modules:
student_modules = get_student_modules_for_site(site)
if student_modules:
if not use_raw:
month_sm = student_modules.filter(modified__year=month_for.year,
modified__month=month_for.month)
mau_count = month_sm.values_list('student_id',
flat=True).distinct().count()
else:
if RELEASE_LINE == 'ginkgo':
site_ids = tuple(
[int(sid) for sid in student_modules.values_list('id', flat=True).distinct()]
)
else:
# make sure we get integers and not longints from db
from django.db.models.functions import Cast
site_ids = tuple(
student_modules.annotate(
id_as_int=Cast('id', IntegerField())
).values_list('id_as_int', flat=True).distinct()
)
statement = _get_fill_month_raw_sql_for_month(site_ids, month_for)
with connection.cursor() as cursor:
cursor.execute(statement)
row = cursor.fetchone()
mau_count = row[0]
else:
mau_count = 0
obj, created = SiteMonthlyMetrics.add_month(site=site,
year=month_for.year,
month=month_for.month,
active_user_count=mau_count,
overwrite=overwrite)
return obj, created
def fill_last_month(site, overwrite=False):
"""Convenience function to fill previous month's site monthly metrics
"""
# Maybe we want to make 'last_month' a 'figures.helpers' method
last_month = datetime.utcnow().replace(tzinfo=utc) - relativedelta(months=1)
return fill_month(site=site, month_for=last_month, overwrite=overwrite)
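# Backfill sketch (assumes the standard Django Site model supplies the sites):
#   from django.contrib.sites.models import Site
#   for site in Site.objects.all():
#       fill_last_month(site, overwrite=False)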
| 2.421875 | 2 |
RPGBattle/battle.py | wdmwilcox/RPGBattle | 0 | 12789514 | <reponame>wdmwilcox/RPGBattle
from character import Gnome
from time import sleep
class Battle(object):
def __init__(self, player_level):
self.player_level = player_level
self.battle_over = False
self.enemies = [Gnome(1)]
self.loot = {}
def generate_enemies(self):
pass
def display_information(self):
pass
def display_enemy_dialogue(self):
pass
def add_loot(self,loot):
pass
def calculate_outcome(self):
return "this is the outcome"
def get_enemy_turn(self, player):
for enemy in self.enemies:
enemy.play_turn(self, player)
sleep(1)
def check_battle_over(self):
for enemy in self.enemies:
if enemy.stats['health'] > 0:
return False
return True
| 2.515625 | 3 |
web-app/app.py | radekv23/data-representation-project | 0 | 12789515 | from flask import Flask, request, render_template, redirect, jsonify, flash, session
from flask.config import ConfigAttribute
from flask.helpers import url_for
import requests
from functools import wraps
import json
import base64
from io import BytesIO
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
app = Flask(__name__)
# Check if user logged in
def is_logged_in(f):
@wraps(f)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash('Unauthorized, Please login first!', 'danger')
return redirect(url_for('sign_in'))
return wrap
@app.route('/sign-in')
@app.route('/', methods=['GET', 'POST'])
def sign_in():
if request.method == "POST":
user_data = {
"email": request.form['email'],
"password": request.form['password']
}
response = requests.get("http://127.0.0.1:5000/authenticate", json=user_data)
# Status code 201 means successfully created
if response.status_code == 200:
session['logged_in'] = True
session['user_id'] = response.json()['id']
session['username'] = response.json()['username']
return jsonify({'success': 'Login successful! Redirecting...'})
elif response.status_code == 401: # 401 means unauthorized
return jsonify({'error': response.json()['error_message']})
return render_template('sign-in.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
if request.method == "POST":
user_data = {
"username": request.form['username'],
"email": request.form['email'],
"password": request.form['password']
}
response = requests.post("http://127.0.0.1:5000/authenticate", json=user_data)
# Status code 201 means successfully created
if response.status_code == 201:
return jsonify({'success': 'Registration Successful. You can now Log in'})
elif response.status_code == 403: # 403 means forbidden
return jsonify({'error': response.json()['error_message']})
return render_template('register.html')
@app.route('/sign-out', methods=['POST'])
@is_logged_in
def sign_out():
if request.method == "POST":
# clear all the session variables
session.clear()
return redirect(url_for('sign_in'))
@app.route('/dashboard')
@is_logged_in
def index():
# Send a get request to retrieve categories
categories_response = requests.get("http://127.0.0.1:5000/categories")
# Get the JSON data
categories = categories_response.json()
user_id = session['user_id']
# Send a get request to retrieve all expenses for a user
expenses_response = requests.get(f"http://127.0.0.1:5000/expenses/{user_id}")
# Get the JSON data
expenses = expenses_response.json()
plot = create_pie_plot(categories, expenses)
return render_template('index.html', categories=categories, expenses=expenses, plot=plot)
@app.route('/create', methods=['POST'])
@is_logged_in
def create():
    user_id = session['user_id']
expense_data = {
'expense_name': request.form['expense_name'],
'amount': request.form['amount'],
'expense_date': request.form['expense_date'],
'note': request.form['note'],
'category_id': request.form['category'],
'user_id': session['user_id']
}
# POST the data to api for storing in the database
response = requests.post(f"http://127.0.0.1:5000/expenses/{user_id}", json=expense_data)
if response.status_code == 201:
return jsonify({'success': 'New expense added successfully!'})
else:
return jsonify({'error': 'There was an error creating new expense.'})
@app.route('/delete', methods=["POST"])
@is_logged_in
def delete():
if request.method == 'POST':
delete_response = requests.delete(f"http://127.0.0.1:5000/expense/{request.form['expense_id']}")
if delete_response.status_code == 204:
flash('Expense deleted successfully', 'success')
else:
flash('There was an error performing the delete', 'danger')
return redirect(url_for('index'))
@app.route('/update/<expense_id>', methods=['GET', 'POST'])
@is_logged_in
def update(expense_id):
response = requests.get(f"http://127.0.0.1:5000/expense/{expense_id}")
expense = response.json()
# Send a get request to retrieve categories
categories_response = requests.get("http://127.0.0.1:5000/categories")
# Get the JSON data
categories = categories_response.json()
if request.method == "POST":
expense_data = {
'expense_name': request.form['expense_name'],
'amount': request.form['amount'],
'expense_date': request.form['expense_date'],
'note': request.form['note'],
'category_id': request.form['category'],
}
response = requests.put(f"http://127.0.0.1:5000/expense/{expense_id}", json=expense_data)
if response.status_code == 201:
return jsonify({'success': 'Expense Updated successfully!'})
else:
return jsonify({'error': 'There was an error performing the update'})
return render_template('update.html', expense=expense, categories=categories)
def json_to_df(categories, expenses):
for expense in expenses:
expense['category'] = categories[expense['category_id']-1]['category_name']
expenses_json = json.dumps(expenses)
df = pd.read_json(expenses_json) # Convert json to dataframe
return df.sort_values(by=['category_id'])
def create_pie_plot(categories, expenses):
df = json_to_df(categories, expenses)
    # Build the figure with matplotlib/seaborn and render it to a base64 PNG for embedding.
fig,ax=plt.subplots(figsize=(6,6))
ax=sns.set(style="darkgrid")
#define data
data = list(df.groupby('category_id').sum()['amount'].values) # Get the total amount per category
labels = list(df.category.unique()) # Get categories
print(data)
print(labels)
#define Seaborn color palette to use
colors = sns.color_palette('husl')[0:5]
#create pie chart
plt.pie(data, labels = labels, colors = colors, autopct='%.0f%%')
plt.title('Amount spent per category')
canvas=FigureCanvas(fig)
# Save it to a temporary buffer.
buf = BytesIO()
fig.savefig(buf, format="png")
buf.seek(0)
# Embed the result in the html output.
data = base64.b64encode(buf.getbuffer()).decode("ascii")
src = f'data:image/png;base64,{data}'
return src
if __name__ == '__main__':
app.secret_key = '81d43f0a7f63babc337d0a529f91372f'
app.run(debug=True, port=80) | 2.90625 | 3 |
procsim/core/exceptions.py | stcorp/procsim | 1 | 12789516 | <reponame>stcorp/procsim
'''
Copyright (C) 2021 S[&]T, The Netherlands.
Exceptions
'''
from typing import Any
class ProcsimException(Exception):
'''
Base for all procsim exceptions
'''
pass
class TerminateError(ProcsimException):
'''
Program terminated externally (CTRL-C or SIGTERM)
'''
pass
class ScenarioError(ProcsimException):
'''
Error in scenario configuration
'''
pass
class GeneratorError(ProcsimException):
'''
Error in generator
'''
pass
class ParseError(ProcsimException):
'''
Error parsing XML
'''
def __init__(self, value: Any, message: str = 'Error parsing XML. Received invalid value') -> None:
super().__init__(value)
print(message, value)
| 2.328125 | 2 |
interpreter/interpreter/interpreter.py | Gu1nness/AsmInterpreter | 1 | 12789517 | <reponame>Gu1nness/AsmInterpreter
# -*- coding:utf8 -*-
from queue import Queue
from copy import deepcopy
from .memory import *
from .number import Number
from ..lexical_analysis.lexer import Lexer
from ..lexical_analysis.token_type import *
from ..syntax_analysis.parser import Parser
from ..syntax_analysis.tree import *
from ..semantic_analysis.analyzer import SemanticAnalyzer
from ..utils.utils import MessageColor
import sys
from threading import Event
AsmQueue = Queue()
class EndOfExecution(BaseException):
pass
class Interpreter(NodeVisitor):
def __init__(self, break_points, event):
self.memory = Memory()
self.break_points = break_points
self.cmp_reg = 0
self.frame = None
self.jmpd = False
self.can_run = event
self.can_run.set()
def preload_functions(self, tree):
for child in tree.children:
frame = FunctionFrame(child)
self.memory.functions[child.name.value] = frame
self.memory.ranges[frame.boundaries] = child.name
self.memory._create_frames()
if not self.memory._check(self.break_points):
sys.stderr.write(str(["0x%08x" % key for key in
self.memory.frames.keys()]) + '\n')
res = ["0x%08x" % break_point for break_point in self.break_points if not
break_point in self.memory.frames.keys()]
sys.stderr.write(str(res) + "\n")
sys.stderr.flush()
raise Exception("Breakpoints are not all in the frames")
def visit_Register(self, node):
reg = self.memory.registers[node.value]
return Number(node.value[0], reg, register=node.value)
def visit_Frame(self, node):
self.visit(node.instr)
def visit_UnOp(self, node):
node.operand.pointer = True
if node.op.type == NOT_OP:
self.memory.inot(self.visit(node.operand))
if node.op.type == NEG_OP:
self.memory.ineg(self.visit(node.operand))
if node.op.type == DEC_OP:
value = self.visit(node.operand)
self.memory.idec(value)
if value == 0:
self.cmp_reg = 0
else:
self.cmp_reg = 1
if node.op.type == INC_OP:
value = self.visit(node.operand)
self.memory.iinc(value)
if value == 0:
self.cmp_reg = 0
else:
self.cmp_reg = 1
def visit_BinOp(self, node):
node.right.pointer = False
node.left.pointer = True
if node.op.type in [ADD_OP, ADDL_OP]:
value = self.visit(node.left).value
self.memory.iadd(self.visit(node.right), value)
if value == 0:
self.cmp_reg = 0
else:
self.cmp_reg = 1
if node.op.type == LEA_OP:
node.right.pointer = False
node.left.pointer = False
addr = self.visit(node.right)
value = self.visit(node.left).value
self.memory[addr] = self.visit(node.left).value
if node.op.type == MUL_OP:
value = self.visit(node.left).value
self.memory.imul(self.visit(node.right), value)
if node.op.type == SUB_OP:
value = self.visit(node.left).value
self.memory.isub(self.visit(node.right), value)
if value == 0:
self.cmp_reg = 0
else:
self.cmp_reg = 1
if node.op.type == AND_OP:
value = self.visit(node.left).value
self.memory.iand(self.visit(node.right), value)
if value == 0:
self.cmp_reg = 0
else:
self.cmp_reg = 1
if node.op.type == XOR_OP:
value = self.visit(node.left).value
self.memory.ixor(self.visit(node.right), value)
if value == 0:
self.cmp_reg = 0
else:
self.cmp_reg = 1
if node.op.type == SHL_OP:
value = self.visit(node.left).value
self.memory.ishl(self.visit(node.right), value)
if value == 0:
self.cmp_reg = 0
else:
self.cmp_reg = 1
if node.op.type == SHR_OP:
value = self.visit(node.left).value
self.memory.ishr(self.visit(node.right), value)
if value == 0:
self.cmp_reg = 0
else:
self.cmp_reg = 1
if node.op.type == TEST:
right = self.visit(node.right).value
left = self.visit(node.left).value
self.cmp_reg = int(right & left)
def visit_TernOp(self, node):
node.right.pointer = False
node.middle.pointer = True
node.left.pointer = True
if node.op.type in [MUL_OP]:
self.memory.mul(self.visit(node.right),
self.visit(node.left).value,
self.visit(node.middle).value,
)
def visit_XchgOp(self, node):
node.right.pointer = False
node.left.pointer = True
left = self.visit(node.right)
right = self.visit(node.left)
if left.register:
self.memory.registers[left.register] = right
if left.register.startswith('e'):
self.memory.registers['r' + left.register[1:]] = right
else:
self.stack[right.value] = left
if right.register:
self.memory.registers[right.register] = left
if right.register.startswith('e'):
self.memory.registers['r' + right.register[1:]] = left
else:
self.stack[right.value] = left
def visit_MovOp(self, node):
node.right.pointer = False
node.left.pointer = True
addr = self.visit(node.right)
print("====")
print(node.right)
print(addr)
value = self.visit(node.left).value
print(value)
self.memory[addr] = value
def visit_StackOp(self, node):
value = self.visit(node.expr)
if node.op.type == PUSH:
self.memory.push(value, length=1)
if node.op.type == PUSHQ:
self.memory.push(value, length=2)
if node.op.type == POP:
self.memory.pop(length=1)
if node.op.type == POPQ:
self.memory.pop(length=2)
def visit_JmpStmt(self, node):
addr = int(node.jmpaddr.value, 16)
frame = self.memory.frames[addr]
if node.op.type == JG and self.cmp_reg == 2:
self.jmpd = True
self.frame = frame
return
if node.op.type == JGE and self.cmp_reg >= 1:
self.jmpd = True
self.frame = frame
return
if node.op.type == JE and self.cmp_reg == 0:
self.jmpd = True
self.frame = frame
return
if node.op.type == JNE and self.cmp_reg != 0:
self.jmpd = True
self.frame = frame
return
if node.op.type == JLE and self.cmp_reg <= -1:
self.jmpd = True
self.frame = frame
return
if node.op.type == JL and self.cmp_reg == -2:
self.jmpd = True
self.frame = frame
return
if node.op.type == JMPQ:
self.jmpd = True
self.frame = frame
return
if node.op.type == JMP:
self.jmpd = True
self.frame = frame
return
def visit_CmpOp(self, node):
node.right.pointer = False
node.left.pointer = True
left = self.visit(node.left)
right = self.visit(node.right)
self.cmp_reg = Number.__cmp__(right, left)
    def visit_CallQOp(self, node):
        # Assumes self.call_addr was set to the call target before this visit
        # (it is not assigned elsewhere in this module).
        addr = self.call_addr
for (start, end) in self.memory.ranges.keys():
if start <= addr and addr <= end:
fct_frame = self.memory.ranges[(start,end)]
break
self.visit(fct_frame)
def visit_RetStmt(self, node):
ret_addr = self.memory.registers['rbp']
if ret_addr == 0:
raise EndOfExecution
for (start, end) in self.memory.ranges.keys():
            if start <= ret_addr and ret_addr <= end:
fct_frame = self.memory.ranges[(start,end)]
break
return self.visit(fct_frame, start=ret_addr)
def visit_NullOp(self, node):
return
def visit_AddrExpression(self, node):
return Number('l', node.value)
def visit_TernaryAddrExpression(self, node):
if node.reg_1:
return self.visit(node.reg_1) + self.visit(node.offset) * self.visit(node.reg_2)
else:
return self.visit(node.offset) * self.visit(node.reg_2)
def visit_CompoundAddrExpression(self, node):
if node.token.type == NUMBER:
if node.pointer:
#print(node.offset)
addr = self.visit(node.offset) + self.visit(node.register)
res = self.memory.stack[self.visit(node.offset) + self.visit(node.register)]
return Number(node.register.value[0], res)
else:
return self.visit(node.offset) + self.visit(node.register)
if node.token.type == ASTERISK:
return self.memory.stack[self.visit(node.register)]
def interpret(self, tree):
self.preload_functions(tree)
node = self.memory['main']
self.frame = self.memory.frames[node._start]
try:
while True:
self.can_run.wait()
self.visit(self.frame)
if self.frame.prog_counter in self.break_points:
AsmQueue.put((self.frame.prog_counter, deepcopy(self.memory)))
self.can_run.clear()
if self.jmpd:
self.jmpd = False
else:
index = self.memory.prog_counters.index(self.frame.prog_counter)
try:
self.frame = self.memory.frames[self.memory.prog_counters[index + 1]]
except IndexError:
raise EndOfExecution
except EndOfExecution as _:
return self.memory.registers['rax']
@staticmethod
def run(program):
try:
lexer = Lexer(program)
parser = Parser(lexer)
tree = parser.parse()
SemanticAnalyzer.analyze(tree)
status = Interpreter().interpret(tree)
except Exception as message:
print("{}[{}] {} {}".format(
MessageColor.FAIL,
type(message).__name__,
message,
MessageColor.ENDC
))
status = -1
print()
print(MessageColor.OKBLUE + "Process terminated with status {}".format(status) + MessageColor.ENDC)
| 2.28125 | 2 |
exercicios/ex001.py | OtavioMalta/PythonExercicios | 1 | 12789518 | <reponame>OtavioMalta/PythonExercicios
n = int(input('Enter a number: '))
s = n + 1
a = n - 1
d = n * 2
t = n * 3
r = n ** (1 / 2)
print('The successor of {} is {} and the predecessor is {}'.format(n, s, a))
print('The double of {} is {}, the triple is {} and the square root is {}'.format(n, d, t, r))
print('The multiplication table of {} is:'.format(n), n*1, n*2, n*3, n*4, n*5, n*6, n*7, n*8, n*9, n*10)
n1 = int(input('Enter the first grade: '))
n2 = int(input('Enter the second grade: '))
m = (n1 + n2) / 2
print("The student's average is {}".format(m))
mts = int(input('Enter a length in meters: '))
cm = mts * 100
mm = cm * 10
print('That length is {} in centimeters and {} in millimeters'.format(cm, mm))
rs = int(input('How many reais do you have? '))
dl = rs / 3.27
print('In total you have {} dollars'.format(dl))
al = int(input('Wall height: '))
lar = int(input('Wall width: '))
ar = al * lar
tin = ar / 2
print('The wall area is {} square meters, so you will need {} liters of paint'.format(ar, tin))
pr = float(input('Enter a price: '))
dc = pr - (pr * 5 / 100)
print('The price with the discount is {}'.format(dc))
sal = float(input('How much does the employee earn? '))
aum = sal + (sal * 15 / 100)
print('With a 15% raise, the employee will earn: R${}'.format(aum))
temp = float(input("What is the temperature in Celsius? "))
tmp = temp * 1.8 + 32
print('The temperature in Fahrenheit is {}'.format(tmp))
dia = int(input('For how many days was the car rented? '))
km = float(input('How many km were driven? '))
tot = (dia * 60) + (0.15 * km)
print('The total to be paid is {}'.format(tot))
| 4.1875 | 4 |
deui/html/view/body_element.py | urushiyama/DeUI | 1 | 12789519 | from .element import Element
class Body(Element):
"""
Represents content body.
"""
def __str__(self):
return "body"
| 2.703125 | 3 |
freetrade/auth.py | DainisGorbunovs/freetrade | 32 | 12789520 | import json
import logging
import os
import uuid
from collections import OrderedDict
from datetime import datetime
from typing import Callable
import jwt
import requests
from freetrade import Credentials
logger = logging.getLogger(__name__)
class Auth:
def __init__(self, credentials: Credentials, email: str, useragent: str = None,
session_id: str = None, otp_parser: Callable = None):
self.credentials = credentials
self.custom_token = None
self.id_token = None
self.refresh_token = None
self.host = 'https://' + self.credentials.get_ft_auth_host()
self.email = email
self.useragent = useragent if useragent else 'Freetrade/1.0.4756-4756 Dalvik/2.1.0 ' \
'(Linux; U; Android 9; SM-G965U Build/PPR1.180610.011)'
self.android_api_key = self.credentials.get_android_verification_api_key()
self.otp_parser = otp_parser
if otp_parser is None:
self.otp_parser = lambda: input('What is the OTP (one time password) in the Magic link in the email? ')
if session_id is None:
session_id = str(uuid.uuid4())
self.headers = OrderedDict([
('session_id', session_id),
('request_id', ''),
('User-Agent', self.useragent),
('Content-Type', 'application/x-www-form-urlencoded'),
('Content-Length', ''),
('Host', self.credentials.get_ft_auth_host()),
('Connection', 'close'),
('Accept-Encoding', 'gzip, deflate')
])
self.authenticate()
def get_request_header(self) -> OrderedDict:
self.headers['request_id'] = str(uuid.uuid4())
return self.headers
def login_request_otp(self) -> requests.Response:
payload = {'email': self.email}
response = requests.post(self.host + '/start', data=payload, headers=self.get_request_header())
return response
def login_with_otp(self, one_time_password: str) -> requests.Response:
payload = {'email': self.email, 'otp': one_time_password}
response = requests.post(self.host + '/login', data=payload, headers=self.get_request_header())
content = response.json()
self.custom_token = content['access_token']
return response
def authenticate(self, session_file: str = None):
# default local user configs
key_paths_list = [
'ft-session.json',
os.path.expanduser("~") + '/.config/freetrade/ft-session.json',
]
# if session_file is given, prioritise by reading from it first
if session_file is not None:
key_paths_list.insert(0, session_file)
# check which files are readable and contain the session
session = None
for path in key_paths_list:
# if there are issues reading file, ignore them
# and just relogin again
try:
if os.path.isfile(path):
with open(path, 'rb') as f:
session = json.load(f)
self.refresh_token = session['refresh_token']
self.headers['session_id'] = session['session_id']
self.refresh_id_token()
break
except Exception as e:
logger.error('Error reading session file: {} - {}.'.format(type(e).__name__, str(e)))
session = None
# if none of the files were successful, login again
if session is None:
self.login_request_otp()
otp = self.otp_parser()
# get a Custom Token for authenticating Firebase client SDKs
# valid for 1 hour
self.login_with_otp(otp)
# get a refresh token (valid 1 year) and ID token (valid 1 hour)
self.get_firebase_tokens()
# save the new session
with open(key_paths_list[0], 'w') as f:
data = {
'refresh_token': self.refresh_token,
'session_id': self.headers['session_id']
}
json.dump(data, f)
def get_firebase_tokens(self):
# if does not work, need new session token (relogin via authenticate)
# exchange custom token -> a refresh and ID tokens
# https://firebase.google.com/docs/reference/rest/auth
url = 'https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyCustomToken?key='\
+ self.android_api_key
res = requests.post(url, json={
'token': self.custom_token,
'returnSecureToken': True
})
tokens = res.json()
self.refresh_token = tokens['refreshToken']
self.id_token = tokens['idToken']
def refresh_id_token(self):
# exchange refresh token -> newer refresh and ID token
url = 'https://securetoken.googleapis.com/v1/token?key=' + self.android_api_key
res = requests.post(url, data={
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token
})
tokens = res.json()
self.refresh_token = tokens['refresh_token']
self.id_token = tokens['id_token']
self.headers['Authorization'] = self.get_auth_bearer()
self.headers.move_to_end('Authorization', last=False)
def get_auth_bearer(self):
return 'Bearer ' + self.id_token
def keep_id_token_valid(self):
decoded = jwt.decode(self.id_token, verify=False)
delta_time_valid = datetime.utcfromtimestamp(decoded['exp']) - datetime.utcnow()
# if less than 60 seconds left, refresh ID token
if delta_time_valid.total_seconds() < 60:
self.refresh_id_token()
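# Minimal usage sketch (the Credentials constructor is defined elsewhere, so the call
# below is a placeholder, not the real signature):
#
#   credentials = Credentials(...)                       # loaded from the local config
#   auth = Auth(credentials, email="user@example.com")   # runs the magic-link OTP flow
#   auth.keep_id_token_valid()                           # refreshes the ID token near expiry
#   headers = auth.get_request_header()                  # session/request IDs + Authorization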
| 2.484375 | 2 |
src/rascore/util/functions/col.py | mitch-parker/rascore | 7 | 12789521 | <reponame>mitch-parker/rascore
# -*- coding: utf-8 -*-
"""
Copyright 2022 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
y32_col = "Y32"
y71_col = "Y71"
sw1_col = "SW1"
sw2_col = "SW2"
id_col = "id"
core_path_col = "core_path"
rcsb_path_col = "rcsb_path"
renum_path_col = "renum_path"
rcsb_assembly_path_col = "rcsb_assembly_path"
renum_assembly_path_col = "renum_assembly_path"
sifts_path_col = "sifts_path"
edia_path_col = "edia_path"
interf_path_col = "interface_path"
pocket_path_col = "pocket_path"
modelid_col = "modelid"
chainid_col = "chainid"
resid_col = "resid"
resname_col = "resname"
atomid_col = "atomid"
pdb_id_col = "pdb_id"
pdb_code_col = "pdb_code"
swiss_id_col = "swiss_id"
uniprot_id_col = "uniprot_id"
pfam_col = "pfam"
prot_col = "prot_name"
method_col = "method"
resolution_col = "resolution"
r_factor_col = "r_factor"
seq_col = "sequence"
len_col = "length"
range_col = "range"
date_col = "deposit_data"
pmid_col = "pmid"
len_a_col = "length_a"
len_b_col = "length_b"
len_c_col = "length_c"
ang_a_col = "angle_alpha"
ang_b_col = "angle_beat"
ang_g_col = "angle_gamma"
space_col = "space_group"
cf_col = "crystal_form"
mut_status_col = "mutation_status"
mut_pos_col = "mutation_position"
interf_id_col = "interface_id"
interf_col = "interface"
interf_area_col = "interface_area"
cb_cont_col = "cb_contacts"
atomid_cont_col = "atomid_contacts"
total_cb_cont_col = "total_cb_contacts"
total_atomid_cont_col = "total_atomid_contacts"
interf_cont_col = "interface_contacts"
cb_dist_col = "cb_distance"
iso_col = "isologous"
pocket_id_col = "pocket_id"
pocket_col = "pocket"
pocket_site_col = "pocket_site"
pocket_volume_col = "pocket_volume"
pocket_score_col = "pocket_score"
pocket_status_col = "pocket_status"
pocket_type_col = "pocket_type"
pocket_cont_col = "pocket_contacts"
bio_lig_col = "biological_ligand"
ion_lig_col = "ion_ligand"
pharm_lig_col = "pharmacological_ligand"
chem_lig_col = "chemical_ligand"
mod_lig_col = "modification_ligand"
mem_lig_col = "membrane_ligand"
pocket_lig_col = "pocket_ligand"
gene_class_col = "gene_class"
nuc_class_col = "nucleotide_class"
mut_class_col = "mutation_class"
pharm_class_col = "pharmacological_class"
match_class_col = "match_class"
prot_class_col = "protein_class"
interf_class_col = "interface_class"
pocket_class_col = "pocket_class"
bound_lig_col = "bound_ligand"
bound_prot_col = "bound_protein"
bound_prot_swiss_id_col = "bound_protein_swiss_id"
bound_lig_cont_col = 'bound_ligand_contacts'
bound_prot_cont_col = 'bound_protein_contacts'
pharm_lig_site_col = "pharmacological_ligand_site"
pharm_lig_match_col = "pharmarmacological_ligand_match"
pharm_lig_smiles_col = "pharmacological_ligand_smiles"
bound_prot_pfam_col = "bound_protein_pfam"
bound_prot_site_col = "bound_protein_site"
bound_prot_chainid_col = "bound_protein_chainid"
bound_interf_chainid_col = "bound_interf_chainid"
edia_col = "edia_score"
b_factor_col = "b_factor"
phi_col = "phi"
psi_col = "psi"
omega_col = "omega"
chi1_col = "chi1"
altchi1_col = "altchi1"
chi2_col = "chi2"
altchi2_col = "altchi2"
chi3_col = "chi3"
chi4_col = "chi4"
chi5_col = "chi5"
complete_col = "complete"
rama_col = "rama"
rotamer_col = "rotamer"
bb_resid_col = "bb_resid"
sc_resid_col = "sc_resid"
bb_seq_col = "bb_seq"
sc_seq_col = "sc_seq"
bb_len_col = "bb_length"
sc_len_col = "sc_length"
cluster_col = "cluster"
nn_dist_col = "nn_dist"
constr_dist_col = "constraint_distance"
nn_cutoff_col = "nn_cutoff"
constr_cutoff_col = "constraint_cutoff"
total_col = "total"
total_chain_col = "total_chain"
total_entry_col = "total_entry"
total_cf_col = "total_cf"
loop_col = "loop"
silh_col = "silhouette_score"
simi_col = "similarity_score"
total_complete_col = "total_complete"
cluster_count_col = "cluster_count"
total_cluster_pre_col = "total_cluster_pre"
total_noise_pre_col = "total_noise_pre"
total_pruned_nn_col = "total_pruned_nn"
total_pruned_constr_col = "total_pruned_constr"
total_classified_nn_col = "total_classified_nn"
total_classified_constr_col = "total_classified_constr"
total_cluster_post_col = "total_cluster_post"
total_noise_post_col = "total_noise_post"
select_col = "select"
rep_col = "representative"
common_col = "common"
entropy_col = "entropy"
occup_col = "occupancy"
mean_col = "mean"
max_col = "max"
vect_1_col = "vector_1"
vect_2_col = "vector_2"
atom_dist_col = "atom_dist"
hb_status_col = "hydrogen_bond"
hb_angle_1_col = "hb_angle_1"
hb_angle_2_col = "hb_angle_2"
wmhb_angle_col = "wmhb_angle"
outlier_col = "outlier"
bond_col = "bond"
angle_col = "angle"
dih_col = "dihedral"
rmsd_col = "rmsd"
index_col = "index"
p_col = "p_val"
correct_p_col = "corrected_p_val"
corr_col = "correlation"
a_col = "a_val"
b_col = "b_val"
c_col = "c_val"
d_col = "d_val"
risk_ratio_col = "risk_ratio"
low_ci_col = "lower_ci"
up_ci_col = "upper_ci"
sig_col = "significance"
bb_col_lst = [phi_col, psi_col, omega_col]
sc_col_lst = [
chi1_col,
chi2_col,
altchi1_col,
altchi2_col,
chi3_col,
chi4_col,
chi5_col,
]
dih_col_lst = bb_col_lst + sc_col_lst
reformat_col_lst = dih_col_lst
dist_col_lst = [
nn_dist_col,
constr_dist_col,
vect_1_col,
vect_2_col,
atom_dist_col,
hb_angle_1_col,
hb_angle_2_col,
wmhb_angle_col,
outlier_col,
]
sum_col_lst = [common_col, entropy_col, occup_col]
data_col_lst = reformat_col_lst + dist_col_lst + sum_col_lst
class_col_lst = [
gene_class_col,
nuc_class_col,
mut_class_col,
pharm_class_col,
match_class_col,
prot_class_col,
interf_class_col,
pocket_class_col,
]
path_col_lst = [
core_path_col,
rcsb_path_col,
renum_path_col,
rcsb_assembly_path_col,
renum_assembly_path_col,
sifts_path_col,
edia_path_col,
interf_path_col,
pocket_path_col,
]
count_col_dict = {
pdb_id_col: total_chain_col,
pdb_code_col: total_entry_col,
cf_col: total_cf_col,
}
order_col_lst = [
id_col,
pdb_id_col,
pdb_code_col,
modelid_col,
chainid_col,
sw1_col,
sw2_col,
y32_col,
y71_col,
cf_col,
cluster_col,
total_col,
total_chain_col,
total_entry_col,
total_cf_col,
rep_col,
rama_col,
rotamer_col,
nn_dist_col,
constr_dist_col,
interf_id_col,
interf_col,
pocket_id_col,
pocket_col,
prot_col,
swiss_id_col,
uniprot_id_col,
pfam_col,
mut_status_col,
mut_pos_col,
gene_class_col,
nuc_class_col,
mut_class_col,
pharm_class_col,
match_class_col,
prot_class_col,
interf_class_col,
pocket_class_col,
loop_col,
silh_col,
simi_col,
total_complete_col,
cluster_count_col,
total_cluster_pre_col,
total_noise_pre_col,
total_pruned_nn_col,
total_pruned_constr_col,
total_classified_nn_col,
total_classified_constr_col,
total_cluster_post_col,
total_noise_post_col,
select_col,
nn_cutoff_col,
constr_cutoff_col,
space_col,
len_col,
range_col,
date_col,
pmid_col,
index_col,
p_col,
correct_p_col,
corr_col,
risk_ratio_col,
low_ci_col,
up_ci_col,
a_col,
b_col,
c_col,
d_col,
resid_col,
resname_col,
atomid_col,
interf_area_col,
cb_cont_col,
atomid_cont_col,
total_cb_cont_col,
total_atomid_cont_col,
interf_cont_col,
cb_dist_col,
iso_col,
pocket_site_col,
pocket_volume_col,
pocket_score_col,
pocket_status_col,
pocket_type_col,
pocket_cont_col,
phi_col,
psi_col,
omega_col,
chi1_col,
chi2_col,
altchi1_col,
altchi2_col,
chi3_col,
chi4_col,
chi5_col,
edia_col,
b_factor_col,
vect_1_col,
vect_2_col,
atom_dist_col,
hb_status_col,
hb_angle_1_col,
hb_angle_2_col,
wmhb_angle_col,
outlier_col,
bond_col,
angle_col,
rmsd_col,
common_col,
entropy_col,
occup_col,
bound_lig_col,
bound_prot_col,
bound_lig_cont_col,
bound_prot_cont_col,
bound_prot_swiss_id_col,
pharm_lig_site_col,
pharm_lig_match_col,
pharm_lig_smiles_col,
bound_prot_pfam_col,
bound_prot_site_col,
bound_prot_chainid_col,
bound_interf_chainid_col,
bio_lig_col,
ion_lig_col,
pharm_lig_col,
chem_lig_col,
mod_lig_col,
mem_lig_col,
pocket_lig_col,
method_col,
resolution_col,
r_factor_col,
seq_col,
bb_resid_col,
bb_seq_col,
bb_len_col,
sc_resid_col,
sc_seq_col,
sc_len_col,
complete_col,
len_a_col,
len_b_col,
len_c_col,
ang_a_col,
ang_b_col,
ang_g_col,
core_path_col,
rcsb_path_col,
renum_path_col,
rcsb_assembly_path_col,
renum_assembly_path_col,
sifts_path_col,
edia_path_col,
interf_path_col,
pocket_path_col,
]
rename_col_dict = {
id_col: "ID",
pdb_id_col: "PDB ID",
pdb_code_col: "PDB Code",
modelid_col: "Model",
chainid_col: "Chain",
sw1_col: "SW1 Conformation",
sw2_col: "SW2 Conformation",
y32_col: "Y32 Position",
y71_col: "Y71 Position",
hb_status_col:"H-Bond Substate",
gene_class_col: "RAS Isoform",
mut_status_col: "Mutation Status",
nuc_class_col: "Nucleotide State",
prot_class_col: "Bound Protein",
pocket_class_col: "Inhibitor Site",
match_class_col: "Inhibitor Chemistry",
interf_class_col: "Homodimer Status",
bound_prot_col: "Bound Protein Name",
bound_prot_swiss_id_col: "Bound Protein SwissProt ID",
bound_prot_chainid_col: "Bound Protein Chain",
bio_lig_col: "Nucleotide",
ion_lig_col: "Ion",
pharm_lig_col: "Inhibitor",
chem_lig_col: "Chemical",
mod_lig_col: "Modification",
mem_lig_col: "Membrane",
pocket_lig_col: "Pocket",
space_col: "Space Group",
method_col: "Experiment Method",
resolution_col: "Resolution",
r_factor_col: "R-Factor",
date_col: "Deposit Date",
pocket_score_col: "Druggability Scores",
pocket_volume_col: "Pocket Volume"
}
def get_dist_col(x_resid, y_resid, x_atomid=None, y_atomid=None, ext=None):
if ext is None:
ext = atom_dist_col
dist_col = str(x_resid)
if str(x_atomid) != "None":
dist_col += f"({x_atomid})"
dist_col += ":"
dist_col += str(y_resid)
if str(y_atomid) != "None":
dist_col += f"({y_atomid})"
if ext is not None:
dist_col += f"_{ext}"
return dist_col
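# Usage note: with ext=None the default suffix is atom_dist_col, e.g.
# get_dist_col(32, 12, x_atomid="OH", y_atomid="CZ") -> "32(OH):12(CZ)_atom_dist",
# and get_dist_col(32, 12, x_atomid="OH", y_atomid="CZ", ext="HB") -> "32(OH):12(CZ)_HB".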
| 1.1875 | 1 |
src/functions.py | datapartnership/HospitalAccessibility | 3 | 12789522 | import requests, json, os, pickle
import networkx as nx
import GOSTnets as gn
import matplotlib.pyplot as plt
from matplotlib import gridspec
from time import sleep
import pandas as pd
import geopandas as gpd
import rasterio
from rasterio.windows import Window
from rasterio.plot import *
from rasterio.mask import *
import numpy as np
from shapely.geometry import Point
from shapely.geometry import box
import contextily as ctx
import osmnx as ox
from fiona.crs import from_epsg
import pycrs
import geoviews as gv
import hvplot.pandas
import random
from utility import *
from raster_ops import *
from mapbox import *
def get_origins(places, population_file, window_size = 50, use_pickle = False, do_pickle_result = True, pickle_region_name = ""):
"""
    Function extracts origin points from a raster population map
    places - string of region of interest, or a dataframe of administrative boundary polygon
population_file - raster file
window_size - final size of each population grid that is wanted
"""
# Immediately return pickled data if requested
if use_pickle == True:
with open (f"../data/interim/origins_{pickle_region_name}.pickle", "rb") as handle:
origins = pickle.load(handle)
print(f"Origins:{origins.shape};")
return origins
#Scan the raster map with big windows
origins=pd.DataFrame()
window=window_size * 2
with rasterio.open(population_file) as src:
for left_x in np.arange(0,src.width,window):
for top_y in np.arange(0,src.height,window):
out=get_pop(population_file,left_x,top_y,window,plot=False)
if out != {}:
origins=origins.append([out])
print("%i/%i\r"%(left_x,src.width),end="")
#Do a splitting pass. Run this cell several times (we run four times),
# until you have a balance of small window and not too big rois
#run this cell as many times as you want to split the windows
#for i in range(0,4):
#for pass_num in range(0,split_passes):
# Split pass start
regions_need_splitting = origins[origins['split'] == True]
print(f"{len(regions_need_splitting)} regions need splitting")
olen=len(origins)
for i in np.arange(olen):
print("%i/%i\r"%(i+1,olen),end="")
if origins.iloc[i,origins.columns.get_loc('split')] == True:
origins.iloc[i,origins.columns.get_loc('split')]='done'
s = split(population_file,origins.iloc[i])
origins=origins.append(s,sort=False)
print("done.")
print("We now have %i regions of min size %i, %i will be split in next round"%\
(len(origins),origins['window'].min(),len(origins[origins['split']==True])))
origins=origins[origins['tot_pop']>0]
origins=origins[origins['split']!='done']
print("We have %i regions of size %i, %i with population >0"%
(len(origins),min(origins['window']),len(origins[origins['tot_pop']>0])))
# Split pass end
# Set the geometry of the generated polygons as points - that are grid centroids.
origins=gpd.GeoDataFrame(origins,
crs='epsg:4326',
geometry=[Point(xy) for xy in zip(origins['center_lon'], origins['center_lat'])]
)
# Create a separate geometry column that contains the grid geometry
origins['geo_grid']=origins.apply(
lambda r: box(r.left_lon, r.bottom_lat, r.right_lon, r.top_lat, ccw=False),
axis=1
)
col_to_keep = origins.columns
# Filter Origins with administrative boundary
if (isinstance(places,pd.DataFrame)==True):
bounds = places
else:
bounds = ox.boundaries.gdf_from_places(places)
# Don't clip to full bounds, just bbox. That's why this is commented
tr_origins = gpd.sjoin(origins, bounds, how="inner", op="intersects")
tr_origins = tr_origins[col_to_keep].reset_index(drop=True)
#Outputting Origins
print(f"All origins:{origins.shape}; Relevant Origins:{tr_origins.shape}")
# Pickle generated origins if requested
if do_pickle_result == True:
with open ("../data/interim/origins_{pickle_region_name}.pickle", "wb") as handle:
pickle.dump(tr_origins, handle, protocol=pickle.HIGHEST_PROTOCOL)
print("Pickled origins")
return(tr_origins)
def origins_snap_osm(origins, net_pickle_path):
"""
Function snaps origins to nearest road based on road network
origins - dataframe of population origin points
    net_pickle_path - path to the pickled road network graph
"""
print("Reading pickle")
G = nx.read_gpickle(net_pickle_path)
print("Snapping Origins")
origins = gn.pandana_snap_c(G,
point_gdf = origins,
source_crs = 'epsg:4326',
target_crs = 'epsg:4326',
add_dist_to_node_col = True,
time_it = True)
origins = origins.drop(['x', 'y'], axis=1)
print("Converting NNs to Points")
def get_geo_from_node(NN):
node = G.nodes[NN]
return gpd.points_from_xy([node['x']], [node['y']])[0]
origins['center_geom'] = origins['geometry'].copy()
origins['geometry'] = origins['NN'].apply(get_geo_from_node)
origins['geometry'] = origins['geometry'].set_crs('epsg:4326')
return origins
def get_destinations(places, tags):
"""
    Function extracts POI data from OSM
    places - string of region of interest, or a dataframe of administrative boundary polygon
tags - amenity tags for destinations, must be queryable in OSM
"""
    # For places inputs that are a DataFrame
if isinstance(places, pd.DataFrame)==True :
bounds = places.reset_index(drop=True)
boundaries=bounds.loc[0]['geometry']
df = ox.pois_from_polygon(boundaries, tags)
#For places inputs that are a string
else:
destinations_list = []
for p in places:
destinations_list.append(ox.pois_from_place(p, tags))
df = pd.concat(destinations_list)
#Formatting dataframe
df = gpd.GeoDataFrame(df[["osmid", "amenity", "name", "source"]], geometry=df['geometry'])
df = df.set_crs("EPSG:4326")
# Convert Polygons to centroid
df.loc[df.geometry.geom_type != 'Point', "geometry"] = df.loc[df.geometry.geom_type != 'Point', "geometry"].centroid
#Making sure we have no Amenitities of None Type
df = df.loc[df.amenity.isin(tags['amenity']), :].reset_index(drop=True).copy()
return df
def n_closest_geodetic(origins, destinations, n_keep):
"""
    Function takes in origins and destinations and outputs a new destination list
    with the n_keep closest destinations to each origin. This helps reduce the number of calls made to Mapbox.
    origins - data frame
    destinations - data frame
    n_keep - int number of nearby destinations you would like to keep
"""
destinations = destinations.to_crs("EPSG:4326")
origins = origins.to_crs("EPSG:4326")
dest_list = []
for i in origins.index:
origin = origins.loc[i, :]
dest_o = destinations.copy()
dest_o['distance_to_or'] = dest_o.distance(origin.geometry)
dest_o['o_index'] = i
dest_o = dest_o.sort_values(by='distance_to_or', axis=0, ascending=True).head(n_keep)
dest_list.append(dest_o)
return pd.concat(dest_list).reset_index(drop=True)
def biplot(origins,destinations, mode, t_max,xlim=False):
"""
    Function plots a map and a histogram for the places beyond t_max hours from the closest hospital.
    origins - data frame with travel time, distance and population data
    destinations - data frame
    mode - string of travel mode
    t_max - travel time threshold in hours
    xlim - x axis limits for the histogram
"""
o = origins.copy()
h = destinations.copy()
#o_above = o[(o['t_'+o_type]>t_max) & (o['so_'+o_type]<so_max)]
o_above = o[(o['hrs_to_hosp_or_clinic']>t_max)]
variable="tot_pop"
vmin,vmax=0,10000
fig = plt.figure(figsize=(12, 6))
gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
ax1=plt.subplot(gs[0])
ax1.axis("off")
sm = plt.cm.ScalarMappable(cmap='Reds', norm=plt.Normalize(vmin=vmin, vmax=vmax))
sm.set_array([])
fig.colorbar(sm)
o_above['geometry'] = o_above['geo_grid']
o_above.to_crs('epsg:3857').plot(column=variable, cmap='Reds', linewidth=0.8, ax=ax1,edgecolor='0.8')
h.to_crs('epsg:3857').plot( alpha=0.8,color='blue',marker=".",markersize=8,ax=ax1)
ctx.add_basemap(ax1)
ax1.set_title("(Red) population beyond %i h from hospital (Blue)"%t_max)
ax1.set_axis_off()
ax2=plt.subplot(gs[1])
o['hrs_to_hosp_or_clinic'].plot.hist(alpha=0.5,bins=1000,cumulative=True,density=False,log=False,logx=False,weights=o['tot_pop'])
    if xlim:  # xlim may be the default False or a [min, max] pair
plt.xlim(xlim)
ax2.ticklabel_format(style='sci')
ax2.axvline(t_max,color="red")
ax2.set_ylabel('People [10s Million]')
ax2.set_xlabel('Distance to closest hospital or clinic:'+' [h]')
modestring="%i people (%.2f%%) > %i h "+ mode+ " hospital"
ax2.set_title(modestring%\
(o_above['tot_pop'].sum(),o_above['tot_pop'].sum()/o['tot_pop'].sum()*100,t_max))
#plt.tight_layout()
plt.show();
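# Pipeline sketch (file paths, region names and OSM tags below are hypothetical placeholders;
# the travel-time column 'hrs_to_hosp_or_clinic' used by biplot() is assumed to be added by a
# separate Mapbox routing step that is not part of this module):
#
#   origins = get_origins(["Kenya"], "data/raw/population.tif", window_size=50)
#   origins = origins_snap_osm(origins, "data/interim/osm_network.pickle")
#   hospitals = get_destinations(["Kenya"], {"amenity": ["hospital", "clinic"]})
#   nearby = n_closest_geodetic(origins, hospitals, n_keep=5)
#   biplot(origins, hospitals, mode="driving", t_max=2, xlim=[0, 12])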
| 2.734375 | 3 |
instackup/sql_tools.py | Lavedonio/alexandria | 0 | 12789523 | <gh_stars>0
import os
import logging
import sqlite3
import psycopg2
import mysql.connector
import pandas as pd
from .general_tools import fetch_credentials
# Logging Configuration
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
formatter = logging.Formatter("%(asctime)s:%(name)s:%(levelname)s: %(message)s")
LOG_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'logs')
os.makedirs(LOG_DIR, exist_ok=True)
file_handler = logging.FileHandler(os.path.join(LOG_DIR, "sql_tools.log"))
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
class SQLTool(object):
"""Base class for the different types of SQL databases."""
def __init__(self, sql_type, filename=None, connection='default'):
if sql_type == "SQLite":
sql_credentials = {}
if filename is None:
filename = ':memory:'
else:
# Getting credentials
sql_credentials = fetch_credentials(service_name=sql_type, connection=connection)
self.sql_type = sql_type
# SQLite
self.filename = filename
# PostgreSQL and others
self.connection_parameters = sql_credentials
        # Attributes ready to be set in connection
self.connection = None
self.cursor = None
def connect(self, fail_silently=False):
"""Create the connection using the __init__ attributes.
        If fail_silently parameter is set to True, any errors will be suppressed
and not stop the code execution.
"""
try:
if self.sql_type == "SQLite":
conn = sqlite3.connect(self.filename)
elif self.sql_type == "MySQL":
conn = mysql.connector.connect(**self.connection_parameters)
else: # PostgreSQL
conn = psycopg2.connect(**self.connection_parameters)
logger.info("Connected!")
except (sqlite3.Error, psycopg2.Error, mysql.connector.Error) as e:
print('Failed to open database connection.')
logger.exception('Failed to open database connection.')
if not fail_silently:
raise e
else:
logger.error("ATENTION: Failing Silently")
else:
self.connection = conn
self.cursor = self.connection.cursor()
return self
def commit(self):
"""Commit any pending transaction to the database."""
self.connection.commit()
logger.info("Transaction commited.")
def rollback(self):
"""Roll back to the start of any pending transaction."""
self.connection.rollback()
logger.info("Roll back current transaction.")
def execute_sql(self, command, fail_silently=False):
"""Execute a SQL command (CREATE, UPDATE and DROP).
        If fail_silently parameter is set to True, any errors will be suppressed
and not stop the code execution.
"""
try:
self.cursor.execute(command)
logger.debug(f"Command Executed: {command}")
except (sqlite3.Error, psycopg2.Error, mysql.connector.Error) as e:
logger.exception("Error running command!")
if not fail_silently:
raise e
else:
logger.error("ATENTION: Failing Silently")
def query(self, sql_query, fetch_through_pandas=True, fail_silently=False):
"""Run a query and return the results.
fetch_through_pandas parameter tells if the query should be parsed by the cursor or pandas.
        If fail_silently parameter is set to True, any errors will be suppressed
and not stop the code execution.
Returns either a DataFrame (if fetch_through_pandas parameter is set to True)
or a list of tuples, each representing a row, with their position in the same order
as in the columns of the SELECT statement in the sql_query parameter.
"""
# Eliminating SQL table quotes that can't be handled by PostgreSQL
sql_query = sql_query.replace("`", "")
if fetch_through_pandas:
try:
result = pd.read_sql_query(sql_query, self.connection)
except (sqlite3.Error, psycopg2.Error, mysql.connector.Error, pd.io.sql.DatabaseError) as e:
logger.exception("Error running query!")
result = None
if not fail_silently:
raise e
else:
logger.error("ATENTION: Failing Silently")
else:
try:
self.cursor.execute(sql_query)
logger.debug(f"Query Executed: {sql_query}")
result = self.cursor.fetchall()
except (sqlite3.Error, psycopg2.Error, mysql.connector.Error) as e:
logger.exception("Error running query!")
result = None
if not fail_silently:
raise e
else:
logger.error("ATENTION: Failing Silently")
return result
def close_connection(self):
"""Closes Connection with the database"""
self.connection.close()
logger.info("Connection closed.")
# __enter__ and __exit__ functions for with statement.
# With statement docs: https://docs.python.org/2.5/whatsnew/pep-343.html
def __enter__(self):
return self.connect()
def __exit__(self, type, value, traceback):
if traceback is None:
# No exception, so commit
self.commit()
else:
# Exception occurred, so rollback.
self.rollback()
self.close_connection()
class SQLiteTool(SQLTool):
"""This class handle most of the interaction needed with SQLite3 databases,
so the base code becomes more readable and straightforward."""
def __init__(self, filename=None):
super().__init__("SQLite", filename=filename)
def describe_table(self, table, fetch_through_pandas=True, fail_silently=False):
"""Special query that returns all metadata from a specific table"""
sql_query = f"""SELECT name FROM sqlite_master WHERE type='{table}';"""
return self.query(sql_query, fetch_through_pandas=fetch_through_pandas, fail_silently=fail_silently)
class MySQLTool(SQLTool):
"""This class handle most of the interaction needed with MySQL databases,
so the base code becomes more readable and straightforward."""
def __init__(self, connection='default'):
super().__init__("MySQL", connection=connection)
def describe_table(self, table, fetch_through_pandas=True, fail_silently=False):
"""Returns all metadata from a specific table"""
sql_query = f"DESCRIBE {table}"
return self.query(sql_query, fetch_through_pandas=fetch_through_pandas, fail_silently=fail_silently)
class PostgreSQLTool(SQLTool):
"""This class handle most of the interaction needed with PostgreSQL databases,
so the base code becomes more readable and straightforward."""
def __init__(self, connection='default'):
super().__init__("PostgreSQL", connection=connection)
def describe_table(self, table, schema="public", fetch_through_pandas=True, fail_silently=False):
"""Special query that returns all metadata from a specific table"""
sql_query = f"""SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema='{schema}' AND table_name='{table}'"""
return self.query(sql_query, fetch_through_pandas=fetch_through_pandas, fail_silently=fail_silently)
def get_all_db_info(self, get_json_info=True, fetch_through_pandas=True, fail_silently=False):
"""Gets all Database info, using a INFORMATION_SCHEMA query.
Ignore table pg_stat_statements and tables inside schemas pg_catalog and information_schema.
If get_json_info parameter is True, it adds 2 columns with the data types from each key
inside json and jsonb columns.
fetch_through_pandas and fail_silently parameters are passed directly to the query method if
get_json_info parameter is set to False; if it's not, these 2 parameters are passed as their default values.
Returns a DataFrame if either get_json_info or fetch_through_pandas parameters are set to True;
otherwise returns a list of tuples, each representing a row, with their position in the same order
as in the columns of the INFORMATION_SCHEMA.COLUMNS table.
"""
sql_query = """SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name != 'pg_stat_statements' AND table_schema NOT IN ('pg_catalog', 'information_schema')"""
if not get_json_info:
return self.query(sql_query, fetch_through_pandas=fetch_through_pandas, fail_silently=fail_silently)
else:
df = self.query(sql_query, fetch_through_pandas=True, fail_silently=False)
# Adding 2 new empty columns for the JSON data
num_rows, _ = df.shape
col_add_position = df.columns.get_loc("data_type")
df.insert(col_add_position + 1, 'json_key', pd.Series(["" for i in range(num_rows)], index=df.index))
df.insert(col_add_position + 2, 'json_value_type', pd.Series(["" for i in range(num_rows)], index=df.index))
# Filtering only json and jsonb types for further info lookup
df_json = df[df['data_type'].isin(['jsonb', 'json'])]
# Creating a empty dictionary list that will be later converted to a DataFrame and
# joined with the base information_schema query. It's done that way to improve efficiency
json_types_dict = {}
for index in df_json.columns.values.tolist():
json_types_dict[index] = []
# Base query for JSON keys and value data type lookup
json_query = """
SELECT
json_data.key,
{json}_typeof(json_data.value) AS json_value_data_type,
COUNT(*)
FROM {schema}.{table}, {json}_each({table}.{column}) AS json_data
GROUP BY 1, 2
ORDER BY 1, 2;
"""
# Run the base query for each column with json or jsonb data types
# and add the results to the json_types_dict.
for index, row in df_json.iterrows():
df_query_results = self.query(json_query.format(
schema=row['table_schema'],
table=row['table_name'],
column=row['column_name'],
json=row['data_type'],
), fetch_through_pandas=True, fail_silently=False)
for _, query_row in df_query_results.iterrows():
for col in json_types_dict.keys():
if col not in ['json_key', 'json_value_type']:
json_types_dict[col].append(row[col])
json_types_dict['json_key'].append(query_row['key'])
json_types_dict['json_value_type'].append(query_row['json_value_data_type'])
# Converting the results to DataFrame, joining and sorting them before returning the result
new_df = pd.concat([df, pd.DataFrame(json_types_dict)], ignore_index=True)
return new_df.sort_values(by=['table_catalog', 'table_schema', 'table_name', 'column_name', 'data_type', 'json_key', 'json_value_type'], ignore_index=True)
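# Minimal usage sketch: an in-memory SQLite database exercises the shared SQLTool API
# (context manager, execute_sql and query) without needing any credentials file.
if __name__ == "__main__":
    with SQLiteTool() as db:  # filename=None -> ':memory:' database
        db.execute_sql("CREATE TABLE demo (id INTEGER PRIMARY KEY, name TEXT)")
        db.execute_sql("INSERT INTO demo (name) VALUES ('alice'), ('bob')")
        print(db.query("SELECT * FROM demo"))  # pandas DataFrame
        print(db.query("SELECT * FROM demo", fetch_through_pandas=False))  # list of tuples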
| 2.703125 | 3 |
tests/test_1_os.py | malte70/OSDetect | 3 | 12789524 | import os
import OSDetect
def test_get_os():
OS = OSDetect.info.getOS()
TRAVIS_OS_NAME = os.getenv("TRAVIS_OS_NAME")
if TRAVIS_OS_NAME is None:
assert isinstance(OS, str)
elif TRAVIS_OS_NAME == "linux":
assert OS.lower() == TRAVIS_OS_NAME.lower()
| 2.734375 | 3 |
src/common/services/domain.py | kei49/parse-web-python | 0 | 12789525 | from sqlalchemy import select
from db.models import Domain, DomainModel
from db.database import get_session
def create_domain(domain_str):
session = get_session()
with session.begin():
domain = Domain(domain=domain_str)
session.add(domain)
return DomainModel.from_orm(domain)
def read_all_domains():
session = get_session()
with session.begin():
statement = select(Domain)
domains = [DomainModel.from_orm(domain) for domain in session.execute(
statement).scalars().all()]
return domains
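# Usage sketch (assumes the engine configured in db.database is reachable and that
# DomainModel exposes a `domain` field mirroring the ORM column):
#
#   create_domain("example.com")
#   print([d.domain for d in read_all_domains()])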
| 2.921875 | 3 |
src/kss/customer.py | yuriel-v/ks-shopification | 0 | 12789526 | <reponame>yuriel-v/ks-shopification
"""
Data structure for a particular customer
Fields:
- number: int
- uid: int
- name: str
- email: str
- reward_title: str
- backing_minimum: float
- reward_id: int
- bonus_support: float
- pledge_amount: float
- pledged_at: str (could be a datetime)
- rewards_sent: str
- pledged_status: str
- notes: str
- billing_state: str
- billing_country: str
- survey_response: str (could be a datetime)
- addons: str
- backer_number: str
- order_notes: str
- comments_suggestions: str
- items_ordered: dict
> 0: int
> 1: int
> 2: int
> 3: int
- shipping_details: dict
> country: str
> amount: float
> name: str
> address1: str
> address2: str
> city: str
> state: str
> postal_code: str
> country_name: str
> country_code: str
> phone_number: str
> delivery_notes: str
for items_ordered, each key refers to an index of this list of items:
[
'Mew Ichigo Inspired Cosmetic Bag',
'Serenity Crossbody Bag',
'Sakura Top Handle Bag',
'Madoka Top Handle Satchel Bag',
# Additional items
'Pink Trio (Set of 3)',
'Magical Girl Full Set'
]
"""
import re
from typing import Union
class Customer:
def __init__(self, raw_data=[]):
self.__data = {'shipping_details': dict(), 'items_ordered': dict()}
self.__items = (
'Mew Ichigo Inspired Cosmetic Bag',
'Serenity Crossbody Bag',
'Sakura Top Handle Bag',
'Madoka Top Handle Satchel Bag',
'Pink Trio (Set of 3)',
'Magical Girl Full Set'
)
# prices in usd!
self.__prices = {
'early_bird': (32.0, 58.0, 64.0, 70.0, 165.0, 220.0),
'normal': (35.0, 61.0, 68.0, 72.0, 171.0, 228.0)
}
self.__params = (
'number', 'uid', 'name', 'email', 'country', 'amount', 'reward_title',
'backing_minimum', 'reward_id', 'bonus_support', 'pledge_amount', 'pledged_at',
'rewards_sent', 'pledged_status', 'notes', 'billing_state', 'billing_country',
0, 1, 2, 3, 'survey_response', 'name', 'address1', 'address2', 'city', 'state',
'postal_code', 'country_name', 'country_code', 'phone_number', 'delivery_notes',
'addons', 'backer_number', 'order_notes', 'comments_suggestions'
)
if len(raw_data) >= 36:
self.parse_data(raw_data)
def parse_data(self, raw_data):
for i in range(0, len(self.__params)):
if i in (4, 5) or i in range(22, 32):
self.__data['shipping_details'][self.__params[i]] = raw_data[i]
elif i in range(17, 21):
self.__data['items_ordered'][self.__params[i]] = raw_data[i]
else:
self.__data[self.__params[i]] = raw_data[i]
self.__moneify()
@property
def data(self) -> dict[str, Union[str, dict]]:
return self.__data
# if attr1.lower() in ('shipping_details', 'items_ordered'):
# try:
# if attr2 is None:
# result = self.__data[attr1.lower()]
# else:
# result = self.__data[attr1.lower()][attr2.lower() if isinstance(attr2, str) else attr2]
# except KeyError:
# result = None
# else:
# try:
# result = self.__data[attr1.lower()]
# except KeyError:
# result = None
# return result
@property
def line_info(self):
key = 'early_bird' if 'early bird' in self.data['reward_title'].lower() else 'normal'
pkgs = ('ichigo', 'serenity', 'sakura', 'madoka', 'pink trio', 'full set')
pkg = None
for item in range(len(pkgs)):
if pkgs[item] in self.data['reward_title'].lower():
pkg = item
break
return {self.__items[pkg]: self.__prices[key][pkg]}
@staticmethod
def __money(moneystring):
if moneystring is None:
return None
else:
substr = re.search(r'[^" "]*$', moneystring)
return float(substr.group(0)) if bool(substr) else None
def __moneify(self):
self.__data['shipping_details']['amount'] = self.__money(self.__data['shipping_details']['amount'])
for param in ('backing_minimum', 'bonus_support', 'pledge_amount'):
self.__data[param] = self.__money(self.__data[param])
def __str__(self):
resultant = ''
for item in self.__params:
try:
resultant += f"- {str(item).replace('_', ' ').capitalize()}: {str(self.__data[item])}\n"
except KeyError:
continue
resultant += "- Items ordered:\n"
for i in range(0, 4):
resultant += f" > {self.__items[i]}: {str(self.__data['items_ordered'][i])}\n"
resultant += "- Shipping details:\n"
for key, val in self.__data['shipping_details'].items():
resultant += f" > {str(key).replace('_', ' ').capitalize()}: {str(val)}\n"
return resultant
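# Usage sketch (hypothetical data): `raw_row` stands for one 36-column export row in the
# order given by self.__params, with monetary fields formatted like 'USD 61.00' so that
# __moneify() can parse them.
#
#   customer = Customer(raw_row)
#   print(customer.line_info)   # e.g. {'Serenity Crossbody Bag': 61.0}
#   print(customer)             # readable dump of every parsed field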
| 2.9375 | 3 |
tests/testfile_bridge_the_gap.py | augmentedfabricationlab/bridge_the_gap | 0 | 12789527 | from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from compas.geometry import Point, Frame, Vector
from bridge_the_gap.assembly import BridgeAssembly
from bridge_the_gap.assembly import BridgeElement
my_assembly = BridgeAssembly()
myFrameSet = [
Frame(Point(13.009, 0.634, 0.000), Vector(0.987, -0.160, -0.000), Vector(-0.160, -0.987, 0.000)),
Frame(Point(11.957, 0.629, 4.000), Vector(0.987, 0.161, 0.000), Vector(0.161, -0.987, 0.000)),
Frame(Point(11.672, 0.175, 0.000), Vector(0.991, 0.132, 0.000), Vector(0.132, -0.991, 0.000)),
Frame(Point(13.295, 0.183, 0.000), Vector(0.992, -0.127, -0.000), Vector(-0.127, -0.992, 0.000)),
Frame(Point(13.766, 0.469, 0.000), Vector(0.966, -0.260, -0.000), Vector(-0.260, -0.966, 0.000)),
Frame(Point(11.186, 0.462, 0.000), Vector(0.965, 0.261, 0.000), Vector(0.261, -0.965, 0.000)),
Frame(Point(10.850, 0.067, 0.000), Vector(0.992, 0.128, 0.000), Vector(0.128, -0.992, 0.000)),
Frame(Point(14.077, 0.084, 0.000), Vector(0.992, -0.124, -0.000), Vector(-0.124, -0.992, 0.000)),
Frame(Point(14.565, 0.217, 0.000), Vector(0.941, -0.338, -0.000), Vector(-0.338, -0.941, 0.000)),
Frame(Point(10.416, 0.222, 0.000), Vector(0.943, 0.332, 0.000), Vector(0.332, -0.943, 0.000)),
Frame(Point(10.243, -0.008, 0.000), Vector(0.993, 0.115, 0.000), Vector(0.115, -0.993, 0.000)),
Frame(Point(14.718, 0.004, 0.000), Vector(0.992, -0.126, -0.000), Vector(-0.126, -0.992, 0.000)),
Frame(Point(12.498, 0.693, 0.000), Vector(1.000, 0.000, 0.000), Vector(0.000, -1.000, 0.000)),
Frame(Point(10.655, 0.191, 0.000), Vector(0.666, 0.746, 0.000), Vector(0.746, -0.666, 0.000)),
Frame(Point(11.382, 0.339, 0.000), Vector(0.633, 0.774, 0.000), Vector(0.774, -0.633, 0.000)),
Frame(Point(12.247, 0.465, 0.000), Vector(0.403, 0.915, 0.000), Vector(0.915, -0.403, 0.000)),
Frame(Point(12.751, 0.466, 0.000), Vector(0.409, -0.912, -0.000), Vector(-0.912, -0.409, 0.000)),
Frame(Point(13.553, 0.351, 0.000), Vector(0.634, -0.773, -0.000), Vector(-0.773, -0.634, 0.000)),
Frame(Point(14.290, 0.202, 0.000), Vector(0.621, -0.784, -0.000), Vector(-0.784, -0.621, 0.000)),
Frame(Point(12.500, 0.239, 0.000), Vector(1.000, 0.002, -0.000), Vector(0.002, -1.000, 0.000)),
Frame(Point(10.004, 0.023, 0.000), Vector(0.344, 0.939, -0.000), Vector(0.939, -0.344, 0.000)),
Frame(Point(14.994, 0.019, 0.000), Vector(0.437, -0.899, -0.000), Vector(-0.899, -0.437, 0.000)),
Frame(Point(14.692, 0.057, 0.000), Vector(0.998, 0.056, 0.000), Vector(0.056, -0.998, 0.000)),
Frame(Point(13.950, 0.244, 0.000), Vector(0.875, 0.483, 0.000), Vector(0.483, -0.875, 0.000)),
Frame(Point(13.110, 0.408, 0.000), Vector(0.838, 0.546, 0.000), Vector(0.546, -0.838, 0.000)),
Frame(Point(10.265, 0.054, 0.000), Vector(0.992, -0.130, -0.000), Vector(-0.130, -0.992, 0.000)),
Frame(Point(11.001, 0.235, 0.000), Vector(0.845, -0.535, -0.000), Vector(-0.535, -0.845, 0.000)),
Frame(Point(11.857, 0.402, 0.000), Vector(0.871, -0.492, -0.000), Vector(-0.492, -0.871, 0.000))
]
lengths = [0.72888612132232555, 0.79012319808336817, 0.95922231574812022, 0.89226783409583033, 0.82289075249600907, 0.789766110625581, 0.69778136937749724, 0.68436229107796309, 0.85475878513586756, 0.82558731651669848, 0.52654551668359806, 0.60831582360268277, 0.30194229954981111, 0.4516400184602582, 0.5862299187450738, 0.49647652761125388, 0.49639133374753358, 0.58120093519414162, 0.40792440981993494, 0.70502681708793857, 0.13115222717909872, 0.1195717848785053, 0.55203112300537804, 0.48645499694101429, 0.6161305979839532, 0.48204989073413501, 0.46314232034724573, 0.6657938136277054]
fram1 = Frame(Point(0,0,0), Vector(-1,0,0), Vector(0,1,0))
fram2 = Frame(Point(2,0,0), Vector(-1,0,0), Vector(0,1,0))
fram3 = Frame(Point(4,0,0), Vector(-1,0,0), Vector(0,1,0))
"""
my_assembly.add_element(BridgeElement(myFrameSet[0], Board_Length = 2.0, Endpoints = [Point(0,0,0),Point(1,0,0)]))
my_assembly.add_element(BridgeElement(myFrameSet[0], Board_Length = 2.0, Endpoints = [Point(1,0,0),Point(2,0,0)]))
my_assembly.add_element(BridgeElement(myFrameSet[1], Board_Length = 2.0, Endpoints = [Point(2,0,0),Point(2,2,0)]))
"""
my_assembly.add_element(BridgeElement(fram1, Board_Length = 2, Endpoints = [Point(-1,0,0),Point(1,0,0)]))
my_assembly.add_element(BridgeElement(fram2, Board_Length = 2, Endpoints = [Point(1,0,0),Point(3,0,0)]))
my_assembly.add_element(BridgeElement(fram3, Board_Length = 2, Endpoints = [Point(3,0,0),Point(5,0,0)]))
#my_assembly.prepare_robot_assembly(model_scale=1000, safety_distance=0.4, pickup_baseframe=myFrame)
print(my_assembly.create_assembly_sequence())
my_assembly.create_network()
my_assembly.visualize_network()
print("hello")
| 1.742188 | 2 |
Bits/lengths.py | vit-shreyansh-kumar/DailyWorkAtHome | 0 | 12789528 | <reponame>vit-shreyansh-kumar/DailyWorkAtHome
__about__ = """ Bit Lengths """
l = int(1).bit_length()
print("BIT LENGTH:", l) | 1.789063 | 2 |
wdhtools/view_tools.py | VPrincekin/wdhtools | 0 | 12789529 | #!/usr/bin/env python
# coding: utf-8
# @Author: dehong
# @Date: 2020-06-09
# @Time: 18:05
# @Name: view_tools
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from featexp import get_univariate_plots, get_trend_stats
def histogram_plot(data, feature):
"""
    Use a histogram to inspect the distribution of a feature (commonly used to check whether the target variable is normally distributed)
    :param data: DataFrame | dataset
    :param feature: string | feature name
:return:
"""
sns.distplot(data[feature], kde=False, color='r')
def feature_analysis(data, label, features, bins=10):
"""
    Analyse the correlation between features and the target
    :param data: DataFrame | dataset
    :param label: string | target label
    :param features: list | list of features
    :param bins: int | number of bins, default 10
:return: null
"""
get_univariate_plots(data=data, target_col=label,
features_list=features, bins=bins)
def noise_feature_find(train, test, label, features, bins=10, corr=None):
"""
    Analyse and identify noise features
    :param train: DataFrame | training set
    :param test: DataFrame | test set
    :param label: string | target label
    :param features: list | list of features
    :param bins: int | number of bins
    :param corr: float | features whose trend correlation is below corr are treated as noise features, default None
    :return: noise_feature_list | list of noise features
"""
get_univariate_plots(data=train, target_col=label, data_test=test,
features_list=features, bins=bins)
if corr is None:
return None
else:
stats = get_trend_stats(data=train, target_col=label, data_test=test)
noise_feature_list = list(stats[stats.Trend_correlation < corr].Feature)
return noise_feature_list
def scatter_plot(data, label, feature):
"""
    Use a scatter plot to analyse the relationship between variables (commonly used to spot outliers)
    :param data: DataFrame | dataset
    :param label: string | target label
    :param feature: string | feature name
:return:
"""
data.plot.scatter(x=feature, y=label, ylim=(0, 1),color='m')
plt.show()
def matrix_plot(data, features):
"""
    Use seaborn to combine the scatter plots and histograms of several features into a pairwise plot matrix (used to inspect correlations between features)
    :param data: DataFrame | dataset
    :param features: list | list of features
:return:
"""
var_set = features
    sns.set(font_scale=1.25)  # set the axis label font size
    sns.pairplot(data[var_set])  # the kind and diag_kind parameters select the plot types; here scatter plots and histograms are used
plt.show()
def heat_plot(data):
"""
    Use a heat map to analyse the relationships between all features
    :param data: DataFrame | dataset
:return:
"""
corr = data.corr()
f, axis = plt.subplots(figsize=(14, 12))
sns.heatmap(corr, vmax=0.8, square=True, ax=axis)
plt.show()
def top_k(data, label, top=10):
"""
    Select the K features with the highest correlation to the target variable, to find features that are strongly correlated with each other
    :param data: DataFrame | dataset
    :param label: string | target label
    :param top: int | number of top features to select, default 10
:return:
"""
corr = data.corr()
k = top
top10_attr = corr.nlargest(k, label).index
top10_mat = corr.loc[top10_attr, top10_attr]
plt.subplots(figsize=(14, 10))
sns.set(font_scale=1.25)
sns.heatmap(top10_mat, annot=True, annot_kws={"size": 12}, square=True)
plt.show()
def spearman(data, label, features):
"""
    Use Spearman rank correlation to measure the correlation between each variable and the label
    :param data: DataFrame | dataset
    :param label: string | target label
    :param features: list | list of features
:return:
"""
spr = pd.DataFrame()
spr['feature'] = features
spr['corr'] = [data[f].corr(data[label], 'spearman') for f in features]
spr = spr.sort_values('corr')
plt.figure(figsize=(6, 0.25 * len(features)))
sns.barplot(data=spr, y='feature', x='corr', orient='h')
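# Usage sketch (assumes `df` is a pandas DataFrame with a numeric target column named 'label'):
#
#   histogram_plot(df, 'feature_a')
#   heat_plot(df)
#   top_k(df, 'label', top=10)
#   spearman(df, 'label', ['feature_a', 'feature_b'])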
def correlation_heatmap(data):
_, ax = plt.subplots(figsize=(14, 12))
colormap = sns.diverging_palette(220, 10, as_cmap=True)
_ = sns.heatmap(
data.corr(),
cmap=colormap,
square=True,
cbar_kws={'shrink': .9},
ax=ax,
annot=True,
linewidths=0.1, vmax=1.0, linecolor='white',
annot_kws={'fontsize': 12}
)
plt.title('Pearson Correlation of Features', y=1.05, size=15)
plt.show() | 2.5625 | 3 |
cs251tk/toolkit/__main__.py | renovate-tests/cs251-toolkit | 0 | 12789530 | <gh_stars>0
import datetime
import functools
import sys
from concurrent.futures import ProcessPoolExecutor, as_completed
from threading import Thread
from os import makedirs, getcwd
import os.path
import logging
from ..common import chdir, run
from ..specs import load_all_specs, check_dependencies
from .find_update import update_available
from .process_student import process_student
from .args import process_args, compute_stogit_url
from .progress_bar import progress_bar
from .save_recordings import save_recordings, gist_recordings
from .tabulate import tabulate
from webapp import server
def make_progress_bar(students, no_progress=False):
if no_progress:
return lambda _: None
size = len(students)
remaining = set(students)
invocation_count = 0
def increment(username):
nonlocal remaining
nonlocal invocation_count
remaining.remove(username)
invocation_count += 1
msg = ', '.join(sorted(remaining))
progress_bar(size, invocation_count, message=msg)
msg = ', '.join(sorted(remaining))
progress_bar(size, invocation_count, message=msg)
return increment
def run_server(basedir):
server.exe_name = '"{}/server/server_file"'.format(basedir)
server.run_server()
return
def main():
basedir = getcwd()
args, usernames, assignments, stogit_url = process_args()
clean = args['clean']
date = args['date']
debug = args['debug']
gist = args['gist']
highlight_partials = args['highlight_partials']
interact = args['interact']
no_check = args['no_check']
no_update = args['no_update']
no_progress = args['no_progress']
quiet = args['quiet']
skip_update_check = args['skip_update_check']
sort_by = args['sort_by']
workers = args['workers']
web = args['web']
if debug or interact or web:
workers = 1
current_version, new_version = update_available(skip_update_check=skip_update_check)
if new_version:
print(('v{} is available: you have v{}. '
'Try "pip3 install --no-cache --user --upgrade cs251tk" '
'to update.').format(new_version, current_version), file=sys.stderr)
if date:
logging.debug('Checking out {}'.format(date))
if not os.path.exists("data"):
print('data directory not found', file=sys.stderr)
download = input("Download specs? (Y/N)")
if download and download.lower()[0] == "y":
repo = input("Which class? (SD/HD)")
if repo and repo.lower()[0] == 's':
with chdir(basedir):
run(['git', 'clone', 'https://github.com/StoDevX/cs251-specs.git', 'data'])
if not args['stogit']:
stogit_url = compute_stogit_url(course="sd", stogit=None, _now=datetime.date.today())
elif repo and repo.lower()[0] == "h":
with chdir(basedir):
run(['git', 'clone', 'https://github.com/StoDevX/cs241-specs.git', 'data'])
if not args['stogit']:
stogit_url = compute_stogit_url(course="hd", stogit=None, _now=datetime.date.today())
else:
print("Class not recognized", file=sys.stderr)
sys.exit(1)
else:
sys.exit(1)
specs = load_all_specs(basedir=os.path.join(basedir, 'data'), skip_update_check=skip_update_check)
if not specs:
print('no specs loaded!')
sys.exit(1)
for spec_to_use in assignments:
check_dependencies(specs[spec_to_use])
results = []
records = []
makedirs('./students', exist_ok=True)
with chdir('./students'):
single = functools.partial(
process_student,
assignments=assignments,
basedir=basedir,
clean=clean,
date=date,
debug=debug,
interact=interact,
no_check=no_check,
no_update=no_update,
specs=specs,
stogit_url=stogit_url
)
if workers > 1:
print_progress = make_progress_bar(usernames, no_progress=no_progress)
with ProcessPoolExecutor(max_workers=workers) as pool:
futures = [pool.submit(single, name) for name in usernames]
for future in as_completed(futures):
result, recording = future.result()
print_progress(result['username'])
results.append(result)
records.extend(recording)
elif web:
Thread(target=run_server, args=(basedir,), daemon=True).start()
for student in usernames:
print("\nStudent: {}".format(student))
result, recording = single(student)
results.append(result)
records.extend(recording)
else:
for student in usernames:
logging.debug('Processing {}'.format(student))
result, recording = single(student)
results.append(result)
records.extend(recording)
if not quiet:
table = tabulate(results, sort_by=sort_by, highlight_partials=highlight_partials)
print('\n' + table)
if gist:
table = tabulate(results, sort_by=sort_by, highlight_partials=highlight_partials)
gist_recordings(records, table, debug=debug)
else:
save_recordings(records, debug=debug)
| 1.992188 | 2 |
StatRethinking/_build/jupyter_execute/chapter13-ModelsWithMemory.py | xishansnow/StatRethinking | 0 | 12789531 | <filename>StatRethinking/_build/jupyter_execute/chapter13-ModelsWithMemory.py<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# # Chapter 13: Models with Memory (Hierarchical Models)
| 1.289063 | 1 |
keyword_extraction/keywords_TR.py | luciabura/study-helper | 2 | 12789532 | """
Keyphrase extraction implementation based on the guidelines
given in the TextRank paper by Mihalcea et al.
"""
import networkx as nx
import operator
import text_processing.preprocessing as preprocess
from utilities.read_write import read_file
WINDOW_SIZE = 2
INCLUDE_GRAPH_POS = ['NN', 'JJ', 'NNP', 'NNS']
def get_keyword_combinations(original_sequence, scores):
keywords = list(scores.keys())
keyphrases = {}
j = 0
for i, _ in enumerate(original_sequence):
if i < j:
continue
if original_sequence[i] in keywords:
keyphrase_components = []
keyphrase_length = 0
avg_score = 0
for word in original_sequence[i:i + 3]:
if word in keywords:
keyphrase_components.append(word)
avg_score += scores[word]
keyphrase_length += 1
else:
break
# avg_score = avg_score / float(keyphrase_length)
keyphrase = ' '.join(keyphrase_components)
keyphrases[keyphrase] = avg_score
j = i + len(keyphrase_components)
return keyphrases
def sort_scores(scores):
sorted_scores = sorted(list(scores.items()), key=operator.itemgetter(1), reverse=True)
return sorted_scores
def get_starting_scores(words):
scores = {word: 1 for word in words}
return scores
def add_graph_edges(graph, word_sequence):
"""
Adds edge between all words in word sequence that are within WINDOW_SIZE
of each other. I.e if within WINDOW_SIZE the two words co-occur
"""
# Assume undirected graph for beginning
for i in range(0, len(word_sequence) - WINDOW_SIZE - 1):
for j in range(i + 1, i + WINDOW_SIZE + 1):
w1 = word_sequence[i]
w2 = word_sequence[j]
if graph.has_node(w1) and graph.has_node(w2) and w1 != w2:
graph.add_edge(w1, w2, weight=1)
def build_graph(chosen_words):
"""
Using a list of words that have been filtered to match the criteria,
we initially build an undirected graph to run our algorithm on.
:param chosen_words:
:return: Undirected graph, based on the networkx library implementation
"""
graph = nx.Graph()
graph.add_nodes_from(chosen_words)
return graph
def get_graph_tokens(tokens):
graph_tokens = [token for token in tokens
if token.tag_ in INCLUDE_GRAPH_POS
and len(token.text) > 2]
return graph_tokens
def get_keywords(text):
tokens = preprocess.clean_to_doc(text)
original_sequence = [token.text.lower() for token in tokens]
graph_tokens = get_graph_tokens(tokens)
graph_words = [token.text.lower() for token in graph_tokens]
# print(sorted(list(set(graph_words))))
# Choose to display/return only a third in length
keyword_count = int(len(graph_words)/3)
graph = build_graph(graph_words)
add_graph_edges(graph, original_sequence)
# graph.remove_nodes_from(nx.isolates(graph))
#
# graph.graph['node']= {'shape': 'plaintext'}
# a = drawing.nx_agraph.to_agraph(graph)
# a.layout('dot')
# a.draw("graph_2.png")
# nx.draw_random(graph)
# plt.savefig("graph.png")
pagerank_scores = nx.pagerank(graph, alpha=0.85, tol=0.0001)
keyphrases = get_keyword_combinations(original_sequence, pagerank_scores)
keyphrases = [keyphrase for keyphrase, _ in sort_scores(keyphrases)]
# print(len(graph_words))
return keyphrases[0:keyword_count]
if __name__ == '__main__':
FILE_PATH = input('Enter the absolute path of '
'the file you want to extract the keywords from: \n')
FILE_TEXT = read_file(FILE_PATH)
print(get_keywords(FILE_TEXT))
| 3.328125 | 3 |
server/zmq_server_pirate.py | merlinran/acorn-precision-farming-rover | 143 | 12789533 | <gh_stars>100-1000
"""
*********************************************************************
This file is part of:
The Acorn Project
https://wwww.twistedfields.com/research
*********************************************************************
Copyright (c) 2019-2021 <NAME>, Twisted Fields LLC
Copyright (c) 2021 The Acorn Project contributors (cf. AUTHORS.md).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*********************************************************************
"""
# Modified from example file
# Paranoid Pirate Worker by <NAME> <dln(at)eintr(dot)org>
from random import randint
import time
import zmq
import redis
import zmq_server
# keep the two imported to keep pickle working
# TODO: avoid this by moving the class defs to a separate module.
from master_process import Robot, RobotCommand
REDIS_PORT = 6379
HEARTBEAT_LIVENESS = 3
HEARTBEAT_INTERVAL = 1
INTERVAL_INIT = 1
INTERVAL_MAX = 32
# Paranoid Pirate Protocol constants
PPP_READY = b"\x01" # Signals worker is ready
PPP_HEARTBEAT = b"\x02" # Signals worker heartbeat
def worker_socket(context, poller):
"""Helper function that returns a new configured socket
connected to the Paranoid Pirate queue"""
worker = context.socket(zmq.DEALER) # DEALER
identity = b"%04X-%04X" % (randint(0, 0x10000), randint(0, 0x10000))
worker.setsockopt(zmq.IDENTITY, identity)
poller.register(worker, zmq.POLLIN)
worker.connect("tcp://localhost:5569")
worker.send(PPP_READY)
return worker
def main():
r = redis.Redis(host='localhost', port=REDIS_PORT)
context = zmq.Context(1)
poller = zmq.Poller()
liveness = HEARTBEAT_LIVENESS
interval = INTERVAL_INIT
heartbeat_at = time.time() + HEARTBEAT_INTERVAL
worker = worker_socket(context, poller)
cycles = 0
while True:
socks = dict(poller.poll(HEARTBEAT_INTERVAL * 1000))
# Handle worker activity on backend
if socks.get(worker) == zmq.POLLIN:
# Get message
# - 3-part envelope + content -> request
# - 1-part HEARTBEAT -> heartbeat
frames = worker.recv_multipart()
if not frames:
break # Interrupted
if len(frames) >= 5:
cycles += 1
print("I: Normal reply")
# print(len(frames))
# print(frames)
ident, zero_frame, idx, command, key, msg = frames
return_command, reply = zmq_server.handle_command(r, command, key, msg)
worker.send_multipart([ident, zero_frame, idx, return_command, reply])
# worker.send_multipart(frames)
liveness = HEARTBEAT_LIVENESS
elif len(frames) == 1 and frames[0] == PPP_HEARTBEAT:
print("I: Queue heartbeat")
liveness = HEARTBEAT_LIVENESS
else:
print("E: Invalid message: %s" % frames)
interval = INTERVAL_INIT
else:
liveness -= 1
if liveness == 0:
print("W: Heartbeat failure, can't reach queue")
print("W: Reconnecting in %0.2fs..." % interval)
time.sleep(interval)
if interval < INTERVAL_MAX:
interval *= 2
poller.unregister(worker)
worker.setsockopt(zmq.LINGER, 0)
worker.close()
worker = worker_socket(context, poller)
liveness = HEARTBEAT_LIVENESS
if time.time() > heartbeat_at:
heartbeat_at = time.time() + HEARTBEAT_INTERVAL
print("I: Worker heartbeat")
worker.send(PPP_HEARTBEAT)
if __name__ == "__main__":
main()
| 1.984375 | 2 |
simulateur/Troncon.py | Passaudage/PLD | 0 | 12789534 | <reponame>Passaudage/PLD
import Coordonnees
import Feu
import Voie
import Vehicule
import random
class Troncon:
    const_largeur_voie = 350 # centimeters
    def __init__(self, intersection_tete, intersection_queue, coordonnees_debut, coordonnees_fin, directions_sens_1, directions_sens_2): #sens1: left to right, bottom to top
        self.intersection_tete = intersection_tete #at the top or on the right
        self.intersection_queue = intersection_queue #at the bottom or on the left
self.coordonnees_debut = coordonnees_debut
self.coordonnees_fin = coordonnees_fin
self.longueur = abs((self.coordonnees_fin-coordonnees_debut))
self.trajectoire = (self.coordonnees_fin-coordonnees_debut).normaliser()
self.directions_sens1 = directions_sens_1
self.directions_sens2 = directions_sens_2
self.voies_sens1 = []
self.voies_sens2 = []
self.dir_voies_sens1 = {"G": [], "TD": [], "D": []}
self.dir_voies_sens2 = {"G": [], "TD": [], "D": []}
self.feux_sens1 = {}
self.feux_sens2 = {}
tete_presente = queue_presente = True
if(self.intersection_queue==None):
queue_presente = False
elif(self.intersection_tete==None):
tete_presente = False
if(coordonnees_debut.x == coordonnees_fin.x):
if tete_presente: self.intersection_tete.branche_troncon(self, "B")
if queue_presente: self.intersection_queue.branche_troncon(self, "H")
elif(coordonnees_fin.y == coordonnees_debut.y):
if tete_presente: self.intersection_tete.branche_troncon(self, "G")
if queue_presente: self.intersection_queue.branche_troncon(self, "D")
def ajouter_generateur(self, sens, generateur):
if(sens=="sens2"):
self.feux_sens1['D'] = generateur
self.feux_sens1['G'] = generateur
self.feux_sens1['TD'] = generateur
generateur.ajoute_voie_entrante(self.voies_sens1)
generateur.ajoute_voie_sortante(self.voies_sens2)
else:
self.feux_sens2['D'] = generateur
self.feux_sens2['G'] = generateur
self.feux_sens2['TD'] = generateur
generateur.ajoute_voie_entrante(self.voies_sens2)
generateur.ajoute_voie_sortante(self.voies_sens1)
def ajouter_feux(self, sens, direction, feu):
print(str(sens))
if(sens=="sens1"):
self.feux_sens1[direction] = feu
else:
self.feux_sens2[direction] = feu
def afficher_feux(self):
for direction in self.feux_sens1.keys():
print("feu sens 1 dans la direction suivante : " + direction + " est " + str(self.feux_sens1.get(direction).est_passant()))
for direction in self.feux_sens2.keys():
print("feu sens 2 dans la direction suivante : " + direction + " est " + str(
self.feux_sens2.get(direction).est_passant()))
    # lanes are created from the inside outward in both senses, so the caller must pay attention to the order of the directions passed as parameters (left first)
def creer_voie(self, directions, sens, vitesse_max):
coordonnees_debut = None
coordonnees_fin = None
if (sens == "sens1") :
if (self.coordonnees_debut.x == self.coordonnees_fin.x) :
coordonnees_debut = Coordonnees.Coordonnees(self.coordonnees_debut.x + ((len(self.voies_sens1)) + 0.5)*self.const_largeur_voie, self.coordonnees_debut.y )
coordonnees_fin = Coordonnees.Coordonnees(self.coordonnees_debut.x + (len(self.voies_sens1) + 0.5)*self.const_largeur_voie, self.coordonnees_fin.y )
if (self.coordonnees_debut.y == self.coordonnees_fin.y):
coordonnees_debut = Coordonnees.Coordonnees(self.coordonnees_debut.x,self.coordonnees_debut.y - (len(self.voies_sens1) + 0.5)*self.const_largeur_voie)
coordonnees_fin = Coordonnees.Coordonnees(self.coordonnees_fin.x ,self.coordonnees_fin.y - (len(self.voies_sens1) + 0.5)*self.const_largeur_voie)
v = Voie.Voie(self, coordonnees_debut, coordonnees_fin, directions, vitesse_max, sens)
self.voies_sens1.append(v)
for direction in directions:
self.dir_voies_sens1[direction] = [self.dir_voies_sens1.get(direction)] + [v]
if (sens == "sens2"):
if (self.coordonnees_debut.x == self.coordonnees_fin.x):
coordonnees_debut = Coordonnees.Coordonnees(self.coordonnees_fin.x - (len(self.voies_sens2) + 0.5)*self.const_largeur_voie, self.coordonnees_fin.y)
coordonnees_fin = Coordonnees.Coordonnees(self.coordonnees_debut.x - (len(self.voies_sens2) + 0.5)*self.const_largeur_voie, self.coordonnees_debut.y)
if (self.coordonnees_debut.y == self.coordonnees_fin.y):
coordonnees_debut = Coordonnees.Coordonnees(self.coordonnees_fin.x, self.coordonnees_fin.y + (len(self.voies_sens2) + 0.5)*self.const_largeur_voie)
coordonnees_fin = Coordonnees.Coordonnees(self.coordonnees_debut.x, self.coordonnees_debut.y + (len(self.voies_sens2) + 0.5)*self.const_largeur_voie)
v = Voie.Voie(self, coordonnees_debut, coordonnees_fin, directions, vitesse_max, sens)
self.voies_sens2.append(v)
for direction in directions:
self.dir_voies_sens2[direction] = [self.dir_voies_sens2.get(direction)] + [v]
def donner_voies_intersections(self):
"""
        gives this section's lanes to the neighbouring intersections
# @author : marcus
"""
if(self.intersection_tete is not None):
self.intersection_tete.entrantes += self.voies_sens1
self.intersection_tete.sortantes += self.voies_sens2
if(self.intersection_queue is not None):
self.intersection_queue.entrantes += self.voies_sens2
self.intersection_queue.sortantes += self.voies_sens1
# end creer voie
    #find the lanes whose allowed directions include the requested one
def trouver_voie_direction(self, direction, sens):
voies_possibles = []
if(sens == "sens1") :
for voie in self.voies_sens1:
if(voie.direction_possible(direction)):
voies_possibles.append(voie)
if(sens == "sens2") :
for voie in self.voies_sens2:
if(voie.direction_possible(direction)):
voies_possibles.append(voie)
return voies_possibles
    #Gives the next lane to move onto in order to eventually reach voie_arrivee
def donner_etape_changement(self, voie_depart, voie_arrivee):
if(voie_depart in self.voies_sens1):
liste = self.voies_sens1
elif(voie_depart in self.voies_sens2):
liste = self.voies_sens2
ecart = liste.index(voie_arrivee) - liste.index(voie_depart)
if(abs(ecart) > 1):
sens = (int)(ecart/abs(ecart))
return liste[liste.index(voie_depart)+sens]
return voie_arrivee
    #On arrival on the section, pick the next direction according to the probabilities
def donner_prochaine_direction(self, voie):
if(voie in self.voies_sens1):
directions = self.directions_sens1
elif(voie in self.voies_sens2):
directions = self.directions_sens2
rand = random.randint(1,10)
if(rand <= directions["G"]*10):
return "G"
elif(rand <= (directions["G"]+directions["TD"])*10):
return "TD"
else:
return "D"
def est_passant(self, direction, sens):
#~ print("sens :"+str(sens))
#~ print("direction : "+str(direction))
if(sens == "sens1") :
#~ print(self.feux_sens1)
return self.feux_sens1[direction].est_passant()
if (sens == "sens2"):
#~ print(self.feux_sens2)
return self.feux_sens2[direction].est_passant()
def get_intersection(self, voie):
if(voie in self.voies_sens1):
return self.intersection_tete
else : return self.intersection_queue
def get_proba_situation_voie(self, voie, directions):
if (voie.sens == "sens1"):
dico_voies = self.dir_voies_sens1
intersection = self.intersection_tete
else:
dico_voies = self.dir_voies_sens2
intersection = self.intersection_queue
proba = 0
for direction in directions:
proba += intersection.get_proba(self, direction)/len(dico_voies.get(direction))
return proba
def largeur(self):
return (len(self.voies_sens1) + len(self.voies_sens2))*self.const_largeur_voie
"""
ainsi toujours poussé vers de nouveaux rivages
dans la nuit éternelle emporté sans retour
ne pourrons nous jamais sur l'océan des ages
jetez l'ancre un seul jour ?
AdL
"""
| 2.5625 | 3 |
user_input_handling_functions.py | ShawnJSavoie2/ToBeRedoneOrDiscarded | 0 | 12789535 | # IDLE (Python 3.8.0)
# user-input-handling functions
def user_input_handling_function_first():
print()
user_input = input('Enter: ')
print()
good_to_go = 'no'
errors = []
while good_to_go == 'no':
if ' ' in user_input:
print('The sequence of characters contains one or more spaces.')
errors.append('yes')
if ',' in user_input:
print('The sequence of characters contains one or more commas.')
errors.append('yes')
if 'yes' in errors:
print()
user_input = input('Re-enter: ')
print()
good_to_go = 'no'
errors = []
else:
good_to_go = 'yes'
return user_input
def user_input_handling_function_second(dictionary):
print()
user_input = input('Enter: ')
print()
good_to_go = 'no'
errors = []
lict = []
while good_to_go == 'no':
for key in dictionary:
            lict.append(key)
for element in user_input:
            if element not in lict:
print('The form can only contain a combination of the characters that represent the lists of characters.')
errors.append('yes')
break
if len(user_input) < 2:
print('The form is too short. It can\'t be less than two-characters long.')
errors.append('yes')
if len(user_input) > 8:
print('The form is too long. It can\'t be more than eight-characters long.')
errors.append('yes')
if 'yes' in errors:
print()
user_input = input('Re-enter: ')
print()
good_to_go = 'no'
errors = []
else:
good_to_go = 'yes'
return user_input
def user_input_handling_function_third(): # yes_or_no
print()
user_input = input('Enter: ')
print()
good_to_go = 'no'
errors = []
yes_or_no = ['yes', 'no']
while good_to_go == 'no':
if user_input not in yes_or_no:
print('You have to answer yes or no.')
errors.append('yes')
if 'yes' in errors:
print()
user_input = input('Re-enter: ')
print()
good_to_go = 'no'
errors = []
else:
good_to_go = 'yes'
return user_input
def user_input_handling_function_fourth(dictionary):
print()
user_input = input('Enter: ')
print()
good_to_go = 'no'
errors = []
while good_to_go == 'no':
if user_input not in dictionary:
print('The form you entered does not match one of the forms in your termal_dictionary. Each form in your')
print('termal_dictionary is a name (key) that has an associated definition (value) that is a list of terms')
print('that all have the same form as the name (key).')
errors.append('yes')
if 'yes' in errors:
print()
user_input = input('Re-enter: ')
print()
good_to_go = 'no'
errors = []
else:
good_to_go = 'yes'
return user_input
def user_input_handling_function_fifth():
print()
user_input = input('Enter: ')
print()
good_to_go = 'no'
errors = []
digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.']
while good_to_go == 'no':
for i in user_input:
if i not in digits:
print('The number must consist of digits. For example: 1, 12, 123.1 or 1234.1.')
errors.append('yes')
break
if 'yes' in errors:
print()
user_input = input('Re-enter: ')
print()
good_to_go = 'no'
errors = []
else:
good_to_go = 'yes'
return user_input
def user_input_handling_function_sixth():
''' float checker '''
print()
user_input = input('Enter: ')
print()
good_to_go = 'no'
errors = []
digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.']
while good_to_go == 'no':
for_loop_broken = 'yes'
while for_loop_broken == 'yes':
for i in user_input:
if i not in digits:
for_loop_broken = 'yes'
print('The number must consist of digits. For example: 1, 12, 123.1 or 1234.1')
print()
user_input = input('Re-enter: ')
print()
break
else:
for_loop_broken = 'no'
if float(user_input) <= 0:
print('The velocity can\'t be less than or equal to 0 m/s.')
errors.append('yes')
if float(user_input) > 38_573_389.830_824_5:
print('The velocity can\'t be greater than 38,573,389.830 824 5 m/s.')
errors.append('yes')
if 'yes' in errors:
print()
user_input = input('Re-enter: ')
print()
good_to_go = 'no'
errors = []
for_loop_broken = 'yes'
else:
good_to_go = 'yes'
return float(user_input)
def user_input_handling_function_seventh():
print()
user_input = input('Enter: ')
print()
good_to_go = 'no'
errors = []
unacceptable_characters = ['<', '>', '?', ':', '"', '|', '-', '/', '\ ']
while good_to_go == 'no':
if '.txt' not in user_input:
user_input = user_input + '.txt'
        user_input = user_input.replace(' ', '_')  # replace spaces up front instead of reassigning the loop variable
        for i in user_input:
            if i in unacceptable_characters:
print('The file name can not contain any of the following characters: <, >, ?, :, ", |, -, /, \ ')
errors.append('yes')
break
if 'yes' in errors:
print()
user_input = input('Re-enter: ')
print()
good_to_go = 'no'
errors = []
else:
good_to_go = 'yes'
return user_input
def user_input_handling_function_eighth():
print()
user_input = input('Enter: ')
print()
good_to_go = 'no'
errors = []
digits = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-']
while good_to_go == 'no':
if user_input == 'None':
user_input = None
return user_input
else:
for inner in user_input:
if inner not in digits:
print('The number must be an integer that consists of digits. For example: 1, -2, etc.')
errors.append('yes')
break
if 'yes' in errors:
print()
user_input = input('Re-enter: ')
print()
good_to_go = 'no'
errors = []
else:
good_to_go = 'yes'
return int(user_input)
def user_input_handling_function_ninth():
''' parser '''
print()
user_input = input('Enter: ')
print()
term = ''
lict = []
for element in user_input:
if element != ' ':
term = term + element
else:
lict.append(term)
term = ''
lict.append(term) # because term might not be empty....
return lict
def user_input_handling_function_tenth(dictionary):
''' dictionary checker '''
    user_input = user_input_handling_function_ninth()  # parse the raw input into a list of terms
good_to_go = 'no'
errors = []
while good_to_go == 'no':
string = ''
lict = []
for element in user_input:
string = string + element
for key in dictionary:
for element in dictionary[key]:
lict.append(element)
for element in string:
if element not in lict:
print('One of your unwanted characters or combination of characters does not match the characters you')
print('entered earlier.')
errors.append('yes')
break
if 'yes' in errors:
print()
user_input = input('Re-enter: ')
print()
good_to_go = 'no'
errors = []
else:
good_to_go = 'yes'
return user_input
| 4.03125 | 4 |
woodwork/__init__.py | willsmithorg/woodwork | 81 | 12789536 | <filename>woodwork/__init__.py
# flake8: noqa
import pkg_resources
from .config import config
from .type_sys import type_system
from .type_sys.utils import list_logical_types, list_semantic_tags
from .utils import concat_columns, read_file
from .version import __version__
import woodwork.column_accessor
import woodwork.demo
import woodwork.table_accessor
from woodwork.accessor_utils import (
get_invalid_schema_message,
init_series,
is_schema_valid,
)
# Call functions registered by other libraries when woodwork is imported
for entry_point in pkg_resources.iter_entry_points(
"alteryx_open_src_initialize"
): # pragma: no cover
try:
method = entry_point.load()
if callable(method):
method("woodwork")
except Exception:
pass
| 1.828125 | 2 |
stock_correlation/anomaly.py | looselyconnected/fastai | 0 | 12789537 | import numpy as np
import pandas as pd
def get_anomaly_score(data, test_window, control_window):
# Given a pd Series, return the anomaly score series.
# The data given should be sorted in the time order.
test_mean = data.rolling(test_window).mean()
control_data = test_mean.rolling(control_window)
return (test_mean - control_data.mean()) / control_data.std()
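# Minimal usage sketch (illustrative only; the Series and window sizes below
# are assumptions, not values prescribed by this module).
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    prices = pd.Series(100 + rng.normal(0, 1, 500).cumsum())
    score = get_anomaly_score(prices, test_window=5, control_window=60)
    # The first control_window + test_window - 2 entries are NaN until both
    # rolling windows are filled.
    print(score.dropna().tail())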
| 3.15625 | 3 |
dhub/dhub.py | datahub-projects/dhub | 0 | 12789538 | <gh_stars>0
#!/usr/bin/env python3
import os, sys, argparse, json, time, datetime, subprocess
from dateutil.parser import parse as parsedate
from blessings import Terminal
#
#the BDOL does not admire scripts which are also importable modules
#well frack him
#
#so absolute imports work in script mode, we need to import from the parent folder
opath = os.path.abspath(".")
abspath = os.path.abspath(__file__)
abspath = abspath[:abspath.rfind('/') + 1]
os.chdir(abspath)
abspath = os.path.abspath("..")
sys.path.insert(0, abspath)
from dhub import get_pip
from dhub.mod_sync import sync, mod
from dhub.runrun import git_status, get_author, get_username, get_branch, get_repo, run, runner
HOME_USER = "/home/ubuntu/"
os.chdir(opath)
bless_term = Terminal()
def _print_green(s, **kw):
print (bless_term.green(s), **kw)
def _print_purple(s, **kw):
print (bless_term.magenta(s), **kw)
parser = argparse.ArgumentParser(description=__doc__)
subparsers = parser.add_subparsers(help='followed by --help for command-specific options')
reqs_args = subparsers.add_parser('reqs')
reqs_args.add_argument("--package")
reqs_args.add_argument("--importname")
reqs_args.add_argument("--file")
reqs_args.add_argument("--date", default="3000-1-1")
reqs_args.add_argument("--unstable", action="store_true")
reqs_args.add_argument("--debug", action="store_true")
sync_args = subparsers.add_parser('sync')
sync_args.add_argument("--debug", action="store_true")
# sync_args.add_argument("--insist")
mod_args = subparsers.add_parser('mod')
mod_args.add_argument("--message")
mod_args.add_argument("--debug", action="store_true")
mod_args.add_argument("--insist", action="store_true")
proc_args = subparsers.add_parser('process')
proc_args.add_argument("--ssh")
proc_args.add_argument("--name")
proc_args.add_argument("--port")
proc_args.add_argument("--source")
proc_args.add_argument("--sync")
proc_args.add_argument("--command", default="python entrypoint.py")
proc_args.add_argument("--git") #=path to proj or .
proc_args.add_argument("--wake") #instance ID
proc_args.add_argument("--dumb", action="store_true")
proc_args.add_argument("--debug", action="store_true")
args = parser.parse_args()
def wake_up(inst):
if inst:
cmd = ("aws ec2 start-instances --instance-ids %s" % inst).split()
cmd2 = ("aws ec2 describe-instance-status --include-all-instances --instance-ids %s" % inst).split()
_print_purple(' '.join(cmd))
out, ok = run(cmd)
print (out)
print ("Waiting for %s to arise" % inst)
while True:
out, ok = run(cmd2)
j = json.loads(out)
print ("AWAIT:", j['InstanceStatuses'][0]['InstanceId']==inst, j['InstanceStatuses'][0]['InstanceState']['Name']=='running')
if j['InstanceStatuses'][0]['InstanceId']==inst and j['InstanceStatuses'][0]['InstanceState']['Name']=='running':
break
time.sleep(5)
time.sleep(20)
print ("Instance %s is up & running" % inst)
#FIXME should be done solely on remote
def go_to_sleep(inst):
cmd = "aws ec2 stop-instances --instance-ids %s" % inst
_print_purple(cmd)
sys.stdout.flush()
os.system(cmd)
def subdo(sub, s, expect=None, show=True):
_print_green("\n-~> %s" % s.rstrip())
done = False
ret = ""
first = True
while not done:
out, done = sub.interact(s, expect)
if done:
if show:
print(out, end='')
elif out:
if first:
first = False #first response==shi-tty echo
else:
if show:
print (out, end='')
ret += out
s = None
return ret
def sync_to(base, paths, ssh):
if not ssh:
ssh = "localhost"
for p in paths.strip().split(','):
cmd = "rsync -vrltzu ./{1} {2}:{0}/".format(base, p, ssh)
_print_purple("RSYNC_TO: " + cmd)
os.system(cmd)
def sync_from(base, paths, ssh):
if not ssh:
ssh = "localhost"
for p in paths.strip().split(','):
cmd = "rsync -vrltzu {2}:{0}/{1} ./".format(base, p, ssh)
_print_purple("RSYNC_FROM: " + cmd)
os.system(cmd)
command=None
if len(sys.argv)>1:
command=sys.argv[1] #really? is this the only way?
if command=="reqs":
b4 = parsedate(args.date)
# print ("REQS", args, b4)
if args.package:
version, date = get_pip.get_latest(args.package, b4, not args.unstable, args.debug)
if not version:
print ("# ERROR: package not found: {0}".format(args.package))
exit()
print("{0: <40} #released {1}".format("{0}=={1}".format(args.package, version), date.ctime()))
elif args.file:
f=open(args.file)
for pkg in f.readlines():
pkg=pkg.strip()
if not pkg:
continue
if pkg[0]=='#':
continue
if pkg[0]=='-':
print ("Skipping", pkg)
continue
pkg=pkg.split("==")[0]
version, date = get_pip.get_latest(pkg, b4, not args.unstable, args.debug)
if not version:
continue
print("{0: <40} #released {1}".format("{0}=={1}".format(pkg, version), date.ctime()))
elif args.importname:
print ("Coming soon: cv2 implies openCV")
else:
print ("Coming soon: use pipreqs to create requirements list from scratch")
elif command=="mod":
if get_author() != get_username() and not args.insist:
_print_green("Different author --insist if you are sure")
# print (get_author(), get_username())
else:
if get_branch()=="master" and not args.insist:
_print_green("This operation will rebase the master branch --insist if you are sure")
else:
_print_green("Rewriting tip (most recent) commit & pushing to remote")
mod(message=args.message, show=args.debug, debug=args.debug)
elif command=="sync":
_print_green("Synchronizing local git repository and working tree to remote/origin")
sync(show=args.debug, debug=args.debug)
elif command=="process":
url = args.ssh
#
# alias
#
if url:
if args.name:
fn = "{0}/.dhub/names/{1}".format(os.path.expanduser("~"), args.name)
f = open(fn, 'w') #FIXME create folders
f.write(url)
f.close()
fn = "{0}/.dhub/names/{1}".format(os.path.expanduser("~"), url)
if os.path.exists(fn):
f = open(fn)
url = f.read().strip()
f.close()
sshopts = '-tt -4 -o ConnectTimeout=10 -o BatchMode=yes -o ServerAliveInterval=60'
if args.port:
sshopts += ' -p {0}'.format(args.port)
shell = "ssh {0} {1}".format(sshopts, url)
else:
shell = "ssh -tt -4 localhost"
# print ("SHELL COMMAND:", shell)
if args.source:
f = open(args.source)
wake_up(args.wake)
sub = runner(shell)
out, err = sub.first()
print(out, end='')
if not err:
for row in f.readlines():
row = row.rstrip()
print (row, end='')
out = sub.interact(row)
print (out, end='')
f.close()
elif args.dumb:
wake_up(args.wake)
sub = runner(shell)
out, err = sub.first()
if err:
print(out, end='')
else:
while True:
rows = out.split("\n")
for row in rows[:-1]:
print (row)
for row in rows[-1:]:
print ("DUMB:", row, end='')
inp = input('')
if inp=='exit':
break
out = sub.interact(inp)
sub.exit()
elif args.git:
sub = None
for i in range(1): #allows break
repo = get_repo()
if not repo:
print ("Not a git repository")
break
proj = repo[repo.rfind('/')+1:] #FIXME make me smarter -- multi-folder project
if args.git == '.':
if not args.ssh:
print ("You probably don't want to remove your local checkout")
break
else:
proj = args.git
branch = get_branch()
home = os.path.expanduser('~')
wake_up(args.wake)
for tri in range(3):
sub = runner(shell)
_print_purple (shell)
out, err = sub.first()
if not err:
break
print(out, end='')
print ("failed connection on attempt %d of 3" % (tri+1))
if err:
break
print ("Connection successful")
subdo(sub, "rm -fr %s" % proj)
out = subdo(sub, "git clone --single-branch --branch %s %s %s" % (branch, repo, proj), expect=["yes/no", "repository exists"])
if "yes/no" in out:
print ("\n\nDHUB: Must establish the legitimacy of the git repository's public key.")
print("Log in to the remote server and try 'git fetch' to establish the key in known_hosts.")
break
if "repository exists" in out:
print ("\n\nDHUB: Need to set up server's git public key for this repository.")
print("Alternatively, use read-only publicly accessible repo.")
break
subdo(sub, "cd %s" % proj)
if "fatal" in out:
break
print ()
if args.sync:
sync_to(proj, args.sync, args.ssh)
print ("Checking for Dockerfile")
out = subdo(sub, """python3 -uc 'import os; print(os.path.exists("Dockerfile"))'""", show=False)
if "False" in out:
print ("No docker file found; setting up virtualenv")
subdo(sub, "virtualenv -p python3 venv")
subdo(sub, "source ./venv/bin/activate")
out = subdo(sub, "pip install -r requirements.txt")
if out.find('ERROR')>=0:
print ("quitting on error", end='')
break
subdo(sub, args.command)
else:
print("Dockerfile found; building docker image")
subdo(sub, "docker build . -t %s" % proj)
if args.sync:
# # out, pr = sub.interact("""python3 -c 'import os; print(os.path.abspath("%s"))'""" % args.sync)
# out = subdo(sub, """python3 -uc 'import os; print(os.path.abspath("%s"))'""" % args.sync, show=False)
# # print ("\n\n\nRET:==>%s<==\n\n\n"%out.replace("\r","]"))
# abs = out.strip().split("\n")[-2].strip()
# print ("==>%s"%abs.replace("\r","]"))
subdo(sub, "docker run --rm -it -v {0}{2}/{1}:{0}{1} {2}".format(HOME_USER, args.sync, proj))
else:
subdo(sub, "docker run --rm -it %s" % proj)
if args.sync:
print()
sync_from(proj, args.sync, args.ssh)
if sub:
sub.exit()
if args.wake:
go_to_sleep(args.wake)
else:
subprocess.call(shell.replace("-T", '').split())
print ("\nExit dhub")
else:
parser.print_help()
| 2.203125 | 2 |
Assignments/HW3_NFA/problem1.py | Ursinus-CS373-F2021/CoursePage | 0 | 12789539 | <filename>Assignments/HW3_NFA/problem1.py
import numpy as np
import random
num_examples = 10
to_print = True
for seed in [0]:
np.random.seed(seed)
n_accept = 0
results = []
if to_print:
print("<table style=\"width:200px;\"><tr><td>Input</td><td>Result</td></tr>")
for i in range(num_examples):
randlen = np.random.randint(1, 11)
bstr = ""
for k in range(randlen):
bstr += np.random.choice(["a", "b", "c"])
res = "Reject"
if "ab" in bstr or "bc" in bstr:
res = "Accept"
n_accept += 1
if to_print:
print("<tr><td>", bstr, "</td><td>", res, "</td></tr>")
else:
print(bstr, end=' ')
if to_print:
print("</table>")
| 3.625 | 4 |
sprites/__init__.py | ghandic/FinalSpace | 0 | 12789540 | from .earth import Earth
from .enemy import Enemy
from .player import Player
from .explosion import Explosion
| 1.179688 | 1 |
controller/Base_controller.py | jvpersuhn/APIRest | 0 | 12789541 | <reponame>jvpersuhn/APIRest
from flask_restful import Resource
from flask import request,json
from dao.Base_dao import BaseDao
class BaseController(Resource):
def __init__(self, model):
self.db = BaseDao(model)
def get(self, id=None):
if id:
return self.db.select_by_id(id)
return self.db.select_all()
def delete(self, id):
return self.db.delete(id)
def post(self, model):
return self.db.insert(model)
def put(self, model):
return self.db.update(model)
def getDados(self):
kwargs = {}
for chave, valor in request.json.items():
kwargs[chave] = valor
return kwargs
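# Hypothetical wiring sketch (not part of this project): a model class would be
# passed through flask_restful's resource_class_args so that __init__(self, model)
# receives it, e.g.
#
#   from flask import Flask
#   from flask_restful import Api
#   app = Flask(__name__)
#   api = Api(app)
#   api.add_resource(BaseController, '/items', '/items/<int:id>',
#                    resource_class_args=(SomeModel,))   # SomeModel is a placeholder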
| 2.65625 | 3 |
giter8.py | ochinchina/PyGiter8 | 0 | 12789542 | #!/usr/bin/python
import os
import os.path
import random
import string  # used by TempField._random (string.ascii_uppercase, string.digits)
import re
import requests
import shutil
import sys
import xml.etree.ElementTree as ET
class TempField:
"""
the template field in the giter8
"""
def __init__( self, fields_value ):
self._format_methods = { 'upper': self._upper,
'lower': self._lower,
'cap': self._capitalize,
'decap': self._decapitalize,
'start': self._startCase,
'word': self._word,
'Camel': self._upperCamel,
'camel': self._lowerCamel,
'hypen': self._hyphen,
'norm': self._normalize,
'snake': self._snake,
'packaged': self._packaged,
'random': self._random }
self._fields_value = fields_value
self.WORD_LETTERS = map(lambda c: chr(c), range(ord('a'), ord('z')))
self.WORD_LETTERS.extend( map(lambda c: chr(c), range(ord('A'), ord('Z'))) )
self.WORD_LETTERS.extend( map(lambda c: chr(c), range(ord('0'), ord('9'))) )
self.WORD_LETTERS.extend( [' ', '\t', '\n', '\r', '\f', '\v', '\b'] )
def format( self, field_with_format ):
#check if it is the format:$name;format="Camel"$
tmp = field_with_format.split(";")
if len(tmp) == 2 and tmp[1].startswith( "format="):
formats = tmp[1][len("format=")+1:-1].split(',')
value = self._fields_value[ tmp[0] ]
for f in formats:
value = self._format_with_name( value, f )
return value
else: #check if it is in format: $organization__packaged$
pos = field_with_format.rfind("__")
if pos != -1:
value = self._fields_value[ field_with_format[0:pos] ]
f = field_with_format[pos+2:]
return self._format_with_name( value, f )
#no format info, return the value
return self._fields_value[field_with_format]
def _format_with_name( self, value, format_name):
if format_name in self._format_methods:
return self._format_methods[format_name](value)
else:
return value
def _upper( self, value ):
return value.upper()
def _lower( self, value ):
return value.lower()
def _capitalize( self, value ):
return "%s%s"%(value[0].upper(), value[1:])
def _decapitalize( self, value ):
return "%s%s" %(value[0].lower(), value[1:])
def _startCase( self, value ):
words = map( lambda w: self._capitalize(w), value.split() )
return "".join(words)
def _word(self,value):
return "".join( map( lambda c: c if c in self.WORD_LETTERS else '', value ) )
def _upperCamel( self, value ):
return self._word( self._startCase( value ) )
def _lowerCamel( self, value ):
return self._word( self._startCase( self._decapitalize( value ) ) )
def _hyphen( self, value ):
return "".join( map( lambda c: '-' if c.isspace() else c, value ) )
def _normalize( self, value ):
return self._lower( self._hyphen( value ) )
def _snake( self, value ):
return "".join( map( lambda c: '_' if c.isspace() or c =='.' else c, value ) )
def _packaged(self, value):
return "".join( map( lambda c: '/' if c == '.' else c, value ) )
def _random( self, value ):
random_str = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
return "%s%s" %(value, random_str)
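# Quick illustration of the formatting pipeline above (the field value is made
# up, not taken from any real template): with TempField({'name': 'my cool project'}),
#   $name;format="Camel"$  -> 'MyCoolProject'   (startCase, then word-letter filter)
#   $name;format="norm"$   -> 'my-cool-project' (spaces hyphenated, then lowered)
#   $name__snake$          -> 'my_cool_project' (spaces and dots to underscores)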
class Properties:
def __init__( self, fileName ):
self._fileName = fileName
self._props = {}
self._propOrder = []
self._load()
self._temp_fields = TempField( self._props )
def change_prop_with_prompt( self ):
"""
change property value with prompt
"""
for prop in self._propOrder:
value = self._props[prop]
maven_prop = self._parse_as_maven_prop( value )
if maven_prop is not None:
value = get_version_from_maven( maven_prop[0], maven_prop[1], maven_prop[2] )
else:
value = self.replace_fields( self._props[prop] )
self._props[prop] = value
value = raw_input( "%s(%s):" % (prop,value) ).strip()
if len( value ) > 0:
self._props[prop] = value
def _parse_as_maven_prop( self, value ):
"""
first check if the value is in format:maven(groupId, artifactId ) or
maven( groupId, artifactId, stable). If it is in one of above, a tuple
with three elements are returned
Args:
value: the value to check
Return:
None if the format not match maven(groupId, artifactId ) or maven( groupId, artifactId, stable)
tuple with three elements (groupId, artifactId, stable). stable is a boolean: true indicates
a stable release, false indicates any latest release
"""
# check if the value is in format: maven(groupId, artifactId ) or maven( groupId, artifactId, stable)
if not value.startswith( "maven" ):
return None
value = value[len("maven"):].strip()
if not value.startswith( "(" ) or not value.endswith( ")" ):
return None
value = value[1:-1]
value = value.split(",")
# strip the split word and discard the empty words
value = map( lambda x: x.strip(), value )
value = filter( lambda x: len( x ) > 0, value )
if len(value) == 2 or len(value) == 3:
return (value[0], value[1], False if len( value ) == 2 else value[2] == 'stable' )
else:
return None
def _load( self ):
with open( self._fileName ) as fp:
for line in fp:
line = line.strip()
if len( line ) <= 0 or line[0] == '#':
continue
pos = line.find('=')
if pos != -1:
prop = line[0:pos].strip()
value = line[pos+1:].strip()
self._props[prop] = value
self._propOrder.append( prop )
def get_field( self, name ):
"""
get the value of a field
"""
return self.replace_fields( "" if name not in self._props else self._props[name])
def get_file_name_with_template( self, file_name_template):
PACKAGE_VAR = '$package$'
start_pos = 0
package_pos = file_name_template.find( PACKAGE_VAR, start_pos )
result = ""
while package_pos != -1:
result = "%s%s%s" % (result, file_name_template[start_pos:package_pos], self.get_field( "package" ).replace('.', '/') )
start_pos = package_pos + len( PACKAGE_VAR )
package_pos = file_name_template.find( PACKAGE_VAR, start_pos )
return self.replace_fields("%s%s" % (result, file_name_template[start_pos:]))
def replace_fields(self, value ):
"""
find all fields( between two "$") and replace the fields
with the values in this properties
Args:
value: a value possible includes the fields
Returns: all the fields replaced by the value in this properties
"""
# find the start flag '$'
start = 0
result = ""
cond_express_start_pos = -1
cond_express_level = 0
while True:
field_info = self._find_field( value, start )
if field_info is None:
return "%s%s" % ( result, value[start:] )
else:
(field_start_pos, field_end_pos, field_name ) = field_info
if self._is_start_cond_expression( field_name ):
cond_express_level += 1
cond_express_start_pos = start
elif self._is_end_cond_expression( field_name ):
cond_express_level -= 1
if cond_express_level == 0:
result = "%s%s" % (result, self._replace_fields_with_condition( value[cond_express_start_pos: field_end_pos + 1 ] ) )
cond_express_start_pos = -1
elif cond_express_start_pos == -1:
result = "%s%s" % (result, value[start:field_start_pos])
result = "%s%s" % (result, self._temp_fields.format(field_name) )
start = field_end_pos + 1
def _find_field( self, value, start_pos ):
"""
find a field start and end with flag '$'
Args:
value: the value may contains field start&end with flag '$'
start_pos: the start search position
Returns:
None if no field in the value
a tuple with three elements (field_start_pos, field_end_pos, field_name) if a field is found
"""
field_start_pos = value.find( '$', start_pos )
if field_start_pos == -1:
return None
field_end_pos = value.find( '$', field_start_pos + 1 )
if field_end_pos == -1:
return None
return (field_start_pos, field_end_pos, value[field_start_pos + 1: field_end_pos] )
def _is_start_cond_expression( self, value ):
"""
check if a conditional express is started. A start conditional express is
start like: if(var.truthy)
Args:
the value to be checked
Return:
True if the value is started like: if(var.truthy)
"""
cond_exp = self._parse_cond_expression( value )
return cond_exp is not None and cond_exp[0] == 'if'
def _parse_cond_expression( self, value ):
if value.startswith( "if" ) or value.startswith( "elseif" ):
start = value.find( '(' )
end = value.find( ')' )
if start != -1 and end != -1:
return ( "if" if value.startswith( "if" ) else "elseif", value[start+1:end].strip() )
elif value.startswith( "else" ):
return ("else", "unused")
elif value.startswith( "endif" ):
return ("endif", "unused")
return None
def _is_end_cond_expression( self, value ):
"""
check if the value is conditional end flag: endif
Args:
the value to be checked
Return:
true if the value is "endif" string
"""
return value == "endif"
def _replace_fields_with_condition( self, value ):
start = 0
cond_exp = ConditionalExpression( self )
statement = ""
while True:
field_info = self._find_field( value, start )
if field_info is None:
break
(field_start_pos, field_end_pos, field_name) = field_info
cond_exp_info = self._parse_cond_expression( field_name )
if cond_exp_info is None:
statement = "%s%s" % (statement, value[start:field_end_pos+1] )
else:
statement = "%s%s" % (statement, value[start:field_start_pos ] )
if len(statement) > 0:
cond_exp.add_statement( statement )
statement = ""
if cond_exp_info[0] == 'if':
cond_exp.if_exp( cond_exp_info[1] )
elif cond_exp_info[0] == 'elseif':
cond_exp.elif_exp( cond_exp_info[1] )
elif cond_exp_info[0] == 'else':
cond_exp.else_exp()
elif cond_exp_info[0] == 'endif':
return cond_exp.evaluate()
start = field_info[1] + 1
return value
class ConditionalExpression:
def __init__( self, props ):
self._if_exp = None
self._if_statements = []
self._elif_exps = []
self._elif_statements=[]
self._else_statements=[]
self._props = props
self._cur_statements = None
def add_statement( self, statement ):
if self._cur_statements is not None:
self._cur_statements.append( statement )
def if_exp( self, expression ):
self._if_exp = expression
self._cur_statements = self._if_statements
def elif_exp( self, expression ):
self._elif_exps.append( expression )
self._cur_statements = self._elif_statements
def else_exp( self ):
self._cur_statements = self._else_statements
def evaluate( self ):
if self._is_true( self._if_exp ):
return self._props.replace_fields( "".join( self._if_statements ) )
index = 0
for elif_exp in self._elif_exps:
if self._is_true( elif_exp ):
return self._props.replace_fields(self._elif_statements[index])
index += 1
return self._props.replace_fields( "".join( self._else_statements ) )
def _is_true( self, value ):
return value.endswith( '.truthy' ) and self._props.get_field(value[:-1*len('.truthy')]) in ['y', 'yes', 'true']
def get_version_from_maven( groupId, artifactId, stable = False ):
r = requests.get( "http://repo1.maven.org/maven2/%s/%s/maven-metadata.xml" % ( groupId.replace('.', '/'), artifactId) )
if r.status_code >= 200 and r.status_code < 300:
root = ET.fromstring( r.text )
versioning = root.find('versioning')
if stable:
return versioning.find( 'release' ).text
else:
return versioning.find( 'latest' ).text
return ""
def list_files( root_dir ):
result = []
if os.path.isdir( root_dir ):
files = os.listdir(root_dir)
for f in files:
full_name = os.path.join( root_dir, f )
if os.path.isdir( full_name ):
result.extend( list_files( full_name ) )
else:
result.append( full_name )
else:
result.append( root_dir )
return result
def get_project_file_name( template_root_dir, project_name, file_name ):
temp_root_dirs = filter( lambda x: len( x ) > 0, template_root_dir.split( os.sep ) )
file_names = filter( lambda x: len( x ) > 0, file_name.split( os.sep ) )
if len( file_names ) > len( temp_root_dirs ) and file_names[0:len( temp_root_dirs)] == temp_root_dirs:
t = [project_name]
t.extend( file_names[len( temp_root_dirs):] )
return os.path.join( *t )
else:
return file_name
def create_dir_of( file_name ):
dir_name = os.path.dirname( file_name )
if not os.path.exists( dir_name ):
os.makedirs( dir_name )
def write_file( file_name, content ):
with open( file_name, "wt" ) as fp:
fp.write( content )
def is_text_file( fileName ):
TEXT_SUFFIX = [".java", ".scala", ".sbt", ".properties", ".txt", ".text", ".htm", ".html"]
for suffix in TEXT_SUFFIX:
if fileName.endswith( suffix ):
return True
return False
def is_verbatim_file( fileName, verbatims ):
verbatims = verbatims if type(verbatims) is list else [verbatims]
for pattern in verbatims:
pattern = pattern.replace('.', '\\.' )
pattern = pattern.replace( '*', '.*' )
if re.match( pattern, fileName):
return True
return False
def clone_template( git_url ):
tmp = git_url.split( "/" )
if len( tmp ) == 2:
git_url = "https://github.com/%s" % git_url
if not git_url.endswith( ".git" ):
git_url = "%s.git" % git_url
os.system( "git clone %s" % git_url )
return git_url.split("/")[-1][0:-4]
def main( g8_temp_root ):
if g8_temp_root.endswith( ".git" ) or g8_temp_root.endswith(".g8"):
g8_temp_root = clone_template( g8_temp_root )
root_dir = os.path.join( g8_temp_root, 'src/main/g8')
files = list_files(root_dir)
props = Properties( os.path.join( root_dir, 'default.properties') )
project = raw_input( "Your project:" ).strip()
props.change_prop_with_prompt()
verbatims = props.get_field( 'verbatim' ).strip().split()
for fileName in files:
realFileName = props.get_file_name_with_template( fileName )
dest_file = get_project_file_name( root_dir, project, realFileName )
create_dir_of( dest_file )
if not is_verbatim_file( fileName, verbatims ):
with open(fileName) as fp:
content = fp.read()
content = props.replace_fields( content )
write_file( dest_file, content )
else:
shutil.copyfile( fileName, dest_file )
if __name__ == "__main__":
if len( sys.argv ) < 2:
print( "Usage: giter8.py <giter8_template_directory>")
sys.exit(1)
else:
main( sys.argv[1] )
| 2.703125 | 3 |
FHEM/bindings/python/fhempy/lib/meross/meross_device.py | dominikkarall/fhempy | 16 | 12789543 | <reponame>dominikkarall/fhempy
import asyncio
from fhempy.lib.generic import FhemModule
from fhempy.lib import fhem, fhem_pythonbinding
from meross_iot.model.enums import OnlineStatus, Namespace
class meross_device:
def __init__(self, logger, fhemdevice: FhemModule):
self.logger = logger
self.fhemdev = fhemdevice
self.hash = fhemdevice.hash
async def Define(self, hash, args, argsh):
self._setupdev_name = args[3]
self._deviceid = args[4]
self._device = None
self.hash["DEVICEID"] = self._deviceid
self._setupdev = None
await fhem.readingsSingleUpdate(self.hash, "state", "ready", 1)
self.fhemdev.create_async_task(self._init_device())
async def _get_set_commands(self):
set_conf = {}
set_conf["on"] = {}
set_conf["off"] = {}
self.fhemdev.set_set_config(set_conf)
async def set_on(self, hash, params):
await self._device.async_turn_on()
async def set_off(self, hash, params):
await self._device.async_turn_off()
async def _init_device(self):
try:
await self._connect_to_setup_device()
await self._setup_device()
await self._get_set_commands()
await self.update_readings()
except Exception as ex:
self.logger.exception(ex)
async def _connect_to_setup_device(self):
while self._setupdev is None or self._setupdev.ready is False:
await asyncio.sleep(1)
self._setupdev = fhem_pythonbinding.getFhemPyDeviceByName(
self._setupdev_name
)
if self._setupdev:
self._setupdev = self._setupdev.meross_device
async def _async_push_notification_received(
self, namespace: Namespace, data: dict, device_internal_id: str
):
update_state = False
full_update = False
if namespace == Namespace.CONTROL_UNBIND:
self.logger.warning(
f"Received unbind event. Removing device {self._device.name} from FHEM"
)
await self.platform.async_remove_entity(self.entity_id)
elif namespace == Namespace.SYSTEM_ONLINE:
self.logger.warning(f"Device {self._device.name} reported online event.")
online = OnlineStatus(int(data.get("online").get("status")))
update_state = True
full_update = online == OnlineStatus.ONLINE
elif namespace == Namespace.HUB_ONLINE:
self.logger.warning(
f"Device {self._device.name} reported (HUB) online event."
)
online = OnlineStatus(int(data.get("status")))
update_state = True
full_update = online == OnlineStatus.ONLINE
else:
update_state = True
full_update = False
if full_update:
await self._device.async_update()
if update_state:
await self.update_readings()
async def _setup_device(self):
self._device = self._setupdev.get_device_by_id(self._deviceid)
if self._device is not None:
await self._device.async_update()
self._device.register_push_notification_handler_coroutine(
self._async_push_notification_received
)
async def update_readings(self):
await fhem.readingsBeginUpdate(self.hash)
await fhem.readingsBulkUpdateIfChanged(self.hash, "uuid", self._device.uuid)
await fhem.readingsBulkUpdateIfChanged(self.hash, "name", self._device.name)
await fhem.readingsBulkUpdateIfChanged(
self.hash, "firmware_version", self._device.firmware_version
)
await fhem.readingsBulkUpdateIfChanged(
self.hash, "hardware_version", self._device.hardware_version
)
await fhem.readingsBulkUpdateIfChanged(
self.hash, "internal_id", self._device.internal_id
)
await fhem.readingsBulkUpdateIfChanged(
self.hash, "mqtt_host", self._device.mqtt_host
)
await fhem.readingsBulkUpdateIfChanged(
self.hash, "mqtt_port", self._device.mqtt_port
)
await fhem.readingsBulkUpdateIfChanged(self.hash, "type", self._device.type)
await fhem.readingsBulkUpdateIfChanged(
self.hash, "online_status", self._device.online_status.name
)
onoff = "off"
if self._device.is_on():
onoff = "on"
await fhem.readingsBulkUpdateIfChanged(self.hash, "state", onoff)
await fhem.readingsEndUpdate(self.hash, 1)
| 2.1875 | 2 |
src/commander/commands.py | anudeep22003/simple-langauge-pipeline | 1 | 12789544 | <reponame>anudeep22003/simple-langauge-pipeline<gh_stars>1-10
#### Python Package Imports ####
import sys, os
from termcolor import colored, cprint
from collections import defaultdict
#### Manual Import ####
from abstract_factory import Command, CommandHandler
sys.path.append(os.path.join(os.getcwd(),'src','interfacers'))
from neo4j_interfacer import Neo4jInterfacer
class TagCommand(Command):
def orchestrate(self):
self.take_user_input()
pass
def take_user_input(self):
print("You are in the tagging experience!")
while True:
print("Do you want to continue?")
user_input = input('--> ')
if user_input == 'e':
print("exiting")
break
class QuitCommand(Command):
def orchestrate(self):
print("Exiting now....")
sys.exit()
class KeywordCleanerCommand(Command):
def __init__(self, local_command_handler: CommandHandler, neointerface = Neo4jInterfacer) -> None:
self.neo = neointerface()
self.selection_set = list()
self.handler = local_command_handler()
self.adventure_history = list()
self.parse_complete_dict = defaultdict(list)
self.current_parse_accepted_dict = defaultdict(list)
self.delete_complete_dict = defaultdict(list)
pass
def orchestrate(self):
while True:
user_adventure_choice = self.handler.initialize_game()
if not user_adventure_choice:
self.orchestrate()
elif user_adventure_choice == 'quit':
break
else:
self.adventure_history.append(user_adventure_choice)
current_adventure = self.adventure_history[-1]
# using default dict and making sure that entries for each adventure type are initialized
self.create_dict_entries(current_adventure)
list_to_process = self.start_subgame(current_adventure)
if list_to_process is None:
break
else:
self.user_input_iterator(list_to_process, current_adventure)
def create_dict_entries(self, adventure: str):
self.parse_complete_dict[adventure]
self.current_parse_accepted_dict[adventure]
self.delete_complete_dict[adventure]
def option_string(self):
return """Your options are: \t ↵ (keep)\t r: (r)emove\tce: (c)omplete & (e)xit \taq: (a)bort and (q)uit"""
def start_subgame(self, user_adventure):
cprint("--> How many do you want to get through today\t",color='red')
print("exit by pressing q: to (q)uit")
user_input = input("Enter your choice\t--> ")
if user_input == 'q':
return None
else:
input_size = int(user_input)
continue_index = len(self.parse_complete_dict[user_adventure]) - len(self.delete_complete_dict[user_adventure])
last_index = int(input_size) + continue_index
cprint(f"Ok, we will start from #{continue_index} and go till #{last_index}", color='red')
cprint(self.option_string(),color='yellow', on_color='on_grey')
# each item of the list is a dict with keys: `word` and `count`
return self.list_nodes(last_index,adventure=user_adventure)[continue_index:last_index]
def delete_selection(self, current_adventure):
if len(self.selection_set) == 0:
self.cleanup_selection(current_adventure)
else:
cprint(f"You are about to delete the following nodes: \n>>type: [{current_adventure}] \t >> [{self.selection_set}]\t")
user_confirmation = input("> Press y to continue with deletion, or ANY other key to exit ")
if user_confirmation.lower() == 'y':
q = f"""MATCH (k:Keyword:{current_adventure})
WHERE k.word IN {self.selection_set}
DETACH DELETE k
"""
self.neo.cypher_write_query_runner(q)
print(f"removed following words and added to log: \n>> [{self.selection_set}]")
self.delete_complete_dict[current_adventure].extend(self.selection_set)
self.cleanup_selection(current_adventure)
else:
self.abort_without_delete(current_adventure)
def cleanup_selection(self, adventure):
cprint(("selection list",self.selection_set), color='green')
self.selection_set.clear()
self.parse_complete_dict[adventure].extend(self.current_parse_accepted_dict[adventure])
cprint((f"parse complete list for {adventure}",self.parse_complete_dict[adventure]), color='green')
cprint((f"parse accepted list for {adventure}",self.current_parse_accepted_dict[adventure]), color='green')
self.current_parse_accepted_dict[adventure].clear()
cprint("Cleaned up the current selection for the next round....", color='green')
def add_to_selection(self, word):
self.selection_set.append(word)
def abort_without_delete(self, current_adventure: str):
print("Aborting without deleting anything...")
self.cleanup_selection(current_adventure)
print("Taking you to the start of the subgame.....")
self.orchestrate()
def user_input_iterator(self, entries: list, current_adventure: str):
while True:
for entry in entries:
print(f"({entry['word']}, #{entry['count']})", end='\t')
user_input = input("---> ")
if user_input == "":
self.current_parse_accepted_dict[current_adventure].append(entry['word'])
pass
elif user_input == 'r':
# add to the selector
self.add_to_selection(entry['word'])
elif user_input == 'ce':
# complete the deletion of selection
pass
elif user_input == 'aq':
self.abort_without_delete(current_adventure)
else:
print("Invalid option selected. Your options are:\n")
print(self.option_string())
self.delete_selection(current_adventure)
break
def list_nodes(self, size: int, adventure: str):
# list nodes by their count
        # construct a query
# run query
# display data to user
return self.query_runner(self.query_constructor(size, adventure))
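    # For illustration (hypothetical arguments, not from a real session):
    # query_constructor(25, "movies") below returns a Cypher query equivalent to
    #   Match (k:Keyword:movies)-[r]-(m)
    #   Return k.word as word, count(r) as count
    #   Order by count DESC LIMIT 25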
def query_constructor(self, size: int, adventure:str):
return f"""Match (k:Keyword:{adventure})-[r]-(m)
Return k.word as word, count(r) as count
Order by count DESC LIMIT {int(size)}
"""
def query_runner(self, cypher_query):
return self.neo.cypher_read_query_runner(cypher_query) | 2.328125 | 2 |
maillogger/exceptions.py | Natureshadow/maillogger | 2 | 12789545 | <gh_stars>1-10
class MailloggerError(Exception):
"""Maillogger base exception class"""
class NotFoundFileError(MailloggerError):
"""Raised when a target file to load can not be found"""
msg = 'Could not find "{filepath}"'
def __init__(self, filepath: str) -> None:
super().__init__(self.msg.format(filepath=filepath))
class UnsupportedDataFormatError(MailloggerError):
"""Raised when an unsupported data format is specified"""
msg = 'Data format "{fmt}" is not supported'
def __init__(self, fmt: str) -> None:
super().__init__(self.msg.format(fmt=fmt))
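# Usage sketch (caller code, not part of this module; the format list is
# illustrative only):
#
#   if not os.path.exists(path):
#       raise NotFoundFileError(path)          # -> 'Could not find "<path>"'
#   if fmt not in ("json", "csv"):
#       raise UnsupportedDataFormatError(fmt)  # -> 'Data format "<fmt>" is not supported'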
| 2.890625 | 3 |
swiftly/filelikeiter.py | haydendigital/swiftly | 23 | 12789546 | """
Wraps an iterable to behave as a file-like object.
Copyright (c) 2010-2012 OpenStack Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class FileLikeIter(object):
"""
Wraps an iterable to behave as a file-like object.
Taken from work I did for OpenStack Swift
swift.common.utils.FileLikeIter, Copyright (c) 2010-2012
OpenStack Foundation.
"""
def __init__(self, iterable, limit=None):
self.iterator = iter(iterable)
self.limit = limit
self.left = limit
self.buf = None
self.closed = False
def __iter__(self):
return self
def __next__(self):
"""
x.__next__() -> the next value, or raise StopIteration
"""
if self.closed:
raise ValueError('I/O operation on closed file')
if self.buf:
rv = self.buf
self.buf = None
return rv
else:
return next(self.iterator)
def reset_limit(self):
"""
Resets the limit.
"""
self.left = self.limit
def read(self, size=-1):
"""
read([size]) -> read at most size bytes, returned as a string.
If the size argument is negative or omitted, read until EOF is reached.
Notice that when in non-blocking mode, less data than what was
requested may be returned, even if no size parameter was given.
"""
if self.left is not None:
size = min(size, self.left)
if self.closed:
raise ValueError('I/O operation on closed file')
if size < 0:
return ''.join(self)
elif not size:
chunk = ''
elif self.buf:
chunk = self.buf
self.buf = None
else:
try:
chunk = next(self.iterator)
except StopIteration:
return ''
if len(chunk) > size:
self.buf = chunk[size:]
chunk = chunk[:size]
if self.left is not None:
self.left -= len(chunk)
return chunk
def readline(self, size=-1):
"""
readline([size]) -> next line from the file, as a string.
Retain newline. A non-negative size argument limits the maximum
number of bytes to return (an incomplete line may be returned then).
Return an empty string at EOF.
"""
if self.closed:
raise ValueError('I/O operation on closed file')
data = ''
while '\n' not in data and (size < 0 or len(data) < size):
if size < 0:
chunk = self.read(1024)
else:
chunk = self.read(size - len(data))
if not chunk:
break
data += chunk
if '\n' in data:
data, sep, rest = data.partition('\n')
data += sep
if self.buf:
self.buf = rest + self.buf
else:
self.buf = rest
return data
def readlines(self, sizehint=-1):
"""
readlines([size]) -> list of strings, each a line from the file.
Call readline() repeatedly and return a list of the lines so read.
The optional size argument, if given, is an approximate bound on the
total number of bytes in the lines returned.
"""
if self.closed:
raise ValueError('I/O operation on closed file')
lines = []
while True:
line = self.readline(sizehint)
if not line:
break
lines.append(line)
if sizehint >= 0:
sizehint -= len(line)
if sizehint <= 0:
break
return lines
def is_empty(self):
"""
Check whether the "file" is empty reading the single byte.
"""
something = self.read(1)
if something:
if self.buf:
self.buf = something + self.buf
else:
self.buf = something
return False
else:
return True
def close(self):
"""
close() -> None or (perhaps) an integer. Close the file.
Sets data attribute .closed to True. A closed file cannot be used for
further I/O operations. close() may be called more than once without
error. Some kinds of file objects (for example, opened by popen())
may return an exit status upon closing.
"""
self.iterator = None
self.closed = True
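# Minimal usage sketch (illustrative only): wrap an iterable of string chunks
# and consume it through the file-like interface.
if __name__ == '__main__':
    def _chunks():
        yield 'GET /v1/acct HTT'
        yield 'P/1.1\r\nHost: example.com\r\n\r\n'

    flo = FileLikeIter(_chunks())
    print(flo.readline())   # first line, newline retained
    print(flo.read(4))      # next four characters: 'Host'
    flo.close()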
| 3.28125 | 3 |
DQNs/memory_module.py | abr-98/Reinforcement-Learning | 0 | 12789547 | <reponame>abr-98/Reinforcement-Learning
import random
import numpy as np
from collections import namedtuple,deque
class replayBuffer:
transition=namedtuple('Transition',['s','a','r','s_','nd'])
def __init__(self,capacity):
self.capacity=capacity
self.memory=deque([],maxlen=self.capacity)
def push(self,s,a,r,s_,nd):
tr=replayBuffer.transition(np.float32(s),a,r,np.float32(s_),nd)
self.memory.append(tr)
def sample(self,batch_size):
        # note: random.choices samples with replacement, so a batch may repeat transitions
        tr_batch=random.choices(self.memory,k=batch_size)
        s=[];a=[];r=[];s_=[];nd=[]
        for tr in tr_batch:
            s.append(tr.s); a.append(tr.a); r.append(tr.r); s_.append(tr.s_); nd.append(tr.nd)
return np.array(s),np.array(a),np.array(r),np.array(s_),np.uint8(nd) | 2.953125 | 3 |
mix_label_1.py | hififi/hello-VAD | 1 | 12789548 | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 31 09:24:17 2017
@author: gao
Generates speech+noise mixtures and stores them as .pkl files in mix_data
"""
import librosa as lr
import numpy as np
import os
import pickle
import matplotlib.pyplot as plt
from ams_extract import ams_extractor
resample_rate=16000
nfft = 512
offset = int(nfft/2)
def genAndSave_trainData(speech_fileName):  # label the clean speech frames
s, fs = lr.load(speech_fileName, sr=None)
s = lr.resample(s, fs, resample_rate) # resample to 16k
s = np.array(s)
s_tf = lr.core.stft(s, n_fft=nfft, hop_length=offset)
s_db = lr.core.amplitude_to_db(np.abs(s_tf))
#s_angle = np.angle(s_tf)
x_label = np.ones(s_db.shape[1]) # initialize x_label to all one
xmean = s_db.mean(axis=0)
    for i in range(s_db.shape[1]):
        if xmean[i] < -40:  # frames whose mean log-magnitude is below -40 dB are labelled non-speech
            x_label[i] = 0
#xstd = np.std(s_db, axis =0)
#x_data = (s_db-xmean)/(xstd+1e-10) # normalize train data to zero mean and unit std
return x_label#,x_data,s_angle,xmean,xstd
'''
#plot label & data for inspection
plt.subplot(211)
plt.plot(xmean)
plt.subplot(212)
plt.plot(x_label)
plt.show()
'''
def gen_vadWav(x_data,x_label,s_angle,xmean,xstd):  # rebuild the waveform after labelling, for display
    for i in range(x_data.shape[1]):
        x_data[:,i] = x_data[:,i] * xstd[i] + xmean[i]  # undo the zero-mean / unit-std normalization
    speech_amp = lr.core.db_to_amplitude(x_data)
    for i in range(len(x_label)):
        if x_label[i]==0:
            speech_amp[:,i]=1  # frames labelled as non-speech are flattened; speech frames are kept unchanged
    speech_tf = speech_amp * np.exp(s_angle*1j)
    speech = lr.core.istft(speech_tf, hop_length = offset)  # reattach the phase and inverse-STFT back to a waveform
return speech
def Read_wav(fileName):  # poorly factored, currently unused
s, fs = lr.load(fileName, sr=None)
s = lr.resample(s, fs, resample_rate) # resample to 16k
s = np.array(s)
    s_tf = lr.core.stft(s, n_fft=nfft, hop_length=offset)  # short-time Fourier transform
    s_db = lr.core.amplitude_to_db(np.abs(s_tf))  # convert magnitude to dB
    s_angle = np.angle(s_tf)  # keep the STFT phase
    xmean = s_db.mean(axis=0)  # per-frame mean of the spectrogram
    xstd = np.std(s_db, axis =0)  # per-frame standard deviation
x_data = (s_db-xmean)/(xstd+1e-10) # normalize train data to zero mean and unit std
return x_data,s_angle
def MFCC(y,sr):  # extract 20-dimensional MFCCs
return lr.feature.mfcc(y=y,n_fft=nfft,hop_length=offset,n_mfcc=20)
def Pitch(y,sr):
pitches, magnitudes = lr.core.pitch.piptrack(y=y,n_fft=nfft,hop_length=offset)
# Select out pitches with high energy
pitches = pitches[magnitudes > np.median(magnitudes)]
return lr.core.pitch_tuning(pitches)
def Mix_wav(speechName,noiseName,mix_snr=5,train=True):  # mix noise into the speech at the given SNR (5 dB by default)
s,fs=lr.load(speechName,sr=None)
n,fsn=lr.load(noiseName,sr=None)
s=lr.resample(s,fs,resample_rate)
n=lr.resample(n,fsn,resample_rate)
s=np.array(s)
n=np.array(n)
len_s=len(s)
len_n=len(n)
if len_s<=len_n:
n=n[0:len_s]
else:
n_extend_num=int(len_s/len_n)+1
        n=n.repeat(n_extend_num)  # note: repeat() duplicates each sample; np.tile(n, n_extend_num) would loop the clip instead
n=n[0:len_s]
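    # Scale the noise so the mixture reaches the requested SNR:
    #   mix_snr = 10*log10(sum(s**2) / sum((alpha*n)**2))
    #   =>  alpha = sqrt(sum(s**2) / (sum(n**2) * 10**(mix_snr/10)))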
alpha=np.sqrt((s**2).sum()/((n**2).sum()*10**(mix_snr/10)))
mix=s+alpha*n
mix_tf=lr.core.stft(mix,n_fft=nfft,hop_length=offset)
mix_db=lr.core.amplitude_to_db(np.abs(mix_tf))
mfcc=MFCC(mix,sr=fs)
pitch=Pitch(mix,sr=fs)
ams=ams_extractor(mix,sr=fs,win_len=nfft,shift_len=offset,order=1)
if train==True:
return mix_db, mfcc, pitch,ams#mix_db
else:
mix_angle=np.angle(mix_tf)
return mix_db,mix_angle
def I_Mix_wav(mix_db,mix_angle):  # reconstruct the noisy waveform from the dB spectrogram and phase
mix_amp=lr.core.db_to_amplitude(mix_db)
mix_tf=mix_amp*np.exp(mix_angle*1j)
mix=lr.core.istft(mix_tf,hop_length=offset)
return mix
def label_save(mix_data,mix_label,fn):  # fn is the output file path
    dic_data={'mix_data':mix_data,
              'mix_label':mix_label}  # pack data and labels into a dict
with open(fn,'wb') as f:
pickle.dump(dic_data,f)
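    # Reading the saved .pkl back (sketch):
    #   with open(fn, 'rb') as f:
    #       d = pickle.load(f)
    #   mix_data, mix_label = d['mix_data'], d['mix_label']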
def _create_zero_indexes(speech_fileName):  # zero-crossing indexes
s, fs = lr.load(speech_fileName, sr=None)
s = lr.resample(s, fs, resample_rate) # resample to 16k
zero_indexes=[]
zero_crossings=lr.zero_crossings(s)
zero_index=np.nonzero(zero_crossings)[0]
zero_indexes.append(zero_index)
#build the noisy speech signals
filepath="C:/Users/gao/vad/863_IBM_test/"  #path option 1
#filepath="D:/863语音数据/863_data/863_IBM_train1/"  #path option 2
dirname=os.listdir(filepath)  #list all files
filepath1="C:/Users/gao/vad/noise/"
dirnoise=os.listdir(filepath1)
for i in range(0,len(dirname)):
for j in range(0,len(dirnoise)):
noise_fileName=filepath1+dirnoise[j]
        #print(noise_fileName)  #noise audio file
        speech_fileName=filepath+dirname[i]
        #print(speech_fileName)  #clean speech file
        mix_label=genAndSave_trainData(speech_fileName)  #derive frame labels from the clean audio
#plt.plot(mix_label)
#plt.show()
        #build the speech/noise mixture (the 5 dB call below is commented out; 0 dB is used)
#mix_db,mix_angle=Mix_wav(speech_fileName,noise_fileName,mix_snr=5,train=False)
mix_db, mfcc, pitch,ams=Mix_wav(speech_fileName,noise_fileName,mix_snr=0,train=True)
#print(mix_tf.shape,mfcc.shape,pitch.shape,ams.shape)
mix=np.row_stack((mix_db,mfcc,ams))
#print(mix.shape)
        #normalize the mixed features
        mixmean= mix.mean(axis=0)  #mean
        mixstd = np.std(mix, axis =0)  #standard deviation
        mix_data= (mix-mixmean)/(mixstd+1e-10)  #normalized data
print(mix_data.shape)
print(mix_label)
#a=label_save(mix_data,mix_label,'C:/Users/gao/vad/test_data/0dB/factory_test/'+dirname[i].strip('.WAV')+dirnoise[j].strip('.wav')+'0'+'.pkl')
"""
x=I_Mix_wav(mix_db,mix_angle)
lr.output.write_wav('C:/Users/gao/vad/test_data/'+dirname[i].strip('.WAV')+'__5'+dirnoise[j], x, resample_rate)
        #display the noisy speech
plt.plot(x)
plt.show()
"""
| 2.109375 | 2 |
Task/Zebra-puzzle/Python/zebra-puzzle-2.py | djgoku/RosettaCodeData | 0 | 12789549 | from itertools import permutations
import psyco
psyco.full()
class Number:elems= "One Two Three Four Five".split()
class Color: elems= "Red Green Blue White Yellow".split()
class Drink: elems= "Milk Coffee Water Beer Tea".split()
class Smoke: elems= "PallMall Dunhill Blend BlueMaster Prince".split()
class Pet: elems= "Dog Cat Zebra Horse Bird".split()
class Nation:elems= "British Swedish Danish Norvegian German".split()
for c in (Number, Color, Drink, Smoke, Pet, Nation):
for i, e in enumerate(c.elems):
exec "%s.%s = %d" % (c.__name__, e, i)
def is_possible(number, color, drink, smoke, pet):
if number and number[Nation.Norvegian] != Number.One:
return False
if color and color[Nation.British] != Color.Red:
return False
if drink and drink[Nation.Danish] != Drink.Tea:
return False
if smoke and smoke[Nation.German] != Smoke.Prince:
return False
if pet and pet[Nation.Swedish] != Pet.Dog:
return False
if not number or not color or not drink or not smoke or not pet:
return True
for i in xrange(5):
if color[i] == Color.Green and drink[i] != Drink.Coffee:
return False
if smoke[i] == Smoke.PallMall and pet[i] != Pet.Bird:
return False
if color[i] == Color.Yellow and smoke[i] != Smoke.Dunhill:
return False
if number[i] == Number.Three and drink[i] != Drink.Milk:
return False
if smoke[i] == Smoke.BlueMaster and drink[i] != Drink.Beer:
return False
if color[i] == Color.Blue and number[i] != Number.Two:
return False
for j in xrange(5):
if (color[i] == Color.Green and
color[j] == Color.White and
number[j] - number[i] != 1):
return False
diff = abs(number[i] - number[j])
if smoke[i] == Smoke.Blend and pet[j] == Pet.Cat and diff != 1:
return False
if pet[i]==Pet.Horse and smoke[j]==Smoke.Dunhill and diff != 1:
return False
if smoke[i]==Smoke.Blend and drink[j]==Drink.Water and diff!=1:
return False
return True
def show_row(t, data):
print "%6s: %12s%12s%12s%12s%12s" % (
t.__name__, t.elems[data[0]],
t.elems[data[1]], t.elems[data[2]],
t.elems[data[3]], t.elems[data[4]])
def main():
perms = list(permutations(range(5)))
for number in perms:
if is_possible(number, None, None, None, None):
for color in perms:
if is_possible(number, color, None, None, None):
for drink in perms:
if is_possible(number, color, drink, None, None):
for smoke in perms:
if is_possible(number, color, drink, smoke, None):
for pet in perms:
if is_possible(number, color, drink, smoke, pet):
print "Found a solution:"
show_row(Nation, range(5))
show_row(Number, number)
show_row(Color, color)
show_row(Drink, drink)
show_row(Smoke, smoke)
show_row(Pet, pet)
print
main()
| 3.28125 | 3 |
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-008/ph-8.11-uppercase-lowercase-capitalize.py | shihab4t/Books-Code | 0 | 12789550 | s1 = "Bangladesh"
s_up = s1.upper()
print(s_up)
s_lo = s1.lower()
print(s_lo)
s_cap = s1.capitalize()
print(s_cap)
| 3.6875 | 4 |