max_stars_repo_path (string, 4-286 chars) | max_stars_repo_name (string, 5-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.03M chars) | content_cleaned (string, 6-1.03M chars) | language (111 classes) | language_score (float64, 0.03-1) | comments (string, 0-556k chars) | edu_score (float64, 0.32-5.03) | edu_int_score (int64, 0-5)
---|---|---|---|---|---|---|---|---|---|---
cartoonify.py | adl1995/image-processing-filters | 0 | 10700 |
#!/usr/bin/env python
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
import matplotlib.pyplot as plt
import numpy as np
import skimage as ski
import Image
def cartoonify(im, display=False):
"""
    Receive an image, compute its gradient magnitude, and add it
    back to the original image to return a semi-cartoon image.
    Note: You will have to scale the gradient-magnitude image
    before adding it back to the input image.
Input:
im: input image to cartoonify
display: whether to display image or not...
NOTE: This function expects a gaussian filtered image
"""
kernel, kern_size = np.array([[-1,-1,-1] ,[0,0,0] ,[1,1,1]]), 3
gx, gy = np.zeros_like(im, dtype=float), np.zeros_like(im, dtype=float)
for i in range(im.shape[0] - (kern_size-1)):
for j in range(im.shape[1] - (kern_size-1)):
window = im[i:i + kern_size, j:j + kern_size]
gx[i,j], gy[i,j] = np.sum(window * kernel.T), np.sum(window * kernel)
magnitude = np.sqrt(gx**2 + gy**2)
magnitude = magnitude.astype(np.int64, copy=False)
cartoon = im + (im + magnitude)
if display == 1:
plt.imshow(cartoon, cmap='gray')
plt.suptitle('Cartoon')
plt.show()
return cartoon
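A minimal usage sketch for the function above; the built-in test image and the sigma value are illustrative assumptions, since the docstring only requires a Gaussian-filtered grayscale input:

```python
from skimage import data, filters

# Built-in grayscale test image, smoothed first as the docstring requires.
img = data.camera()
smoothed = filters.gaussian(img, sigma=2, preserve_range=True)  # stays roughly in 0-255

result = cartoonify(smoothed, display=True)
```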
keymapper/__init__.py | rburns629/KeyMapper | 0 | 10701 | from dataclasses import dataclass
import json
import re
@dataclass
class KeyMapper(dict):
"""
Example:
        km = KeyMapper({'messages': {'message1': 'Hello World!'}})
print(km['messages.message1'])
Variables:
__delimiter__ is set to dot-notation by default, unless specified otherwise.
"""
__delimiter__ = "." # Default
__schema__ = {}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if kwargs:
if 'delimiter' in kwargs:
self.__delimiter__ = kwargs['delimiter']
elif 'schema' in kwargs:
self.__schema__ = kwargs['schema']
for arg in args:
if isinstance(arg, dict):
for k, v in arg.items():
if self.__schema__:
if self.__schema__[k] == type(v):
self.__dict__.update({k: v})
else:
raise ValueError(
f'TypeMismatchError: value {type(v)} does not match type {type(self.__schema__[k])} defined in schema')
else:
self.__dict__.update({k: v})
def __repr__(self):
return '{}(dict={})'.format(self.__class__, self.__dict__)
def __str__(self):
return '{}'.format(self.__dict__)
def __getattr__(self, attr):
try:
return self.get(attr)
except Exception as e:
raise e
def __setattr__(self, key, value):
try:
self.__setitem__(key, value)
except Exception as e:
raise e
def __delattr__(self, item):
try:
self.__delitem__(item)
except Exception as e:
raise e
def __getitem__(self, key):
try:
if self.__delimiter__ in key:
return self.__mapper__(self.__dict__, key.split(self.__delimiter__), self.__getitem__.__name__)
else:
return self.get(key)
except Exception as e:
raise e
def __setitem__(self, key, value):
try:
if self.__delimiter__ in key:
self.__mapper__(self.__dict__, key.split(
self.__delimiter__), self.__setitem__.__name__, value)
else:
super().__setitem__(key, value)
self.__dict__.update({key: value})
except Exception as e:
raise e
def __delitem__(self, key):
try:
if self.__delimiter__ in key:
self.__mapper__(self.__dict__, key.split(
self.__delimiter__), self.__delitem__.__name__)
else:
super().__delitem__(key)
del self.__dict__[key]
except Exception as e:
raise e
def pprint(self, *args):
try:
if len(args) > 0:
return json.dumps(args[0], indent=4, ensure_ascii=False)
return json.dumps(self, indent=4, ensure_ascii=False)
except Exception as e:
raise e
@classmethod
def __mapper__(cls, d, m, callback, *args, **kwargs):
for i, k in enumerate(m):
key = k if not re.search(r'^[0-9]+$', k) else int(k)
try:
if str(key) in d or type(key) == int and d[key]:
if str(key) != m[-1] or i != len(m) - 1:
return cls.__mapper__(d[key], m[1:], callback, *args, **kwargs)
elif str(key) == m[-1] and i == len(m) - 1:
if callback == '__setitem__':
d[key] = args[0]
return None
elif callback == '__delitem__':
del d[key]
return None
else:
return d[key]
except Exception as e:
raise e
else:
if i == len(m) - 1:
if callback == '__setitem__':
d[m[-1]] = args[0]
return None
else:
raise KeyError('{}'.format(m[i]))
else:
if callback == '__getitem__':
return d
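A short usage sketch based on the class as written above, extending the example from its docstring:

```python
km = KeyMapper({'messages': {'message1': 'Hello World!'}})

print(km['messages.message1'])       # nested read via dot notation -> Hello World!
km['messages.message2'] = 'Goodbye'  # nested write adds a new leaf key
del km['messages.message1']          # nested delete
print(km['messages'])                # {'message2': 'Goodbye'}
```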
PythonFiles_DataScience/demo37_pythonfordatascience.py | mahnooranjum/Programming_DataScience | 0 | 10702 | # -*- coding: utf-8 -*-
"""Demo37_PythonforDataScience.ipynb
# PYTHON FOR DATA SCIENCE
We will take our Python programming skills a step further and use them to process large datasets. Python is an excellent language for deployment. Hence we will be using open source data during the learning process!!
This will make sure we understand the challenges a Data Scientist can face and how to deal with them. In my experience, Data Preprocessing takes 70% of the time in any project. Hence it is crucial for any Data Scientist to know what it is and how it is done.
This may be the boring portion of the course but I assure you, you will feel accomplished by the end of this tutorial.
- Python Basics
- Object Oriented Python
- **Python for Data Science**
- NumPy
- Pandas
- Plotting
- Matplotlib
- Seaborn
Let's get coding !!
"""
#Variables can not start with a number
12var = 1
_13var = 1
name = "Mahnoor"
surname = "Anjum"
age = 21
print("I'm {} {} and I am {} years old.".format(name, surname, age))
name = "Mahnoor"
surname = "Anjum"
age = 21
print("I'm {_1} {_2} and I am {_3} years old.".format(_1 = name, _2= surname, _3 = age))
"""### INDEXING AND SLICING
One of the most important Python concepts for data scientists is the slicing operator ':'
"""
str = "ONE TWO THREE FOUR FIVE"
print(str[0])
print(str[5])
print(str[len(str)-1])
str[:5]
str[5:]
str[1]="a"
nested = [1,2,3,['_1','_2','_3',['__1']]]
nested[0]
nested[3][0]
len(nested)
len(nested[3])
nested[3][3]
nested[3][3][0]
dict = {'key1':'value1', \
'key2': 'value2', \
'key3':'value3'}
dict['key1']
T = True
F = False
var = 10
for i in range(var):
print(i)
for i in range(var):
bool = (i==2)
if bool:
break
print(i)
[1,2,3,1,1,2,3,4]
(1,2,3,1,1,2,3,4)
{1,2,3,1,1,2,3,4}
new_set = set([1,2,3,1,1,2,3,4])
new_set.add(5)
new_set
for item in new_set:
print(item)
list(range(4))
my_list = list(range(5,10))
output = []
for number in my_list:
output.append(number**3)
output
output = [num**3 for num in my_list]
output
"""### FUNCTIONS"""
def my_function(parameter):
print(parameter)
my_function("Jalebi (Hungry okay?)")
def my_function(parameter="Default"):
print(parameter)
my_function()
num = 4
def change(par):
par =5
return par
change(num)
num
num = 4
def change(par):
par =5
return par
change(num)
num
num = [4]
def change(par):
par.append(5)
del par[0]
return par
change(num)
num
my_list
"""### LAMBDA EXPRESSIONS"""
def square(x): return x*x
list(map(square, my_list))
list(map(lambda x:x*x, my_list))
"""### BUILT-IN FUNCTIONS"""
s = "We have a hulk !!!"
s.lower()
s.upper()
s.split()
dict = {'key1':1,'key2':2}
dict.keys()
dict.values()
dict.items()
my_list.pop()
my_list
"""### TUPLE UNPACKING"""
list_of_tuples =[(1,2),(3,4),(5,6)]
for (a,b) in list_of_tuples:
print (a)
print (b)
"""### WELCOME TO THE END OF THE TUTORIAL
You made it!! Hope you enjoyed taking this tutorial as much as I enjoyed coding it. From the next tutorial, we will be starting our first Data Science Library called NumPy. Until then, happy coding.
---------------------------------------------------------------------------------
Copyrights © 2018, All Rights Reserved.
- Author: <NAME>.
- Course: The Complete Hands-On Machine Learning Course
- Date Created: 2018-06-27
- Date Modified: -
""" | # -*- coding: utf-8 -*-
"""Demo37_PythonforDataScience.ipynb
# PYTHON FOR DATA SCIENCE
We will take our python programming skills a step further and process large data in it. Python is an excellent language for deployment. Hence we will be using open source data during the learning process!!
This will make sure we understand the challenges a Data Scientist can face and how to deal with them. In my experience, Data Preprocessing takes 70% of the time in any project. Hence it is crucial for any Data Scientist to know what it is and how it is done.
This may be the boring portion of the course but I assure you, you will feel accomplished by the end of this tutorial.
- Python Basics
- Object Oriented Python
- **Python for Data Science**
- NumPy
- Pandas
- Plotting
- Matplotlib
- Seaborn
Let's get coding !!
"""
#Variables can not start with a number
12var = 1
_13var = 1
name = "Mahnoor"
surname = "Anjum"
age = 21
print("I'm {} {} and I am {} years old.".format(name, surname, age))
name = "Mahnoor"
surname = "Anjum"
age = 21
print("I'm {_1} {_2} and I am {_3} years old.".format(_1 = name, _2= surname, _3 = age))
"""### INDEXING AND SLICING
One of the most important Python concept for data scientists is the slicing operator ':'
"""
str = "ONE TWO THREE FOUR FIVE"
print(str[0])
print(str[5])
print(str[len(str)-1])
str[:5]
str[5:]
str[1]="a"
nested = [1,2,3,['_1','_2','_3',['__1']]]
nested[0]
nested[3][0]
len(nested)
len(nested[3])
nested[3][3]
nested[3][3][0]
dict = {'key1':'value1', \
'key2': 'value2', \
'key3':'value3'}
dict['key1']
T = True
F = False
var = 10
for i in range(var):
print(i)
for i in range(var):
bool = (i==2)
if bool:
break
print(i)
[1,2,3,1,1,2,3,4]
(1,2,3,1,1,2,3,4)
{1,2,3,1,1,2,3,4}
new_set = set([1,2,3,1,1,2,3,4])
new_set.add(5)
new_set
for item in new_set:
print(item)
list(range(4))
my_list = list(range(5,10))
output = []
for number in my_list:
output.append(number**3)
output
output = [num**3 for num in my_list]
output
"""### FUNCTIONS"""
def my_function(parameter):
print(parameter)
my_function("Jalebi (Hungry okay?)")
def my_function(parameter="Default"):
print(parameter)
my_function()
num = 4
def change(par):
par =5
return par
change(num)
num
num = 4
def change(par):
par =5
return par
change(num)
num
num = [4]
def change(par):
par.append(5)
del par[0]
return par
change(num)
num
my_list
"""### LAMBDA EXPRESSIONS"""
def square(x): return x*x
list(map(square, my_list))
list(map(lambda x:x*x, my_list))
"""### BUILT-IN FUNCTIONS"""
s = "We have a hulk !!!"
s.lower()
s.upper()
s.split()
dict = {'key1':1,'key2':2}
dict.keys()
dict.values()
dict.items()
my_list.pop()
my_list
"""### TUPLE UNPACKING"""
list_of_tuples =[(1,2),(3,4),(5,6)]
for (a,b) in list_of_tuples:
print (a)
print (b)
"""### WELCOME TO THE END OF THE TUTORIAL
You made it!! Hope you enjoyed taking this tutorial as much as I enjoyed coding it. From the next tutorial, we will be starting our first Data Science Library called NumPy. Until then, happy coding.
---------------------------------------------------------------------------------
Copyrights © 2018, All Rights Reserved.
- Author: <NAME>.
- Course: The Complete Hands-On Machine Learning Course
- Date Created: 2018-06-27
- Date Modified: -
""" | en | 0.844136 | # -*- coding: utf-8 -*- Demo37_PythonforDataScience.ipynb # PYTHON FOR DATA SCIENCE We will take our python programming skills a step further and process large data in it. Python is an excellent language for deployment. Hence we will be using open source data during the learning process!! This will make sure we understand the challenges a Data Scientist can face and how to deal with them. In my experience, Data Preprocessing takes 70% of the time in any project. Hence it is crucial for any Data Scientist to know what it is and how it is done. This may be the boring portion of the course but I assure you, you will feel accomplished by the end of this tutorial. - Python Basics - Object Oriented Python - **Python for Data Science** - NumPy - Pandas - Plotting - Matplotlib - Seaborn Let's get coding !! #Variables can not start with a number ### INDEXING AND SLICING One of the most important Python concept for data scientists is the slicing operator ':' ### FUNCTIONS ### LAMBDA EXPRESSIONS ### BUILT-IN FUNCTIONS ### TUPLE UNPACKING ### WELCOME TO THE END OF THE TUTORIAL You made it!! Hope you enjoyed taking this tutorial as much as I enjoyed coding it. From the next tutorial, we will be starting our first Data Science Library called NumPy. Until then, happy coding. --------------------------------------------------------------------------------- Copyrights © 2018, All Rights Reserved. - Author: <NAME>. - Course: The Complete Hands-On Machine Learning Course - Date Created: 2018-06-27 - Date Modified: - | 4.178516 | 4 |
quantrocket/db.py | Jay-Jay-D/quantrocket-client | 0 | 10703 |
# Copyright 2017 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from quantrocket.houston import houston
from quantrocket.cli.utils.output import json_to_cli
def list_databases(service=None):
"""
List databases.
Parameters
----------
service : str, optional
only list databases for this service
Returns
-------
list
list of databases
"""
params = {}
if service:
params["service"] = service
response = houston.get("/db/databases", params=params)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_list_databases(*args, **kwargs):
return json_to_cli(list_databases, *args, **kwargs)
def download_database(database, outfile):
"""
Download a database from the db service and write to a local file.
Parameters
----------
database : str, required
the filename of the database (as returned by the list_databases)
outfile: str, required
filename to write the database to
Returns
-------
None
"""
response = houston.get("/db/databases/{0}".format(database), stream=True)
houston.raise_for_status_with_json(response)
with open(outfile, "wb") as f:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
def _cli_download_database(*args, **kwargs):
return json_to_cli(download_database, *args, **kwargs)
def s3_push_databases(service, codes=None):
"""
Push database(s) to Amazon S3.
Parameters
----------
    service : str, required
only push databases for this service (specify 'all' to push all services)
codes: list of str, optional
only push databases identified by these codes (omit to push all databases for service)
Returns
-------
json
status message
"""
data = {}
if codes:
data["codes"] = codes
response = houston.put("/db/s3/{0}".format(service), data=data)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_s3_push_databases(*args, **kwargs):
return json_to_cli(s3_push_databases, *args, **kwargs)
def s3_pull_databases(service, codes=None, force=False):
"""
Pull database(s) from Amazon S3 to the db service.
Parameters
----------
    service : str, required
only pull databases for this service (specify 'all' to pull all services)
codes: list of str, optional
only pull databases identified by these codes (omit to pull all databases for service)
force: bool
overwrite existing database if one exists (default is to fail if one exists)
Returns
-------
json
status message
"""
params = {}
if codes:
params["codes"] = codes
if force:
params["force"] = force
response = houston.get("/db/s3/{0}".format(service), params=params)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_s3_pull_databases(*args, **kwargs):
return json_to_cli(s3_pull_databases, *args, **kwargs)
def optimize_databases(service, codes=None):
"""
Optimize database file(s) to improve performance.
Parameters
----------
    service : str, required
only optimize databases for this service (specify 'all' to optimize all services)
codes: list of str, optional
only optimize databases identified by these codes (omit to optimize all databases for service)
Returns
-------
json
status message
"""
data = {}
if codes:
data["codes"] = codes
response = houston.post("/db/optimizations/{0}".format(service), data=data)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_optimize_databases(*args, **kwargs):
return json_to_cli(optimize_databases, *args, **kwargs)
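A hypothetical session using the helpers defined above; the service name and output path are placeholders rather than values prescribed by the API:

```python
# List the databases of one service, then download the first one locally.
dbs = list_databases(service="history")
print(dbs)

if dbs:
    download_database(dbs[0], outfile="/tmp/history_backup.sqlite")
```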
ink2canvas/GradientHelper.py | greipfrut/pdftohtml5canvas | 4 | 10704 |
from ink2canvas.lib.simpletransform import parseTransform
class GradientHelper(object):
def __init__(self, abstractShape):
self.abstractShape = abstractShape
def hasGradient(self, key):
style = self.abstractShape.getStyle()
if key in style:
styleParamater = style[key]
if styleParamater.startswith("url(#linear"):
return "linear"
if styleParamater.startswith("url(#radial"):
return "radial"
return None
def getGradientHref(self, key):
style = self.abstractShape.getStyle()
if key in style:
return style[key][5:-1]
return
def setGradientFill(self):
gradType = self.hasGradient("fill")
if (gradType):
gradient = self.setComponentGradient("fill", gradType)
self.abstractShape.canvasContext.setFill("gradient=grad")
if(self.hasGradientTransform(gradient)):
self.abstractShape.canvasContext.fill();
self.abstractShape.canvasContext.restore()
return True
def setGradientStroke(self):
gradType = self.hasGradient("stroke")
if (gradType):
gradient = self.setComponentGradient("stroke", gradType)
self.abstractShape.canvasContext.setStroke("gradient=grad")
if(self.hasGradientTransform(gradient)):
self.abstractShape.canvasContext.stroke();
self.abstractShape.canvasContext.restore()
return True
def hasGradientTransform(self, gradient):
return bool(gradient.attr("gradientTransform"))
def setGradientTransform(self, gradient):
dataString = gradient.attr("gradientTransform")
dataMatrix = parseTransform(dataString)
m11, m21, dx = dataMatrix[0]
m12, m22, dy = dataMatrix[1]
self.abstractShape.canvasContext.transform(m11, m12, m21, m22, dx, dy)
def setComponentGradient(self, key, gradType):
gradientId = self.getGradientHref(key)
if(gradType == "linear"):
gradient = self.abstractShape.rootTree.getLinearGradient(gradientId)
if(gradType == "radial"):
gradient = self.abstractShape.rootTree.getRadialGradient(gradientId)
if(gradient.link != None):
gradient.colorStops = self.abstractShape.rootTree.getLinearGradient(gradient.link).colorStops
if(self.hasGradientTransform(gradient)):
self.abstractShape.canvasContext.save()
self.setGradientTransform(gradient)
if(gradType == "linear"):
x1, y1, x2, y2 = gradient.getData()
self.abstractShape.canvasContext.createLinearGradient("grad", x1, y1, x2, y2)
if(gradType == "radial"):
cx, cy, fx, fy, r = gradient.getData()
self.abstractShape.canvasContext.createRadialGradient("grad", cx, cy, 0, fx, fy, r)
for stopKey, stopValue in gradient.colorStops.iteritems():
offset = float(stopKey)
color = self.abstractShape.canvasContext.getColor(stopValue.split(";")[0].split(":")[1] , stopValue.split(";")[1].split(":")[1] )
self.abstractShape.canvasContext.addColorStop("grad", offset, color)
return gradient
def createLinearGradient(self):
x1, y1, x2, y2 = self.gradient.getData()
self.abstractShape.canvasContext.createLinearGradient("grad", x1, y1, x2, y2)
for stop in self.gradient.stops:
color = self.canvasContext.getColor(stop.split(";")[0].split(":")[1] , stop.split(";")[1].split(":")[1])
offset = float(stop.split(";")[2].split(":")[1])
            self.abstractShape.canvasContext.addColorStop("grad", offset, color)
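The slicing in getGradientHref above simply strips the url(#...) wrapper from an SVG paint reference; a standalone illustration of that logic, with an assumed style value:

```python
style_value = "url(#linearGradient4321)"  # e.g. the 'fill' entry of a node's style
gradient_id = style_value[5:-1]           # drops the leading "url(#" and the trailing ")"
print(gradient_id)                        # linearGradient4321
```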
project/manage.py | yosukesuzuki/let-me-notify | 0 | 10705 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Kay management script.
:Copyright: (c) 2009 Accense Technology, Inc. All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import logging
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
import kay
kay.setup_env(manage_py_env=True)
from werkzeug import script
from kay.management import *
import appengine_config
if __name__ == '__main__':
if len(sys.argv) == 1:
sys.argv.append("--help")
script.run()
examples/plotting/field_pole_figure.py | heprom/pymicro | 30 | 10706 |
from pymicro.crystal.microstructure import *
from pymicro.crystal.texture import *
from pymicro.examples import PYMICRO_EXAMPLES_DATA_DIR
from matplotlib import pyplot as plt, colors, colorbar, cm
import numpy as np
import os
import pathlib as pl
'''This example demonstrates how a field can be used to color each symbol on
the pole figure with the :py:meth:~`pymicro.crystal.texture.set_map_field`
method.
'''
#orientations = Orientation.read_euler_txt('../data/orientation_set.inp')
#for i in range(600):
# micro.grains.append(Grain(i, orientations[i + 1]))
euler_list = np.genfromtxt(PYMICRO_EXAMPLES_DATA_DIR / 'orientation_set.inp').tolist()
micro = Microstructure(name='field', autodelete=True)
micro.add_grains(euler_list)
# load strain from dat files
strain_field = np.genfromtxt(PYMICRO_EXAMPLES_DATA_DIR / 'strain_avg_per_grain.dat')[19, ::2]
# build custom pole figures
pf = PoleFigure(microstructure=micro)
pf.mksize = 40
pf.set_map_field('strain', strain_field, field_min_level=0.015, field_max_level=0.025)
fig = plt.figure()
# direct PF
ax1 = fig.add_axes([0.05, 0.05, 0.8, 0.9], aspect='equal')
pf.plot_pf(ax=ax1)
plt.title('111 pole figure, cubic elasticity')
# to add the color bar
ax2 = fig.add_axes([0.8, 0.05, 0.05, 0.9])
norm = colors.Normalize(vmin=0.015, vmax=0.025)
cb = colorbar.ColorbarBase(ax2, cmap=cm.hot, norm=norm, orientation='vertical')
cb.set_label('Average strain (mm/mm)')
image_name = os.path.splitext(__file__)[0] + '.png'
print('writing %s' % image_name)
plt.savefig('%s' % image_name, format='png')
del pf
del micro
from matplotlib import image
image.thumbnail(image_name, 'thumb_' + image_name, 0.2)
model/img2seq_torch.py | marcoleewow/LaTeX_OCR | 290 | 10707 |
import time
import sys
import os
import numpy as np
import torch
import torch.nn as nn
import torchvision.models as models
from torch.nn.utils.rnn import pack_padded_sequence
from model.base_torch import BaseModel
from model.utils.general import init_dir, get_logger
from model.utils.general import Progbar
from model.utils.general import Config
from model.utils.general import minibatches
from model.components.SimpleCNN import SimpleCNN
from model.components.ResNet import ResNet9
from model.components.DenseNet import DenseNet169
from model.components.seq2seq_torch import EncoderCNN, DecoderWithAttention, Img2Seq
from model.evaluation.text import score_files, truncate_end, write_answers
from model.utils.image import pad_batch_images_2
from model.utils.text import pad_batch_formulas
from torch.utils.data import Dataset
import h5py
import json
from model.utils.data_generator import DataGenerator
class ImgFormulaDataset(Dataset):
"""
A PyTorch Dataset class to be used in a PyTorch DataLoader to create batches.
"""
def __init__(self, data_generator: DataGenerator, transform=None):
"""
        :param data_generator: DataGenerator instance that yields (image, formula) pairs
        :param transform: image transform pipeline
"""
self.data_generator = data_generator
# PyTorch transformation pipeline for the image (normalizing, etc.)
self.transform = transform
def __getitem__(self, i):
# Remember, the Nth caption corresponds to the (N // captions_per_image)th image
(img, formula) = self.data_generator.__getitem__(i)
img = pad_batch_images_2([img], [800, 800, 1])
# img = torch.tensor(img, dtype=torch.int8) # (N, W, H, C)
# img = img.squeeze(0)
# img = img.permute(2, 0, 1) # (C, W, H)
# if self.transform is not None:
# img = self.transform(img)
# formula = torch.tensor(formula, dtype=torch.int) # (C, W, H), (TOKEN)
return img, formula
def __len__(self):
return len(self.data_generator)
class Img2SeqModel(BaseModel):
def __init__(self, config, dir_output, vocab):
super(Img2SeqModel, self).__init__(config, dir_output)
self._vocab = vocab
def getModel(self, model_name="CNN"):
if model_name == "CNN":
return SimpleCNN()
elif model_name == "ResNet9":
return ResNet9()
elif model_name == "DenseNet169":
return DenseNet169(pretrained=True)
elif model_name == "Img2Seq":
self.encoder = EncoderCNN(self._config)
self.decoder = DecoderWithAttention(attention_dim=512,
embed_dim=512,
decoder_dim=512,
vocab_size=self._vocab.n_tok,
dropout=0.5)
return Img2Seq(self._config, self._vocab)
def getOptimizer(self, lr_method='adam', lr=0.001):
self.encoder_optimizer = torch.optim.Adam(params=self.encoder.parameters(), lr=lr)
self.decoder_optimizer = torch.optim.Adam(params=self.decoder.parameters(), lr=lr)
return super().getOptimizer(lr_method=lr_method, lr=lr)
def _run_train_epoch(self, config, train_set, val_set, epoch, lr_schedule):
"""Performs an epoch of training
Args:
config: Config instance
train_set: Dataset instance
val_set: Dataset instance
epoch: (int) id of the epoch, starting at 0
lr_schedule: LRSchedule instance that takes care of learning proc
Returns:
score: (float) model will select weights that achieve the highest score
"""
# logging
batch_size = config.batch_size
nbatches = (len(train_set) + batch_size - 1) // batch_size
prog = Progbar(nbatches)
self.model.train()
self.encoder.train()
self.decoder.train()
train_loader = torch.utils.data.DataLoader(ImgFormulaDataset(train_set),
batch_size=batch_size,
shuffle=True, num_workers=3, pin_memory=True)
# for i, (img, formula) in enumerate(train_loader):
for i, (img, formula) in enumerate(minibatches(train_set, batch_size)):
img = pad_batch_images_2(img)
img = torch.FloatTensor(img) # (N, W, H, C)
formula, formula_length = pad_batch_formulas(formula, self._vocab.id_pad, self._vocab.id_end)
img = img.permute(0, 3, 1, 2) # (N, C, W, H)
formula = torch.LongTensor(formula) # (N,)
loss_eval = self.getLoss(img, formula=formula, lr=lr_schedule.lr, dropout=config.dropout, training=True)
prog.update(i + 1, [("loss", loss_eval), ("lr", lr_schedule.lr)])
# update learning rate
lr_schedule.update(batch_no=epoch*nbatches + i)
self.logger.info("- Training: {}".format(prog.info))
# evaluation
config_eval = Config({"dir_answers": self._dir_output + "formulas_val/", "batch_size": config.batch_size})
scores = self.evaluate(config_eval, val_set)
score = scores["perplexity"]
lr_schedule.update(score=score)
return score
def getLoss(self, img, formula, lr, dropout, training=True):
# Move to GPU, if available
img = img.to(self.device)
formula = formula.to(self.device)
# Forward prop.
imgs = self.encoder(img)
scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(
imgs, formula, torch.LongTensor([[len(i)] for i in formula]))
# Since we decoded starting with <start>, the targets are all words after <start>, up to <end>
targets = caps_sorted[:, 1:]
# Remove timesteps that we didn't decode at, or are pads
# pack_padded_sequence is an easy trick to do this
scores, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True)
targets, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True)
# Calculate loss
loss = self.criterion(scores, targets)
alpha_c = 1.
# Add doubly stochastic attention regularization
loss += alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()
# Back prop.
self.decoder_optimizer.zero_grad()
if self.encoder_optimizer is not None:
self.encoder_optimizer.zero_grad()
loss.backward()
# Update weights
self.decoder_optimizer.step()
if self.encoder_optimizer is not None:
self.encoder_optimizer.step()
return -loss.item()
def _run_evaluate_epoch(self, config, test_set):
"""Performs an epoch of evaluation
Args:
            config: Config instance, providing "dir_answers" where predictions are written
            test_set: Dataset instance
Returns:
scores: (dict) scores["acc"] = 0.85 for instance
"""
self.model.eval()
self.encoder.eval()
self.decoder.eval()
# initialize containers of references and predictions
if self._config.decoding == "greedy":
refs, hyps = [], [[]]
elif self._config.decoding == "beam_search":
refs, hyps = [], [[] for i in range(self._config.beam_size)]
references = list() # references (true captions) for calculating BLEU-4 score
hypotheses = list() # hypotheses (predictions)
with torch.no_grad():
nbatches = len(test_set)
prog = Progbar(nbatches)
test_loader = torch.utils.data.DataLoader(ImgFormulaDataset(test_set),
batch_size=nbatches,
shuffle=True, num_workers=3, pin_memory=True)
for i, (img, formula) in enumerate(minibatches(test_set, nbatches)):
# print(type(img), len(img), img[0].shape)
# print(type(formula), formula)
# Move to GPU, if available
img = pad_batch_images_2(img)
img = torch.FloatTensor(img) # (N, W, H, C)
formula, formula_length = pad_batch_formulas(formula, self._vocab.id_pad, self._vocab.id_end)
img = img.permute(0, 3, 1, 2) # (N, C, W, H)
formula = torch.LongTensor(formula) # (N,)
img = img.to(self.device)
formula = formula.to(self.device)
# Forward prop.
imgs = self.encoder(img)
scores, caps_sorted, decode_lengths, alphas, sort_ind = self.decoder(imgs, formula, torch.LongTensor([[len(i)] for i in formula]))
# Since we decoded starting with <start>, the targets are all words after <start>, up to <end>
targets = caps_sorted[:, 1:]
# Remove timesteps that we didn't decode at, or are pads
# pack_padded_sequence is an easy trick to do this
scores, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True)
targets, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True)
# Calculate loss
loss = self.criterion(scores, targets)
print(scores.shape, targets.shape)
print(loss)
alpha_c = 1.
# Add doubly stochastic attention regularization
loss += alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()
loss_eval = loss.item()
prog.update(i + 1, [("loss", loss_eval), ("perplexity", np.exp(loss_eval))])
# Store references (true captions), and hypothesis (prediction) for each image
# If for n images, we have n hypotheses, and references a, b, c... for each image, we need -
# references = [[ref1a, ref1b, ref1c], [ref2a, ref2b], ...], hypotheses = [hyp1, hyp2, ...]
# print("---------------------------------------------------------------formula and prediction :")
for form, preds in zip(formula, scores):
refs.append(form)
# print(form, " ---------- ", preds[0])
for i, pred in enumerate(preds):
hyps[i].append(pred)
files = write_answers(refs, hyps, self._vocab.id_to_tok, config.dir_answers, self._vocab.id_end)
scores = score_files(files[0], files[1])
# perp = - np.exp(ce_words / float(n_words))
# scores["perplexity"] = perp
self.logger.info("- Evaluating: {}".format(prog.info))
return {
"perplexity": loss.item()
}
def predict_batch(self, images):
preds = []
images = images.to(self.device)
outputs = self.model(images)
_, predicted = torch.max(outputs.data, 1)
pr = outputs[:, 1].detach().cpu().numpy()
for i in pr:
preds.append(i)
return preds
def predict(self, img):
return self.predict_batch([img])
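The pack_padded_sequence calls in the loss computation above are the trick used to drop padded timesteps before scoring; a small self-contained illustration with arbitrary shapes:

```python
import torch
from torch.nn.utils.rnn import pack_padded_sequence

# Two sequences padded to length 4, with true lengths 4 and 2.
scores = torch.randn(2, 4, 10)  # (batch, time, vocab)
lengths = torch.tensor([4, 2])

packed = pack_padded_sequence(scores, lengths, batch_first=True)
print(packed.data.shape)  # torch.Size([6, 10]) -> only the 4 + 2 real timesteps remain
```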
src/third_party/dart/tools/dom/scripts/all_tests.py | rhencke/engine | 21 | 10708 | #!/usr/bin/python
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""This entry point runs all script tests."""
import logging.config
import unittest
if __name__ == '__main__':
logging.config.fileConfig('logging.conf')
suite = unittest.TestLoader().loadTestsFromNames([
'templateloader_test', 'pegparser_test', 'idlparser_test',
'idlnode_test', 'idlrenderer_test', 'database_test',
'databasebuilder_test', 'emitter_test', 'dartgenerator_test',
'multiemitter_test'
])
unittest.TextTestRunner().run(suite)
| #!/usr/bin/python
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
"""This entry point runs all script tests."""
import logging.config
import unittest
if __name__ == '__main__':
logging.config.fileConfig('logging.conf')
suite = unittest.TestLoader().loadTestsFromNames([
'templateloader_test', 'pegparser_test', 'idlparser_test',
'idlnode_test', 'idlrenderer_test', 'database_test',
'databasebuilder_test', 'emitter_test', 'dartgenerator_test',
'multiemitter_test'
])
unittest.TextTestRunner().run(suite)
| en | 0.886715 | #!/usr/bin/python # Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file # for details. All rights reserved. Use of this source code is governed by a # BSD-style license that can be found in the LICENSE file. This entry point runs all script tests. | 2.036107 | 2 |
src/11/11367.py | youngdaLee/Baekjoon | 11 | 10709 | <filename>src/11/11367.py
"""
11367. Report Card Time
Author: xCrypt0r
Language: Python 3
Memory used: 29,380 KB
Time taken: 64 ms
Solved on: September 18, 2020
"""
def main():
for _ in range(int(input())):
name, score = input().split()
score = int(score)
if score < 60: grade = 'F'
elif score < 67: grade = 'D'
elif score < 70: grade = 'D+'
elif score < 77: grade = 'C'
elif score < 80: grade = 'C+'
elif score < 87: grade = 'B'
elif score < 90: grade = 'B+'
elif score < 97: grade = 'A'
else: grade = 'A+'
print(name + ' ' + grade)
if __name__ == '__main__':
main()
| <filename>src/11/11367.py
"""
11367. Report Card Time
Author: xCrypt0r
Language: Python 3
Memory used: 29,380 KB
Time taken: 64 ms
Solved on: September 18, 2020
"""
def main():
for _ in range(int(input())):
name, score = input().split()
score = int(score)
if score < 60: grade = 'F'
elif score < 67: grade = 'D'
elif score < 70: grade = 'D+'
elif score < 77: grade = 'C'
elif score < 80: grade = 'C+'
elif score < 87: grade = 'B'
elif score < 90: grade = 'B+'
elif score < 97: grade = 'A'
else: grade = 'A+'
print(name + ' ' + grade)
if __name__ == '__main__':
main()
| ko | 0.995778 | 11367. Report Card Time Author: xCrypt0r Language: Python 3 Memory used: 29,380 KB Time taken: 64 ms Solved on: September 18, 2020 | 3.456244 | 3 |
imgaug/augmentables/bbs.py | bill0714/imgaug | 1 | 10710 | from __future__ import print_function, division, absolute_import
import copy
import numpy as np
import skimage.draw
import skimage.measure
from .. import imgaug as ia
from .utils import normalize_shape, project_coords
# TODO functions: square(), to_aspect_ratio(), contains_point()
class BoundingBox(object):
"""Class representing bounding boxes.
Each bounding box is parameterized by its top left and bottom right
corners. Both are given as x and y-coordinates. The corners are intended
to lie inside the bounding box area. As a result, a bounding box that lies
completely inside the image but has maximum extensions would have
coordinates ``(0.0, 0.0)`` and ``(W - epsilon, H - epsilon)``. Note that
coordinates are saved internally as floats.
Parameters
----------
x1 : number
X-coordinate of the top left of the bounding box.
y1 : number
Y-coordinate of the top left of the bounding box.
x2 : number
X-coordinate of the bottom right of the bounding box.
y2 : number
Y-coordinate of the bottom right of the bounding box.
label : None or str, optional
Label of the bounding box, e.g. a string representing the class.
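    Examples
    --------
    Illustrative construction (coordinate values are arbitrary):

    >>> from imgaug.augmentables.bbs import BoundingBox
    >>> bb = BoundingBox(x1=10.5, y1=20.5, x2=50.5, y2=60.5, label="person")
    >>> bb.height, bb.width    # 40.0, 40.0
    >>> bb.center_x, bb.center_y    # 30.5, 40.5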
"""
def __init__(self, x1, y1, x2, y2, label=None):
"""Create a new BoundingBox instance."""
if x1 > x2:
x2, x1 = x1, x2
if y1 > y2:
y2, y1 = y1, y2
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.label = label
@property
def coords(self):
"""Get the top-left and bottom-right coordinates as one array.
Returns
-------
ndarray
A ``(N, 2)`` numpy array with ``N=2`` containing the top-left
and bottom-right coordinates.
"""
arr = np.empty((2, 2), dtype=np.float32)
arr[0, :] = (self.x1, self.y1)
arr[1, :] = (self.x2, self.y2)
return arr
@property
def x1_int(self):
"""Get the x-coordinate of the top left corner as an integer.
Returns
-------
int
X-coordinate of the top left corner, rounded to the closest
integer.
"""
# use numpy's round to have consistent behaviour between python
# versions
return int(np.round(self.x1))
@property
def y1_int(self):
"""Get the y-coordinate of the top left corner as an integer.
Returns
-------
int
Y-coordinate of the top left corner, rounded to the closest
integer.
"""
# use numpy's round to have consistent behaviour between python
# versions
return int(np.round(self.y1))
@property
def x2_int(self):
"""Get the x-coordinate of the bottom left corner as an integer.
Returns
-------
int
            X-coordinate of the bottom right corner, rounded to the closest
integer.
"""
# use numpy's round to have consistent behaviour between python
# versions
return int(np.round(self.x2))
@property
def y2_int(self):
"""Get the y-coordinate of the bottom left corner as an integer.
Returns
-------
int
            Y-coordinate of the bottom right corner, rounded to the closest
integer.
"""
# use numpy's round to have consistent behaviour between python
# versions
return int(np.round(self.y2))
@property
def height(self):
"""Estimate the height of the bounding box.
Returns
-------
number
Height of the bounding box.
"""
return self.y2 - self.y1
@property
def width(self):
"""Estimate the width of the bounding box.
Returns
-------
number
Width of the bounding box.
"""
return self.x2 - self.x1
@property
def center_x(self):
"""Estimate the x-coordinate of the center point of the bounding box.
Returns
-------
number
X-coordinate of the center point of the bounding box.
"""
return self.x1 + self.width/2
@property
def center_y(self):
"""Estimate the y-coordinate of the center point of the bounding box.
Returns
-------
number
Y-coordinate of the center point of the bounding box.
"""
return self.y1 + self.height/2
@property
def area(self):
"""Estimate the area of the bounding box.
Returns
-------
number
Area of the bounding box, i.e. ``height * width``.
"""
return self.height * self.width
# TODO add test for tuple of number
def contains(self, other):
"""Estimate whether the bounding box contains a given point.
Parameters
----------
other : tuple of number or imgaug.augmentables.kps.Keypoint
Point to check for.
Returns
-------
bool
``True`` if the point is contained in the bounding box,
``False`` otherwise.
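        Examples
        --------
        Small sketch with arbitrary values:

        >>> bb = BoundingBox(x1=0, y1=0, x2=10, y2=10)
        >>> bb.contains((5, 5))     # True, the point lies inside
        >>> bb.contains((15, 5))    # False, x=15 lies outside [0, 10]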
"""
if isinstance(other, tuple):
x, y = other
else:
x, y = other.x, other.y
return self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2
# TODO add tests for ndarray inputs
def project(self, from_shape, to_shape):
"""Project the bounding box onto a differently shaped image.
E.g. if the bounding box is on its original image at
``x1=(10 of 100 pixels)`` and ``y1=(20 of 100 pixels)`` and is
projected onto a new image with size ``(width=200, height=200)``,
its new position will be ``(x1=20, y1=40)``.
(Analogous for ``x2``/``y2``.)
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int or ndarray
Shape of the original image. (Before resize.)
to_shape : tuple of int or ndarray
Shape of the new image. (After resize.)
Returns
-------
imgaug.augmentables.bbs.BoundingBox
``BoundingBox`` instance with new coordinates.
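        Examples
        --------
        Minimal sketch mirroring the resize case described above:

        >>> bb = BoundingBox(x1=10, y1=20, x2=30, y2=40)
        >>> bb_proj = bb.project(from_shape=(100, 100), to_shape=(200, 200))
        >>> # bb_proj now spans x1=20, y1=40, x2=60, y2=80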
"""
coords_proj = project_coords([(self.x1, self.y1), (self.x2, self.y2)],
from_shape, to_shape)
return self.copy(
x1=coords_proj[0][0],
y1=coords_proj[0][1],
x2=coords_proj[1][0],
y2=coords_proj[1][1],
label=self.label)
def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
"""Extend the size of the bounding box along its sides.
Parameters
----------
all_sides : number, optional
Value by which to extend the bounding box size along all
sides.
top : number, optional
Value by which to extend the bounding box size along its top
side.
right : number, optional
Value by which to extend the bounding box size along its right
side.
bottom : number, optional
Value by which to extend the bounding box size along its bottom
side.
left : number, optional
Value by which to extend the bounding box size along its left
side.
Returns
-------
imgaug.BoundingBox
Extended bounding box.
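        Examples
        --------
        Illustrative values:

        >>> bb = BoundingBox(x1=10, y1=10, x2=20, y2=20)
        >>> bb_ext = bb.extend(all_sides=2, left=1)
        >>> # bb_ext spans x1=7, y1=8, x2=22, y2=22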
"""
return BoundingBox(
x1=self.x1 - all_sides - left,
x2=self.x2 + all_sides + right,
y1=self.y1 - all_sides - top,
y2=self.y2 + all_sides + bottom
)
def intersection(self, other, default=None):
"""Compute the intersection BB between this BB and another BB.
Note that in extreme cases, the intersection can be a single point.
In that case the intersection bounding box exists and it will be
returned, but it will have a height and width of zero.
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox
Other bounding box with which to generate the intersection.
default : any, optional
Default value to return if there is no intersection.
Returns
-------
imgaug.augmentables.bbs.BoundingBox or any
Intersection bounding box of the two bounding boxes if there is
an intersection.
If there is no intersection, the default value will be returned,
            which can be anything.
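        Examples
        --------
        Sketch with two overlapping boxes (arbitrary values):

        >>> bb_a = BoundingBox(x1=0, y1=0, x2=2, y2=2)
        >>> bb_b = BoundingBox(x1=1, y1=1, x2=3, y2=3)
        >>> bb_a.intersection(bb_b)    # box spanning x1=1, y1=1, x2=2, y2=2
        >>> bb_a.intersection(BoundingBox(x1=5, y1=5, x2=6, y2=6))    # None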
"""
x1_i = max(self.x1, other.x1)
y1_i = max(self.y1, other.y1)
x2_i = min(self.x2, other.x2)
y2_i = min(self.y2, other.y2)
if x1_i > x2_i or y1_i > y2_i:
return default
else:
return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i)
def union(self, other):
"""Compute the union BB between this BB and another BB.
This is equivalent to drawing a bounding box around all corner points
of both bounding boxes.
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox
Other bounding box with which to generate the union.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Union bounding box of the two bounding boxes.
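        Examples
        --------
        Sketch with the same two boxes as in the intersection example:

        >>> bb_a = BoundingBox(x1=0, y1=0, x2=2, y2=2)
        >>> bb_b = BoundingBox(x1=1, y1=1, x2=3, y2=3)
        >>> bb_a.union(bb_b)    # box spanning x1=0, y1=0, x2=3, y2=3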
"""
return BoundingBox(
x1=min(self.x1, other.x1),
y1=min(self.y1, other.y1),
x2=max(self.x2, other.x2),
y2=max(self.y2, other.y2),
)
def iou(self, other):
"""Compute the IoU between this bounding box and another one.
IoU is the intersection over union, defined as::
``area(intersection(A, B)) / area(union(A, B))``
``= area(intersection(A, B))
/ (area(A) + area(B) - area(intersection(A, B)))``
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox
Other bounding box with which to compare.
Returns
-------
float
IoU between the two bounding boxes.
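        Examples
        --------
        Worked example with arbitrary boxes:

        >>> bb_a = BoundingBox(x1=0, y1=0, x2=2, y2=2)
        >>> bb_b = BoundingBox(x1=1, y1=1, x2=3, y2=3)
        >>> bb_a.iou(bb_b)    # intersection 1*1=1, union 4+4-1=7, i.e. ~0.143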
"""
inters = self.intersection(other)
if inters is None:
return 0.0
area_union = self.area + other.area - inters.area
return inters.area / area_union if area_union > 0 else 0.0
def is_fully_within_image(self, image):
"""Estimate whether the bounding box is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ``ndarray``, its shape will be used.
If a ``tuple``, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
``True`` if the bounding box is fully inside the image area.
``False`` otherwise.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
return (
self.x1 >= 0
and self.x2 < width
and self.y1 >= 0
and self.y2 < height)
def is_partly_within_image(self, image):
"""Estimate whether the BB is at least partially inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ``ndarray``, its shape will be used.
If a ``tuple``, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
``True`` if the bounding box is at least partially inside the
image area.
``False`` otherwise.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
eps = np.finfo(np.float32).eps
img_bb = BoundingBox(x1=0, x2=width-eps, y1=0, y2=height-eps)
return self.intersection(img_bb) is not None
def is_out_of_image(self, image, fully=True, partly=False):
"""Estimate whether the BB is partially/fully outside of the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ``ndarray``, its shape will be used.
If a ``tuple``, it is assumed to represent the image shape and
must contain at least two integers.
fully : bool, optional
Whether to return ``True`` if the bounding box is fully outside
of the image area.
partly : bool, optional
Whether to return ``True`` if the bounding box is at least
            partially outside of the image area.
Returns
-------
bool
``True`` if the bounding box is partially/fully outside of the
image area, depending on defined parameters.
``False`` otherwise.
"""
if self.is_fully_within_image(image):
return False
elif self.is_partly_within_image(image):
return partly
return fully
@ia.deprecated(alt_func="BoundingBox.clip_out_of_image()",
comment="clip_out_of_image() has the exactly same "
"interface.")
def cut_out_of_image(self, *args, **kwargs):
return self.clip_out_of_image(*args, **kwargs)
def clip_out_of_image(self, image):
"""Clip off all parts of the BB box that are outside of the image.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use for the clipping of the bounding box.
If an ``ndarray``, its shape will be used.
If a ``tuple``, it is assumed to represent the image shape and
must contain at least two integers.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Bounding box, clipped to fall within the image dimensions.
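        Examples
        --------
        Illustrative sketch for a box partially outside a ``100x100`` image:

        >>> bb = BoundingBox(x1=-10, y1=20, x2=120, y2=80)
        >>> bb_clipped = bb.clip_out_of_image((100, 100, 3))
        >>> # bb_clipped spans x1=0, y1=20, x2=100-eps, y2=80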
"""
shape = normalize_shape(image)
height, width = shape[0:2]
assert height > 0, (
"Expected image with height>0, got shape %s." % (image.shape,))
assert width > 0, (
"Expected image with width>0, got shape %s." % (image.shape,))
eps = np.finfo(np.float32).eps
x1 = np.clip(self.x1, 0, width - eps)
x2 = np.clip(self.x2, 0, width - eps)
y1 = np.clip(self.y1, 0, height - eps)
y2 = np.clip(self.y2, 0, height - eps)
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
# TODO convert this to x/y params?
def shift(self, top=None, right=None, bottom=None, left=None):
"""Move this bounding box along the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift this object *from* the
top (towards the bottom).
right : None or int, optional
Amount of pixels by which to shift this object *from* the
right (towards the left).
bottom : None or int, optional
Amount of pixels by which to shift this object *from* the
bottom (towards the top).
left : None or int, optional
Amount of pixels by which to shift this object *from* the
left (towards the right).
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Shifted bounding box.
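        Examples
        --------
        Illustrative values:

        >>> bb = BoundingBox(x1=10, y1=10, x2=20, y2=20)
        >>> bb.shift(left=5)             # x1=15, x2=25, i.e. moved to the right
        >>> bb.shift(top=3, right=1)     # x1=9, x2=19, y1=13, y2=23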
"""
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
return self.copy(
x1=self.x1+left-right,
x2=self.x2+left-right,
y1=self.y1+top-bottom,
y2=self.y2+top-bottom
)
# TODO add explicit test for zero-sized BBs (worked when tested by hand)
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1,
copy=True, raise_if_out_of_image=False, thickness=None):
"""Draw the bounding box on an image.
Parameters
----------
image : (H,W,C) ndarray
The image onto which to draw the bounding box.
Currently expected to be ``uint8``.
color : iterable of int, optional
The color to use, corresponding to the channel layout of the
image. Usually RGB.
alpha : float, optional
The transparency of the drawn bounding box, where ``1.0`` denotes
no transparency and ``0.0`` is invisible.
size : int, optional
The thickness of the bounding box in pixels. If the value is
larger than ``1``, then additional pixels will be added around
the bounding box (i.e. extension towards the outside).
copy : bool, optional
Whether to copy the input image or change it in-place.
raise_if_out_of_image : bool, optional
Whether to raise an error if the bounding box is fully outside of
the image. If set to ``False``, no error will be raised and only
the parts inside the image will be drawn.
thickness : None or int, optional
Deprecated.
Returns
-------
(H,W,C) ndarray(uint8)
Image with bounding box drawn on it.
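        Examples
        --------
        Minimal sketch, assuming a blank ``uint8`` RGB image:

        >>> import numpy as np
        >>> image = np.zeros((100, 100, 3), dtype=np.uint8)
        >>> bb = BoundingBox(x1=10, y1=10, x2=50, y2=50)
        >>> image_drawn = bb.draw_on_image(image, color=(255, 0, 0), size=2)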
"""
if thickness is not None:
ia.warn_deprecated(
"Usage of argument 'thickness' in BoundingBox.draw_on_image() "
"is deprecated. The argument was renamed to 'size'.")
size = thickness
if raise_if_out_of_image and self.is_out_of_image(image):
raise Exception(
"Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f "
"on image with shape %s." % (
self.x1, self.y1, self.x2, self.y2, image.shape))
result = np.copy(image) if copy else image
if isinstance(color, (tuple, list)):
color = np.uint8(color)
for i in range(size):
y1, y2, x1, x2 = self.y1_int, self.y2_int, self.x1_int, self.x2_int
# When y values get into the range (H-0.5, H), the *_int functions
# round them to H. That is technically sensible, but in the case
# of drawing means that the border lies just barely outside of
# the image, making the border disappear, even though the BB is
# fully inside the image. Here we correct for that because of
# beauty reasons. Same is the case for x coordinates.
if self.is_fully_within_image(image):
y1 = np.clip(y1, 0, image.shape[0]-1)
y2 = np.clip(y2, 0, image.shape[0]-1)
x1 = np.clip(x1, 0, image.shape[1]-1)
x2 = np.clip(x2, 0, image.shape[1]-1)
y = [y1-i, y1-i, y2+i, y2+i]
x = [x1-i, x2+i, x2+i, x1-i]
rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape)
if alpha >= 0.99:
result[rr, cc, :] = color
else:
if ia.is_float_array(result):
# TODO use blend_alpha here
result[rr, cc, :] = (
(1 - alpha) * result[rr, cc, :]
+ alpha * color)
result = np.clip(result, 0, 255)
else:
input_dtype = result.dtype
result = result.astype(np.float32)
result[rr, cc, :] = (
(1 - alpha) * result[rr, cc, :]
+ alpha * color)
result = np.clip(result, 0, 255).astype(input_dtype)
return result
# TODO add tests for pad and pad_max
def extract_from_image(self, image, pad=True, pad_max=None,
prevent_zero_size=True):
"""Extract the image pixels within the bounding box.
This function will zero-pad the image if the bounding box is
partially/fully outside of the image.
Parameters
----------
image : (H,W) ndarray or (H,W,C) ndarray
The image from which to extract the pixels within the bounding box.
pad : bool, optional
Whether to zero-pad the image if the object is partially/fully
outside of it.
pad_max : None or int, optional
            The maximum number of pixels that may be zero-padded on any side,
i.e. if this has value ``N`` the total maximum of added pixels
is ``4*N``.
This option exists to prevent extremely large images as a result of
single points being moved very far away during augmentation.
prevent_zero_size : bool, optional
Whether to prevent the height or width of the extracted image from
becoming zero.
If this is set to ``True`` and the height or width of the bounding
box is below ``1``, the height/width will be increased to ``1``.
This can be useful to prevent problems, e.g. with image saving or
plotting.
If it is set to ``False``, images will be returned as ``(H', W')``
or ``(H', W', 3)`` with ``H`` or ``W`` potentially being 0.
Returns
-------
(H',W') ndarray or (H',W',C) ndarray
Pixels within the bounding box. Zero-padded if the bounding box
is partially/fully outside of the image.
            If `prevent_zero_size` is activated, it is guaranteed that
``H'>0`` and ``W'>0``, otherwise only ``H'>=0`` and ``W'>=0``.
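        Examples
        --------
        Minimal sketch, assuming a blank ``uint8`` image:

        >>> import numpy as np
        >>> image = np.zeros((100, 100, 3), dtype=np.uint8)
        >>> bb = BoundingBox(x1=10, y1=20, x2=30, y2=50)
        >>> crop = bb.extract_from_image(image)    # shape (30, 20, 3)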
"""
pad_top = 0
pad_right = 0
pad_bottom = 0
pad_left = 0
height, width = image.shape[0], image.shape[1]
x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
# When y values get into the range (H-0.5, H), the *_int functions
# round them to H. That is technically sensible, but in the case of
# extraction leads to a black border, which is both ugly and
# unexpected after calling cut_out_of_image(). Here we correct for
# that because of beauty reasons. Same is the case for x coordinates.
fully_within = self.is_fully_within_image(image)
if fully_within:
y1, y2 = np.clip([y1, y2], 0, height-1)
x1, x2 = np.clip([x1, x2], 0, width-1)
# TODO add test
if prevent_zero_size:
if abs(x2 - x1) < 1:
x2 = x1 + 1
if abs(y2 - y1) < 1:
y2 = y1 + 1
if pad:
# if the bb is outside of the image area, the following pads the
# image first with black pixels until the bb is inside the image
# and only then extracts the image area
# TODO probably more efficient to initialize an array of zeros
# and copy only the portions of the bb into that array that
# are natively inside the image area
if x1 < 0:
pad_left = abs(x1)
x2 = x2 + pad_left
width = width + pad_left
x1 = 0
if y1 < 0:
pad_top = abs(y1)
y2 = y2 + pad_top
height = height + pad_top
y1 = 0
if x2 >= width:
pad_right = x2 - width
if y2 >= height:
pad_bottom = y2 - height
paddings = [pad_top, pad_right, pad_bottom, pad_left]
any_padded = any([val > 0 for val in paddings])
if any_padded:
if pad_max is None:
pad_max = max(paddings)
image = ia.pad(
image,
top=min(pad_top, pad_max),
right=min(pad_right, pad_max),
bottom=min(pad_bottom, pad_max),
left=min(pad_left, pad_max)
)
return image[y1:y2, x1:x2]
else:
within_image = (
(0, 0, 0, 0)
<= (x1, y1, x2, y2)
< (width, height, width, height)
)
out_height, out_width = (y2 - y1), (x2 - x1)
nonzero_height = (out_height > 0)
nonzero_width = (out_width > 0)
if within_image and nonzero_height and nonzero_width:
return image[y1:y2, x1:x2]
if prevent_zero_size:
out_height = 1
out_width = 1
else:
out_height = 0
out_width = 0
if image.ndim == 2:
return np.zeros((out_height, out_width), dtype=image.dtype)
return np.zeros((out_height, out_width, image.shape[-1]),
dtype=image.dtype)
# TODO also add to_heatmap
# TODO add this to BoundingBoxesOnImage
def to_keypoints(self):
"""Convert the BB's corners to keypoints (clockwise, from top left).
Returns
-------
list of imgaug.augmentables.kps.Keypoint
Corners of the bounding box as keypoints.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.kps import Keypoint
return [
Keypoint(x=self.x1, y=self.y1),
Keypoint(x=self.x2, y=self.y1),
Keypoint(x=self.x2, y=self.y2),
Keypoint(x=self.x1, y=self.y2)
]
def coords_almost_equals(self, other, max_distance=1e-4):
"""Estimate if this and another BB have almost identical coordinates.
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox or iterable
The other bounding box with which to compare this one.
If this is an ``iterable``, it is assumed to represent the top-left
and bottom-right coordinates of that bounding box, given as e.g.
an ``(2,2)`` ndarray or an ``(4,)`` ndarray or as a similar list.
max_distance : number, optional
The maximum euclidean distance between a corner on one bounding
box and the closest corner on the other bounding box. If the
distance is exceeded for any such pair, the two BBs are not
viewed as equal.
Returns
-------
bool
Whether the two bounding boxes have almost identical corner
coordinates.
"""
if ia.is_np_array(other):
# we use flat here in case other is (N,2) instead of (4,)
coords_b = other.flat
elif ia.is_iterable(other):
coords_b = list(ia.flatten(other))
else:
assert isinstance(other, BoundingBox), (
"Expected 'other' to be an iterable containing two "
"(x,y)-coordinate pairs or a BoundingBox. "
"Got type %s." % (type(other),))
coords_b = other.coords.flat
coords_a = self.coords
return np.allclose(coords_a.flat, coords_b, atol=max_distance, rtol=0)
def almost_equals(self, other, max_distance=1e-4):
"""Compare this and another BB's label and coordinates.
This is the same as
:func:`imgaug.augmentables.bbs.BoundingBox.coords_almost_equals` but
additionally compares the labels.
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox or iterable
The other object to compare against. Expected to be a
``BoundingBox``.
max_distance : number, optional
See
:func:`imgaug.augmentables.bbs.BoundingBox.coords_almost_equals`.
Returns
-------
bool
``True`` if the coordinates are almost equal and additionally
the labels are equal. Otherwise ``False``.
"""
if self.label != other.label:
return False
return self.coords_almost_equals(other, max_distance=max_distance)
@classmethod
def from_point_soup(cls, xy):
"""Convert a ``(2P,) or (P,2) ndarray`` to a BB instance.
This is the inverse of
:func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.
Parameters
----------
xy : (2P,) ndarray or (P, 2) array or iterable of number or iterable of iterable of number
Array containing ``P`` points in xy-form denoting a soup of
points around which to place a bounding box.
The array should usually be of dtype ``float32``.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Bounding box around the points.
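        Examples
        --------
        Sketch with a small soup of points:

        >>> BoundingBox.from_point_soup([(10, 20), (5, 50), (30, 25)])
        >>> # -> bounding box spanning x1=5, y1=20, x2=30, y2=50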
"""
xy = np.array(xy, dtype=np.float32)
assert len(xy) > 0, (
"Expected to get at least one point to place a bounding box "
"around, got shape %s." % (xy.shape,))
assert xy.ndim == 1 or (xy.ndim == 2 and xy.shape[-1] == 2), (
"Expected input array of shape (P,) or (P, 2), "
"got shape %s." % (xy.shape,))
if xy.ndim == 1:
xy = xy.reshape((-1, 2))
x1, y1 = np.min(xy, axis=0)
x2, y2 = np.max(xy, axis=0)
return cls(x1=x1, y1=y1, x2=x2, y2=y2)
def copy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""Create a shallow copy of this BoundingBox instance.
Parameters
----------
x1 : None or number
If not ``None``, then the ``x1`` coordinate of the copied object
will be set to this value.
y1 : None or number
If not ``None``, then the ``y1`` coordinate of the copied object
will be set to this value.
x2 : None or number
If not ``None``, then the ``x2`` coordinate of the copied object
will be set to this value.
y2 : None or number
If not ``None``, then the ``y2`` coordinate of the copied object
will be set to this value.
label : None or string
If not ``None``, then the ``label`` of the copied object
will be set to this value.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Shallow copy.
"""
return BoundingBox(
x1=self.x1 if x1 is None else x1,
x2=self.x2 if x2 is None else x2,
y1=self.y1 if y1 is None else y1,
y2=self.y2 if y2 is None else y2,
label=copy.deepcopy(self.label) if label is None else label
)
def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a deep copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not ``None``, then the ``x1`` coordinate of the copied object
will be set to this value.
y1 : None or number
If not ``None``, then the ``y1`` coordinate of the copied object
will be set to this value.
x2 : None or number
If not ``None``, then the ``x2`` coordinate of the copied object
will be set to this value.
y2 : None or number
If not ``None``, then the ``y2`` coordinate of the copied object
will be set to this value.
label : None or string
If not ``None``, then the ``label`` of the copied object
will be set to this value.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Deep copy.
"""
# TODO write specific copy routine with deepcopy for label and remove
# the deepcopy from copy()
return self.copy(x1=x1, y1=y1, x2=x2, y2=y2, label=label)
def __repr__(self):
return self.__str__()
def __str__(self):
return "BoundingBox(x1=%.4f, y1=%.4f, x2=%.4f, y2=%.4f, label=%s)" % (
self.x1, self.y1, self.x2, self.y2, self.label)
class BoundingBoxesOnImage(object):
"""Container for the list of all bounding boxes on a single image.
Parameters
----------
bounding_boxes : list of imgaug.augmentables.bbs.BoundingBox
List of bounding boxes on the image.
shape : tuple of int or ndarray
The shape of the image on which the objects are placed.
Either an image with shape ``(H,W,[C])`` or a ``tuple`` denoting
such an image shape.
Examples
--------
>>> import numpy as np
>>> from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
>>>
>>> image = np.zeros((100, 100))
>>> bbs = [
>>> BoundingBox(x1=10, y1=20, x2=20, y2=30),
>>> BoundingBox(x1=25, y1=50, x2=30, y2=70)
>>> ]
>>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape)
"""
def __init__(self, bounding_boxes, shape):
self.bounding_boxes = bounding_boxes
self.shape = normalize_shape(shape)
@property
def items(self):
"""Get the bounding boxes in this container.
Returns
-------
list of BoundingBox
Bounding boxes within this container.
"""
return self.bounding_boxes
# TODO remove this? here it is image height, but in BoundingBox it is
# bounding box height
@property
def height(self):
"""Get the height of the image on which the bounding boxes fall.
Returns
-------
int
Image height.
"""
return self.shape[0]
# TODO remove this? here it is image width, but in BoundingBox it is
# bounding box width
@property
def width(self):
"""Get the width of the image on which the bounding boxes fall.
Returns
-------
int
Image width.
"""
return self.shape[1]
@property
def empty(self):
"""Determine whether this instance contains zero bounding boxes.
Returns
-------
bool
True if this object contains zero bounding boxes.
"""
return len(self.bounding_boxes) == 0
def on(self, image):
"""Project bounding boxes from one image (shape) to a another one.
Parameters
----------
image : ndarray or tuple of int
New image onto which the bounding boxes are to be projected.
May also simply be that new image's shape tuple.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Object containing the same bounding boxes after projection to
the new image shape.
"""
shape = normalize_shape(image)
if shape[0:2] == self.shape[0:2]:
return self.deepcopy()
bounding_boxes = [bb.project(self.shape, shape)
for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bounding_boxes, shape)
@classmethod
def from_xyxy_array(cls, xyxy, shape):
"""Convert an ``(N, 4) or (N, 2, 2) ndarray`` to a BBsOI instance.
This is the inverse of
:func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.
Parameters
----------
xyxy : (N, 4) ndarray or (N, 2, 2) array
Array containing the corner coordinates of ``N`` bounding boxes.
Each bounding box is represented by its top-left and bottom-right
coordinates.
The array should usually be of dtype ``float32``.
shape : tuple of int
Shape of the image on which the bounding boxes are placed.
Should usually be ``(H, W, C)`` or ``(H, W)``.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Object containing a list of :class:`BoundingBox` instances
derived from the provided corner coordinates.
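        Examples
        --------
        Sketch with two boxes given as an ``(N, 4)`` array:

        >>> import numpy as np
        >>> xyxy = np.float32([[10, 20, 30, 40], [5, 5, 15, 15]])
        >>> bbs_oi = BoundingBoxesOnImage.from_xyxy_array(
        >>>     xyxy, shape=(100, 100, 3))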
"""
xyxy = np.array(xyxy, dtype=np.float32)
# note that np.array([]) is (0,), not (0, 2)
if xyxy.shape[0] == 0:
return BoundingBoxesOnImage([], shape)
assert (
(xyxy.ndim == 2 and xyxy.shape[-1] == 4)
or (xyxy.ndim == 3 and xyxy.shape[1:3] == (2, 2))), (
"Expected input array of shape (N, 4) or (N, 2, 2), "
"got shape %s." % (xyxy.shape,))
xyxy = xyxy.reshape((-1, 2, 2))
boxes = [BoundingBox.from_point_soup(row) for row in xyxy]
return cls(boxes, shape)
@classmethod
def from_point_soups(cls, xy, shape):
"""Convert an ``(N, 2P) or (N, P, 2) ndarray`` to a BBsOI instance.
Parameters
----------
xy : (N, 2P) ndarray or (N, P, 2) array or iterable of iterable of number or iterable of iterable of iterable of number
Array containing the corner coordinates of ``N`` bounding boxes.
Each bounding box is represented by a soup of ``P`` points.
If ``(N, P)`` then the second axis is expected to be in
xy-form (e.g. ``x1``, ``y1``, ``x2``, ``y2``, ...).
The final bounding box coordinates will be derived using ``min``
and ``max`` operations on the xy-values.
The array should usually be of dtype ``float32``.
shape : tuple of int
Shape of the image on which the bounding boxes are placed.
Should usually be ``(H, W, C)`` or ``(H, W)``.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Object containing a list of :class:`BoundingBox` instances
derived from the provided point soups.
"""
xy = np.array(xy, dtype=np.float32)
# from_xy_array() already checks the ndim/shape, so we don't have to
# do it here
boxes = [BoundingBox.from_point_soup(row) for row in xy]
return cls(boxes, shape)
def to_xyxy_array(self, dtype=np.float32):
"""Convert the ``BoundingBoxesOnImage`` object to an ``(N,4) ndarray``.
This is the inverse of
:func:`imgaug.BoundingBoxesOnImage.from_xyxy_array`.
Parameters
----------
dtype : numpy.dtype, optional
Desired output datatype of the ndarray.
Returns
-------
ndarray
``(N,4) ndarray``, where ``N`` denotes the number of bounding
boxes and ``4`` denotes the top-left and bottom-right bounding
box corner coordinates in form ``(x1, y1, x2, y2)``.
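        Examples
        --------
        Inverse of the ``from_xyxy_array()`` sketch above:

        >>> bbs_oi = BoundingBoxesOnImage(
        >>>     [BoundingBox(x1=10, y1=20, x2=30, y2=40)], shape=(100, 100))
        >>> bbs_oi.to_xyxy_array()    # array([[10., 20., 30., 40.]], float32)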
"""
xyxy_array = np.zeros((len(self.bounding_boxes), 4), dtype=np.float32)
for i, box in enumerate(self.bounding_boxes):
xyxy_array[i] = [box.x1, box.y1, box.x2, box.y2]
return xyxy_array.astype(dtype)
def to_xy_array(self):
"""Convert the ``BoundingBoxesOnImage`` object to an ``(N,2) ndarray``.
Returns
-------
ndarray
``(2*B,2) ndarray`` of xy-coordinates, where ``B`` denotes the
number of bounding boxes.
"""
return self.to_xyxy_array().reshape((-1, 2))
def fill_from_xyxy_array_(self, xyxy):
"""Modify the BB coordinates of this instance in-place.
.. note ::
This currently expects exactly one entry in `xyxy` per bounding
            box in this instance. (I.e. two corner coordinates per bounding box.)
Otherwise, an ``AssertionError`` will be raised.
.. note ::
This method will automatically flip x-coordinates if ``x1>x2``
for a bounding box. (Analogous for y-coordinates.)
Parameters
----------
xyxy : (N, 4) ndarray or iterable of iterable of number
Coordinates of ``N`` bounding boxes on an image, given as
a ``(N,4)`` array of two corner xy-coordinates per bounding box.
``N`` must match the number of bounding boxes in this instance.
Returns
-------
BoundingBoxesOnImage
This instance itself, with updated bounding box coordinates.
Note that the instance was modified in-place.
"""
xyxy = np.array(xyxy, dtype=np.float32)
# note that np.array([]) is (0,), not (0, 4)
assert xyxy.shape[0] == 0 or (xyxy.ndim == 2 and xyxy.shape[-1] == 4), (
"Expected input array to have shape (N,4), "
"got shape %s." % (xyxy.shape,))
assert len(xyxy) == len(self.bounding_boxes), (
"Expected to receive an array with as many rows there are "
"bounding boxes in this instance. Got %d rows, expected %d." % (
len(xyxy), len(self.bounding_boxes)))
for bb, (x1, y1, x2, y2) in zip(self.bounding_boxes, xyxy):
bb.x1 = min([x1, x2])
bb.y1 = min([y1, y2])
bb.x2 = max([x1, x2])
bb.y2 = max([y1, y2])
return self
def fill_from_xy_array_(self, xy):
"""Modify the BB coordinates of this instance in-place.
See
:func:`imgaug.augmentables.bbs.BoundingBoxesOnImage.fill_from_xyxy_array_`.
Parameters
----------
xy : (2*B, 2) ndarray or iterable of iterable of number
Coordinates of ``B`` bounding boxes on an image, given as
a ``(2*B,2)`` array of two corner xy-coordinates per bounding box.
``B`` must match the number of bounding boxes in this instance.
Returns
-------
BoundingBoxesOnImage
This instance itself, with updated bounding box coordinates.
Note that the instance was modified in-place.
"""
xy = np.array(xy, dtype=np.float32)
return self.fill_from_xyxy_array_(xy.reshape((-1, 4)))
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1,
copy=True, raise_if_out_of_image=False, thickness=None):
"""Draw all bounding boxes onto a given image.
Parameters
----------
image : (H,W,3) ndarray
The image onto which to draw the bounding boxes.
This image should usually have the same shape as set in
``BoundingBoxesOnImage.shape``.
color : int or list of int or tuple of int or (3,) ndarray, optional
The RGB color of all bounding boxes.
If a single ``int`` ``C``, then that is equivalent to ``(C,C,C)``.
alpha : float, optional
Alpha/transparency of the bounding box.
size : int, optional
Thickness in pixels.
copy : bool, optional
Whether to copy the image before drawing the bounding boxes.
raise_if_out_of_image : bool, optional
Whether to raise an exception if any bounding box is outside of the
image.
thickness : None or int, optional
Deprecated.
Returns
-------
(H,W,3) ndarray
Image with drawn bounding boxes.
"""
image = np.copy(image) if copy else image
for bb in self.bounding_boxes:
image = bb.draw_on_image(
image,
color=color,
alpha=alpha,
size=size,
copy=False,
raise_if_out_of_image=raise_if_out_of_image,
thickness=thickness
)
return image
def remove_out_of_image(self, fully=True, partly=False):
"""Remove all BBs that are fully/partially outside of the image.
Parameters
----------
fully : bool, optional
Whether to remove bounding boxes that are fully outside of the
image.
partly : bool, optional
Whether to remove bounding boxes that are partially outside of
the image.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Reduced set of bounding boxes, with those that were
fully/partially outside of the image being removed.
"""
bbs_clean = [
bb
for bb
in self.bounding_boxes
if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)]
return BoundingBoxesOnImage(bbs_clean, shape=self.shape)
@ia.deprecated(alt_func="BoundingBoxesOnImage.clip_out_of_image()",
comment="clip_out_of_image() has the exactly same "
"interface.")
def cut_out_of_image(self):
return self.clip_out_of_image()
def clip_out_of_image(self):
"""Clip off all parts from all BBs that are outside of the image.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Bounding boxes, clipped to fall within the image dimensions.
"""
bbs_cut = [
bb.clip_out_of_image(self.shape)
for bb
in self.bounding_boxes
if bb.is_partly_within_image(self.shape)]
return BoundingBoxesOnImage(bbs_cut, shape=self.shape)
def shift(self, top=None, right=None, bottom=None, left=None):
"""Move all all BBs along the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift all objects *from* the
top (towards the bottom).
right : None or int, optional
Amount of pixels by which to shift all objects *from* the
            right (towards the left).
bottom : None or int, optional
Amount of pixels by which to shift all objects *from* the
bottom (towards the top).
left : None or int, optional
Amount of pixels by which to shift all objects *from* the
left (towards the right).
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Shifted bounding boxes.
"""
bbs_new = [
bb.shift(top=top, right=right, bottom=bottom, left=left)
for bb
in self.bounding_boxes]
return BoundingBoxesOnImage(bbs_new, shape=self.shape)
def to_keypoints_on_image(self):
"""Convert the bounding boxes to one ``KeypointsOnImage`` instance.
Returns
-------
imgaug.augmentables.kps.KeypointsOnImage
A keypoints instance containing ``N*4`` coordinates for ``N``
bounding boxes. Order matches the order in ``bounding_boxes``.
"""
from .kps import KeypointsOnImage
# This currently uses 4 points instead of 2 points as the method
# is primarily used during augmentation and 4 points are overall
# the better choice there.
arr = np.zeros((len(self.bounding_boxes), 2*4), dtype=np.float32)
for i, box in enumerate(self.bounding_boxes):
arr[i] = [
box.x1, box.y1,
box.x2, box.y1,
box.x2, box.y2,
box.x1, box.y2
]
return KeypointsOnImage.from_xy_array(
arr.reshape((-1, 2)),
shape=self.shape
)
def invert_to_keypoints_on_image_(self, kpsoi):
"""Invert the output of ``to_keypoints_on_image()`` in-place.
This function writes in-place into this ``BoundingBoxesOnImage``
instance.
Parameters
----------
kpsoi : imgaug.augmentables.kps.KeypointsOnImages
Keypoints to convert back to bounding boxes, i.e. the outputs
of ``to_keypoints_on_image()``.
Returns
-------
BoundingBoxesOnImage
Bounding boxes container with updated coordinates.
Note that the instance is also updated in-place.
"""
assert len(kpsoi.keypoints) == len(self.bounding_boxes) * 4, (
"Expected %d coordinates, got %d." % (
                len(self.bounding_boxes) * 4, len(kpsoi.keypoints)))
for i, bb in enumerate(self.bounding_boxes):
xx = [kpsoi.keypoints[4*i+0].x, kpsoi.keypoints[4*i+1].x,
kpsoi.keypoints[4*i+2].x, kpsoi.keypoints[4*i+3].x]
yy = [kpsoi.keypoints[4*i+0].y, kpsoi.keypoints[4*i+1].y,
kpsoi.keypoints[4*i+2].y, kpsoi.keypoints[4*i+3].y]
bb.x1 = min(xx)
bb.y1 = min(yy)
bb.x2 = max(xx)
bb.y2 = max(yy)
self.shape = kpsoi.shape
return self
def copy(self):
"""Create a shallow copy of the ``BoundingBoxesOnImage`` instance.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""Create a deep copy of the ``BoundingBoxesOnImage`` object.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Deep copy.
"""
# Manual copy is far faster than deepcopy for BoundingBoxesOnImage,
# so use manual copy here too
bbs = [bb.deepcopy() for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bbs, tuple(self.shape))
def __repr__(self):
return self.__str__()
def __str__(self):
return (
"BoundingBoxesOnImage(%s, shape=%s)"
% (str(self.bounding_boxes), self.shape))
| from __future__ import print_function, division, absolute_import
import copy
import numpy as np
import skimage.draw
import skimage.measure
from .. import imgaug as ia
from .utils import normalize_shape, project_coords
# TODO functions: square(), to_aspect_ratio(), contains_point()
class BoundingBox(object):
"""Class representing bounding boxes.
Each bounding box is parameterized by its top left and bottom right
corners. Both are given as x and y-coordinates. The corners are intended
to lie inside the bounding box area. As a result, a bounding box that lies
completely inside the image but has maximum extensions would have
coordinates ``(0.0, 0.0)`` and ``(W - epsilon, H - epsilon)``. Note that
coordinates are saved internally as floats.
Parameters
----------
x1 : number
X-coordinate of the top left of the bounding box.
y1 : number
Y-coordinate of the top left of the bounding box.
x2 : number
X-coordinate of the bottom right of the bounding box.
y2 : number
Y-coordinate of the bottom right of the bounding box.
label : None or str, optional
Label of the bounding box, e.g. a string representing the class.
"""
def __init__(self, x1, y1, x2, y2, label=None):
"""Create a new BoundingBox instance."""
if x1 > x2:
x2, x1 = x1, x2
if y1 > y2:
y2, y1 = y1, y2
self.x1 = x1
self.y1 = y1
self.x2 = x2
self.y2 = y2
self.label = label
@property
def coords(self):
"""Get the top-left and bottom-right coordinates as one array.
Returns
-------
ndarray
A ``(N, 2)`` numpy array with ``N=2`` containing the top-left
and bottom-right coordinates.
"""
arr = np.empty((2, 2), dtype=np.float32)
arr[0, :] = (self.x1, self.y1)
arr[1, :] = (self.x2, self.y2)
return arr
@property
def x1_int(self):
"""Get the x-coordinate of the top left corner as an integer.
Returns
-------
int
X-coordinate of the top left corner, rounded to the closest
integer.
"""
# use numpy's round to have consistent behaviour between python
# versions
return int(np.round(self.x1))
@property
def y1_int(self):
"""Get the y-coordinate of the top left corner as an integer.
Returns
-------
int
Y-coordinate of the top left corner, rounded to the closest
integer.
"""
# use numpy's round to have consistent behaviour between python
# versions
return int(np.round(self.y1))
@property
def x2_int(self):
"""Get the x-coordinate of the bottom left corner as an integer.
Returns
-------
int
            X-coordinate of the bottom right corner, rounded to the closest
integer.
"""
# use numpy's round to have consistent behaviour between python
# versions
return int(np.round(self.x2))
@property
def y2_int(self):
"""Get the y-coordinate of the bottom left corner as an integer.
Returns
-------
int
            Y-coordinate of the bottom right corner, rounded to the closest
integer.
"""
# use numpy's round to have consistent behaviour between python
# versions
return int(np.round(self.y2))
@property
def height(self):
"""Estimate the height of the bounding box.
Returns
-------
number
Height of the bounding box.
"""
return self.y2 - self.y1
@property
def width(self):
"""Estimate the width of the bounding box.
Returns
-------
number
Width of the bounding box.
"""
return self.x2 - self.x1
@property
def center_x(self):
"""Estimate the x-coordinate of the center point of the bounding box.
Returns
-------
number
X-coordinate of the center point of the bounding box.
"""
return self.x1 + self.width/2
@property
def center_y(self):
"""Estimate the y-coordinate of the center point of the bounding box.
Returns
-------
number
Y-coordinate of the center point of the bounding box.
"""
return self.y1 + self.height/2
@property
def area(self):
"""Estimate the area of the bounding box.
Returns
-------
number
Area of the bounding box, i.e. ``height * width``.
"""
return self.height * self.width
# TODO add test for tuple of number
def contains(self, other):
"""Estimate whether the bounding box contains a given point.
Parameters
----------
other : tuple of number or imgaug.augmentables.kps.Keypoint
Point to check for.
Returns
-------
bool
``True`` if the point is contained in the bounding box,
``False`` otherwise.
"""
if isinstance(other, tuple):
x, y = other
else:
x, y = other.x, other.y
return self.x1 <= x <= self.x2 and self.y1 <= y <= self.y2
# TODO add tests for ndarray inputs
def project(self, from_shape, to_shape):
"""Project the bounding box onto a differently shaped image.
E.g. if the bounding box is on its original image at
``x1=(10 of 100 pixels)`` and ``y1=(20 of 100 pixels)`` and is
projected onto a new image with size ``(width=200, height=200)``,
its new position will be ``(x1=20, y1=40)``.
(Analogous for ``x2``/``y2``.)
This is intended for cases where the original image is resized.
It cannot be used for more complex changes (e.g. padding, cropping).
Parameters
----------
from_shape : tuple of int or ndarray
Shape of the original image. (Before resize.)
to_shape : tuple of int or ndarray
Shape of the new image. (After resize.)
Returns
-------
imgaug.augmentables.bbs.BoundingBox
``BoundingBox`` instance with new coordinates.
"""
coords_proj = project_coords([(self.x1, self.y1), (self.x2, self.y2)],
from_shape, to_shape)
return self.copy(
x1=coords_proj[0][0],
y1=coords_proj[0][1],
x2=coords_proj[1][0],
y2=coords_proj[1][1],
label=self.label)
def extend(self, all_sides=0, top=0, right=0, bottom=0, left=0):
"""Extend the size of the bounding box along its sides.
Parameters
----------
all_sides : number, optional
Value by which to extend the bounding box size along all
sides.
top : number, optional
Value by which to extend the bounding box size along its top
side.
right : number, optional
Value by which to extend the bounding box size along its right
side.
bottom : number, optional
Value by which to extend the bounding box size along its bottom
side.
left : number, optional
Value by which to extend the bounding box size along its left
side.
Returns
-------
imgaug.BoundingBox
Extended bounding box.
"""
return BoundingBox(
x1=self.x1 - all_sides - left,
x2=self.x2 + all_sides + right,
y1=self.y1 - all_sides - top,
y2=self.y2 + all_sides + bottom
)
def intersection(self, other, default=None):
"""Compute the intersection BB between this BB and another BB.
Note that in extreme cases, the intersection can be a single point.
In that case the intersection bounding box exists and it will be
returned, but it will have a height and width of zero.
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox
Other bounding box with which to generate the intersection.
default : any, optional
Default value to return if there is no intersection.
Returns
-------
imgaug.augmentables.bbs.BoundingBox or any
Intersection bounding box of the two bounding boxes if there is
an intersection.
If there is no intersection, the default value will be returned,
            which can be anything.
"""
x1_i = max(self.x1, other.x1)
y1_i = max(self.y1, other.y1)
x2_i = min(self.x2, other.x2)
y2_i = min(self.y2, other.y2)
if x1_i > x2_i or y1_i > y2_i:
return default
else:
return BoundingBox(x1=x1_i, y1=y1_i, x2=x2_i, y2=y2_i)
def union(self, other):
"""Compute the union BB between this BB and another BB.
This is equivalent to drawing a bounding box around all corner points
of both bounding boxes.
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox
Other bounding box with which to generate the union.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Union bounding box of the two bounding boxes.
"""
return BoundingBox(
x1=min(self.x1, other.x1),
y1=min(self.y1, other.y1),
x2=max(self.x2, other.x2),
y2=max(self.y2, other.y2),
)
def iou(self, other):
"""Compute the IoU between this bounding box and another one.
IoU is the intersection over union, defined as::
``area(intersection(A, B)) / area(union(A, B))``
``= area(intersection(A, B))
/ (area(A) + area(B) - area(intersection(A, B)))``
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox
Other bounding box with which to compare.
Returns
-------
float
IoU between the two bounding boxes.
"""
inters = self.intersection(other)
if inters is None:
return 0.0
area_union = self.area + other.area - inters.area
return inters.area / area_union if area_union > 0 else 0.0
def is_fully_within_image(self, image):
"""Estimate whether the bounding box is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ``ndarray``, its shape will be used.
If a ``tuple``, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
``True`` if the bounding box is fully inside the image area.
``False`` otherwise.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
return (
self.x1 >= 0
and self.x2 < width
and self.y1 >= 0
and self.y2 < height)
def is_partly_within_image(self, image):
"""Estimate whether the BB is at least partially inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ``ndarray``, its shape will be used.
If a ``tuple``, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
``True`` if the bounding box is at least partially inside the
image area.
``False`` otherwise.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
eps = np.finfo(np.float32).eps
img_bb = BoundingBox(x1=0, x2=width-eps, y1=0, y2=height-eps)
return self.intersection(img_bb) is not None
def is_out_of_image(self, image, fully=True, partly=False):
"""Estimate whether the BB is partially/fully outside of the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ``ndarray``, its shape will be used.
If a ``tuple``, it is assumed to represent the image shape and
must contain at least two integers.
fully : bool, optional
Whether to return ``True`` if the bounding box is fully outside
of the image area.
partly : bool, optional
Whether to return ``True`` if the bounding box is at least
            partially outside of the image area.
Returns
-------
bool
``True`` if the bounding box is partially/fully outside of the
image area, depending on defined parameters.
``False`` otherwise.
"""
if self.is_fully_within_image(image):
return False
elif self.is_partly_within_image(image):
return partly
return fully
@ia.deprecated(alt_func="BoundingBox.clip_out_of_image()",
comment="clip_out_of_image() has the exactly same "
"interface.")
def cut_out_of_image(self, *args, **kwargs):
return self.clip_out_of_image(*args, **kwargs)
def clip_out_of_image(self, image):
"""Clip off all parts of the BB box that are outside of the image.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use for the clipping of the bounding box.
If an ``ndarray``, its shape will be used.
If a ``tuple``, it is assumed to represent the image shape and
must contain at least two integers.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Bounding box, clipped to fall within the image dimensions.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
assert height > 0, (
"Expected image with height>0, got shape %s." % (image.shape,))
assert width > 0, (
"Expected image with width>0, got shape %s." % (image.shape,))
eps = np.finfo(np.float32).eps
x1 = np.clip(self.x1, 0, width - eps)
x2 = np.clip(self.x2, 0, width - eps)
y1 = np.clip(self.y1, 0, height - eps)
y2 = np.clip(self.y2, 0, height - eps)
return self.copy(
x1=x1,
y1=y1,
x2=x2,
y2=y2,
label=self.label
)
# TODO convert this to x/y params?
def shift(self, top=None, right=None, bottom=None, left=None):
"""Move this bounding box along the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift this object *from* the
top (towards the bottom).
right : None or int, optional
Amount of pixels by which to shift this object *from* the
right (towards the left).
bottom : None or int, optional
Amount of pixels by which to shift this object *from* the
bottom (towards the top).
left : None or int, optional
Amount of pixels by which to shift this object *from* the
left (towards the right).
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Shifted bounding box.
"""
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
return self.copy(
x1=self.x1+left-right,
x2=self.x2+left-right,
y1=self.y1+top-bottom,
y2=self.y2+top-bottom
)
# TODO add explicit test for zero-sized BBs (worked when tested by hand)
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1,
copy=True, raise_if_out_of_image=False, thickness=None):
"""Draw the bounding box on an image.
Parameters
----------
image : (H,W,C) ndarray
The image onto which to draw the bounding box.
Currently expected to be ``uint8``.
color : iterable of int, optional
The color to use, corresponding to the channel layout of the
image. Usually RGB.
alpha : float, optional
The transparency of the drawn bounding box, where ``1.0`` denotes
no transparency and ``0.0`` is invisible.
size : int, optional
The thickness of the bounding box in pixels. If the value is
larger than ``1``, then additional pixels will be added around
the bounding box (i.e. extension towards the outside).
copy : bool, optional
Whether to copy the input image or change it in-place.
raise_if_out_of_image : bool, optional
Whether to raise an error if the bounding box is fully outside of
the image. If set to ``False``, no error will be raised and only
the parts inside the image will be drawn.
thickness : None or int, optional
Deprecated.
Returns
-------
(H,W,C) ndarray(uint8)
Image with bounding box drawn on it.
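        Examples
        --------
        A sketch on an arbitrary blank ``uint8`` image; the color and size
        are example values.

        >>> import numpy as np
        >>> from imgaug.augmentables.bbs import BoundingBox
        >>> image = np.zeros((100, 100, 3), dtype=np.uint8)
        >>> bb = BoundingBox(x1=10, y1=20, x2=40, y2=60)
        >>> image_drawn = bb.draw_on_image(image, color=(255, 0, 0), size=2)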
"""
if thickness is not None:
ia.warn_deprecated(
"Usage of argument 'thickness' in BoundingBox.draw_on_image() "
"is deprecated. The argument was renamed to 'size'.")
size = thickness
if raise_if_out_of_image and self.is_out_of_image(image):
raise Exception(
"Cannot draw bounding box x1=%.8f, y1=%.8f, x2=%.8f, y2=%.8f "
"on image with shape %s." % (
self.x1, self.y1, self.x2, self.y2, image.shape))
result = np.copy(image) if copy else image
if isinstance(color, (tuple, list)):
color = np.uint8(color)
for i in range(size):
y1, y2, x1, x2 = self.y1_int, self.y2_int, self.x1_int, self.x2_int
# When y values get into the range (H-0.5, H), the *_int functions
# round them to H. That is technically sensible, but in the case
# of drawing means that the border lies just barely outside of
# the image, making the border disappear, even though the BB is
# fully inside the image. Here we correct for that because of
# beauty reasons. Same is the case for x coordinates.
if self.is_fully_within_image(image):
y1 = np.clip(y1, 0, image.shape[0]-1)
y2 = np.clip(y2, 0, image.shape[0]-1)
x1 = np.clip(x1, 0, image.shape[1]-1)
x2 = np.clip(x2, 0, image.shape[1]-1)
y = [y1-i, y1-i, y2+i, y2+i]
x = [x1-i, x2+i, x2+i, x1-i]
rr, cc = skimage.draw.polygon_perimeter(y, x, shape=result.shape)
if alpha >= 0.99:
result[rr, cc, :] = color
else:
if ia.is_float_array(result):
# TODO use blend_alpha here
result[rr, cc, :] = (
(1 - alpha) * result[rr, cc, :]
+ alpha * color)
result = np.clip(result, 0, 255)
else:
input_dtype = result.dtype
result = result.astype(np.float32)
result[rr, cc, :] = (
(1 - alpha) * result[rr, cc, :]
+ alpha * color)
result = np.clip(result, 0, 255).astype(input_dtype)
return result
# TODO add tests for pad and pad_max
def extract_from_image(self, image, pad=True, pad_max=None,
prevent_zero_size=True):
"""Extract the image pixels within the bounding box.
This function will zero-pad the image if the bounding box is
partially/fully outside of the image.
Parameters
----------
image : (H,W) ndarray or (H,W,C) ndarray
The image from which to extract the pixels within the bounding box.
pad : bool, optional
Whether to zero-pad the image if the object is partially/fully
outside of it.
pad_max : None or int, optional
            The maximum number of pixels that may be zero-padded on any side,
i.e. if this has value ``N`` the total maximum of added pixels
is ``4*N``.
This option exists to prevent extremely large images as a result of
single points being moved very far away during augmentation.
prevent_zero_size : bool, optional
Whether to prevent the height or width of the extracted image from
becoming zero.
If this is set to ``True`` and the height or width of the bounding
box is below ``1``, the height/width will be increased to ``1``.
This can be useful to prevent problems, e.g. with image saving or
plotting.
If it is set to ``False``, images will be returned as ``(H', W')``
or ``(H', W', 3)`` with ``H`` or ``W`` potentially being 0.
Returns
-------
(H',W') ndarray or (H',W',C) ndarray
Pixels within the bounding box. Zero-padded if the bounding box
is partially/fully outside of the image.
            If `prevent_zero_size` is activated, it is guaranteed that
``H'>0`` and ``W'>0``, otherwise only ``H'>=0`` and ``W'>=0``.
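        Examples
        --------
        A sketch with an arbitrary all-zero image; the box coordinates are
        example values.

        >>> import numpy as np
        >>> from imgaug.augmentables.bbs import BoundingBox
        >>> image = np.zeros((100, 100, 3), dtype=np.uint8)
        >>> bb = BoundingBox(x1=20, y1=30, x2=60, y2=80)
        >>> crop = bb.extract_from_image(image)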
"""
pad_top = 0
pad_right = 0
pad_bottom = 0
pad_left = 0
height, width = image.shape[0], image.shape[1]
x1, x2, y1, y2 = self.x1_int, self.x2_int, self.y1_int, self.y2_int
# When y values get into the range (H-0.5, H), the *_int functions
# round them to H. That is technically sensible, but in the case of
# extraction leads to a black border, which is both ugly and
# unexpected after calling cut_out_of_image(). Here we correct for
# that because of beauty reasons. Same is the case for x coordinates.
fully_within = self.is_fully_within_image(image)
if fully_within:
y1, y2 = np.clip([y1, y2], 0, height-1)
x1, x2 = np.clip([x1, x2], 0, width-1)
# TODO add test
if prevent_zero_size:
if abs(x2 - x1) < 1:
x2 = x1 + 1
if abs(y2 - y1) < 1:
y2 = y1 + 1
if pad:
# if the bb is outside of the image area, the following pads the
# image first with black pixels until the bb is inside the image
# and only then extracts the image area
# TODO probably more efficient to initialize an array of zeros
# and copy only the portions of the bb into that array that
# are natively inside the image area
if x1 < 0:
pad_left = abs(x1)
x2 = x2 + pad_left
width = width + pad_left
x1 = 0
if y1 < 0:
pad_top = abs(y1)
y2 = y2 + pad_top
height = height + pad_top
y1 = 0
if x2 >= width:
pad_right = x2 - width
if y2 >= height:
pad_bottom = y2 - height
paddings = [pad_top, pad_right, pad_bottom, pad_left]
any_padded = any([val > 0 for val in paddings])
if any_padded:
if pad_max is None:
pad_max = max(paddings)
image = ia.pad(
image,
top=min(pad_top, pad_max),
right=min(pad_right, pad_max),
bottom=min(pad_bottom, pad_max),
left=min(pad_left, pad_max)
)
return image[y1:y2, x1:x2]
else:
within_image = (
(0, 0, 0, 0)
<= (x1, y1, x2, y2)
< (width, height, width, height)
)
out_height, out_width = (y2 - y1), (x2 - x1)
nonzero_height = (out_height > 0)
nonzero_width = (out_width > 0)
if within_image and nonzero_height and nonzero_width:
return image[y1:y2, x1:x2]
if prevent_zero_size:
out_height = 1
out_width = 1
else:
out_height = 0
out_width = 0
if image.ndim == 2:
return np.zeros((out_height, out_width), dtype=image.dtype)
return np.zeros((out_height, out_width, image.shape[-1]),
dtype=image.dtype)
# TODO also add to_heatmap
# TODO add this to BoundingBoxesOnImage
def to_keypoints(self):
"""Convert the BB's corners to keypoints (clockwise, from top left).
Returns
-------
list of imgaug.augmentables.kps.Keypoint
Corners of the bounding box as keypoints.
"""
# TODO get rid of this deferred import
from imgaug.augmentables.kps import Keypoint
return [
Keypoint(x=self.x1, y=self.y1),
Keypoint(x=self.x2, y=self.y1),
Keypoint(x=self.x2, y=self.y2),
Keypoint(x=self.x1, y=self.y2)
]
def coords_almost_equals(self, other, max_distance=1e-4):
"""Estimate if this and another BB have almost identical coordinates.
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox or iterable
The other bounding box with which to compare this one.
If this is an ``iterable``, it is assumed to represent the top-left
and bottom-right coordinates of that bounding box, given as e.g.
an ``(2,2)`` ndarray or an ``(4,)`` ndarray or as a similar list.
max_distance : number, optional
The maximum euclidean distance between a corner on one bounding
box and the closest corner on the other bounding box. If the
distance is exceeded for any such pair, the two BBs are not
viewed as equal.
Returns
-------
bool
Whether the two bounding boxes have almost identical corner
coordinates.
"""
if ia.is_np_array(other):
# we use flat here in case other is (N,2) instead of (4,)
coords_b = other.flat
elif ia.is_iterable(other):
coords_b = list(ia.flatten(other))
else:
assert isinstance(other, BoundingBox), (
"Expected 'other' to be an iterable containing two "
"(x,y)-coordinate pairs or a BoundingBox. "
"Got type %s." % (type(other),))
coords_b = other.coords.flat
coords_a = self.coords
return np.allclose(coords_a.flat, coords_b, atol=max_distance, rtol=0)
def almost_equals(self, other, max_distance=1e-4):
"""Compare this and another BB's label and coordinates.
This is the same as
:func:`imgaug.augmentables.bbs.BoundingBox.coords_almost_equals` but
additionally compares the labels.
Parameters
----------
other : imgaug.augmentables.bbs.BoundingBox or iterable
The other object to compare against. Expected to be a
``BoundingBox``.
max_distance : number, optional
See
:func:`imgaug.augmentables.bbs.BoundingBox.coords_almost_equals`.
Returns
-------
bool
``True`` if the coordinates are almost equal and additionally
the labels are equal. Otherwise ``False``.
"""
if self.label != other.label:
return False
return self.coords_almost_equals(other, max_distance=max_distance)
@classmethod
def from_point_soup(cls, xy):
"""Convert a ``(2P,) or (P,2) ndarray`` to a BB instance.
This is the inverse of
:func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.
Parameters
----------
xy : (2P,) ndarray or (P, 2) array or iterable of number or iterable of iterable of number
Array containing ``P`` points in xy-form denoting a soup of
points around which to place a bounding box.
The array should usually be of dtype ``float32``.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Bounding box around the points.
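        Examples
        --------
        Illustrative point soup; the coordinates are arbitrary example
        values.

        >>> from imgaug.augmentables.bbs import BoundingBox
        >>> bb = BoundingBox.from_point_soup([2.0, 7.0, 11.0, 3.0, 5.0, 12.0])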
"""
xy = np.array(xy, dtype=np.float32)
assert len(xy) > 0, (
"Expected to get at least one point to place a bounding box "
"around, got shape %s." % (xy.shape,))
assert xy.ndim == 1 or (xy.ndim == 2 and xy.shape[-1] == 2), (
"Expected input array of shape (P,) or (P, 2), "
"got shape %s." % (xy.shape,))
if xy.ndim == 1:
xy = xy.reshape((-1, 2))
x1, y1 = np.min(xy, axis=0)
x2, y2 = np.max(xy, axis=0)
return cls(x1=x1, y1=y1, x2=x2, y2=y2)
def copy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""Create a shallow copy of this BoundingBox instance.
Parameters
----------
x1 : None or number
If not ``None``, then the ``x1`` coordinate of the copied object
will be set to this value.
y1 : None or number
If not ``None``, then the ``y1`` coordinate of the copied object
will be set to this value.
x2 : None or number
If not ``None``, then the ``x2`` coordinate of the copied object
will be set to this value.
y2 : None or number
If not ``None``, then the ``y2`` coordinate of the copied object
will be set to this value.
label : None or string
If not ``None``, then the ``label`` of the copied object
will be set to this value.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Shallow copy.
"""
return BoundingBox(
x1=self.x1 if x1 is None else x1,
x2=self.x2 if x2 is None else x2,
y1=self.y1 if y1 is None else y1,
y2=self.y2 if y2 is None else y2,
label=copy.deepcopy(self.label) if label is None else label
)
def deepcopy(self, x1=None, y1=None, x2=None, y2=None, label=None):
"""
Create a deep copy of the BoundingBox object.
Parameters
----------
x1 : None or number
If not ``None``, then the ``x1`` coordinate of the copied object
will be set to this value.
y1 : None or number
If not ``None``, then the ``y1`` coordinate of the copied object
will be set to this value.
x2 : None or number
If not ``None``, then the ``x2`` coordinate of the copied object
will be set to this value.
y2 : None or number
If not ``None``, then the ``y2`` coordinate of the copied object
will be set to this value.
label : None or string
If not ``None``, then the ``label`` of the copied object
will be set to this value.
Returns
-------
imgaug.augmentables.bbs.BoundingBox
Deep copy.
"""
# TODO write specific copy routine with deepcopy for label and remove
# the deepcopy from copy()
return self.copy(x1=x1, y1=y1, x2=x2, y2=y2, label=label)
def __repr__(self):
return self.__str__()
def __str__(self):
return "BoundingBox(x1=%.4f, y1=%.4f, x2=%.4f, y2=%.4f, label=%s)" % (
self.x1, self.y1, self.x2, self.y2, self.label)
class BoundingBoxesOnImage(object):
"""Container for the list of all bounding boxes on a single image.
Parameters
----------
bounding_boxes : list of imgaug.augmentables.bbs.BoundingBox
List of bounding boxes on the image.
shape : tuple of int or ndarray
The shape of the image on which the objects are placed.
Either an image with shape ``(H,W,[C])`` or a ``tuple`` denoting
such an image shape.
Examples
--------
>>> import numpy as np
>>> from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
>>>
>>> image = np.zeros((100, 100))
>>> bbs = [
>>> BoundingBox(x1=10, y1=20, x2=20, y2=30),
>>> BoundingBox(x1=25, y1=50, x2=30, y2=70)
>>> ]
>>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape)
"""
def __init__(self, bounding_boxes, shape):
self.bounding_boxes = bounding_boxes
self.shape = normalize_shape(shape)
@property
def items(self):
"""Get the bounding boxes in this container.
Returns
-------
list of BoundingBox
Bounding boxes within this container.
"""
return self.bounding_boxes
# TODO remove this? here it is image height, but in BoundingBox it is
# bounding box height
@property
def height(self):
"""Get the height of the image on which the bounding boxes fall.
Returns
-------
int
Image height.
"""
return self.shape[0]
# TODO remove this? here it is image width, but in BoundingBox it is
# bounding box width
@property
def width(self):
"""Get the width of the image on which the bounding boxes fall.
Returns
-------
int
Image width.
"""
return self.shape[1]
@property
def empty(self):
"""Determine whether this instance contains zero bounding boxes.
Returns
-------
bool
True if this object contains zero bounding boxes.
"""
return len(self.bounding_boxes) == 0
    def on(self, image):
        """Project bounding boxes from one image (shape) to another one.
Parameters
----------
image : ndarray or tuple of int
New image onto which the bounding boxes are to be projected.
May also simply be that new image's shape tuple.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Object containing the same bounding boxes after projection to
the new image shape.
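        Examples
        --------
        Projection sketch; both image shapes are arbitrary example values.

        >>> from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
        >>> bbsoi = BoundingBoxesOnImage(
        >>>     [BoundingBox(x1=10, y1=20, x2=30, y2=40)], shape=(100, 100, 3))
        >>> bbsoi_projected = bbsoi.on((200, 200, 3))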
"""
shape = normalize_shape(image)
if shape[0:2] == self.shape[0:2]:
return self.deepcopy()
bounding_boxes = [bb.project(self.shape, shape)
for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bounding_boxes, shape)
@classmethod
def from_xyxy_array(cls, xyxy, shape):
"""Convert an ``(N, 4) or (N, 2, 2) ndarray`` to a BBsOI instance.
This is the inverse of
:func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`.
Parameters
----------
xyxy : (N, 4) ndarray or (N, 2, 2) array
Array containing the corner coordinates of ``N`` bounding boxes.
Each bounding box is represented by its top-left and bottom-right
coordinates.
The array should usually be of dtype ``float32``.
shape : tuple of int
Shape of the image on which the bounding boxes are placed.
Should usually be ``(H, W, C)`` or ``(H, W)``.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Object containing a list of :class:`BoundingBox` instances
derived from the provided corner coordinates.
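        Examples
        --------
        Illustrative array; the two rows are arbitrary example boxes.

        >>> import numpy as np
        >>> from imgaug.augmentables.bbs import BoundingBoxesOnImage
        >>> xyxy = np.float32([[10, 20, 30, 40], [5, 5, 15, 25]])
        >>> bbsoi = BoundingBoxesOnImage.from_xyxy_array(xyxy, shape=(100, 100, 3))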
"""
xyxy = np.array(xyxy, dtype=np.float32)
# note that np.array([]) is (0,), not (0, 2)
if xyxy.shape[0] == 0:
return BoundingBoxesOnImage([], shape)
assert (
(xyxy.ndim == 2 and xyxy.shape[-1] == 4)
or (xyxy.ndim == 3 and xyxy.shape[1:3] == (2, 2))), (
"Expected input array of shape (N, 4) or (N, 2, 2), "
"got shape %s." % (xyxy.shape,))
xyxy = xyxy.reshape((-1, 2, 2))
boxes = [BoundingBox.from_point_soup(row) for row in xyxy]
return cls(boxes, shape)
@classmethod
def from_point_soups(cls, xy, shape):
"""Convert an ``(N, 2P) or (N, P, 2) ndarray`` to a BBsOI instance.
Parameters
----------
xy : (N, 2P) ndarray or (N, P, 2) array or iterable of iterable of number or iterable of iterable of iterable of number
Array containing the corner coordinates of ``N`` bounding boxes.
Each bounding box is represented by a soup of ``P`` points.
If ``(N, P)`` then the second axis is expected to be in
xy-form (e.g. ``x1``, ``y1``, ``x2``, ``y2``, ...).
The final bounding box coordinates will be derived using ``min``
and ``max`` operations on the xy-values.
The array should usually be of dtype ``float32``.
shape : tuple of int
Shape of the image on which the bounding boxes are placed.
Should usually be ``(H, W, C)`` or ``(H, W)``.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Object containing a list of :class:`BoundingBox` instances
derived from the provided point soups.
"""
xy = np.array(xy, dtype=np.float32)
# from_xy_array() already checks the ndim/shape, so we don't have to
# do it here
boxes = [BoundingBox.from_point_soup(row) for row in xy]
return cls(boxes, shape)
def to_xyxy_array(self, dtype=np.float32):
"""Convert the ``BoundingBoxesOnImage`` object to an ``(N,4) ndarray``.
This is the inverse of
:func:`imgaug.BoundingBoxesOnImage.from_xyxy_array`.
Parameters
----------
dtype : numpy.dtype, optional
Desired output datatype of the ndarray.
Returns
-------
ndarray
``(N,4) ndarray``, where ``N`` denotes the number of bounding
boxes and ``4`` denotes the top-left and bottom-right bounding
box corner coordinates in form ``(x1, y1, x2, y2)``.
"""
xyxy_array = np.zeros((len(self.bounding_boxes), 4), dtype=np.float32)
for i, box in enumerate(self.bounding_boxes):
xyxy_array[i] = [box.x1, box.y1, box.x2, box.y2]
return xyxy_array.astype(dtype)
def to_xy_array(self):
"""Convert the ``BoundingBoxesOnImage`` object to an ``(N,2) ndarray``.
Returns
-------
ndarray
``(2*B,2) ndarray`` of xy-coordinates, where ``B`` denotes the
number of bounding boxes.
"""
return self.to_xyxy_array().reshape((-1, 2))
def fill_from_xyxy_array_(self, xyxy):
"""Modify the BB coordinates of this instance in-place.
.. note ::
            This currently expects exactly one entry in `xyxy` per bounding
            box in this instance. (I.e. two corner coordinates per bounding box.)
Otherwise, an ``AssertionError`` will be raised.
.. note ::
This method will automatically flip x-coordinates if ``x1>x2``
for a bounding box. (Analogous for y-coordinates.)
Parameters
----------
xyxy : (N, 4) ndarray or iterable of iterable of number
Coordinates of ``N`` bounding boxes on an image, given as
a ``(N,4)`` array of two corner xy-coordinates per bounding box.
``N`` must match the number of bounding boxes in this instance.
Returns
-------
BoundingBoxesOnImage
This instance itself, with updated bounding box coordinates.
Note that the instance was modified in-place.
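        Examples
        --------
        In-place update sketch; all coordinates are arbitrary example
        values.

        >>> from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
        >>> bbsoi = BoundingBoxesOnImage(
        >>>     [BoundingBox(x1=0, y1=0, x2=10, y2=10)], shape=(50, 50, 3))
        >>> bbsoi = bbsoi.fill_from_xyxy_array_([[5, 5, 20, 20]])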
"""
xyxy = np.array(xyxy, dtype=np.float32)
# note that np.array([]) is (0,), not (0, 4)
assert xyxy.shape[0] == 0 or (xyxy.ndim == 2 and xyxy.shape[-1] == 4), (
"Expected input array to have shape (N,4), "
"got shape %s." % (xyxy.shape,))
        assert len(xyxy) == len(self.bounding_boxes), (
            "Expected to receive an array with as many rows as there are "
            "bounding boxes in this instance. Got %d rows, expected %d." % (
len(xyxy), len(self.bounding_boxes)))
for bb, (x1, y1, x2, y2) in zip(self.bounding_boxes, xyxy):
bb.x1 = min([x1, x2])
bb.y1 = min([y1, y2])
bb.x2 = max([x1, x2])
bb.y2 = max([y1, y2])
return self
def fill_from_xy_array_(self, xy):
"""Modify the BB coordinates of this instance in-place.
See
:func:`imgaug.augmentables.bbs.BoundingBoxesOnImage.fill_from_xyxy_array_`.
Parameters
----------
xy : (2*B, 2) ndarray or iterable of iterable of number
Coordinates of ``B`` bounding boxes on an image, given as
a ``(2*B,2)`` array of two corner xy-coordinates per bounding box.
``B`` must match the number of bounding boxes in this instance.
Returns
-------
BoundingBoxesOnImage
This instance itself, with updated bounding box coordinates.
Note that the instance was modified in-place.
"""
xy = np.array(xy, dtype=np.float32)
return self.fill_from_xyxy_array_(xy.reshape((-1, 4)))
def draw_on_image(self, image, color=(0, 255, 0), alpha=1.0, size=1,
copy=True, raise_if_out_of_image=False, thickness=None):
"""Draw all bounding boxes onto a given image.
Parameters
----------
image : (H,W,3) ndarray
The image onto which to draw the bounding boxes.
This image should usually have the same shape as set in
``BoundingBoxesOnImage.shape``.
color : int or list of int or tuple of int or (3,) ndarray, optional
The RGB color of all bounding boxes.
If a single ``int`` ``C``, then that is equivalent to ``(C,C,C)``.
alpha : float, optional
Alpha/transparency of the bounding box.
size : int, optional
Thickness in pixels.
copy : bool, optional
Whether to copy the image before drawing the bounding boxes.
raise_if_out_of_image : bool, optional
Whether to raise an exception if any bounding box is outside of the
image.
thickness : None or int, optional
Deprecated.
Returns
-------
(H,W,3) ndarray
Image with drawn bounding boxes.
"""
image = np.copy(image) if copy else image
for bb in self.bounding_boxes:
image = bb.draw_on_image(
image,
color=color,
alpha=alpha,
size=size,
copy=False,
raise_if_out_of_image=raise_if_out_of_image,
thickness=thickness
)
return image
def remove_out_of_image(self, fully=True, partly=False):
"""Remove all BBs that are fully/partially outside of the image.
Parameters
----------
fully : bool, optional
Whether to remove bounding boxes that are fully outside of the
image.
partly : bool, optional
Whether to remove bounding boxes that are partially outside of
the image.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Reduced set of bounding boxes, with those that were
fully/partially outside of the image being removed.
"""
bbs_clean = [
bb
for bb
in self.bounding_boxes
if not bb.is_out_of_image(self.shape, fully=fully, partly=partly)]
return BoundingBoxesOnImage(bbs_clean, shape=self.shape)
@ia.deprecated(alt_func="BoundingBoxesOnImage.clip_out_of_image()",
                   comment="clip_out_of_image() has exactly the same "
"interface.")
def cut_out_of_image(self):
return self.clip_out_of_image()
def clip_out_of_image(self):
"""Clip off all parts from all BBs that are outside of the image.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Bounding boxes, clipped to fall within the image dimensions.
"""
bbs_cut = [
bb.clip_out_of_image(self.shape)
for bb
in self.bounding_boxes
if bb.is_partly_within_image(self.shape)]
return BoundingBoxesOnImage(bbs_cut, shape=self.shape)
    def shift(self, top=None, right=None, bottom=None, left=None):
        """Move all BBs along the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift all objects *from* the
top (towards the bottom).
right : None or int, optional
Amount of pixels by which to shift all objects *from* the
            right (towards the left).
bottom : None or int, optional
Amount of pixels by which to shift all objects *from* the
bottom (towards the top).
left : None or int, optional
Amount of pixels by which to shift all objects *from* the
left (towards the right).
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Shifted bounding boxes.
"""
bbs_new = [
bb.shift(top=top, right=right, bottom=bottom, left=left)
for bb
in self.bounding_boxes]
return BoundingBoxesOnImage(bbs_new, shape=self.shape)
def to_keypoints_on_image(self):
"""Convert the bounding boxes to one ``KeypointsOnImage`` instance.
Returns
-------
imgaug.augmentables.kps.KeypointsOnImage
A keypoints instance containing ``N*4`` coordinates for ``N``
bounding boxes. Order matches the order in ``bounding_boxes``.
"""
from .kps import KeypointsOnImage
# This currently uses 4 points instead of 2 points as the method
# is primarily used during augmentation and 4 points are overall
# the better choice there.
arr = np.zeros((len(self.bounding_boxes), 2*4), dtype=np.float32)
for i, box in enumerate(self.bounding_boxes):
arr[i] = [
box.x1, box.y1,
box.x2, box.y1,
box.x2, box.y2,
box.x1, box.y2
]
return KeypointsOnImage.from_xy_array(
arr.reshape((-1, 2)),
shape=self.shape
)
def invert_to_keypoints_on_image_(self, kpsoi):
"""Invert the output of ``to_keypoints_on_image()`` in-place.
This function writes in-place into this ``BoundingBoxesOnImage``
instance.
Parameters
----------
kpsoi : imgaug.augmentables.kps.KeypointsOnImages
Keypoints to convert back to bounding boxes, i.e. the outputs
of ``to_keypoints_on_image()``.
Returns
-------
BoundingBoxesOnImage
Bounding boxes container with updated coordinates.
Note that the instance is also updated in-place.
"""
assert len(kpsoi.keypoints) == len(self.bounding_boxes) * 4, (
"Expected %d coordinates, got %d." % (
                len(self.bounding_boxes) * 4, len(kpsoi.keypoints)))
for i, bb in enumerate(self.bounding_boxes):
xx = [kpsoi.keypoints[4*i+0].x, kpsoi.keypoints[4*i+1].x,
kpsoi.keypoints[4*i+2].x, kpsoi.keypoints[4*i+3].x]
yy = [kpsoi.keypoints[4*i+0].y, kpsoi.keypoints[4*i+1].y,
kpsoi.keypoints[4*i+2].y, kpsoi.keypoints[4*i+3].y]
bb.x1 = min(xx)
bb.y1 = min(yy)
bb.x2 = max(xx)
bb.y2 = max(yy)
self.shape = kpsoi.shape
return self
def copy(self):
"""Create a shallow copy of the ``BoundingBoxesOnImage`` instance.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Shallow copy.
"""
return copy.copy(self)
def deepcopy(self):
"""Create a deep copy of the ``BoundingBoxesOnImage`` object.
Returns
-------
imgaug.augmentables.bbs.BoundingBoxesOnImage
Deep copy.
"""
# Manual copy is far faster than deepcopy for BoundingBoxesOnImage,
# so use manual copy here too
bbs = [bb.deepcopy() for bb in self.bounding_boxes]
return BoundingBoxesOnImage(bbs, tuple(self.shape))
def __repr__(self):
return self.__str__()
def __str__(self):
return (
"BoundingBoxesOnImage(%s, shape=%s)"
% (str(self.bounding_boxes), self.shape))
| en | 0.74666 | # TODO functions: square(), to_aspect_ratio(), contains_point() Class representing bounding boxes. Each bounding box is parameterized by its top left and bottom right corners. Both are given as x and y-coordinates. The corners are intended to lie inside the bounding box area. As a result, a bounding box that lies completely inside the image but has maximum extensions would have coordinates ``(0.0, 0.0)`` and ``(W - epsilon, H - epsilon)``. Note that coordinates are saved internally as floats. Parameters ---------- x1 : number X-coordinate of the top left of the bounding box. y1 : number Y-coordinate of the top left of the bounding box. x2 : number X-coordinate of the bottom right of the bounding box. y2 : number Y-coordinate of the bottom right of the bounding box. label : None or str, optional Label of the bounding box, e.g. a string representing the class. Create a new BoundingBox instance. Get the top-left and bottom-right coordinates as one array. Returns ------- ndarray A ``(N, 2)`` numpy array with ``N=2`` containing the top-left and bottom-right coordinates. Get the x-coordinate of the top left corner as an integer. Returns ------- int X-coordinate of the top left corner, rounded to the closest integer. # use numpy's round to have consistent behaviour between python # versions Get the y-coordinate of the top left corner as an integer. Returns ------- int Y-coordinate of the top left corner, rounded to the closest integer. # use numpy's round to have consistent behaviour between python # versions Get the x-coordinate of the bottom left corner as an integer. Returns ------- int X-coordinate of the bottom left corner, rounded to the closest integer. # use numpy's round to have consistent behaviour between python # versions Get the y-coordinate of the bottom left corner as an integer. Returns ------- int Y-coordinate of the bottom left corner, rounded to the closest integer. # use numpy's round to have consistent behaviour between python # versions Estimate the height of the bounding box. Returns ------- number Height of the bounding box. Estimate the width of the bounding box. Returns ------- number Width of the bounding box. Estimate the x-coordinate of the center point of the bounding box. Returns ------- number X-coordinate of the center point of the bounding box. Estimate the y-coordinate of the center point of the bounding box. Returns ------- number Y-coordinate of the center point of the bounding box. Estimate the area of the bounding box. Returns ------- number Area of the bounding box, i.e. ``height * width``. # TODO add test for tuple of number Estimate whether the bounding box contains a given point. Parameters ---------- other : tuple of number or imgaug.augmentables.kps.Keypoint Point to check for. Returns ------- bool ``True`` if the point is contained in the bounding box, ``False`` otherwise. # TODO add tests for ndarray inputs Project the bounding box onto a differently shaped image. E.g. if the bounding box is on its original image at ``x1=(10 of 100 pixels)`` and ``y1=(20 of 100 pixels)`` and is projected onto a new image with size ``(width=200, height=200)``, its new position will be ``(x1=20, y1=40)``. (Analogous for ``x2``/``y2``.) This is intended for cases where the original image is resized. It cannot be used for more complex changes (e.g. padding, cropping). Parameters ---------- from_shape : tuple of int or ndarray Shape of the original image. (Before resize.) to_shape : tuple of int or ndarray Shape of the new image. (After resize.) 
Returns ------- imgaug.augmentables.bbs.BoundingBox ``BoundingBox`` instance with new coordinates. Extend the size of the bounding box along its sides. Parameters ---------- all_sides : number, optional Value by which to extend the bounding box size along all sides. top : number, optional Value by which to extend the bounding box size along its top side. right : number, optional Value by which to extend the bounding box size along its right side. bottom : number, optional Value by which to extend the bounding box size along its bottom side. left : number, optional Value by which to extend the bounding box size along its left side. Returns ------- imgaug.BoundingBox Extended bounding box. Compute the intersection BB between this BB and another BB. Note that in extreme cases, the intersection can be a single point. In that case the intersection bounding box exists and it will be returned, but it will have a height and width of zero. Parameters ---------- other : imgaug.augmentables.bbs.BoundingBox Other bounding box with which to generate the intersection. default : any, optional Default value to return if there is no intersection. Returns ------- imgaug.augmentables.bbs.BoundingBox or any Intersection bounding box of the two bounding boxes if there is an intersection. If there is no intersection, the default value will be returned, which can by anything. Compute the union BB between this BB and another BB. This is equivalent to drawing a bounding box around all corner points of both bounding boxes. Parameters ---------- other : imgaug.augmentables.bbs.BoundingBox Other bounding box with which to generate the union. Returns ------- imgaug.augmentables.bbs.BoundingBox Union bounding box of the two bounding boxes. Compute the IoU between this bounding box and another one. IoU is the intersection over union, defined as:: ``area(intersection(A, B)) / area(union(A, B))`` ``= area(intersection(A, B)) / (area(A) + area(B) - area(intersection(A, B)))`` Parameters ---------- other : imgaug.augmentables.bbs.BoundingBox Other bounding box with which to compare. Returns ------- float IoU between the two bounding boxes. Estimate whether the bounding box is fully inside the image area. Parameters ---------- image : (H,W,...) ndarray or tuple of int Image dimensions to use. If an ``ndarray``, its shape will be used. If a ``tuple``, it is assumed to represent the image shape and must contain at least two integers. Returns ------- bool ``True`` if the bounding box is fully inside the image area. ``False`` otherwise. Estimate whether the BB is at least partially inside the image area. Parameters ---------- image : (H,W,...) ndarray or tuple of int Image dimensions to use. If an ``ndarray``, its shape will be used. If a ``tuple``, it is assumed to represent the image shape and must contain at least two integers. Returns ------- bool ``True`` if the bounding box is at least partially inside the image area. ``False`` otherwise. Estimate whether the BB is partially/fully outside of the image area. Parameters ---------- image : (H,W,...) ndarray or tuple of int Image dimensions to use. If an ``ndarray``, its shape will be used. If a ``tuple``, it is assumed to represent the image shape and must contain at least two integers. fully : bool, optional Whether to return ``True`` if the bounding box is fully outside of the image area. partly : bool, optional Whether to return ``True`` if the bounding box is at least partially outside fo the image area. 
Returns ------- bool ``True`` if the bounding box is partially/fully outside of the image area, depending on defined parameters. ``False`` otherwise. Clip off all parts of the BB box that are outside of the image. Parameters ---------- image : (H,W,...) ndarray or tuple of int Image dimensions to use for the clipping of the bounding box. If an ``ndarray``, its shape will be used. If a ``tuple``, it is assumed to represent the image shape and must contain at least two integers. Returns ------- imgaug.augmentables.bbs.BoundingBox Bounding box, clipped to fall within the image dimensions. # TODO convert this to x/y params? Move this bounding box along the x/y-axis. Parameters ---------- top : None or int, optional Amount of pixels by which to shift this object *from* the top (towards the bottom). right : None or int, optional Amount of pixels by which to shift this object *from* the right (towards the left). bottom : None or int, optional Amount of pixels by which to shift this object *from* the bottom (towards the top). left : None or int, optional Amount of pixels by which to shift this object *from* the left (towards the right). Returns ------- imgaug.augmentables.bbs.BoundingBox Shifted bounding box. # TODO add explicit test for zero-sized BBs (worked when tested by hand) Draw the bounding box on an image. Parameters ---------- image : (H,W,C) ndarray The image onto which to draw the bounding box. Currently expected to be ``uint8``. color : iterable of int, optional The color to use, corresponding to the channel layout of the image. Usually RGB. alpha : float, optional The transparency of the drawn bounding box, where ``1.0`` denotes no transparency and ``0.0`` is invisible. size : int, optional The thickness of the bounding box in pixels. If the value is larger than ``1``, then additional pixels will be added around the bounding box (i.e. extension towards the outside). copy : bool, optional Whether to copy the input image or change it in-place. raise_if_out_of_image : bool, optional Whether to raise an error if the bounding box is fully outside of the image. If set to ``False``, no error will be raised and only the parts inside the image will be drawn. thickness : None or int, optional Deprecated. Returns ------- (H,W,C) ndarray(uint8) Image with bounding box drawn on it. # When y values get into the range (H-0.5, H), the *_int functions # round them to H. That is technically sensible, but in the case # of drawing means that the border lies just barely outside of # the image, making the border disappear, even though the BB is # fully inside the image. Here we correct for that because of # beauty reasons. Same is the case for x coordinates. # TODO use blend_alpha here # TODO add tests for pad and pad_max Extract the image pixels within the bounding box. This function will zero-pad the image if the bounding box is partially/fully outside of the image. Parameters ---------- image : (H,W) ndarray or (H,W,C) ndarray The image from which to extract the pixels within the bounding box. pad : bool, optional Whether to zero-pad the image if the object is partially/fully outside of it. pad_max : None or int, optional The maximum number of pixels that may be zero-paded on any side, i.e. if this has value ``N`` the total maximum of added pixels is ``4*N``. This option exists to prevent extremely large images as a result of single points being moved very far away during augmentation. prevent_zero_size : bool, optional Whether to prevent the height or width of the extracted image from becoming zero. 
If this is set to ``True`` and the height or width of the bounding box is below ``1``, the height/width will be increased to ``1``. This can be useful to prevent problems, e.g. with image saving or plotting. If it is set to ``False``, images will be returned as ``(H', W')`` or ``(H', W', 3)`` with ``H`` or ``W`` potentially being 0. Returns ------- (H',W') ndarray or (H',W',C) ndarray Pixels within the bounding box. Zero-padded if the bounding box is partially/fully outside of the image. If `prevent_zero_size` is activated, it is guarantueed that ``H'>0`` and ``W'>0``, otherwise only ``H'>=0`` and ``W'>=0``. # When y values get into the range (H-0.5, H), the *_int functions # round them to H. That is technically sensible, but in the case of # extraction leads to a black border, which is both ugly and # unexpected after calling cut_out_of_image(). Here we correct for # that because of beauty reasons. Same is the case for x coordinates. # TODO add test # if the bb is outside of the image area, the following pads the # image first with black pixels until the bb is inside the image # and only then extracts the image area # TODO probably more efficient to initialize an array of zeros # and copy only the portions of the bb into that array that # are natively inside the image area # TODO also add to_heatmap # TODO add this to BoundingBoxesOnImage Convert the BB's corners to keypoints (clockwise, from top left). Returns ------- list of imgaug.augmentables.kps.Keypoint Corners of the bounding box as keypoints. # TODO get rid of this deferred import Estimate if this and another BB have almost identical coordinates. Parameters ---------- other : imgaug.augmentables.bbs.BoundingBox or iterable The other bounding box with which to compare this one. If this is an ``iterable``, it is assumed to represent the top-left and bottom-right coordinates of that bounding box, given as e.g. an ``(2,2)`` ndarray or an ``(4,)`` ndarray or as a similar list. max_distance : number, optional The maximum euclidean distance between a corner on one bounding box and the closest corner on the other bounding box. If the distance is exceeded for any such pair, the two BBs are not viewed as equal. Returns ------- bool Whether the two bounding boxes have almost identical corner coordinates. # we use flat here in case other is (N,2) instead of (4,) Compare this and another BB's label and coordinates. This is the same as :func:`imgaug.augmentables.bbs.BoundingBox.coords_almost_equals` but additionally compares the labels. Parameters ---------- other : imgaug.augmentables.bbs.BoundingBox or iterable The other object to compare against. Expected to be a ``BoundingBox``. max_distance : number, optional See :func:`imgaug.augmentables.bbs.BoundingBox.coords_almost_equals`. Returns ------- bool ``True`` if the coordinates are almost equal and additionally the labels are equal. Otherwise ``False``. Convert a ``(2P,) or (P,2) ndarray`` to a BB instance. This is the inverse of :func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`. Parameters ---------- xy : (2P,) ndarray or (P, 2) array or iterable of number or iterable of iterable of number Array containing ``P`` points in xy-form denoting a soup of points around which to place a bounding box. The array should usually be of dtype ``float32``. Returns ------- imgaug.augmentables.bbs.BoundingBox Bounding box around the points. Create a shallow copy of this BoundingBox instance. 
Parameters ---------- x1 : None or number If not ``None``, then the ``x1`` coordinate of the copied object will be set to this value. y1 : None or number If not ``None``, then the ``y1`` coordinate of the copied object will be set to this value. x2 : None or number If not ``None``, then the ``x2`` coordinate of the copied object will be set to this value. y2 : None or number If not ``None``, then the ``y2`` coordinate of the copied object will be set to this value. label : None or string If not ``None``, then the ``label`` of the copied object will be set to this value. Returns ------- imgaug.augmentables.bbs.BoundingBox Shallow copy. Create a deep copy of the BoundingBox object. Parameters ---------- x1 : None or number If not ``None``, then the ``x1`` coordinate of the copied object will be set to this value. y1 : None or number If not ``None``, then the ``y1`` coordinate of the copied object will be set to this value. x2 : None or number If not ``None``, then the ``x2`` coordinate of the copied object will be set to this value. y2 : None or number If not ``None``, then the ``y2`` coordinate of the copied object will be set to this value. label : None or string If not ``None``, then the ``label`` of the copied object will be set to this value. Returns ------- imgaug.augmentables.bbs.BoundingBox Deep copy. # TODO write specific copy routine with deepcopy for label and remove # the deepcopy from copy() Container for the list of all bounding boxes on a single image. Parameters ---------- bounding_boxes : list of imgaug.augmentables.bbs.BoundingBox List of bounding boxes on the image. shape : tuple of int or ndarray The shape of the image on which the objects are placed. Either an image with shape ``(H,W,[C])`` or a ``tuple`` denoting such an image shape. Examples -------- >>> import numpy as np >>> from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage >>> >>> image = np.zeros((100, 100)) >>> bbs = [ >>> BoundingBox(x1=10, y1=20, x2=20, y2=30), >>> BoundingBox(x1=25, y1=50, x2=30, y2=70) >>> ] >>> bbs_oi = BoundingBoxesOnImage(bbs, shape=image.shape) Get the bounding boxes in this container. Returns ------- list of BoundingBox Bounding boxes within this container. # TODO remove this? here it is image height, but in BoundingBox it is # bounding box height Get the height of the image on which the bounding boxes fall. Returns ------- int Image height. # TODO remove this? here it is image width, but in BoundingBox it is # bounding box width Get the width of the image on which the bounding boxes fall. Returns ------- int Image width. Determine whether this instance contains zero bounding boxes. Returns ------- bool True if this object contains zero bounding boxes. Project bounding boxes from one image (shape) to a another one. Parameters ---------- image : ndarray or tuple of int New image onto which the bounding boxes are to be projected. May also simply be that new image's shape tuple. Returns ------- imgaug.augmentables.bbs.BoundingBoxesOnImage Object containing the same bounding boxes after projection to the new image shape. Convert an ``(N, 4) or (N, 2, 2) ndarray`` to a BBsOI instance. This is the inverse of :func:`imgaug.BoundingBoxesOnImage.to_xyxy_array`. Parameters ---------- xyxy : (N, 4) ndarray or (N, 2, 2) array Array containing the corner coordinates of ``N`` bounding boxes. Each bounding box is represented by its top-left and bottom-right coordinates. The array should usually be of dtype ``float32``. 
shape : tuple of int Shape of the image on which the bounding boxes are placed. Should usually be ``(H, W, C)`` or ``(H, W)``. Returns ------- imgaug.augmentables.bbs.BoundingBoxesOnImage Object containing a list of :class:`BoundingBox` instances derived from the provided corner coordinates. # note that np.array([]) is (0,), not (0, 2) Convert an ``(N, 2P) or (N, P, 2) ndarray`` to a BBsOI instance. Parameters ---------- xy : (N, 2P) ndarray or (N, P, 2) array or iterable of iterable of number or iterable of iterable of iterable of number Array containing the corner coordinates of ``N`` bounding boxes. Each bounding box is represented by a soup of ``P`` points. If ``(N, P)`` then the second axis is expected to be in xy-form (e.g. ``x1``, ``y1``, ``x2``, ``y2``, ...). The final bounding box coordinates will be derived using ``min`` and ``max`` operations on the xy-values. The array should usually be of dtype ``float32``. shape : tuple of int Shape of the image on which the bounding boxes are placed. Should usually be ``(H, W, C)`` or ``(H, W)``. Returns ------- imgaug.augmentables.bbs.BoundingBoxesOnImage Object containing a list of :class:`BoundingBox` instances derived from the provided point soups. # from_xy_array() already checks the ndim/shape, so we don't have to # do it here Convert the ``BoundingBoxesOnImage`` object to an ``(N,4) ndarray``. This is the inverse of :func:`imgaug.BoundingBoxesOnImage.from_xyxy_array`. Parameters ---------- dtype : numpy.dtype, optional Desired output datatype of the ndarray. Returns ------- ndarray ``(N,4) ndarray``, where ``N`` denotes the number of bounding boxes and ``4`` denotes the top-left and bottom-right bounding box corner coordinates in form ``(x1, y1, x2, y2)``. Convert the ``BoundingBoxesOnImage`` object to an ``(N,2) ndarray``. Returns ------- ndarray ``(2*B,2) ndarray`` of xy-coordinates, where ``B`` denotes the number of bounding boxes. Modify the BB coordinates of this instance in-place. .. note :: This currently expects exactly one entry in `xyxy` per bounding in this instance. (I.e. two corner coordinates per instance.) Otherwise, an ``AssertionError`` will be raised. .. note :: This method will automatically flip x-coordinates if ``x1>x2`` for a bounding box. (Analogous for y-coordinates.) Parameters ---------- xyxy : (N, 4) ndarray or iterable of iterable of number Coordinates of ``N`` bounding boxes on an image, given as a ``(N,4)`` array of two corner xy-coordinates per bounding box. ``N`` must match the number of bounding boxes in this instance. Returns ------- BoundingBoxesOnImage This instance itself, with updated bounding box coordinates. Note that the instance was modified in-place. # note that np.array([]) is (0,), not (0, 4) Modify the BB coordinates of this instance in-place. See :func:`imgaug.augmentables.bbs.BoundingBoxesOnImage.fill_from_xyxy_array_`. Parameters ---------- xy : (2*B, 2) ndarray or iterable of iterable of number Coordinates of ``B`` bounding boxes on an image, given as a ``(2*B,2)`` array of two corner xy-coordinates per bounding box. ``B`` must match the number of bounding boxes in this instance. Returns ------- BoundingBoxesOnImage This instance itself, with updated bounding box coordinates. Note that the instance was modified in-place. Draw all bounding boxes onto a given image. Parameters ---------- image : (H,W,3) ndarray The image onto which to draw the bounding boxes. This image should usually have the same shape as set in ``BoundingBoxesOnImage.shape``. 
color : int or list of int or tuple of int or (3,) ndarray, optional The RGB color of all bounding boxes. If a single ``int`` ``C``, then that is equivalent to ``(C,C,C)``. alpha : float, optional Alpha/transparency of the bounding box. size : int, optional Thickness in pixels. copy : bool, optional Whether to copy the image before drawing the bounding boxes. raise_if_out_of_image : bool, optional Whether to raise an exception if any bounding box is outside of the image. thickness : None or int, optional Deprecated. Returns ------- (H,W,3) ndarray Image with drawn bounding boxes. Remove all BBs that are fully/partially outside of the image. Parameters ---------- fully : bool, optional Whether to remove bounding boxes that are fully outside of the image. partly : bool, optional Whether to remove bounding boxes that are partially outside of the image. Returns ------- imgaug.augmentables.bbs.BoundingBoxesOnImage Reduced set of bounding boxes, with those that were fully/partially outside of the image being removed. Clip off all parts from all BBs that are outside of the image. Returns ------- imgaug.augmentables.bbs.BoundingBoxesOnImage Bounding boxes, clipped to fall within the image dimensions. Move all all BBs along the x/y-axis. Parameters ---------- top : None or int, optional Amount of pixels by which to shift all objects *from* the top (towards the bottom). right : None or int, optional Amount of pixels by which to shift all objects *from* the right (towads the left). bottom : None or int, optional Amount of pixels by which to shift all objects *from* the bottom (towards the top). left : None or int, optional Amount of pixels by which to shift all objects *from* the left (towards the right). Returns ------- imgaug.augmentables.bbs.BoundingBoxesOnImage Shifted bounding boxes. Convert the bounding boxes to one ``KeypointsOnImage`` instance. Returns ------- imgaug.augmentables.kps.KeypointsOnImage A keypoints instance containing ``N*4`` coordinates for ``N`` bounding boxes. Order matches the order in ``bounding_boxes``. # This currently uses 4 points instead of 2 points as the method # is primarily used during augmentation and 4 points are overall # the better choice there. Invert the output of ``to_keypoints_on_image()`` in-place. This function writes in-place into this ``BoundingBoxesOnImage`` instance. Parameters ---------- kpsoi : imgaug.augmentables.kps.KeypointsOnImages Keypoints to convert back to bounding boxes, i.e. the outputs of ``to_keypoints_on_image()``. Returns ------- BoundingBoxesOnImage Bounding boxes container with updated coordinates. Note that the instance is also updated in-place. Create a shallow copy of the ``BoundingBoxesOnImage`` instance. Returns ------- imgaug.augmentables.bbs.BoundingBoxesOnImage Shallow copy. Create a deep copy of the ``BoundingBoxesOnImage`` object. Returns ------- imgaug.augmentables.bbs.BoundingBoxesOnImage Deep copy. # Manual copy is far faster than deepcopy for BoundingBoxesOnImage, # so use manual copy here too | 2.820793 | 3 |
scanner_relay/run.py | breakds/brokering | 0 | 10711 | <gh_stars>0
#!/usr/bin/env python
from twisted.internet import endpoints
from twisted.internet import protocol
from twisted.internet import defer
from twisted.mail import imap4
from scanner_relay.pipeline import Pipeline
from scanner_relay.authentication import PassStoreFetcher, PlainPasswordFetcher
import logging
# Global configuration for the logging. Note that we set the level to
# INFO so that only DEBUG messages are kept out of stdout.
FORMAT = '[%(levelname)s] (%(name)s) %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger('run')
class ScannerRelayProtocol(imap4.IMAP4Client):
def __init__(self, username, password_fetcher, onFinish):
super().__init__()
self.pipeline = Pipeline(self, username, password_fetcher, onFinish)
def serverGreeting(self, unused_capabilities):
"""The entry point for the whole program.
It merely starts the long-running pipeline.
"""
        # NOTE: Although the official twisted example suggests using the
        # capabilities returned here to decide what kind of authentication
        # methods to register, I found that not to be true, as the real
        # capabilities are only returned after the authentication is
        # successful.
username = self.pipeline.username
self.registerAuthenticator(imap4.PLAINAuthenticator(username))
self.registerAuthenticator(imap4.LOGINAuthenticator(username))
self.registerAuthenticator(
imap4.CramMD5ClientAuthenticator(username))
self.pipeline.start()
class ScannerRelayProtocolFactory(protocol.ClientFactory):
def __init__(self, username, password_fetcher, onFinish):
super().__init__()
self.username = username
self.password_fetcher = password_fetcher
self.onFinish = onFinish
def buildProtocol(self, addr):
logger.info('Constructing client protocol to connect to %s:%d', addr.host, addr.port)
protocol = ScannerRelayProtocol(
self.username, self.password_fetcher, self.onFinish)
protocol.factory = self
return protocol
def clientConnectionFailed(self, connector, reason):
print('Connection failed.')
# TODO(breakds): Add a more graceful (signal handling) way to terminate the program.
def clean_up(unused):
from twisted.internet import reactor
reactor.stop()
    print('All work done!')
if __name__ == '__main__':
# FIXME: Make these configurable
hostname = 'mail.breakds.org'
username = '<EMAIL>'.encode('ascii')
pass_store_entry = 'mail.breakds.org/bds'
port = 143
from twisted.internet import reactor
endpoint = endpoints.HostnameEndpoint(reactor, hostname, port)
factory = ScannerRelayProtocolFactory(
username, PassStoreFetcher(pass_store_entry), clean_up)
endpoint.connect(factory)
reactor.run()
| #!/usr/bin/env python
from twisted.internet import endpoints
from twisted.internet import protocol
from twisted.internet import defer
from twisted.mail import imap4
from scanner_relay.pipeline import Pipeline
from scanner_relay.authentication import PassStoreFetcher, PlainPasswordFetcher
import logging
# Global configuration for the logging. Note that we set the level to
# INFO so that only DEBUG messages are kept out of stdout.
FORMAT = '[%(levelname)s] (%(name)s) %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger('run')
class ScannerRelayProtocol(imap4.IMAP4Client):
def __init__(self, username, password_fetcher, onFinish):
super().__init__()
self.pipeline = Pipeline(self, username, password_fetcher, onFinish)
def serverGreeting(self, unused_capabilities):
"""The entry point for the whole program.
It merely starts the long-running pipeline.
"""
        # NOTE: Although the official twisted example suggests using the
        # capabilities returned here to decide what kind of authentication
        # methods to register, I found that not to be true, as the real
        # capabilities are only returned after the authentication is
        # successful.
username = self.pipeline.username
self.registerAuthenticator(imap4.PLAINAuthenticator(username))
self.registerAuthenticator(imap4.LOGINAuthenticator(username))
self.registerAuthenticator(
imap4.CramMD5ClientAuthenticator(username))
self.pipeline.start()
class ScannerRelayProtocolFactory(protocol.ClientFactory):
def __init__(self, username, password_fetcher, onFinish):
super().__init__()
self.username = username
self.password_fetcher = password_fetcher
self.onFinish = onFinish
def buildProtocol(self, addr):
logger.info('Constructing client protocol to connect to %s:%d', addr.host, addr.port)
protocol = ScannerRelayProtocol(
self.username, self.password_fetcher, self.onFinish)
protocol.factory = self
return protocol
def clientConnectionFailed(self, connector, reason):
print('Connection failed.')
# TODO(breakds): Add a more graceful (signal handling) way to terminate the program.
def clean_up(unused):
from twisted.internet import reactor
reactor.stop()
    print('All work done!')
if __name__ == '__main__':
# FIXME: Make these configurable
hostname = 'mail.breakds.org'
username = '<EMAIL>'.encode('ascii')
pass_store_entry = 'mail.breakds.org/bds'
port = 143
from twisted.internet import reactor
endpoint = endpoints.HostnameEndpoint(reactor, hostname, port)
factory = ScannerRelayProtocolFactory(
username, PassStoreFetcher(pass_store_entry), clean_up)
endpoint.connect(factory)
reactor.run() | en | 0.870873 | #!/usr/bin/env python # Global configuration for the logging. Note that we set the level to # INFO so that only DEBUG logging does not get to stdout. The entry point for the whole program. It merely starts the long-running pipeline. # NOTE: Although twisted official example suggest using the capabilities # returned here to decide what kind of authentication methods to # register, I found it to be not true as real capabilities are only # returned after the authentication is successful. # TODO(breakds): And a more graceful (singal handling) way to terminate the program. # FIXME: Make these configurable | 2.010756 | 2 |
cubes_pilingup.py | akiselev1/hackerrank-solutions | 0 | 10712 | """
Created by akiselev on 2019-06-14
There is a horizontal row of cubes. The length of each cube is given. You need to create a new vertical pile of cubes. The new pile should follow this rule: if cube[i] is on top of cube[j], then sideLength[j] >= sideLength[i].
When stacking the cubes, you can only pick up either the leftmost or the rightmost cube each time. Print "Yes" if it is possible to stack the cubes. Otherwise, print "No". Do not print the quotation marks.
Input Format
The first line contains a single integer T, the number of test cases.
For each test case, there are 2 lines.
The first line of each test case contains n, the number of cubes.
The second line contains n space separated integers, denoting the sideLengths of each cube in that order.
Constraints
Output Format
For each test case, output a single line containing either "Yes" or "No" without the quotes.
Sample Input
2
6
4 3 2 1 3 4
3
1 3 2
Sample Output
Yes
No
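
Approach: a valid pile exists iff the side lengths are non-increasing and
then non-decreasing (a "valley"); scan the non-increasing prefix, then the
non-decreasing suffix, and answer "Yes" only if the scan reaches the last cube.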
"""
for T in range(int(input())):
n = int(input())
cubes_h = list(map(int, input().split()))
i = 0
while i < n - 1 and cubes_h[i] >= cubes_h[i+1]:
i += 1
while i < n - 1 and cubes_h[i] <= cubes_h[i+1]:
i += 1
print("Yes" if i == n - 1 else "No")
| """
Created by akiselev on 2019-06-14
There is a horizontal row of cubes. The length of each cube is given. You need to create a new vertical pile of cubes. The new pile should follow this rule: if cube[i] is on top of cube[j], then sideLength[j] >= sideLength[i].
When stacking the cubes, you can only pick up either the leftmost or the rightmost cube each time. Print "Yes" if it is possible to stack the cubes. Otherwise, print "No". Do not print the quotation marks.
Input Format
The first line contains a single integer T, the number of test cases.
For each test case, there are 2 lines.
The first line of each test case contains n, the number of cubes.
The second line contains n space separated integers, denoting the sideLengths of each cube in that order.
Constraints
Output Format
For each test case, output a single line containing either "Yes" or "No" without the quotes.
Sample Input
2
6
4 3 2 1 3 4
3
1 3 2
Sample Output
Yes
No
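
Approach: a valid pile exists iff the side lengths are non-increasing and
then non-decreasing (a "valley"); scan the non-increasing prefix, then the
non-decreasing suffix, and answer "Yes" only if the scan reaches the last cube.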
"""
for T in range(int(input())):
n = int(input())
cubes_h = list(map(int, input().split()))
i = 0
while i < n - 1 and cubes_h[i] >= cubes_h[i+1]:
i += 1
while i < n - 1 and cubes_h[i] <= cubes_h[i+1]:
i += 1
print("Yes" if i == n - 1 else "No")
| en | 0.821491 | Created by akiselev on 2019-06-14 There is a horizontal row of cubes. The length of each cube is given. You need to create a new vertical pile of cubes. The new pile should follow these directions: if is on top of then . When stacking the cubes, you can only pick up either the leftmost or the rightmost cube each time. Print "Yes" if it is possible to stack the cubes. Otherwise, print "No". Do not print the quotation marks. Input Format The first line contains a single integer , the number of test cases. For each test case, there are lines. The first line of each test case contains , the number of cubes. The second line contains space separated integers, denoting the sideLengths of each cube in that order. Constraints Output Format For each test case, output a single line containing either "Yes" or "No" without the quotes. Sample Input 2 6 4 3 2 1 3 4 3 1 3 2 Sample Output Yes No | 4.153783 | 4 |
flask_web/bootstrap_web_core_py3.py | bopopescu/docker_images_a | 0 | 10713 | <filename>flask_web/bootstrap_web_core_py3.py<gh_stars>0
#
#
# File: flask_web_py3.py
#
#
#
import os
import json
import redis
import urllib
import flask
from flask import Flask
from flask import render_template,jsonify
from flask_httpauth import HTTPDigestAuth
from flask import request, session, url_for
from redis_support_py3.graph_query_support_py3 import Query_Support
from redis_support_py3.construct_data_handlers_py3 import Generate_Handlers
from web_core.load_static_pages_py3 import Load_Static_Files
from web_core.load_redis_access_py3 import Load_Redis_Access
from redis_support_py3.construct_data_handlers_py3 import Redis_RPC_Client
from bootstrap_web_system_control_py3 import PI_Web_System_Control
from bootstrap_web_monitoring_py3 import PI_Web_Monitor_Server
from bootstrap_mqtt_client_py3 import PI_MQTT_Client_Monitor
from bootstrap_eto_py3 import ETO_Management
from file_server_library.file_server_lib_py3 import Construct_RPC_Library
from bootstrap_irrigation_scheduling_py3 import Irrigation_Scheduling
from irrigation_control.load_irrigation_control_py3 import Load_Irrigation_Control
class URL_Rule_Class(object):
def __init__(self,app,auth):
self.subsystems = {}
self.subsystem_order = []
self.app = app
self.auth = auth
def add_get_rules(self,subsystem_name,function_list,url_list):
slash_name = "/"+subsystem_name+"/"
assert(len(function_list)==len(url_list))
menu_list = []
menu_data = {}
for i in range(0,len(function_list)):
a1 = self.auth.login_required( function_list[i] )
self.app.add_url_rule(slash_name+url_list[i][0]+url_list[i][1],slash_name+url_list[i][0],a1)
menu_data[url_list[i][0]] =[a1,url_list[i][0]+url_list[i][2],url_list[i][3]]
menu_list.append(url_list[i][0])
self.subsystems[subsystem_name] = {"menu_list":menu_list,"menu_data":menu_data}
self.subsystem_order.append(subsystem_name)
def move_directories(self,path):
#print("move directory path",path)
path_test = path.split("/")
if len(path_test) != 1:
path_dest = path_test[1]
else:
path_dest = path
#print(path)
#print(path_dest)
os.system('mkdir flask_templates/'+path_dest)
os.system('mkdir flask_templates/js/'+path_dest)
#os.system("ls flask_templates")
#print("path",path,path_dest)
os.system('cp -r ' +path+'/templates/* flask_templates/'+path_dest)
os.system('cp -r ' +path+'/js/* flask_templates/js/'+path_dest)
return path_dest
class Load_App_Sys_Files(object):
def __init__( self, app, auth, request, file_server_library ):
self.app = app
self.auth = auth
self.request = request
self.file_server_library = file_server_library
a1 = auth.login_required( self.get_system_file )
app.add_url_rule("/ajax/get_system_file/<path:file_name>","get_system_file",a1)
a1 = auth.login_required( self.get_app_file )
app.add_url_rule("/ajax/get_app_file/<path:file_name>","get_app_file",a1)
a1 = auth.login_required( self.save_app_file )
app.add_url_rule("/ajax/save_app_file/<path:file_name>","save_app_file",a1,methods=["POST"])
a1 = auth.login_required( self.save_sys_file )
app.add_url_rule("/ajax/save_sys_file/<path:file_name>","save_sys_file",a1,methods=["POST"])
def get_system_file(self, file_name):
data = self.file_server_library.load_file( "application_files",file_name)
return json.dumps(data)
def get_app_file(self,file_name):
data = self.file_server_library.load_file( "system_files",file_name)
return json.dumps(data )
def save_app_file(self,file_name):
json_object = self.request.json
if type(json_object) != str:
json_object = json.dumps(json_object)
self.file_server_library.save_file("application_files",file_name, json_object );
return json.dumps('SUCCESS')
def save_sys_file(self,file_name):
json_object = self.request.json
if type(json_object) != str:
json_object = json.dumps(json_object)
self.file_server_library.save_file( "system_files",file_name, json_object );
return json.dumps('SUCCESS')
class PI_Web_Server_Core(object):
def __init__(self , name, site_data ):
redis_handle_pw = redis.StrictRedis(site_data["host"],
site_data["port"],
db=site_data["redis_password_db"],
decode_responses=True)
self.site_data = site_data
startup_dict = redis_handle_pw.hgetall("web")
self.qs = Query_Support( site_data)
self.file_server_library = Construct_RPC_Library(self.qs,self.site_data)
self.app = Flask(name)
self.auth = HTTPDigestAuth()
self.url_rule_class = URL_Rule_Class(self.app,self.auth)
self.auth.get_password( self.get_pw )
self.startup_dict = startup_dict
self.app.template_folder = 'flask_templates'
self.app.static_folder = 'static'
self.app.config['SECRET_KEY'] = startup_dict["SECRET_KEY"]
self.users = json.loads(startup_dict["users"])
Load_Static_Files(self.app,self.auth) #enable static files to be fetched
self.redis_access = Load_Redis_Access(self.app, self.auth, request ) #enable web access for redis operations
Load_App_Sys_Files( self.app, self.auth, request, self.file_server_library )
self.subsystems = []
self.modules = {}
self.load_specified_modules()
def load_specified_modules(self):
results=self.common_qs_search(["WEB_SERVER","WEB_SERVER"])
result = results[0]
modules = result["modules"]
for i in modules:
if i == "monitoring":
print(i)
PI_Web_Monitor_Server(self)
elif i == "system_control":
print(i)
PI_Web_System_Control(self)
elif i == "mqtt_client":
print(i)
PI_MQTT_Client_Monitor(self )
elif i == "eto":
print(i)
ETO_Management(self)
elif i == "irrigation_scheduling":
print(i)
Irrigation_Scheduling(self)
elif i == "irrigation_control":
print(i)
Load_Irrigation_Control(self)
elif i == "modbus_control":
print("do nothing right now")
else:
raise ValueError("bad web module")
self.result = result
if "status_function" in self.result:
print(self.result["status_function"])
else:
self.result["status_function"] = ""
print("status function not defined")
file_handle = open("flask_templates/js/status_definition.js","w")
file_handle.write('__status_option__ = "'+self.result["status_function"]+'"; \n')
file_handle.close()
def common_qs_search(self,search_list): # generalized graph search
query_list = []
query_list = self.qs.add_match_relationship( query_list,relationship="SITE",label=self.site_data["site"] )
for i in range(0,len(search_list)-1):
if type(search_list[i]) == list:
query_list = self.qs.add_match_relationship( query_list,relationship = search_list[i][0],label = search_list[i][1] )
else:
query_list = self.qs.add_match_relationship( query_list,relationship = search_list[i] )
if type(search_list[-1]) == list:
query_list = self.qs.add_match_terminal( query_list,relationship = search_list[-1][0],label = search_list[-1][1] )
else:
query_list = self.qs.add_match_terminal( query_list,relationship = search_list[-1] )
node_sets, node_sources = self.qs.match_list(query_list)
return node_sources
def get_pw( self,username):
if username in self.users:
return self.users[username]
return None
def generate_menu_page(self):
self.subsystems.sort()
self.generate_menu_template()
self.generate_modal_template()
def generate_default_index_page(self):
self.app.add_url_rule("/","home_page",self.links_a1)
def generate_index_page(self,module,element):
menu_data = self.url_rule_class.subsystems[module]["menu_data"]
menu_element = menu_data[element]
self.app.add_url_rule("/","home page",menu_element[0])
def generate_site_map(self):
self.links_a1 = self.auth.login_required( self.site_map_function )
self.app.add_url_rule("/link_page","/links_page",self.links_a1)
def site_map_function(self):
links = []
for rule in self.app.url_map.iter_rules():
# Filter out rules we can't navigate to in a browser
# and rules that require parameters
#url = url_for(rule.endpoint, **(rule.defaults or {}))
links.append((rule.endpoint))
links.sort()
return render_template("list_of_endpoints",endpoints = links)
def run_http( self):
self.app.run(threaded=True , use_reloader=True, host='0.0.0.0',port=self.port,debug =self.debug )
def run_https( self ):
startup_dict = self.startup_dict
self.app.run(threaded=True , use_reloader=True, host='0.0.0.0',debug =self.debug,
port=self.port ,ssl_context=("/data/cert.pem", "/data/key.pem"))
def generate_menu_template(self):
f = open( self.app.template_folder+'/menu', 'w')
output_string = '''
<nav class="navbar navbar-expand-sm bg-dark navbar-dark">
<!-- Links -->
<ul class="navbar-nav">
<!-- Dropdown -->
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="#" id="navbardrop" data-toggle="dropdown">Menu</a>
<div class="dropdown-menu">
'''
f.write(output_string)
self.url_rule_class.subsystems
for i in self.url_rule_class.subsystems:
temp = ' <a class="dropdown-item" href="#" data-toggle="modal" data-target="#'+i+'">'+i+"</a>\n"
f.write(temp)
output_string = '''
</div>
</li>
</ul>
<ul class="navbar-nav">
<button id="status_panel", class="btn " type="submit">Status</button>
</ul>
<nav class="navbar navbar-light bg-dark navbar-dark">
<span class="navbar-text" >
<h4 id ="status_display"> Status: </h4>
</span>
</nav>
</nav>
'''
f.write(output_string)
f.close()
def generate_modal_template(self):
f = open(self.app.template_folder+'/modals', 'w')
for i in self.url_rule_class.subsystem_order:
#print("generate_modal_template - i",i)
output_string = '<!–'+i+' –>\n'
f.write(output_string)
output_string ='<div class="modal fade" id='+i+' tabindex="-1" role="dialog" aria-labelledby="accountModalLabel" aria-hidden="true">\n'
f.write(output_string)
output_string = '''
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
'''
f.write(output_string)
f.write(' <h5 class="modal-title" id="accountModalLabel">'+i+'</h5>\n')
output_string = '''
<button type="button" class="close" data-dismiss="modal" aria-label="close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">
<ul >
'''
f.write(output_string)
# <li><a href ='/control/display_past_system_alerts' target="_self">Current System State</a></li>
sub_system_data = self.url_rule_class.subsystems[i]
temp = sub_system_data["menu_data"]
#
for j in sub_system_data['menu_list']:
data = temp[j]
#print("data",data)
format_output = '<li><a href='+'"/'+i+'/'+data[1]+'" target="_self">'+data[2]+'</a></li>\n'
f.write(format_output)
output_string = '''
</ul>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-secondary" data-dismiss="modal">Close</button>
</div>
</div>
</div>
</div>
'''
f.write(output_string)
f.close()
if __name__ == "__main__":
file_handle = open("/data/redis_server.json",'r')
data = file_handle.read()
file_handle.close()
redis_site_data = json.loads(data)
pi_web_server = PI_Web_Server_Core(__name__, redis_site_data )
pi_web_server.generate_menu_page()
pi_web_server.generate_site_map()
pi_web_server.generate_default_index_page()
port = pi_web_server.result["port"]
pi_web_server.port = port
debug = pi_web_server.result["debug"]
pi_web_server.debug = debug
https_flag = pi_web_server.result["https"]
if https_flag == False:
pi_web_server.run_https()
else:
pi_web_server.run_https()
| <filename>flask_web/bootstrap_web_core_py3.py<gh_stars>0
#
#
# File: flask_web_py3.py
#
#
#
import os
import json
import redis
import urllib
import flask
from flask import Flask
from flask import render_template,jsonify
from flask_httpauth import HTTPDigestAuth
from flask import request, session, url_for
from redis_support_py3.graph_query_support_py3 import Query_Support
from redis_support_py3.construct_data_handlers_py3 import Generate_Handlers
from web_core.load_static_pages_py3 import Load_Static_Files
from web_core.load_redis_access_py3 import Load_Redis_Access
from redis_support_py3.construct_data_handlers_py3 import Redis_RPC_Client
from bootstrap_web_system_control_py3 import PI_Web_System_Control
from bootstrap_web_monitoring_py3 import PI_Web_Monitor_Server
from bootstrap_mqtt_client_py3 import PI_MQTT_Client_Monitor
from bootstrap_eto_py3 import ETO_Management
from file_server_library.file_server_lib_py3 import Construct_RPC_Library
from bootstrap_irrigation_scheduling_py3 import Irrigation_Scheduling
from irrigation_control.load_irrigation_control_py3 import Load_Irrigation_Control
class URL_Rule_Class(object):
def __init__(self,app,auth):
self.subsystems = {}
self.subsystem_order = []
self.app = app
self.auth = auth
def add_get_rules(self,subsystem_name,function_list,url_list):
slash_name = "/"+subsystem_name+"/"
assert(len(function_list)==len(url_list))
menu_list = []
menu_data = {}
for i in range(0,len(function_list)):
a1 = self.auth.login_required( function_list[i] )
self.app.add_url_rule(slash_name+url_list[i][0]+url_list[i][1],slash_name+url_list[i][0],a1)
menu_data[url_list[i][0]] =[a1,url_list[i][0]+url_list[i][2],url_list[i][3]]
menu_list.append(url_list[i][0])
self.subsystems[subsystem_name] = {"menu_list":menu_list,"menu_data":menu_data}
self.subsystem_order.append(subsystem_name)
def move_directories(self,path):
#print("move directory path",path)
path_test = path.split("/")
if len(path_test) != 1:
path_dest = path_test[1]
else:
path_dest = path
#print(path)
#print(path_dest)
os.system('mkdir flask_templates/'+path_dest)
os.system('mkdir flask_templates/js/'+path_dest)
#os.system("ls flask_templates")
#print("path",path,path_dest)
os.system('cp -r ' +path+'/templates/* flask_templates/'+path_dest)
os.system('cp -r ' +path+'/js/* flask_templates/js/'+path_dest)
return path_dest
class Load_App_Sys_Files(object):
def __init__( self, app, auth, request, file_server_library ):
self.app = app
self.auth = auth
self.request = request
self.file_server_library = file_server_library
a1 = auth.login_required( self.get_system_file )
app.add_url_rule("/ajax/get_system_file/<path:file_name>","get_system_file",a1)
a1 = auth.login_required( self.get_app_file )
app.add_url_rule("/ajax/get_app_file/<path:file_name>","get_app_file",a1)
a1 = auth.login_required( self.save_app_file )
app.add_url_rule("/ajax/save_app_file/<path:file_name>","save_app_file",a1,methods=["POST"])
a1 = auth.login_required( self.save_sys_file )
app.add_url_rule("/ajax/save_sys_file/<path:file_name>","save_sys_file",a1,methods=["POST"])
def get_system_file(self, file_name):
data = self.file_server_library.load_file( "application_files",file_name)
return json.dumps(data)
def get_app_file(self,file_name):
data = self.file_server_library.load_file( "system_files",file_name)
return json.dumps(data )
def save_app_file(self,file_name):
json_object = self.request.json
if type(json_object) != str:
json_object = json.dumps(json_object)
self.file_server_library.save_file("application_files",file_name, json_object );
return json.dumps('SUCCESS')
def save_sys_file(self,file_name):
json_object = self.request.json
if type(json_object) != str:
json_object = json.dumps(json_object)
self.file_server_library.save_file( "system_files",file_name, json_object );
return json.dumps('SUCCESS')
class PI_Web_Server_Core(object):
def __init__(self , name, site_data ):
redis_handle_pw = redis.StrictRedis(site_data["host"],
site_data["port"],
db=site_data["redis_password_db"],
decode_responses=True)
self.site_data = site_data
startup_dict = redis_handle_pw.hgetall("web")
self.qs = Query_Support( site_data)
self.file_server_library = Construct_RPC_Library(self.qs,self.site_data)
self.app = Flask(name)
self.auth = HTTPDigestAuth()
self.url_rule_class = URL_Rule_Class(self.app,self.auth)
self.auth.get_password( self.get_pw )
self.startup_dict = startup_dict
self.app.template_folder = 'flask_templates'
self.app.static_folder = 'static'
self.app.config['SECRET_KEY'] = startup_dict["SECRET_KEY"]
self.users = json.loads(startup_dict["users"])
Load_Static_Files(self.app,self.auth) #enable static files to be fetched
self.redis_access = Load_Redis_Access(self.app, self.auth, request ) #enable web access for redis operations
Load_App_Sys_Files( self.app, self.auth, request, self.file_server_library )
self.subsystems = []
self.modules = {}
self.load_specified_modules()
def load_specified_modules(self):
results=self.common_qs_search(["WEB_SERVER","WEB_SERVER"])
result = results[0]
modules = result["modules"]
for i in modules:
if i == "monitoring":
print(i)
PI_Web_Monitor_Server(self)
elif i == "system_control":
print(i)
PI_Web_System_Control(self)
elif i == "mqtt_client":
print(i)
PI_MQTT_Client_Monitor(self )
elif i == "eto":
print(i)
ETO_Management(self)
elif i == "irrigation_scheduling":
print(i)
Irrigation_Scheduling(self)
elif i == "irrigation_control":
print(i)
Load_Irrigation_Control(self)
elif i == "modbus_control":
print("do nothing right now")
else:
raise ValueError("bad web module")
self.result = result
if "status_function" in self.result:
print(self.result["status_function"])
else:
self.result["status_function"] = ""
print("status function not defined")
file_handle = open("flask_templates/js/status_definition.js","w")
file_handle.write('__status_option__ = "'+self.result["status_function"]+'"; \n')
file_handle.close()
def common_qs_search(self,search_list): # generalized graph search
query_list = []
query_list = self.qs.add_match_relationship( query_list,relationship="SITE",label=self.site_data["site"] )
for i in range(0,len(search_list)-1):
if type(search_list[i]) == list:
query_list = self.qs.add_match_relationship( query_list,relationship = search_list[i][0],label = search_list[i][1] )
else:
query_list = self.qs.add_match_relationship( query_list,relationship = search_list[i] )
if type(search_list[-1]) == list:
query_list = self.qs.add_match_terminal( query_list,relationship = search_list[-1][0],label = search_list[-1][1] )
else:
query_list = self.qs.add_match_terminal( query_list,relationship = search_list[-1] )
node_sets, node_sources = self.qs.match_list(query_list)
return node_sources
def get_pw( self,username):
if username in self.users:
return self.users[username]
return None
def generate_menu_page(self):
self.subsystems.sort()
self.generate_menu_template()
self.generate_modal_template()
def generate_default_index_page(self):
self.app.add_url_rule("/","home_page",self.links_a1)
def generate_index_page(self,module,element):
menu_data = self.url_rule_class.subsystems[module]["menu_data"]
menu_element = menu_data[element]
self.app.add_url_rule("/","home page",menu_element[0])
def generate_site_map(self):
self.links_a1 = self.auth.login_required( self.site_map_function )
self.app.add_url_rule("/link_page","/links_page",self.links_a1)
def site_map_function(self):
links = []
for rule in self.app.url_map.iter_rules():
# Filter out rules we can't navigate to in a browser
# and rules that require parameters
#url = url_for(rule.endpoint, **(rule.defaults or {}))
links.append((rule.endpoint))
links.sort()
return render_template("list_of_endpoints",endpoints = links)
def run_http( self):
self.app.run(threaded=True , use_reloader=True, host='0.0.0.0',port=self.port,debug =self.debug )
def run_https( self ):
startup_dict = self.startup_dict
self.app.run(threaded=True , use_reloader=True, host='0.0.0.0',debug =self.debug,
port=self.port ,ssl_context=("/data/cert.pem", "/data/key.pem"))
def generate_menu_template(self):
f = open( self.app.template_folder+'/menu', 'w')
output_string = '''
<nav class="navbar navbar-expand-sm bg-dark navbar-dark">
<!-- Links -->
<ul class="navbar-nav">
<!-- Dropdown -->
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="#" id="navbardrop" data-toggle="dropdown">Menu</a>
<div class="dropdown-menu">
'''
f.write(output_string)
self.url_rule_class.subsystems
for i in self.url_rule_class.subsystems:
temp = ' <a class="dropdown-item" href="#" data-toggle="modal" data-target="#'+i+'">'+i+"</a>\n"
f.write(temp)
output_string = '''
</div>
</li>
</ul>
<ul class="navbar-nav">
<button id="status_panel", class="btn " type="submit">Status</button>
</ul>
<nav class="navbar navbar-light bg-dark navbar-dark">
<span class="navbar-text" >
<h4 id ="status_display"> Status: </h4>
</span>
</nav>
</nav>
'''
f.write(output_string)
f.close()
def generate_modal_template(self):
f = open(self.app.template_folder+'/modals', 'w')
for i in self.url_rule_class.subsystem_order:
#print("generate_modal_template - i",i)
output_string = '<!–'+i+' –>\n'
f.write(output_string)
output_string ='<div class="modal fade" id='+i+' tabindex="-1" role="dialog" aria-labelledby="accountModalLabel" aria-hidden="true">\n'
f.write(output_string)
output_string = '''
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header">
'''
f.write(output_string)
f.write(' <h5 class="modal-title" id="accountModalLabel">'+i+'</h5>\n')
output_string = '''
<button type="button" class="close" data-dismiss="modal" aria-label="close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">
<ul >
'''
f.write(output_string)
# <li><a href ='/control/display_past_system_alerts' target="_self">Current System State</a></li>
sub_system_data = self.url_rule_class.subsystems[i]
temp = sub_system_data["menu_data"]
#
for j in sub_system_data['menu_list']:
data = temp[j]
#print("data",data)
format_output = '<li><a href='+'"/'+i+'/'+data[1]+'" target="_self">'+data[2]+'</a></li>\n'
f.write(format_output)
output_string = '''
</ul>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-secondary" data-dismiss="modal">Close</button>
</div>
</div>
</div>
</div>
'''
f.write(output_string)
f.close()
if __name__ == "__main__":
file_handle = open("/data/redis_server.json",'r')
data = file_handle.read()
file_handle.close()
redis_site_data = json.loads(data)
pi_web_server = PI_Web_Server_Core(__name__, redis_site_data )
pi_web_server.generate_menu_page()
pi_web_server.generate_site_map()
pi_web_server.generate_default_index_page()
port = pi_web_server.result["port"]
pi_web_server.port = port
debug = pi_web_server.result["debug"]
pi_web_server.debug = debug
https_flag = pi_web_server.result["https"]
if https_flag == False:
pi_web_server.run_https()
else:
pi_web_server.run_https()
| en | 0.282121 | # # # File: flask_web_py3.py # # # #print("move directory path",path) #print(path) #print(path_dest) #os.system("ls flask_templates") #print("path",path,path_dest) #enable static files to be fetched #enable web access for redis operations # generalized graph search # Filter out rules we can't navigate to in a browser # and rules that require parameters #url = url_for(rule.endpoint, **(rule.defaults or {})) <nav class="navbar navbar-expand-sm bg-dark navbar-dark">
<!-- Links -->
<ul class="navbar-nav">
<!-- Dropdown -->
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="#" id="navbardrop" data-toggle="dropdown">Menu</a>
<div class="dropdown-menu"> </div>
</li>
</ul>
<ul class="navbar-nav">
<button id="status_panel", class="btn " type="submit">Status</button>
</ul>
<nav class="navbar navbar-light bg-dark navbar-dark">
<span class="navbar-text" >
<h4 id ="status_display"> Status: </h4>
</span>
</nav>
</nav> #print("generate_modal_template - i",i) <div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-header"> <button type="button" class="close" data-dismiss="modal" aria-label="close">
<span aria-hidden="true">×</span>
</button>
</div>
<div class="modal-body">
<ul > # <li><a href ='/control/display_past_system_alerts' target="_self">Current System State</a></li> # #print("data",data) </ul>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-secondary" data-dismiss="modal">Close</button>
</div>
</div>
</div>
</div> | 1.96469 | 2 |
git_talk/lib/changelog/main.py | cove9988/git-talk | 5 | 10714 | <filename>git_talk/lib/changelog/main.py<gh_stars>1-10
import os
import logging
from typing import Optional
import click
from git_talk.lib.changelog import generate_changelog
from git_talk.lib.changelog.presenter import MarkdownPresenter
from git_talk.lib.changelog.repository import GitRepository
# @click.command()
# @click.option(
# "-r",
# "--repo",
# type=click.Path(exists=True),
# default=".",
# help="Path to the repository's root directory [Default: .]",
# )
# @click.option("-t", "--title", default="Changelog", help="The changelog's title [Default: Changelog]")
# @click.option("-d", "--description", help="Your project's description")
# @click.option(
# "-o",
# "--output",
# type=click.File("w"),
# default="CHANGELOG.md",
# help="The place to save the generated changelog [Default: CHANGELOG.md]",
# )
# @click.option("-r", "--remote", default="origin", help="Specify git remote to use for links")
# @click.option("-v", "--latest-version", type=str, help="use specified version as latest release")
# @click.option("-u", "--unreleased", is_flag=True, default=False, help="Include section for unreleased changes")
# @click.option("--diff-url", default=None, help="override url for compares, use {current} and {previous} for tags")
# @click.option("--issue-url", default=None, help="Override url for issues, use {id} for issue id")
# @click.option(
# "--issue-pattern",
# default=r"(#([\w-]+))",
# help="Override regex pattern for issues in commit messages. Should contain two groups, original match and ID used "
# "by issue-url.",
# )
# @click.option(
# "--tag-pattern",
# default=None,
# help="override regex pattern for release tags. "
# "By default use semver tag names semantic. "
# "tag should be contain in one group named 'version'.",
# )
# @click.option("--tag-prefix", default="", help='prefix used in version tags, default: "" ')
# @click.option("--stdout", is_flag=True)
# @click.option("--tag-pattern", default=None, help="Override regex pattern for release tags")
# @click.option("--starting-commit", help="Starting commit to use for changelog generation", default="")
# @click.option("--stopping-commit", help="Stopping commit to use for changelog generation", default="HEAD")
# @click.option(
# "--debug", is_flag=True, help="set logging level to DEBUG",
# )
def main(
repo,
description,
latest_version,
title="Changelog",
output="CHANGELOG.md",
remote ="origin",
unreleased=False,
diff_url=None,
issue_url=r"(#([\w-]+))",
issue_pattern=None,
tag_prefix="",
stdout=True,
tag_pattern=None,
starting_commit="",
stopping_commit ="HEAD",
debug = False
):
if debug:
logging.basicConfig(level=logging.DEBUG)
logging.debug("Logging level has been set to DEBUG")
# Convert the repository name to an absolute path
repo = os.path.abspath(repo)
repository = GitRepository(
repo,
latest_version=latest_version,
skip_unreleased=not unreleased,
tag_prefix=tag_prefix,
tag_pattern=tag_pattern,
)
presenter = MarkdownPresenter()
changelog = generate_changelog(
repository,
presenter,
title,
description,
remote=remote,
issue_pattern=issue_pattern,
issue_url=issue_url,
diff_url=diff_url,
starting_commit=starting_commit,
stopping_commit=stopping_commit,
)
# if stdout:
# print(changelog)
# else:
# output.write(changelog)
changelog_file = os.path.join(repo, "CHANGELOG.md")
write_changelog(changelog_file, changelog)
def write_changelog(changelog_file, changelog):
if os.path.exists(changelog_file):
with open(changelog_file, 'r') as f:
data = f.read()
with open(changelog_file, 'w') as f:
# f.write(changelog + '\n\n' + data)
f.write(changelog)
else:
with open(changelog_file, 'w') as f:
f.write(changelog)
if __name__ == "__main__":
main() | <filename>git_talk/lib/changelog/main.py<gh_stars>1-10
import os
import logging
from typing import Optional
import click
from git_talk.lib.changelog import generate_changelog
from git_talk.lib.changelog.presenter import MarkdownPresenter
from git_talk.lib.changelog.repository import GitRepository
# @click.command()
# @click.option(
# "-r",
# "--repo",
# type=click.Path(exists=True),
# default=".",
# help="Path to the repository's root directory [Default: .]",
# )
# @click.option("-t", "--title", default="Changelog", help="The changelog's title [Default: Changelog]")
# @click.option("-d", "--description", help="Your project's description")
# @click.option(
# "-o",
# "--output",
# type=click.File("w"),
# default="CHANGELOG.md",
# help="The place to save the generated changelog [Default: CHANGELOG.md]",
# )
# @click.option("-r", "--remote", default="origin", help="Specify git remote to use for links")
# @click.option("-v", "--latest-version", type=str, help="use specified version as latest release")
# @click.option("-u", "--unreleased", is_flag=True, default=False, help="Include section for unreleased changes")
# @click.option("--diff-url", default=None, help="override url for compares, use {current} and {previous} for tags")
# @click.option("--issue-url", default=None, help="Override url for issues, use {id} for issue id")
# @click.option(
# "--issue-pattern",
# default=r"(#([\w-]+))",
# help="Override regex pattern for issues in commit messages. Should contain two groups, original match and ID used "
# "by issue-url.",
# )
# @click.option(
# "--tag-pattern",
# default=None,
# help="override regex pattern for release tags. "
# "By default use semver tag names semantic. "
# "tag should be contain in one group named 'version'.",
# )
# @click.option("--tag-prefix", default="", help='prefix used in version tags, default: "" ')
# @click.option("--stdout", is_flag=True)
# @click.option("--tag-pattern", default=None, help="Override regex pattern for release tags")
# @click.option("--starting-commit", help="Starting commit to use for changelog generation", default="")
# @click.option("--stopping-commit", help="Stopping commit to use for changelog generation", default="HEAD")
# @click.option(
# "--debug", is_flag=True, help="set logging level to DEBUG",
# )
def main(
repo,
description,
latest_version,
title="Changelog",
output="CHANGELOG.md",
remote ="origin",
unreleased=False,
diff_url=None,
issue_url=r"(#([\w-]+))",
issue_pattern=None,
tag_prefix="",
stdout=True,
tag_pattern=None,
starting_commit="",
stopping_commit ="HEAD",
debug = False
):
if debug:
logging.basicConfig(level=logging.DEBUG)
logging.debug("Logging level has been set to DEBUG")
# Convert the repository name to an absolute path
repo = os.path.abspath(repo)
repository = GitRepository(
repo,
latest_version=latest_version,
skip_unreleased=not unreleased,
tag_prefix=tag_prefix,
tag_pattern=tag_pattern,
)
presenter = MarkdownPresenter()
changelog = generate_changelog(
repository,
presenter,
title,
description,
remote=remote,
issue_pattern=issue_pattern,
issue_url=issue_url,
diff_url=diff_url,
starting_commit=starting_commit,
stopping_commit=stopping_commit,
)
# if stdout:
# print(changelog)
# else:
# output.write(changelog)
changelog_file = os.path.join(repo, "CHANGELOG.md")
write_changelog(changelog_file, changelog)
def write_changelog(changelog_file, changelog):
if os.path.exists(changelog_file):
with open(changelog_file, 'r') as f:
data = f.read()
with open(changelog_file, 'w') as f:
# f.write(changelog + '\n\n' + data)
f.write(changelog)
else:
with open(changelog_file, 'w') as f:
f.write(changelog)
if __name__ == "__main__":
main() | en | 0.283295 | # @click.command() # @click.option( # "-r", # "--repo", # type=click.Path(exists=True), # default=".", # help="Path to the repository's root directory [Default: .]", # ) # @click.option("-t", "--title", default="Changelog", help="The changelog's title [Default: Changelog]") # @click.option("-d", "--description", help="Your project's description") # @click.option( # "-o", # "--output", # type=click.File("w"), # default="CHANGELOG.md", # help="The place to save the generated changelog [Default: CHANGELOG.md]", # ) # @click.option("-r", "--remote", default="origin", help="Specify git remote to use for links") # @click.option("-v", "--latest-version", type=str, help="use specified version as latest release") # @click.option("-u", "--unreleased", is_flag=True, default=False, help="Include section for unreleased changes") # @click.option("--diff-url", default=None, help="override url for compares, use {current} and {previous} for tags") # @click.option("--issue-url", default=None, help="Override url for issues, use {id} for issue id") # @click.option( # "--issue-pattern", # default=r"(#([\w-]+))", # help="Override regex pattern for issues in commit messages. Should contain two groups, original match and ID used " # "by issue-url.", # ) # @click.option( # "--tag-pattern", # default=None, # help="override regex pattern for release tags. " # "By default use semver tag names semantic. " # "tag should be contain in one group named 'version'.", # ) # @click.option("--tag-prefix", default="", help='prefix used in version tags, default: "" ') # @click.option("--stdout", is_flag=True) # @click.option("--tag-pattern", default=None, help="Override regex pattern for release tags") # @click.option("--starting-commit", help="Starting commit to use for changelog generation", default="") # @click.option("--stopping-commit", help="Stopping commit to use for changelog generation", default="HEAD") # @click.option( # "--debug", is_flag=True, help="set logging level to DEBUG", # ) #([\w-]+))", # Convert the repository name to an absolute path # if stdout: # print(changelog) # else: # output.write(changelog) # f.write(changelog + '\n\n' + data) | 2.283237 | 2 |
SQED-Generator/Generators/constraint_generator.py | upscale-project/generic-sqed-demo | 6 | 10715 | <reponame>upscale-project/generic-sqed-demo
# Copyright (c) Stanford University
#
# This source code is patent protected and being made available under the
# terms explained in the ../LICENSE-Academic and ../LICENSE-GOV files.
# Author: <NAME>
# Email: <EMAIL>
import copy
import sys
sys.path.append("../FormatParsers/")
sys.path.append("../Interface/")
import format_parser as P
import module_interface as I
def generate_constraints_file(MODULENAME, INPUTS, OUTPUTS, format_dicts):
# Get ISA information
isa_info = format_dicts["ISA"]
# Get register names
registers = format_dicts["REGISTERS"]
# Get memory fields needed for modification
memory = format_dicts["MEMORY"]
# Get constraints for qed module setup
qed_constraints = format_dicts["QEDCONSTRAINTS"]
# Get the instruction types
ins_types = format_dicts["INSTYPES"]
# Get the instruction fields for each type
ins_fields = format_dicts["INSFIELDS"]
# Get instruction types requirements
ins_reqs = format_dicts["INSREQS"]
# Get the bit fields
bit_fields = format_dicts["BITFIELDS"]
# Get all instruction types
instructions = {}
for ins in format_dicts["INSTYPES"].keys():
if ins != "CONSTRAINT":
instructions[ins] = format_dicts[ins]
# Verilog file
verilog = ""
# Adds module header definition
verilog += I.module_header(MODULENAME, INPUTS, OUTPUTS)
verilog += I.newline(2)
# Instantiate inputs
for inp in INPUTS:
verilog += I.signal_def(INPUTS[inp], "input", inp, num_spaces=2)
verilog += I.newline(1)
# Instantiate outputs
for out in OUTPUTS:
verilog += I.signal_def(OUTPUTS[out], "output", out, num_spaces=2)
verilog += I.newline(1)
# Instantiate bit fields
verilog += I.newline(1)
for bit_field in bit_fields:
if bit_field != "CONSTRAINT":
msb, lsb = bit_fields[bit_field].split()
bits = int(msb) - int(lsb) + 1
verilog += I.signal_def(bits, "wire", bit_field, num_spaces=2)
verilog += I.newline(1)
# Instantiate instructions
verilog += I.newline(1)
for ins_type in instructions:
if ins_type != "NOP":
verilog += I.signal_def(1, "wire", "FORMAT_"+ins_type, num_spaces=2)
verilog += I.newline(1)
verilog += I.signal_def(1, "wire", "ALLOWED_"+ins_type, num_spaces=2)
verilog += I.newline(1)
for ins in instructions[ins_type]:
if ins != "CONSTRAINT":
verilog += I.signal_def(1, "wire", ins, num_spaces=2)
verilog += I.newline(1)
verilog += I.newline(1)
# Assign bit fields
for bit_field in bit_fields:
if bit_field != "CONSTRAINT":
msb, lsb = bit_fields[bit_field].split()
verilog += I.assign_def(bit_field, I.signal_index("instruction", msb, lsb), num_spaces=2)
verilog += I.newline(1)
# Assign instruction types
verilog += I.newline(1)
for ins_type in instructions:
type_constraints = instructions[ins_type]["CONSTRAINT"]
constraints = type_constraints
if qed_constraints["half_registers"] == "1":
fields = ins_fields[ins_type].split()
for field in fields:
if field in registers:
constraints.append(I._lt(field, str(int(isa_info["num_registers"])/2), parens=True))
if ins_type != "NOP" and len(constraints) > 0:
expression = constraints[0]
for i in range(1, len(constraints)):
expression = I._and(expression, constraints[i], parens=False)
verilog += I.assign_def("FORMAT_"+ins_type, expression, num_spaces=2)
verilog += I.newline(1)
allowed_expression = ""
for ins in instructions[ins_type]:
if ins != "CONSTRAINT":
fields = instructions[ins_type][ins]
reqs = fields["CONSTRAINT"]
for field in fields:
if field != "CONSTRAINT":
if type(fields[field]) == type([]):
first = fields[field][0]
req_expression = I._equals(field, I._constant(len(first), first), parens=True)
for req in fields[field][1:]:
equality = I._equals(field, I._constant(len(req), req), parens=True)
req_expression = I._or(req_expression, equality, parens=False)
req_expression = "(" + req_expression + ")"
reqs.append(req_expression)
else:
equality = I._equals(field, I._constant(len(fields[field]), fields[field]), parens=True)
reqs.append(equality)
if ins != "NOP":
reqs_expression = "FORMAT_" + ins_type
for i in range(len(reqs)):
reqs_expression = I._and(reqs_expression, reqs[i], parens=False)
else:
reqs_expression = reqs[0]
for i in range(1, len(reqs)):
reqs_expression = I._and(reqs_expression, reqs[i], parens=False)
verilog += I.assign_def(ins, reqs_expression, num_spaces=2)
verilog += I.newline(1)
if allowed_expression == "":
allowed_expression = ins
else:
allowed_expression = I._or(allowed_expression, ins, parens=False)
verilog += I.assign_def("ALLOWED_"+ins_type, allowed_expression, num_spaces=2)
verilog += I.newline(2)
# Property assertion
assertions = instructions.keys()
property_expression = ""
for ins_type in assertions:
if property_expression == "":
property_expression = "ALLOWED_" + ins_type
else:
property_expression = I._or(property_expression, "ALLOWED_"+ins_type, parens=False)
verilog += I.always_def("clk", num_spaces=2) + I.begin(num_spaces=1)
verilog += I.newline(1)
verilog += I.property_def(property_expression, num_spaces=4)
verilog += I.newline(1)
verilog += I.end(num_spaces=2)
verilog += I.newline(1)
# End module with footer
verilog += I.newline(1)
verilog += I.module_footer()
return verilog
| # Copyright (c) Stanford University
#
# This source code is patent protected and being made available under the
# terms explained in the ../LICENSE-Academic and ../LICENSE-GOV files.
# Author: <NAME>
# Email: <EMAIL>
import copy
import sys
sys.path.append("../FormatParsers/")
sys.path.append("../Interface/")
import format_parser as P
import module_interface as I
def generate_constraints_file(MODULENAME, INPUTS, OUTPUTS, format_dicts):
# Get ISA information
isa_info = format_dicts["ISA"]
# Get register names
registers = format_dicts["REGISTERS"]
# Get memory fields needed for modification
memory = format_dicts["MEMORY"]
# Get constraints for qed module setup
qed_constraints = format_dicts["QEDCONSTRAINTS"]
# Get the instruction types
ins_types = format_dicts["INSTYPES"]
# Get the instruction fields for each type
ins_fields = format_dicts["INSFIELDS"]
# Get instruction types requirements
ins_reqs = format_dicts["INSREQS"]
# Get the bit fields
bit_fields = format_dicts["BITFIELDS"]
# Get all instruction types
instructions = {}
for ins in format_dicts["INSTYPES"].keys():
if ins != "CONSTRAINT":
instructions[ins] = format_dicts[ins]
# Verilog file
verilog = ""
# Adds module header definition
verilog += I.module_header(MODULENAME, INPUTS, OUTPUTS)
verilog += I.newline(2)
# Instantiate inputs
for inp in INPUTS:
verilog += I.signal_def(INPUTS[inp], "input", inp, num_spaces=2)
verilog += I.newline(1)
# Instantiate outputs
for out in OUTPUTS:
verilog += I.signal_def(OUTPUTS[out], "output", out, num_spaces=2)
verilog += I.newline(1)
# Instantiate bit fields
verilog += I.newline(1)
for bit_field in bit_fields:
if bit_field != "CONSTRAINT":
msb, lsb = bit_fields[bit_field].split()
bits = int(msb) - int(lsb) + 1
verilog += I.signal_def(bits, "wire", bit_field, num_spaces=2)
verilog += I.newline(1)
# Instantiate instructions
verilog += I.newline(1)
for ins_type in instructions:
if ins_type != "NOP":
verilog += I.signal_def(1, "wire", "FORMAT_"+ins_type, num_spaces=2)
verilog += I.newline(1)
verilog += I.signal_def(1, "wire", "ALLOWED_"+ins_type, num_spaces=2)
verilog += I.newline(1)
for ins in instructions[ins_type]:
if ins != "CONSTRAINT":
verilog += I.signal_def(1, "wire", ins, num_spaces=2)
verilog += I.newline(1)
verilog += I.newline(1)
# Assign bit fields
for bit_field in bit_fields:
if bit_field != "CONSTRAINT":
msb, lsb = bit_fields[bit_field].split()
verilog += I.assign_def(bit_field, I.signal_index("instruction", msb, lsb), num_spaces=2)
verilog += I.newline(1)
# Assign instruction types
verilog += I.newline(1)
for ins_type in instructions:
type_constraints = instructions[ins_type]["CONSTRAINT"]
constraints = type_constraints
if qed_constraints["half_registers"] == "1":
fields = ins_fields[ins_type].split()
for field in fields:
if field in registers:
constraints.append(I._lt(field, str(int(isa_info["num_registers"])/2), parens=True))
if ins_type != "NOP" and len(constraints) > 0:
expression = constraints[0]
for i in range(1, len(constraints)):
expression = I._and(expression, constraints[i], parens=False)
verilog += I.assign_def("FORMAT_"+ins_type, expression, num_spaces=2)
verilog += I.newline(1)
allowed_expression = ""
for ins in instructions[ins_type]:
if ins != "CONSTRAINT":
fields = instructions[ins_type][ins]
reqs = fields["CONSTRAINT"]
for field in fields:
if field != "CONSTRAINT":
if type(fields[field]) == type([]):
first = fields[field][0]
req_expression = I._equals(field, I._constant(len(first), first), parens=True)
for req in fields[field][1:]:
equality = I._equals(field, I._constant(len(req), req), parens=True)
req_expression = I._or(req_expression, equality, parens=False)
req_expression = "(" + req_expression + ")"
reqs.append(req_expression)
else:
equality = I._equals(field, I._constant(len(fields[field]), fields[field]), parens=True)
reqs.append(equality)
if ins != "NOP":
reqs_expression = "FORMAT_" + ins_type
for i in range(len(reqs)):
reqs_expression = I._and(reqs_expression, reqs[i], parens=False)
else:
reqs_expression = reqs[0]
for i in range(1, len(reqs)):
reqs_expression = I._and(reqs_expression, reqs[i], parens=False)
verilog += I.assign_def(ins, reqs_expression, num_spaces=2)
verilog += I.newline(1)
if allowed_expression == "":
allowed_expression = ins
else:
allowed_expression = I._or(allowed_expression, ins, parens=False)
verilog += I.assign_def("ALLOWED_"+ins_type, allowed_expression, num_spaces=2)
verilog += I.newline(2)
# Property assertion
assertions = instructions.keys()
property_expression = ""
for ins_type in assertions:
if property_expression == "":
property_expression = "ALLOWED_" + ins_type
else:
property_expression = I._or(property_expression, "ALLOWED_"+ins_type, parens=False)
verilog += I.always_def("clk", num_spaces=2) + I.begin(num_spaces=1)
verilog += I.newline(1)
verilog += I.property_def(property_expression, num_spaces=4)
verilog += I.newline(1)
verilog += I.end(num_spaces=2)
verilog += I.newline(1)
# End module with footer
verilog += I.newline(1)
verilog += I.module_footer()
return verilog | en | 0.70028 | # Copyright (c) Stanford University # # This source code is patent protected and being made available under the # terms explained in the ../LICENSE-Academic and ../LICENSE-GOV files. # Author: <NAME> # Email: <EMAIL> # Get ISA information # Get register names # Get memory fields needed for modification # Get constraints for qed module setup # Get the instruction types # Get the instruction fields for each type # Get instruction types requirements # Get the bit fields # Get all instruction types # Verilog file # Adds module header definition # Instantiate inputs # Instantiate outputs # Instantiate bit fields # Instantiate instructions # Assign bit fields # Assign instruction types # Property assertion # End module with footer | 2.368483 | 2 |
modules/losses.py | Sapperdomonik/retinaface-tf2 | 0 | 10716 | import tensorflow as tf
def _smooth_l1_loss(y_true, y_pred):
t = tf.abs(y_pred - y_true)
return tf.where(t < 1, 0.5 * t ** 2, t - 0.5)
def MultiBoxLoss(num_class=2, neg_pos_ratio=3):
"""multi-box loss"""
def multi_box_loss(y_true, y_pred):
num_batch = tf.shape(y_true)[0]
num_prior = tf.shape(y_true)[1]
loc_pred = tf.reshape(y_pred[0], [num_batch * num_prior, 4])
landm_pred = tf.reshape(y_pred[1], [num_batch * num_prior, 8])
class_pred = tf.reshape(y_pred[2], [num_batch * num_prior, num_class])
loc_true = tf.reshape(y_true[..., :4], [num_batch * num_prior, 4])
landm_true = tf.reshape(y_true[..., 4:12], [num_batch * num_prior, 8])
landm_valid = tf.reshape(y_true[..., 12], [num_batch * num_prior, 1])
class_true = tf.reshape(y_true[..., 13], [num_batch * num_prior, 1])
# define filter mask: class_true = 1 (pos), 0 (neg), -1 (ignore)
# landm_valid = 1 (w landm), 0 (w/o landm)
mask_pos = tf.equal(class_true, 1)
mask_neg = tf.equal(class_true, 0)
mask_landm = tf.logical_and(tf.equal(landm_valid, 1), mask_pos)
# landm loss (smooth L1)
mask_landm_b = tf.broadcast_to(mask_landm, tf.shape(landm_true))
loss_landm = _smooth_l1_loss(tf.boolean_mask(landm_true, mask_landm_b),
tf.boolean_mask(landm_pred, mask_landm_b))
loss_landm = tf.reduce_mean(loss_landm)
# localization loss (smooth L1)
mask_pos_b = tf.broadcast_to(mask_pos, tf.shape(loc_true))
loss_loc = _smooth_l1_loss(tf.boolean_mask(loc_true, mask_pos_b),
tf.boolean_mask(loc_pred, mask_pos_b))
loss_loc = tf.reduce_mean(loss_loc)
# classification loss (crossentropy)
# 1. compute max conf across batch for hard negative mining
loss_class = tf.where(mask_neg,
1 - class_pred[:, 0][..., tf.newaxis], 0)
# 2. hard negative mining
loss_class = tf.reshape(loss_class, [num_batch, num_prior])
loss_class_idx = tf.argsort(loss_class, axis=1, direction='DESCENDING')
loss_class_idx_rank = tf.argsort(loss_class_idx, axis=1)
mask_pos_per_batch = tf.reshape(mask_pos, [num_batch, num_prior])
num_pos_per_batch = tf.reduce_sum(
tf.cast(mask_pos_per_batch, tf.float32), 1, keepdims=True)
num_pos_per_batch = tf.maximum(num_pos_per_batch, 1)
num_neg_per_batch = tf.minimum(neg_pos_ratio * num_pos_per_batch,
tf.cast(num_prior, tf.float32) - 1)
mask_hard_neg = tf.reshape(
tf.cast(loss_class_idx_rank, tf.float32) < num_neg_per_batch,
[num_batch * num_prior, 1])
# 3. classification loss including positive and negative examples
loss_class_mask = tf.logical_or(mask_pos, mask_hard_neg)
loss_class_mask_b = tf.broadcast_to(loss_class_mask,
tf.shape(class_pred))
filter_class_true = tf.boolean_mask(tf.cast(mask_pos, tf.float32),
loss_class_mask)
filter_class_pred = tf.boolean_mask(class_pred, loss_class_mask_b)
filter_class_pred = tf.reshape(filter_class_pred, [-1, num_class])
loss_class = tf.keras.losses.sparse_categorical_crossentropy(
y_true=filter_class_true, y_pred=filter_class_pred)
loss_class = tf.reduce_mean(loss_class)
return loss_loc, loss_landm, loss_class
return multi_box_loss
| import tensorflow as tf
def _smooth_l1_loss(y_true, y_pred):
t = tf.abs(y_pred - y_true)
return tf.where(t < 1, 0.5 * t ** 2, t - 0.5)
def MultiBoxLoss(num_class=2, neg_pos_ratio=3):
"""multi-box loss"""
def multi_box_loss(y_true, y_pred):
num_batch = tf.shape(y_true)[0]
num_prior = tf.shape(y_true)[1]
loc_pred = tf.reshape(y_pred[0], [num_batch * num_prior, 4])
landm_pred = tf.reshape(y_pred[1], [num_batch * num_prior, 8])
class_pred = tf.reshape(y_pred[2], [num_batch * num_prior, num_class])
loc_true = tf.reshape(y_true[..., :4], [num_batch * num_prior, 4])
landm_true = tf.reshape(y_true[..., 4:12], [num_batch * num_prior, 8])
landm_valid = tf.reshape(y_true[..., 12], [num_batch * num_prior, 1])
class_true = tf.reshape(y_true[..., 13], [num_batch * num_prior, 1])
# define filter mask: class_true = 1 (pos), 0 (neg), -1 (ignore)
# landm_valid = 1 (w landm), 0 (w/o landm)
mask_pos = tf.equal(class_true, 1)
mask_neg = tf.equal(class_true, 0)
mask_landm = tf.logical_and(tf.equal(landm_valid, 1), mask_pos)
# landm loss (smooth L1)
mask_landm_b = tf.broadcast_to(mask_landm, tf.shape(landm_true))
loss_landm = _smooth_l1_loss(tf.boolean_mask(landm_true, mask_landm_b),
tf.boolean_mask(landm_pred, mask_landm_b))
loss_landm = tf.reduce_mean(loss_landm)
# localization loss (smooth L1)
mask_pos_b = tf.broadcast_to(mask_pos, tf.shape(loc_true))
loss_loc = _smooth_l1_loss(tf.boolean_mask(loc_true, mask_pos_b),
tf.boolean_mask(loc_pred, mask_pos_b))
loss_loc = tf.reduce_mean(loss_loc)
# classification loss (crossentropy)
# 1. compute max conf across batch for hard negative mining
loss_class = tf.where(mask_neg,
1 - class_pred[:, 0][..., tf.newaxis], 0)
# 2. hard negative mining
loss_class = tf.reshape(loss_class, [num_batch, num_prior])
loss_class_idx = tf.argsort(loss_class, axis=1, direction='DESCENDING')
loss_class_idx_rank = tf.argsort(loss_class_idx, axis=1)
mask_pos_per_batch = tf.reshape(mask_pos, [num_batch, num_prior])
num_pos_per_batch = tf.reduce_sum(
tf.cast(mask_pos_per_batch, tf.float32), 1, keepdims=True)
num_pos_per_batch = tf.maximum(num_pos_per_batch, 1)
num_neg_per_batch = tf.minimum(neg_pos_ratio * num_pos_per_batch,
tf.cast(num_prior, tf.float32) - 1)
mask_hard_neg = tf.reshape(
tf.cast(loss_class_idx_rank, tf.float32) < num_neg_per_batch,
[num_batch * num_prior, 1])
# 3. classification loss including positive and negative examples
loss_class_mask = tf.logical_or(mask_pos, mask_hard_neg)
loss_class_mask_b = tf.broadcast_to(loss_class_mask,
tf.shape(class_pred))
filter_class_true = tf.boolean_mask(tf.cast(mask_pos, tf.float32),
loss_class_mask)
filter_class_pred = tf.boolean_mask(class_pred, loss_class_mask_b)
filter_class_pred = tf.reshape(filter_class_pred, [-1, num_class])
loss_class = tf.keras.losses.sparse_categorical_crossentropy(
y_true=filter_class_true, y_pred=filter_class_pred)
loss_class = tf.reduce_mean(loss_class)
return loss_loc, loss_landm, loss_class
return multi_box_loss
| en | 0.788595 | multi-box loss # define filter mask: class_true = 1 (pos), 0 (neg), -1 (ignore) # landm_valid = 1 (w landm), 0 (w/o landm) # landm loss (smooth L1) # localization loss (smooth L1) # classification loss (crossentropy) # 1. compute max conf across batch for hard negative mining # 2. hard negative mining # 3. classification loss including positive and negative examples | 2.432049 | 2 |
tests.py | ckelly/pybingmaps | 0 | 10717 | <reponame>ckelly/pybingmaps
import unittest
import random
from time import sleep
import os
from bingmaps import *
class BingMapsTestError(Exception):
"""Bing Maps test exception"""
def __init__(self, reason):
self.reason = unicode(reason)
def __str__(self):
return self.reason
# TODO: enter your key for testing
api_key = ''
class DirectionsTests(unittest.TestCase):
def setUp(self):
self.api = BingMapsAPI(api_key=api_key)
def testBasicNav(self):
# start - 717 Market St
# end - Ferry Plaza, San Francisco, CA
# we shrunk the precision to match return values for easier comparison
start_lat = "37.786861"
start_lon = "-122.403689"
end_lat = "37.795556"
end_lon = "-122.392124"
start = start_lat+","+start_lon
end = end_lat+","+end_lon
ret = self.api.routes(waypoints=[start, end])
# verify start and end points are reflected in response
self.assertNotEqual(ret, {})
estimated_total = ret['resourceSets'][0]['estimatedTotal']
self.assertEqual(estimated_total, 1)
routeLegs = ret['resourceSets'][0]['resources'][0]['routeLegs']
self.assertEqual(len(routeLegs), 1)
itinerary_items = routeLegs[0]['itineraryItems']
self.assertNotEqual(itinerary_items, [])
# skip the last step, as it doesn't have a transport Mode
for i in itinerary_items:
self.assertEqual(i['details'][0]['mode'], 'Driving')
def testPedestrianNav(self):
start_lat = "37.7868609332517"
start_lon = "-122.403689949149"
end_lat = "37.795556930015"
end_lon = "-122.392124051039"
start = start_lat+","+start_lon
end = end_lat+","+end_lon
ret = self.api.routes(waypoints=[start,end], travelMode='Walking')
self.assertNotEqual(ret, {})
legs = ret['resourceSets'][0]['resources'][0]['routeLegs']
self.assertNotEqual(legs, [])
legs = legs[0]
itinerary_items = legs['itineraryItems']
self.assertNotEqual(itinerary_items, [])
# skip the last step, as it doesn't have a transport Mode
for i in itinerary_items:
self.assertEqual(i['details'][0]['mode'], 'Walking')
if __name__ == '__main__':
unittest.main() | import unittest
import random
from time import sleep
import os
from bingmaps import *
class BingMapsTestError(Exception):
"""Bing Maps test exception"""
def __init__(self, reason):
self.reason = unicode(reason)
def __str__(self):
return self.reason
# TODO: enter your key for testing
api_key = ''
class DirectionsTests(unittest.TestCase):
def setUp(self):
self.api = BingMapsAPI(api_key=api_key)
def testBasicNav(self):
# start - 717 Market St
# end - Ferry Plaza, San Francisco, CA
# we shrunk the precision to match return values for easier comparison
start_lat = "37.786861"
start_lon = "-122.403689"
end_lat = "37.795556"
end_lon = "-122.392124"
start = start_lat+","+start_lon
end = end_lat+","+end_lon
ret = self.api.routes(waypoints=[start, end])
# verify start and end points are reflected in response
self.assertNotEqual(ret, {})
estimated_total = ret['resourceSets'][0]['estimatedTotal']
self.assertEqual(estimated_total, 1)
routeLegs = ret['resourceSets'][0]['resources'][0]['routeLegs']
self.assertEqual(len(routeLegs), 1)
itinerary_items = routeLegs[0]['itineraryItems']
self.assertNotEqual(itinerary_items, [])
# skip the last step, as it doesn't have a transport Mode
for i in itinerary_items:
self.assertEqual(i['details'][0]['mode'], 'Driving')
def testPedestrianNav(self):
start_lat = "37.7868609332517"
start_lon = "-122.403689949149"
end_lat = "37.795556930015"
end_lon = "-122.392124051039"
start = start_lat+","+start_lon
end = end_lat+","+end_lon
ret = self.api.routes(waypoints=[start,end], travelMode='Walking')
self.assertNotEqual(ret, {})
legs = ret['resourceSets'][0]['resources'][0]['routeLegs']
self.assertNotEqual(legs, [])
legs = legs[0]
itinerary_items = legs['itineraryItems']
self.assertNotEqual(itinerary_items, [])
# skip the last step, as it doesn't have a transport Mode
for i in itinerary_items:
self.assertEqual(i['details'][0]['mode'], 'Walking')
if __name__ == '__main__':
unittest.main() | en | 0.865721 | Bing Maps test exception # TODO: enter your key for testing # start - 717 Market St # end - Ferry Plaza, San Francisco, CA # we shrunk the precision to match return values for easier comparison # verify start and end points are reflected in response # skip the last step, as it doesn't have a transport Mode # skip the last step, as it doesn't have a transport Mode | 3.15805 | 3 |
fds/config.py | dvershinin/fds | 9 | 10718 | <filename>fds/config.py
from cds.CloudflareWrapper import suggest_set_up, cf_config_filename
from .FirewallWrapper import FirewallWrapper
import logging as log
def open_web_if_webserver_running():
fw = FirewallWrapper()
from .utils import is_process_running, query_yes_no
webserver_running = is_process_running('nginx')
if webserver_running:
zone = fw.fw.getDefaultZone()
zone_services = fw.fw.getServices(zone)
if 'http' not in zone_services or 'https' not in zone_services:
open_web = query_yes_no('Webserver is running. Open up HTTP/HTTPs ports?')
if open_web:
fw.add_service('http')
fw.add_service('https')
else:
log.info('Webserver is running and ports are already open.')
def action_config():
# if nginx runs, check/ask to ensure open web ports:
open_web_if_webserver_running()
# if cloudflare.cfg is missing, check/ask to ensure Cloudflare support:
from cds.CloudflareWrapper import CloudflareWrapper
cw = CloudflareWrapper()
if cw.use:
log.info('Cloudflare integration validated.')
else:
suggest_set_up()
| <filename>fds/config.py
from cds.CloudflareWrapper import suggest_set_up, cf_config_filename
from .FirewallWrapper import FirewallWrapper
import logging as log
def open_web_if_webserver_running():
fw = FirewallWrapper()
from .utils import is_process_running, query_yes_no
webserver_running = is_process_running('nginx')
if webserver_running:
zone = fw.fw.getDefaultZone()
zone_services = fw.fw.getServices(zone)
if 'http' not in zone_services or 'https' not in zone_services:
open_web = query_yes_no('Webserver is running. Open up HTTP/HTTPs ports?')
if open_web:
fw.add_service('http')
fw.add_service('https')
else:
log.info('Webserver is running and ports are already open.')
def action_config():
# if nginx runs, check/ask to ensure open web ports:
open_web_if_webserver_running()
# if cloudflare.cfg is missing, check/ask to ensure Cloudflare support:
from cds.CloudflareWrapper import CloudflareWrapper
cw = CloudflareWrapper()
if cw.use:
log.info('Cloudflare integration validated.')
else:
suggest_set_up()
| en | 0.661594 | # if nginx runs, check/ask to ensure open web ports: # if cloudflare.cfg is missing, check/ask to ensure Cloudflare support: | 2.414877 | 2 |
awardapp/migrations/0004_auto_20191024_1607.py | Elisephan/Awards-project | 0 | 10719 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-10-24 16:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('awardapp', '0003_auto_20191024_1606'),
]
operations = [
migrations.AlterField(
model_name='project',
name='link',
field=models.TextField(max_length=130),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-10-24 16:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('awardapp', '0003_auto_20191024_1606'),
]
operations = [
migrations.AlterField(
model_name='project',
name='link',
field=models.TextField(max_length=130),
),
]
| en | 0.792713 | # -*- coding: utf-8 -*- # Generated by Django 1.11 on 2019-10-24 16:07 | 1.372422 | 1 |
hikari/events/channel_events.py | Reliku/hikari | 0 | 10720 | <reponame>Reliku/hikari<gh_stars>0
# -*- coding: utf-8 -*-
# cython: language_level=3
# Copyright (c) 2020 Nekokatt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Events that fire when channels are modified.
This does not include message events, nor reaction events.
"""
from __future__ import annotations
__all__: typing.List[str] = [
"ChannelEvent",
"GuildChannelEvent",
"DMChannelEvent",
"ChannelCreateEvent",
"GuildChannelCreateEvent",
"ChannelUpdateEvent",
"GuildChannelUpdateEvent",
"ChannelDeleteEvent",
"GuildChannelDeleteEvent",
"PinsUpdateEvent",
"GuildPinsUpdateEvent",
"DMPinsUpdateEvent",
"InviteCreateEvent",
"InviteDeleteEvent",
"WebhookUpdateEvent",
]
import abc
import typing
import attr
from hikari import channels
from hikari import intents
from hikari import traits
from hikari.events import base_events
from hikari.events import shard_events
from hikari.internal import attr_extensions
if typing.TYPE_CHECKING:
import datetime
from hikari import guilds
from hikari import invites
from hikari import messages
from hikari import snowflakes
from hikari import webhooks
from hikari.api import shard as gateway_shard
@base_events.requires_intents(intents.Intents.GUILDS, intents.Intents.DM_MESSAGES)
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class ChannelEvent(shard_events.ShardEvent, abc.ABC):
"""Event base for any channel-bound event in guilds or private messages."""
@property
@abc.abstractmethod
def channel_id(self) -> snowflakes.Snowflake:
"""ID of the channel the event relates to.
Returns
-------
hikari.snowflakes.Snowflake
The ID of the channel this event relates to.
"""
@abc.abstractmethod
async def fetch_channel(self) -> channels.PartialChannel:
"""Perform an API call to fetch the details about this channel.
!!! note
For `ChannelDeleteEvent`-derived events, this will always raise
an exception, since the channel will have already been removed.
Returns
-------
hikari.channels.PartialChannel
A derivative of `hikari.channels.PartialChannel`. The actual
type will vary depending on the type of channel this event
concerns.
"""
@base_events.requires_intents(intents.Intents.GUILDS)
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class GuildChannelEvent(ChannelEvent, abc.ABC):
"""Event base for any channel-bound event in guilds."""
@property
@abc.abstractmethod
def guild_id(self) -> snowflakes.Snowflake:
"""ID of the guild that this event relates to.
Returns
-------
hikari.snowflakes.Snowflake
The ID of the guild that relates to this event.
"""
@property
def guild(self) -> typing.Optional[guilds.GatewayGuild]:
"""Get the cached guild that this event relates to, if known.
If not, return `builtins.None`.
Returns
-------
typing.Optional[hikari.guilds.GatewayGuild]
The gateway guild this event relates to, if known. Otherwise
this will return `builtins.None`.
"""
return self.app.cache.get_available_guild(self.guild_id) or self.app.cache.get_unavailable_guild(self.guild_id)
async def fetch_guild(self) -> guilds.RESTGuild:
"""Perform an API call to fetch the guild that this event relates to.
Returns
-------
hikari.guilds.RESTGuild
The guild that this event occurred in.
"""
return await self.app.rest.fetch_guild(self.guild_id)
@property
def channel(self) -> typing.Optional[channels.GuildChannel]:
"""Get the cached channel that this event relates to, if known.
If not, return `builtins.None`.
Returns
-------
typing.Optional[hikari.channels.GuildChannel]
The cached channel this event relates to. If not known, this
will return `builtins.None` instead.
"""
return self.app.cache.get_guild_channel(self.channel_id)
async def fetch_channel(self) -> channels.GuildChannel:
"""Perform an API call to fetch the details about this channel.
!!! note
For `ChannelDeleteEvent`-derived events, this will always raise
an exception, since the channel will have already been removed.
Returns
-------
hikari.channels.GuildChannel
A derivative of `hikari.channels.GuildChannel`. The actual
type will vary depending on the type of channel this event
concerns.
"""
channel = await self.app.rest.fetch_channel(self.channel_id)
assert isinstance(channel, channels.GuildChannel)
return channel
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class DMChannelEvent(ChannelEvent, abc.ABC):
"""Event base for any channel-bound event in private messages."""
async def fetch_channel(self) -> channels.PrivateChannel:
"""Perform an API call to fetch the details about this channel.
!!! note
For `ChannelDeleteEvent`-derived events, this will always raise
an exception, since the channel will have already been removed.
Returns
-------
hikari.channels.PrivateChannel
A derivative of `hikari.channels.PrivateChannel`. The actual
type will vary depending on the type of channel this event
concerns.
"""
channel = await self.app.rest.fetch_channel(self.channel_id)
assert isinstance(channel, channels.PrivateChannel)
return channel
@base_events.requires_intents(intents.Intents.GUILDS, intents.Intents.DM_MESSAGES)
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class ChannelCreateEvent(ChannelEvent, abc.ABC):
"""Base event for any channel being created."""
@property
@abc.abstractmethod
def channel(self) -> channels.PartialChannel:
"""Channel this event represents.
Returns
-------
hikari.channels.PartialChannel
The channel that was created.
"""
@property
def channel_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from ChannelEvent>>.
return self.channel.id
@base_events.requires_intents(intents.Intents.GUILDS)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class GuildChannelCreateEvent(GuildChannelEvent, ChannelCreateEvent):
"""Event fired when a guild channel is created."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
channel: channels.GuildChannel = attr.ib(repr=True)
"""Guild channel that this event represents.
Returns
-------
hikari.channels.GuildChannel
The guild channel that was created.
"""
@property
def guild_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from GuildChannelEvent>>.
return self.channel.guild_id
@base_events.requires_intents(intents.Intents.GUILDS, intents.Intents.DM_MESSAGES)
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class ChannelUpdateEvent(ChannelEvent, abc.ABC):
"""Base event for any channel being updated."""
@property
@abc.abstractmethod
def channel(self) -> channels.PartialChannel:
"""Channel this event represents.
Returns
-------
hikari.channels.PartialChannel
The channel that was updated.
"""
@property
def channel_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from ChannelEvent>>.
return self.channel.id
@base_events.requires_intents(intents.Intents.GUILDS)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class GuildChannelUpdateEvent(GuildChannelEvent, ChannelUpdateEvent):
"""Event fired when a guild channel is edited."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
old_channel: channels.GuildChannel = attr.ib(repr=True)
"""Old guild channel object from cache.
Returns
-------
hikari.channels.GuildChannel
"""
channel: channels.GuildChannel = attr.ib(repr=True)
"""Guild channel that this event represents.
Returns
-------
hikari.channels.GuildChannel
The guild channel that was updated.
"""
@property
def guild_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from GuildChannelEvent>>.
return self.channel.guild_id
@base_events.requires_intents(intents.Intents.GUILDS, intents.Intents.DM_MESSAGES)
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class ChannelDeleteEvent(ChannelEvent, abc.ABC):
"""Base event for any channel being deleted."""
@property
@abc.abstractmethod
def channel(self) -> channels.PartialChannel:
"""Channel this event represents.
Returns
-------
hikari.channels.PartialChannel
The channel that was deleted.
"""
@property
def channel_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from ChannelEvent>>.
return self.channel.id
if typing.TYPE_CHECKING:
# Channel will never be found.
async def fetch_channel(self) -> typing.NoReturn:
...
@base_events.requires_intents(intents.Intents.GUILDS)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class GuildChannelDeleteEvent(GuildChannelEvent, ChannelDeleteEvent):
"""Event fired when a guild channel is deleted."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
channel: channels.GuildChannel = attr.ib(repr=True)
"""Guild channel that this event represents.
Returns
-------
hikari.channels.GuildChannel
The guild channel that was deleted.
"""
@property
def guild_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from GuildChannelEvent>>.
return self.channel.guild_id
if typing.TYPE_CHECKING:
# Channel will never be found.
async def fetch_channel(self) -> typing.NoReturn:
...
# TODO: find out what private message intents are needed.
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class PinsUpdateEvent(ChannelEvent, abc.ABC):
"""Base event fired when a message is pinned/unpinned in a channel."""
@property
@abc.abstractmethod
def last_pin_timestamp(self) -> typing.Optional[datetime.datetime]:
"""Datetime of when the most recent message was pinned in the channel.
Will be `builtins.None` if nothing is pinned or the information is
unavailable.
Returns
-------
typing.Optional[datetime.datetime]
The datetime of the most recent pinned message in the channel,
or `builtins.None` if no pins are available.
"""
@abc.abstractmethod
async def fetch_channel(self) -> channels.TextChannel:
"""Perform an API call to fetch the details about this channel.
Returns
-------
hikari.channels.TextChannel
A derivative of `hikari.channels.TextChannel`. The actual
type will vary depending on the type of channel this event
concerns.
"""
async def fetch_pins(self) -> typing.Sequence[messages.Message]:
"""Perform an API call to fetch the pinned messages in this channel.
Returns
-------
typing.Sequence[hikari.messages.Message]
The pinned messages in this channel.
"""
return await self.app.rest.fetch_pins(self.channel_id)
@base_events.requires_intents(intents.Intents.GUILDS)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class GuildPinsUpdateEvent(PinsUpdateEvent, GuildChannelEvent):
"""Event fired when a message is pinned/unpinned in a guild channel."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
channel_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from ChannelEvent>>.
guild_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from GuildChannelEvent>>.
last_pin_timestamp: typing.Optional[datetime.datetime] = attr.ib(repr=True)
# <<inherited docstring from ChannelPinsUpdateEvent>>.
@property
def channel(self) -> typing.Optional[channels.GuildTextChannel]:
"""Get the cached channel that this event relates to, if known.
If not, return `builtins.None`.
Returns
-------
typing.Optional[hikari.channels.GuildTextChannel]
The cached channel this event relates to. If not known, this
will return `builtins.None` instead.
"""
channel = self.app.cache.get_guild_channel(self.channel_id)
assert isinstance(channel, channels.GuildTextChannel)
return channel
async def fetch_channel(self) -> channels.GuildTextChannel:
"""Perform an API call to fetch the details about this channel.
Returns
-------
hikari.channels.GuildTextChannel
A derivative of `hikari.channels.GuildTextChannel`. The actual
type will vary depending on the type of channel this event
concerns.
"""
channel = await self.app.rest.fetch_channel(self.channel_id)
assert isinstance(channel, channels.GuildTextChannel)
return channel
# TODO: This is not documented as having an intent, is this right? The guild version requires GUILDS intent.
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class DMPinsUpdateEvent(PinsUpdateEvent, DMChannelEvent):
"""Event fired when a message is pinned/unpinned in a private channel."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
channel_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from ChannelEvent>>.
last_pin_timestamp: typing.Optional[datetime.datetime] = attr.ib(repr=True)
# <<inherited docstring from ChannelPinsUpdateEvent>>.
async def fetch_channel(self) -> channels.DMChannel:
"""Perform an API call to fetch the details about this channel.
Returns
-------
hikari.channels.DMChannel
A derivative of `hikari.channels.DMChannel`. The actual
type will vary depending on the type of channel this event
concerns.
"""
channel = await self.app.rest.fetch_channel(self.channel_id)
assert isinstance(channel, channels.DMChannel)
return channel
@base_events.requires_intents(intents.Intents.GUILD_INVITES)
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class InviteEvent(GuildChannelEvent, abc.ABC):
"""Base event type for guild invite updates."""
@property
@abc.abstractmethod
def code(self) -> str:
"""Code that is used in the URL for the invite.
Returns
-------
builtins.str
The invite code.
"""
async def fetch_invite(self) -> invites.Invite:
"""Perform an API call to retrieve an up-to-date image of this invite.
Returns
-------
hikari.invites.Invite
The invite object.
"""
return await self.app.rest.fetch_invite(self.code)
@base_events.requires_intents(intents.Intents.GUILD_INVITES)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class InviteCreateEvent(InviteEvent):
"""Event fired when an invite is created in a channel."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
invite: invites.InviteWithMetadata = attr.ib()
"""Invite that was created.
Returns
-------
hikari.invites.InviteWithMetadata
The created invite object.
"""
@property
def channel_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from ChannelEvent>>.
return self.invite.channel_id
@property
def guild_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from GuildChannelEvent>>.
# This will always be non-None for guild channel invites.
assert self.invite.guild_id is not None
return self.invite.guild_id
@property
def code(self) -> str:
# <<inherited docstring from InviteEvent>>.
return self.invite.code
@base_events.requires_intents(intents.Intents.GUILD_INVITES)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class InviteDeleteEvent(InviteEvent):
"""Event fired when an invite is deleted from a channel."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
channel_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from ChannelEvent>>.
guild_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from GuildChannelEvent>>.
code: str = attr.ib()
# <<inherited docstring from InviteEvent>>.
if typing.TYPE_CHECKING:
# Invite will never be found.
async def fetch_invite(self) -> typing.NoReturn:
...
@base_events.requires_intents(intents.Intents.GUILD_WEBHOOKS)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class WebhookUpdateEvent(GuildChannelEvent):
"""Event fired when a webhook is created/updated/deleted in a channel.
Unfortunately, Discord does not provide any information on what webhook
actually changed, nor specifically whether it was created/updated/deleted,
so this event is pretty useless unless you keep track of the webhooks in
the channel manually beforehand.
"""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
channel_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from ChannelEvent>>.
guild_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from GuildChannelEvent>>.
async def fetch_channel_webhooks(self) -> typing.Sequence[webhooks.Webhook]:
"""Perform an API call to fetch the webhooks for this channel.
Returns
-------
typing.Sequence[hikari.webhooks.Webhook]
The webhooks in this channel.
"""
return await self.app.rest.fetch_channel_webhooks(self.channel_id)
async def fetch_guild_webhooks(self) -> typing.Sequence[webhooks.Webhook]:
"""Perform an API call to fetch the webhooks for this guild.
Returns
-------
typing.Sequence[hikari.webhooks.Webhook]
The webhooks in this guild.
"""
return await self.app.rest.fetch_guild_webhooks(self.guild_id)
| # -*- coding: utf-8 -*-
# cython: language_level=3
# Copyright (c) 2020 Nekokatt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Events that fire when channels are modified.
This does not include message events, nor reaction events.
"""
from __future__ import annotations
__all__: typing.List[str] = [
"ChannelEvent",
"GuildChannelEvent",
"DMChannelEvent",
"ChannelCreateEvent",
"GuildChannelCreateEvent",
"ChannelUpdateEvent",
"GuildChannelUpdateEvent",
"ChannelDeleteEvent",
"GuildChannelDeleteEvent",
"PinsUpdateEvent",
"GuildPinsUpdateEvent",
"DMPinsUpdateEvent",
"InviteCreateEvent",
"InviteDeleteEvent",
"WebhookUpdateEvent",
]
import abc
import typing
import attr
from hikari import channels
from hikari import intents
from hikari import traits
from hikari.events import base_events
from hikari.events import shard_events
from hikari.internal import attr_extensions
if typing.TYPE_CHECKING:
import datetime
from hikari import guilds
from hikari import invites
from hikari import messages
from hikari import snowflakes
from hikari import webhooks
from hikari.api import shard as gateway_shard
@base_events.requires_intents(intents.Intents.GUILDS, intents.Intents.DM_MESSAGES)
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class ChannelEvent(shard_events.ShardEvent, abc.ABC):
"""Event base for any channel-bound event in guilds or private messages."""
@property
@abc.abstractmethod
def channel_id(self) -> snowflakes.Snowflake:
"""ID of the channel the event relates to.
Returns
-------
hikari.snowflakes.Snowflake
The ID of the channel this event relates to.
"""
@abc.abstractmethod
async def fetch_channel(self) -> channels.PartialChannel:
"""Perform an API call to fetch the details about this channel.
!!! note
For `ChannelDeleteEvent`-derived events, this will always raise
an exception, since the channel will have already been removed.
Returns
-------
hikari.channels.PartialChannel
A derivative of `hikari.channels.PartialChannel`. The actual
type will vary depending on the type of channel this event
concerns.
"""
@base_events.requires_intents(intents.Intents.GUILDS)
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class GuildChannelEvent(ChannelEvent, abc.ABC):
"""Event base for any channel-bound event in guilds."""
@property
@abc.abstractmethod
def guild_id(self) -> snowflakes.Snowflake:
"""ID of the guild that this event relates to.
Returns
-------
hikari.snowflakes.Snowflake
The ID of the guild that relates to this event.
"""
@property
def guild(self) -> typing.Optional[guilds.GatewayGuild]:
"""Get the cached guild that this event relates to, if known.
If not, return `builtins.None`.
Returns
-------
typing.Optional[hikari.guilds.GatewayGuild]
The gateway guild this event relates to, if known. Otherwise
this will return `builtins.None`.
"""
return self.app.cache.get_available_guild(self.guild_id) or self.app.cache.get_unavailable_guild(self.guild_id)
async def fetch_guild(self) -> guilds.RESTGuild:
"""Perform an API call to fetch the guild that this event relates to.
Returns
-------
hikari.guilds.RESTGuild
The guild that this event occurred in.
"""
return await self.app.rest.fetch_guild(self.guild_id)
@property
def channel(self) -> typing.Optional[channels.GuildChannel]:
"""Get the cached channel that this event relates to, if known.
If not, return `builtins.None`.
Returns
-------
typing.Optional[hikari.channels.GuildChannel]
The cached channel this event relates to. If not known, this
will return `builtins.None` instead.
"""
return self.app.cache.get_guild_channel(self.channel_id)
async def fetch_channel(self) -> channels.GuildChannel:
"""Perform an API call to fetch the details about this channel.
!!! note
For `ChannelDeleteEvent`-derived events, this will always raise
an exception, since the channel will have already been removed.
Returns
-------
hikari.channels.GuildChannel
A derivative of `hikari.channels.GuildChannel`. The actual
type will vary depending on the type of channel this event
concerns.
"""
channel = await self.app.rest.fetch_channel(self.channel_id)
assert isinstance(channel, channels.GuildChannel)
return channel
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class DMChannelEvent(ChannelEvent, abc.ABC):
"""Event base for any channel-bound event in private messages."""
async def fetch_channel(self) -> channels.PrivateChannel:
"""Perform an API call to fetch the details about this channel.
!!! note
For `ChannelDeleteEvent`-derived events, this will always raise
an exception, since the channel will have already been removed.
Returns
-------
hikari.channels.PrivateChannel
A derivative of `hikari.channels.PrivateChannel`. The actual
type will vary depending on the type of channel this event
concerns.
"""
channel = await self.app.rest.fetch_channel(self.channel_id)
assert isinstance(channel, channels.PrivateChannel)
return channel
@base_events.requires_intents(intents.Intents.GUILDS, intents.Intents.DM_MESSAGES)
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class ChannelCreateEvent(ChannelEvent, abc.ABC):
"""Base event for any channel being created."""
@property
@abc.abstractmethod
def channel(self) -> channels.PartialChannel:
"""Channel this event represents.
Returns
-------
hikari.channels.PartialChannel
The channel that was created.
"""
@property
def channel_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from ChannelEvent>>.
return self.channel.id
@base_events.requires_intents(intents.Intents.GUILDS)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class GuildChannelCreateEvent(GuildChannelEvent, ChannelCreateEvent):
"""Event fired when a guild channel is created."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
channel: channels.GuildChannel = attr.ib(repr=True)
"""Guild channel that this event represents.
Returns
-------
hikari.channels.GuildChannel
The guild channel that was created.
"""
@property
def guild_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from GuildChannelEvent>>.
return self.channel.guild_id
@base_events.requires_intents(intents.Intents.GUILDS, intents.Intents.DM_MESSAGES)
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class ChannelUpdateEvent(ChannelEvent, abc.ABC):
"""Base event for any channel being updated."""
@property
@abc.abstractmethod
def channel(self) -> channels.PartialChannel:
"""Channel this event represents.
Returns
-------
hikari.channels.PartialChannel
The channel that was updated.
"""
@property
def channel_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from ChannelEvent>>.
return self.channel.id
@base_events.requires_intents(intents.Intents.GUILDS)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class GuildChannelUpdateEvent(GuildChannelEvent, ChannelUpdateEvent):
"""Event fired when a guild channel is edited."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
old_channel: channels.GuildChannel = attr.ib(repr=True)
"""Old guild channel object from cache.
Returns
-------
hikari.channels.GuildChannel
"""
channel: channels.GuildChannel = attr.ib(repr=True)
"""Guild channel that this event represents.
Returns
-------
hikari.channels.GuildChannel
The guild channel that was updated.
"""
@property
def guild_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from GuildChannelEvent>>.
return self.channel.guild_id
@base_events.requires_intents(intents.Intents.GUILDS, intents.Intents.DM_MESSAGES)
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class ChannelDeleteEvent(ChannelEvent, abc.ABC):
"""Base event for any channel being deleted."""
@property
@abc.abstractmethod
def channel(self) -> channels.PartialChannel:
"""Channel this event represents.
Returns
-------
hikari.channels.PartialChannel
The channel that was deleted.
"""
@property
def channel_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from ChannelEvent>>.
return self.channel.id
if typing.TYPE_CHECKING:
# Channel will never be found.
async def fetch_channel(self) -> typing.NoReturn:
...
@base_events.requires_intents(intents.Intents.GUILDS)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class GuildChannelDeleteEvent(GuildChannelEvent, ChannelDeleteEvent):
"""Event fired when a guild channel is deleted."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
channel: channels.GuildChannel = attr.ib(repr=True)
"""Guild channel that this event represents.
Returns
-------
hikari.channels.GuildChannel
The guild channel that was deleted.
"""
@property
def guild_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from GuildChannelEvent>>.
return self.channel.guild_id
if typing.TYPE_CHECKING:
# Channel will never be found.
async def fetch_channel(self) -> typing.NoReturn:
...
# TODO: find out what private message intents are needed.
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class PinsUpdateEvent(ChannelEvent, abc.ABC):
"""Base event fired when a message is pinned/unpinned in a channel."""
@property
@abc.abstractmethod
def last_pin_timestamp(self) -> typing.Optional[datetime.datetime]:
"""Datetime of when the most recent message was pinned in the channel.
Will be `builtins.None` if nothing is pinned or the information is
unavailable.
Returns
-------
typing.Optional[datetime.datetime]
The datetime of the most recent pinned message in the channel,
or `builtins.None` if no pins are available.
"""
@abc.abstractmethod
async def fetch_channel(self) -> channels.TextChannel:
"""Perform an API call to fetch the details about this channel.
Returns
-------
hikari.channels.TextChannel
A derivative of `hikari.channels.TextChannel`. The actual
type will vary depending on the type of channel this event
concerns.
"""
async def fetch_pins(self) -> typing.Sequence[messages.Message]:
"""Perform an API call to fetch the pinned messages in this channel.
Returns
-------
typing.Sequence[hikari.messages.Message]
The pinned messages in this channel.
"""
return await self.app.rest.fetch_pins(self.channel_id)
@base_events.requires_intents(intents.Intents.GUILDS)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class GuildPinsUpdateEvent(PinsUpdateEvent, GuildChannelEvent):
"""Event fired when a message is pinned/unpinned in a guild channel."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
channel_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from ChannelEvent>>.
guild_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from GuildChannelEvent>>.
last_pin_timestamp: typing.Optional[datetime.datetime] = attr.ib(repr=True)
# <<inherited docstring from ChannelPinsUpdateEvent>>.
@property
def channel(self) -> typing.Optional[channels.GuildTextChannel]:
"""Get the cached channel that this event relates to, if known.
If not, return `builtins.None`.
Returns
-------
typing.Optional[hikari.channels.GuildTextChannel]
The cached channel this event relates to. If not known, this
will return `builtins.None` instead.
"""
channel = self.app.cache.get_guild_channel(self.channel_id)
assert isinstance(channel, channels.GuildTextChannel)
return channel
async def fetch_channel(self) -> channels.GuildTextChannel:
"""Perform an API call to fetch the details about this channel.
Returns
-------
hikari.channels.GuildTextChannel
A derivative of `hikari.channels.GuildTextChannel`. The actual
type will vary depending on the type of channel this event
concerns.
"""
channel = await self.app.rest.fetch_channel(self.channel_id)
assert isinstance(channel, channels.GuildTextChannel)
return channel
# TODO: This is not documented as having an intent, is this right? The guild version requires GUILDS intent.
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class DMPinsUpdateEvent(PinsUpdateEvent, DMChannelEvent):
"""Event fired when a message is pinned/unpinned in a private channel."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
channel_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from ChannelEvent>>.
last_pin_timestamp: typing.Optional[datetime.datetime] = attr.ib(repr=True)
# <<inherited docstring from ChannelPinsUpdateEvent>>.
async def fetch_channel(self) -> channels.DMChannel:
"""Perform an API call to fetch the details about this channel.
Returns
-------
hikari.channels.DMChannel
A derivative of `hikari.channels.DMChannel`. The actual
type will vary depending on the type of channel this event
concerns.
"""
channel = await self.app.rest.fetch_channel(self.channel_id)
assert isinstance(channel, channels.DMChannel)
return channel
@base_events.requires_intents(intents.Intents.GUILD_INVITES)
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class InviteEvent(GuildChannelEvent, abc.ABC):
"""Base event type for guild invite updates."""
@property
@abc.abstractmethod
def code(self) -> str:
"""Code that is used in the URL for the invite.
Returns
-------
builtins.str
The invite code.
"""
async def fetch_invite(self) -> invites.Invite:
"""Perform an API call to retrieve an up-to-date image of this invite.
Returns
-------
hikari.invites.Invite
The invite object.
"""
return await self.app.rest.fetch_invite(self.code)
@base_events.requires_intents(intents.Intents.GUILD_INVITES)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class InviteCreateEvent(InviteEvent):
"""Event fired when an invite is created in a channel."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
invite: invites.InviteWithMetadata = attr.ib()
"""Invite that was created.
Returns
-------
hikari.invites.InviteWithMetadata
The created invite object.
"""
@property
def channel_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from ChannelEvent>>.
return self.invite.channel_id
@property
def guild_id(self) -> snowflakes.Snowflake:
# <<inherited docstring from GuildChannelEvent>>.
# This will always be non-None for guild channel invites.
assert self.invite.guild_id is not None
return self.invite.guild_id
@property
def code(self) -> str:
# <<inherited docstring from InviteEvent>>.
return self.invite.code
@base_events.requires_intents(intents.Intents.GUILD_INVITES)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class InviteDeleteEvent(InviteEvent):
"""Event fired when an invite is deleted from a channel."""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
channel_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from ChannelEvent>>.
guild_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from GuildChannelEvent>>.
code: str = attr.ib()
# <<inherited docstring from InviteEvent>>.
if typing.TYPE_CHECKING:
# Invite will never be found.
async def fetch_invite(self) -> typing.NoReturn:
...
@base_events.requires_intents(intents.Intents.GUILD_WEBHOOKS)
@attr_extensions.with_copy
@attr.s(kw_only=True, slots=True, weakref_slot=False)
class WebhookUpdateEvent(GuildChannelEvent):
"""Event fired when a webhook is created/updated/deleted in a channel.
Unfortunately, Discord does not provide any information on what webhook
actually changed, nor specifically whether it was created/updated/deleted,
so this event is pretty useless unless you keep track of the webhooks in
the channel manually beforehand.
"""
app: traits.RESTAware = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from Event>>.
shard: gateway_shard.GatewayShard = attr.ib(metadata={attr_extensions.SKIP_DEEP_COPY: True})
# <<inherited docstring from ShardEvent>>.
channel_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from ChannelEvent>>.
guild_id: snowflakes.Snowflake = attr.ib()
# <<inherited docstring from GuildChannelEvent>>.
async def fetch_channel_webhooks(self) -> typing.Sequence[webhooks.Webhook]:
"""Perform an API call to fetch the webhooks for this channel.
Returns
-------
typing.Sequence[hikari.webhooks.Webhook]
The webhooks in this channel.
"""
return await self.app.rest.fetch_channel_webhooks(self.channel_id)
async def fetch_guild_webhooks(self) -> typing.Sequence[webhooks.Webhook]:
"""Perform an API call to fetch the webhooks for this guild.
Returns
-------
typing.Sequence[hikari.webhooks.Webhook]
The webhooks in this guild.
"""
return await self.app.rest.fetch_guild_webhooks(self.guild_id) | en | 0.832842 | # -*- coding: utf-8 -*- # cython: language_level=3 # Copyright (c) 2020 Nekokatt # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. Events that fire when channels are modified. This does not include message events, nor reaction events. Event base for any channel-bound event in guilds or private messages. ID of the channel the event relates to. Returns ------- hikari.snowflakes.Snowflake The ID of the channel this event relates to. Perform an API call to fetch the details about this channel. !!! note For `ChannelDeleteEvent`-derived events, this will always raise an exception, since the channel will have already been removed. Returns ------- hikari.channels.PartialChannel A derivative of `hikari.channels.PartialChannel`. The actual type will vary depending on the type of channel this event concerns. Event base for any channel-bound event in guilds. ID of the guild that this event relates to. Returns ------- hikari.snowflakes.Snowflake The ID of the guild that relates to this event. Get the cached guild that this event relates to, if known. If not, return `builtins.None`. Returns ------- typing.Optional[hikari.guilds.GatewayGuild] The gateway guild this event relates to, if known. Otherwise this will return `builtins.None`. Perform an API call to fetch the guild that this event relates to. Returns ------- hikari.guilds.RESTGuild The guild that this event occurred in. Get the cached channel that this event relates to, if known. If not, return `builtins.None`. Returns ------- typing.Optional[hikari.channels.GuildChannel] The cached channel this event relates to. If not known, this will return `builtins.None` instead. Perform an API call to fetch the details about this channel. !!! note For `ChannelDeleteEvent`-derived events, this will always raise an exception, since the channel will have already been removed. Returns ------- hikari.channels.GuildChannel A derivative of `hikari.channels.GuildChannel`. The actual type will vary depending on the type of channel this event concerns. Event base for any channel-bound event in private messages. Perform an API call to fetch the details about this channel. !!! note For `ChannelDeleteEvent`-derived events, this will always raise an exception, since the channel will have already been removed. Returns ------- hikari.channels.PrivateChannel A derivative of `hikari.channels.PrivateChannel`. The actual type will vary depending on the type of channel this event concerns. 
Base event for any channel being created. Channel this event represents. Returns ------- hikari.channels.PartialChannel The channel that was created. # <<inherited docstring from ChannelEvent>>. Event fired when a guild channel is created. # <<inherited docstring from Event>>. # <<inherited docstring from ShardEvent>>. Guild channel that this event represents. Returns ------- hikari.channels.GuildChannel The guild channel that was created. # <<inherited docstring from GuildChannelEvent>>. Base event for any channel being updated. Channel this event represents. Returns ------- hikari.channels.PartialChannel The channel that was updated. # <<inherited docstring from ChannelEvent>>. Event fired when a guild channel is edited. # <<inherited docstring from Event>>. # <<inherited docstring from ShardEvent>>. Old guild channel object from cache. Returns ------- hikari.channels.GuildChannel Guild channel that this event represents. Returns ------- hikari.channels.GuildChannel The guild channel that was updated. # <<inherited docstring from GuildChannelEvent>>. Base event for any channel being deleted. Channel this event represents. Returns ------- hikari.channels.PartialChannel The channel that was deleted. # <<inherited docstring from ChannelEvent>>. # Channel will never be found. Event fired when a guild channel is deleted. # <<inherited docstring from Event>>. # <<inherited docstring from ShardEvent>>. Guild channel that this event represents. Returns ------- hikari.channels.GuildChannel The guild channel that was deleted. # <<inherited docstring from GuildChannelEvent>>. # Channel will never be found. # TODO: find out what private message intents are needed. Base event fired when a message is pinned/unpinned in a channel. Datetime of when the most recent message was pinned in the channel. Will be `builtins.None` if nothing is pinned or the information is unavailable. Returns ------- typing.Optional[datetime.datetime] The datetime of the most recent pinned message in the channel, or `builtins.None` if no pins are available. Perform an API call to fetch the details about this channel. Returns ------- hikari.channels.TextChannel A derivative of `hikari.channels.TextChannel`. The actual type will vary depending on the type of channel this event concerns. Perform an API call to fetch the pinned messages in this channel. Returns ------- typing.Sequence[hikari.messages.Message] The pinned messages in this channel. Event fired when a message is pinned/unpinned in a guild channel. # <<inherited docstring from Event>>. # <<inherited docstring from ShardEvent>>. # <<inherited docstring from ChannelEvent>>. # <<inherited docstring from GuildChannelEvent>>. # <<inherited docstring from ChannelPinsUpdateEvent>>. Get the cached channel that this event relates to, if known. If not, return `builtins.None`. Returns ------- typing.Optional[hikari.channels.GuildTextChannel] The cached channel this event relates to. If not known, this will return `builtins.None` instead. Perform an API call to fetch the details about this channel. Returns ------- hikari.channels.GuildTextChannel A derivative of `hikari.channels.GuildTextChannel`. The actual type will vary depending on the type of channel this event concerns. # TODO: This is not documented as having an intent, is this right? The guild version requires GUILDS intent. Event fired when a message is pinned/unpinned in a private channel. # <<inherited docstring from Event>>. # <<inherited docstring from ShardEvent>>. # <<inherited docstring from ChannelEvent>>. 
# <<inherited docstring from ChannelPinsUpdateEvent>>. Perform an API call to fetch the details about this channel. Returns ------- hikari.channels.DMChannel A derivative of `hikari.channels.DMChannel`. The actual type will vary depending on the type of channel this event concerns. Base event type for guild invite updates. Code that is used in the URL for the invite. Returns ------- builtins.str The invite code. Perform an API call to retrieve an up-to-date image of this invite. Returns ------- hikari.invites.Invite The invite object. Event fired when an invite is created in a channel. # <<inherited docstring from Event>>. # <<inherited docstring from ShardEvent>>. Invite that was created. Returns ------- hikari.invites.InviteWithMetadata The created invite object. # <<inherited docstring from ChannelEvent>>. # <<inherited docstring from GuildChannelEvent>>. # This will always be non-None for guild channel invites. # <<inherited docstring from InviteEvent>>. Event fired when an invite is deleted from a channel. # <<inherited docstring from Event>>. # <<inherited docstring from ShardEvent>>. # <<inherited docstring from ChannelEvent>>. # <<inherited docstring from GuildChannelEvent>>. # <<inherited docstring from InviteEvent>>. # Invite will never be found. Event fired when a webhook is created/updated/deleted in a channel. Unfortunately, Discord does not provide any information on what webhook actually changed, nor specifically whether it was created/updated/deleted, so this event is pretty useless unless you keep track of the webhooks in the channel manually beforehand. # <<inherited docstring from Event>>. # <<inherited docstring from ShardEvent>>. # <<inherited docstring from ChannelEvent>>. # <<inherited docstring from GuildChannelEvent>>. Perform an API call to fetch the webhooks for this channel. Returns ------- typing.Sequence[hikari.webhooks.Webhook] The webhooks in this channel. Perform an API call to fetch the webhooks for this guild. Returns ------- typing.Sequence[hikari.webhooks.Webhook] The webhooks in this guild. | 1.439952 | 1 |
tests/unit/test_coordinator.py | sopel39/presto-admin | 34 | 10721 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests the coordinator module
"""
from fabric.api import env
from mock import patch
from prestoadmin import coordinator
from prestoadmin.util.exception import ConfigurationError
from tests.base_test_case import BaseTestCase
class TestCoordinator(BaseTestCase):
def test_build_all_defaults(self):
env.roledefs['coordinator'] = 'a'
env.roledefs['workers'] = ['b', 'c']
actual_default = coordinator.Coordinator().build_all_defaults()
expected = {'node.properties':
{'node.environment': 'presto',
'node.data-dir': '/var/lib/presto/data',
'node.launcher-log-file': '/var/log/presto/launcher.log',
'node.server-log-file': '/var/log/presto/server.log',
'catalog.config-dir': '/etc/presto/catalog',
'plugin.dir': '/usr/lib/presto/lib/plugin'},
'jvm.config': ['-server',
'-Xmx16G',
'-XX:-UseBiasedLocking',
'-XX:+UseG1GC',
'-XX:G1HeapRegionSize=32M',
'-XX:+ExplicitGCInvokesConcurrent',
'-XX:+HeapDumpOnOutOfMemoryError',
'-XX:+UseGCOverheadLimit',
'-XX:+ExitOnOutOfMemoryError',
'-XX:ReservedCodeCacheSize=512M',
'-DHADOOP_USER_NAME=hive'],
'config.properties': {
'coordinator': 'true',
'discovery-server.enabled': 'true',
'discovery.uri': 'http://a:8080',
'http-server.http.port': '8080',
'node-scheduler.include-coordinator': 'false',
'query.max-memory': '50GB',
'query.max-memory-per-node': '8GB'}
}
self.assertEqual(actual_default, expected)
def test_defaults_coord_is_worker(self):
env.roledefs['coordinator'] = ['a']
env.roledefs['worker'] = ['a', 'b', 'c']
actual_default = coordinator.Coordinator().build_all_defaults()
expected = {'node.properties': {
'node.environment': 'presto',
'node.data-dir': '/var/lib/presto/data',
'node.launcher-log-file': '/var/log/presto/launcher.log',
'node.server-log-file': '/var/log/presto/server.log',
'catalog.config-dir': '/etc/presto/catalog',
'plugin.dir': '/usr/lib/presto/lib/plugin'},
'jvm.config': ['-server',
'-Xmx16G',
'-XX:-UseBiasedLocking',
'-XX:+UseG1GC',
'-XX:G1HeapRegionSize=32M',
'-XX:+ExplicitGCInvokesConcurrent',
'-XX:+HeapDumpOnOutOfMemoryError',
'-XX:+UseGCOverheadLimit',
'-XX:+ExitOnOutOfMemoryError',
'-XX:ReservedCodeCacheSize=512M',
'-DHADOOP_USER_NAME=hive'],
'config.properties': {
'coordinator': 'true',
'discovery-server.enabled': 'true',
'discovery.uri': 'http://a:8080',
'http-server.http.port': '8080',
'node-scheduler.include-coordinator': 'true',
'query.max-memory': '50GB',
'query.max-memory-per-node': '8GB'}
}
self.assertEqual(actual_default, expected)
def test_validate_valid(self):
conf = {'node.properties': {},
'jvm.config': [],
'config.properties': {'coordinator': 'true',
'discovery.uri': 'http://uri'}}
self.assertEqual(conf, coordinator.Coordinator.validate(conf))
def test_validate_default(self):
env.roledefs['coordinator'] = 'localhost'
env.roledefs['workers'] = ['localhost']
conf = coordinator.Coordinator().build_all_defaults()
self.assertEqual(conf, coordinator.Coordinator.validate(conf))
def test_invalid_conf(self):
conf = {'node.propoerties': {}}
self.assertRaisesRegexp(ConfigurationError,
'Missing configuration for required file: ',
coordinator.Coordinator.validate, conf)
def test_invalid_conf_missing_coordinator(self):
conf = {'node.properties': {},
'jvm.config': [],
'config.properties': {'discovery.uri': 'http://uri'}
}
self.assertRaisesRegexp(ConfigurationError,
'Must specify coordinator=true in '
'coordinator\'s config.properties',
coordinator.Coordinator.validate, conf)
def test_invalid_conf_coordinator(self):
conf = {'node.properties': {},
'jvm.config': [],
'config.properties': {'coordinator': 'false',
'discovery.uri': 'http://uri'}
}
self.assertRaisesRegexp(ConfigurationError,
'Coordinator cannot be false in the '
'coordinator\'s config.properties',
coordinator.Coordinator.validate, conf)
@patch('prestoadmin.node.config.write_conf_to_file')
@patch('prestoadmin.node.get_presto_conf')
def test_get_conf_empty_is_default(self, get_conf_from_file_mock,
write_mock):
env.roledefs['coordinator'] = 'j'
env.roledefs['workers'] = ['K', 'L']
get_conf_from_file_mock.return_value = {}
self.assertEqual(coordinator.Coordinator().get_conf(),
coordinator.Coordinator().build_all_defaults())
@patch('prestoadmin.node.config.write_conf_to_file')
@patch('prestoadmin.node.get_presto_conf')
def test_get_conf(self, get_conf_from_file_mock, write_mock):
env.roledefs['coordinator'] = 'j'
env.roledefs['workers'] = ['K', 'L']
file_conf = {'node.properties': {'my-property': 'value',
'node.environment': 'test'}}
get_conf_from_file_mock.return_value = file_conf
expected = {'node.properties':
{'my-property': 'value',
'node.environment': 'test'},
'jvm.config': ['-server',
'-Xmx16G',
'-XX:-UseBiasedLocking',
'-XX:+UseG1GC',
'-XX:G1HeapRegionSize=32M',
'-XX:+ExplicitGCInvokesConcurrent',
'-XX:+HeapDumpOnOutOfMemoryError',
'-XX:+UseGCOverheadLimit',
'-XX:+ExitOnOutOfMemoryError',
'-XX:ReservedCodeCacheSize=512M',
'-DHADOOP_USER_NAME=hive'],
'config.properties': {
'coordinator': 'true',
'discovery-server.enabled': 'true',
'discovery.uri': 'http://j:8080',
'http-server.http.port': '8080',
'node-scheduler.include-coordinator': 'false',
'query.max-memory': '50GB',
'query.max-memory-per-node': '8GB'}
}
self.assertEqual(coordinator.Coordinator().get_conf(), expected)
| # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests the coordinator module
"""
from fabric.api import env
from mock import patch
from prestoadmin import coordinator
from prestoadmin.util.exception import ConfigurationError
from tests.base_test_case import BaseTestCase
class TestCoordinator(BaseTestCase):
def test_build_all_defaults(self):
env.roledefs['coordinator'] = 'a'
env.roledefs['workers'] = ['b', 'c']
actual_default = coordinator.Coordinator().build_all_defaults()
expected = {'node.properties':
{'node.environment': 'presto',
'node.data-dir': '/var/lib/presto/data',
'node.launcher-log-file': '/var/log/presto/launcher.log',
'node.server-log-file': '/var/log/presto/server.log',
'catalog.config-dir': '/etc/presto/catalog',
'plugin.dir': '/usr/lib/presto/lib/plugin'},
'jvm.config': ['-server',
'-Xmx16G',
'-XX:-UseBiasedLocking',
'-XX:+UseG1GC',
'-XX:G1HeapRegionSize=32M',
'-XX:+ExplicitGCInvokesConcurrent',
'-XX:+HeapDumpOnOutOfMemoryError',
'-XX:+UseGCOverheadLimit',
'-XX:+ExitOnOutOfMemoryError',
'-XX:ReservedCodeCacheSize=512M',
'-DHADOOP_USER_NAME=hive'],
'config.properties': {
'coordinator': 'true',
'discovery-server.enabled': 'true',
'discovery.uri': 'http://a:8080',
'http-server.http.port': '8080',
'node-scheduler.include-coordinator': 'false',
'query.max-memory': '50GB',
'query.max-memory-per-node': '8GB'}
}
self.assertEqual(actual_default, expected)
def test_defaults_coord_is_worker(self):
env.roledefs['coordinator'] = ['a']
env.roledefs['worker'] = ['a', 'b', 'c']
actual_default = coordinator.Coordinator().build_all_defaults()
expected = {'node.properties': {
'node.environment': 'presto',
'node.data-dir': '/var/lib/presto/data',
'node.launcher-log-file': '/var/log/presto/launcher.log',
'node.server-log-file': '/var/log/presto/server.log',
'catalog.config-dir': '/etc/presto/catalog',
'plugin.dir': '/usr/lib/presto/lib/plugin'},
'jvm.config': ['-server',
'-Xmx16G',
'-XX:-UseBiasedLocking',
'-XX:+UseG1GC',
'-XX:G1HeapRegionSize=32M',
'-XX:+ExplicitGCInvokesConcurrent',
'-XX:+HeapDumpOnOutOfMemoryError',
'-XX:+UseGCOverheadLimit',
'-XX:+ExitOnOutOfMemoryError',
'-XX:ReservedCodeCacheSize=512M',
'-DHADOOP_USER_NAME=hive'],
'config.properties': {
'coordinator': 'true',
'discovery-server.enabled': 'true',
'discovery.uri': 'http://a:8080',
'http-server.http.port': '8080',
'node-scheduler.include-coordinator': 'true',
'query.max-memory': '50GB',
'query.max-memory-per-node': '8GB'}
}
self.assertEqual(actual_default, expected)
def test_validate_valid(self):
conf = {'node.properties': {},
'jvm.config': [],
'config.properties': {'coordinator': 'true',
'discovery.uri': 'http://uri'}}
self.assertEqual(conf, coordinator.Coordinator.validate(conf))
def test_validate_default(self):
env.roledefs['coordinator'] = 'localhost'
env.roledefs['workers'] = ['localhost']
conf = coordinator.Coordinator().build_all_defaults()
self.assertEqual(conf, coordinator.Coordinator.validate(conf))
def test_invalid_conf(self):
conf = {'node.propoerties': {}}
self.assertRaisesRegexp(ConfigurationError,
'Missing configuration for required file: ',
coordinator.Coordinator.validate, conf)
def test_invalid_conf_missing_coordinator(self):
conf = {'node.properties': {},
'jvm.config': [],
'config.properties': {'discovery.uri': 'http://uri'}
}
self.assertRaisesRegexp(ConfigurationError,
'Must specify coordinator=true in '
'coordinator\'s config.properties',
coordinator.Coordinator.validate, conf)
def test_invalid_conf_coordinator(self):
conf = {'node.properties': {},
'jvm.config': [],
'config.properties': {'coordinator': 'false',
'discovery.uri': 'http://uri'}
}
self.assertRaisesRegexp(ConfigurationError,
'Coordinator cannot be false in the '
'coordinator\'s config.properties',
coordinator.Coordinator.validate, conf)
@patch('prestoadmin.node.config.write_conf_to_file')
@patch('prestoadmin.node.get_presto_conf')
def test_get_conf_empty_is_default(self, get_conf_from_file_mock,
write_mock):
env.roledefs['coordinator'] = 'j'
env.roledefs['workers'] = ['K', 'L']
get_conf_from_file_mock.return_value = {}
self.assertEqual(coordinator.Coordinator().get_conf(),
coordinator.Coordinator().build_all_defaults())
@patch('prestoadmin.node.config.write_conf_to_file')
@patch('prestoadmin.node.get_presto_conf')
def test_get_conf(self, get_conf_from_file_mock, write_mock):
env.roledefs['coordinator'] = 'j'
env.roledefs['workers'] = ['K', 'L']
file_conf = {'node.properties': {'my-property': 'value',
'node.environment': 'test'}}
get_conf_from_file_mock.return_value = file_conf
expected = {'node.properties':
{'my-property': 'value',
'node.environment': 'test'},
'jvm.config': ['-server',
'-Xmx16G',
'-XX:-UseBiasedLocking',
'-XX:+UseG1GC',
'-XX:G1HeapRegionSize=32M',
'-XX:+ExplicitGCInvokesConcurrent',
'-XX:+HeapDumpOnOutOfMemoryError',
'-XX:+UseGCOverheadLimit',
'-XX:+ExitOnOutOfMemoryError',
'-XX:ReservedCodeCacheSize=512M',
'-DHADOOP_USER_NAME=hive'],
'config.properties': {
'coordinator': 'true',
'discovery-server.enabled': 'true',
'discovery.uri': 'http://j:8080',
'http-server.http.port': '8080',
'node-scheduler.include-coordinator': 'false',
'query.max-memory': '50GB',
'query.max-memory-per-node': '8GB'}
}
self.assertEqual(coordinator.Coordinator().get_conf(), expected)
| en | 0.840628 | # -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests the coordinator module | 1.520716 | 2 |
other/string chains/strings4.py | saulc/myth-math | 0 | 10722 | # <NAME>
# <NAME>
# config file format
import random
def openFile():
file = open("test.txt", 'r')
return file
def printFile(f):
print(f.read())
def readInput():
testout = "CarpenteRatcheThread"
file = open("test.txt", 'r')
s = str(file.read())
words = s.split(" ");
# print("Expected Output: " + testout)
for w in words:
if w == '\n': words.remove(w)
print(words)
testWords(words, False)
# makeChains(words)
def reduceRepeats(inputstr, showChecks=False):
print(" testing repeat filter start.___ ")
ret = []
# words, i = showlist(inputstr, False)
w = inputstr.split(" ");
print(w)
i = 0
for s in w:
i +=1
if showChecks: print(i, ": ", s)
        if s not in ret: ret.append(s)
print(i, " elements checked, ", len(ret) , " unique words found. ? ")
# showlist(ret, False)
for w in ret:
print(" . . ", w)
print(" testing repeat filter end.___ ")
return ret
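# Illustrative call (hypothetical input): reduceRepeats("a b a c b") walks the
# split words in order and returns ['a', 'b', 'c'] -- first occurrences only,
# original order preserved.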
def testWords(words, showChecks=False):
if showChecks: print(" testing chatbot ")
for w in words:
print( w )
sen = []
for i in range(0, len(words) ):
        sen.append( words[i])
        for j in range(0, len(words) ):
            sen.append( words[j]+" "+ words[i])
            if j % 3 == 0: sen.append(" ")
# showlist(w, True)
# print(i, " : ", words[i])
# print(" ", words[j], words[i])
if showChecks:
print(sen ) #raw data
st = listtoString(sen, " ") #data only
print(st) #show it
ft = reduceRepeats(sen[0], False)
print(ft)
if showChecks:
print(len(words), " words found")
print(len(sen), " phrases found")
# list to string with sep 'char'
def listtoString( lst, sep):
s = ""
for c in lst: s += str(c) + sep
return s
# string to list, count
def showlist( li, showChecks=False):
w = li.split(" ");
print(w)
i = 0
for s in li:
i +=1
if showChecks: print(i, ": ", s)
return w, i
def makeChains(words, showChecks=False):
chain = []
wordcheck = []
for i in range(0, len(words) ):
# print(i )
se = [False, False] #used as start word, end word
wordcheck.append(se)
for i in range(0, len(words) ):
if showChecks: print(str(i) + " - " + words[i] )
aword = words[i]
for j in range(i+1, len(words) ):
#check the rest of the words for matching links
bword = words[j]
if showChecks: print(" " + str(j) + " ~ " + bword )
if wordcheck[j][0] == False and wordcheck[j][1] == False:
temp = checkLinks(aword, bword)
if showChecks: print("Check state: " + str(temp) )
if temp == 1: #word have not been swapped
wordcheck[i][0] = True
wordcheck[j][1] = True
chain.append(aword)
chain.append(bword)
elif temp == 2: #words have been swapped, swap flag indexes to match.
wordcheck[j][0] = True
wordcheck[i][1] = True
chain.append(bword)
chain.append(aword)
print(chain)
# k = 0
# for i in wordcheck:
# print("word check: " + str(i) + " = "+ words[k] )
#
# k+= 1
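# Illustrative call (hypothetical words): makeChains(["sun", "nap", "pit"])
# links "sun"->"nap" (shared 'n') and "nap"->"pit" (shared 'p'), so it prints
# the chain ['sun', 'nap', 'nap', 'pit'].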
# compare words, return 0 for no match,
# 1 if end of a == start of b
# 2 if end of b == start of a
def checkLinks(a, b):
print(" " + a + " " + b)
s , e = getEnds(a)
ss , ee = getEnds(b)
if e == ss :
return 1
elif s == ee:
return 2
return 0
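# Illustrative results (hypothetical words):
#   checkLinks("sun", "nap") -> 1   (last letter of "sun" == first letter of "nap")
#   checkLinks("nap", "sun") -> 2   (the match runs the other way around)
#   checkLinks("cat", "dog") -> 0   (no shared end/start letter)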
# st = "start: " + s + " end:" + e
# print("end" + st)
def getEnds(word):
st = word[0]
ed = word[len(word)-1]
return st, ed
if __name__ == '__main__':
readInput()
| # <NAME>
# <NAME>
# config file format
import random
def openFile():
file = open("test.txt", 'r')
return file
def printFile(f):
print(f.read())
def readInput():
testout = "CarpenteRatcheThread"
file = open("test.txt", 'r')
s = str(file.read())
words = s.split(" ");
# print("Expected Output: " + testout)
for w in words:
if w == '\n': words.remove(w)
print(words)
testWords(words, False)
# makeChains(words)
def reduceRepeats(inputstr, showChecks=False):
print(" testing repeat filter start.___ ")
ret = []
# words, i = showlist(inputstr, False)
w = inputstr.split(" ");
print(w)
i = 0
for s in w:
i +=1
if showChecks: print(i, ": ", s)
if s not in ret: ret. append(s)
print(i, " elements checked, ", len(ret) , " unique words found. ? ")
# showlist(ret, False)
for w in ret:
print(" . . ", w)
print(" testing repeat filter end.___ ")
return ret
def testWords(words, showChecks=False):
if showChecks: print(" testing chatbot ")
for w in words:
print( w )
sen = []
for i in range(0, len(words) ):
sen. append( words[i])
for j in range(0, len(words) ):
sen. append( words[j]+" "+ words[i])
if j % 3 == 0: sen. append(" ")
# showlist(w, True)
# print(i, " : ", words[i])
# print(" ", words[j], words[i])
if showChecks:
print(sen ) #raw data
st = listtoString(sen, " ") #data only
print(st) #show it
ft = reduceRepeats(sen[0], False)
print(ft)
if showChecks:
print(len(words), " words found")
print(len(sen), " phrases found")
# list to string with sep 'char'
def listtoString( lst, sep):
s = ""
for c in lst: s += str(c) + sep
return s
# string to list, count
def showlist( li, showChecks=False):
w = li.split(" ");
print(w)
i = 0
for s in li:
i +=1
if showChecks: print(i, ": ", s)
return w, i
def makeChains(words, showChecks=False):
chain = []
wordcheck = []
for i in range(0, len(words) ):
# print(i )
se = [False, False] #used as start word, end word
wordcheck.append(se)
for i in range(0, len(words) ):
if showChecks: print(str(i) + " - " + words[i] )
aword = words[i]
for j in range(i+1, len(words) ):
#check the rest of the words for matching links
bword = words[j]
if showChecks: print(" " + str(j) + " ~ " + bword )
if wordcheck[j][0] == False and wordcheck[j][1] == False:
temp = checkLinks(aword, bword)
if showChecks: print("Check state: " + str(temp) )
if temp == 1: #word have not been swapped
wordcheck[i][0] = True
wordcheck[j][1] = True
chain.append(aword)
chain.append(bword)
elif temp == 2: #words have been swapped, swap flag indexes to match.
wordcheck[j][0] = True
wordcheck[i][1] = True
chain.append(bword)
chain.append(aword)
print(chain)
# k = 0
# for i in wordcheck:
# print("word check: " + str(i) + " = "+ words[k] )
#
# k+= 1
# compare words, return 0 for no match,
# 1 if end of a == start of b
# 2 if end of b == start of a
def checkLinks(a, b):
print(" " + a + " " + b)
s , e = getEnds(a)
ss , ee = getEnds(b)
if e == ss :
return 1
elif s == ee:
return 2
return 0
# st = "start: " + s + " end:" + e
# print("end" + st)
def getEnds(word):
st = word[0]
ed = word[len(word)-1]
return st, ed
if __name__ == '__main__':
readInput()
| en | 0.721229 | # <NAME> # <NAME> # config file format # print("Expected Output: " + testout) # makeChains(words) # words, i = showlist(inputstr, False) # showlist(ret, False) # showlist(w, True) # print(i, " : ", words[i]) # print(" ", words[j], words[i]) #raw data #data only #show it # list to string with sep 'char' # string to list, count # print(i ) #used as start word, end word #check the rest of the words for matching links #word have not been swapped #words have been swapped, swap flag indexes to match. # k = 0 # for i in wordcheck: # print("word check: " + str(i) + " = "+ words[k] ) # # k+= 1 # compare words, return 0 for no match, # 1 if end of a == start of b # 2 if end of b == start of a # st = "start: " + s + " end:" + e # print("end" + st) | 3.524371 | 4 |
supervised_learning/analysis.py | gonzalezJohnas/SpeechCommand-recognition | 0 | 10723 | from global_utils import *
# target word
TARGET_WORD = 'right'
def display_lowpass_normal(wav, lowpass_signal, fs, label=''):
fig, (axs_raw, axs_low) = plt.subplots(2)
fig.tight_layout(pad=3.0)
fig.set_figheight(FIG_HEIGHT)
fig.set_figwidth(FIG_WIDTH)
# display the plot
axs_raw.plot(wav)
# label the axes
axs_raw.set_ylabel("Amplitude", fontsize=FONT_SIZE)
axs_raw.set_xlabel("Time", fontsize=FONT_SIZE)
# set the title
axs_raw.set_title("Audio sample : {}".format(label), fontsize=FONT_SIZE)
axs_low.plot(lowpass_signal)
# label the axes
axs_low.set_ylabel("Amplitude", fontsize=FONT_SIZE)
axs_low.set_xlabel("Time", fontsize=FONT_SIZE)
# set the title
axs_low.set_title("Audio sample with low pass filter", fontsize=FONT_SIZE)
f_raw, periodogram_raw = signal.periodogram(wav, fs)
f_raw, periodogram_low = signal.periodogram(lowpass_signal, fs)
fig, (axs_periodogram_raw, axs_periodogram_low) = plt.subplots(2)
fig.tight_layout(pad=3.0)
fig.set_figheight(FIG_HEIGHT)
fig.set_figwidth(FIG_WIDTH)
axs_periodogram_raw.semilogy(f_raw, periodogram_raw)
axs_periodogram_raw.set_xlabel('frequency [Hz]', fontsize=FONT_SIZE)
axs_periodogram_raw.set_ylabel('PSD [V**2/Hz]', fontsize=FONT_SIZE)
axs_periodogram_raw.set_title("Periodogram raw signal", fontsize=FONT_SIZE)
axs_periodogram_low.semilogy(f_raw, periodogram_low)
axs_periodogram_low.set_xlabel('frequency [Hz]', fontsize=FONT_SIZE)
axs_periodogram_low.set_ylabel('PSD [V**2/Hz]', fontsize=FONT_SIZE)
axs_periodogram_low.set_title("Periodogram low pass filtered signal", fontsize=FONT_SIZE)
def main(args):
if args.wavfile:
        fs, wav = wavfile.read(args.wavfile)  # "wb" removed: wavfile.read takes no file-mode argument
lowpass_signal = low_pass_filter(wav, sample_rate=fs, cutoff_frequency=1000)
display_lowpass_normal(wav, lowpass_signal, fs)
plt.show()
elif args.indir:
data_dict = get_data(args.indir)
word_samples = data_dict[TARGET_WORD]
mean_lowpass_array, normal_array = mean_low_pass_filter(word_samples, SAMPLE_RATE, CUTOFF_FREQ)
display_lowpass_normal(normal_array, mean_lowpass_array, SAMPLE_RATE, TARGET_WORD)
plt.show()
return 0
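# Example invocations (paths are hypothetical):
#   python analysis.py --wavfile sample.wav
#   python analysis.py --indir /path/to/speech_commands/right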
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--wavfile',
help='Path to the .wav files',
required=False
)
parser.add_argument(
'--indir',
help='Absolute path to data directory containing .wav files',
required=False
)
args = parser.parse_args()
main(args)
| from global_utils import *
# target word
TARGET_WORD = 'right'
def display_lowpass_normal(wav, lowpass_signal, fs, label=''):
fig, (axs_raw, axs_low) = plt.subplots(2)
fig.tight_layout(pad=3.0)
fig.set_figheight(FIG_HEIGHT)
fig.set_figwidth(FIG_WIDTH)
# display the plot
axs_raw.plot(wav)
# label the axes
axs_raw.set_ylabel("Amplitude", fontsize=FONT_SIZE)
axs_raw.set_xlabel("Time", fontsize=FONT_SIZE)
# set the title
axs_raw.set_title("Audio sample : {}".format(label), fontsize=FONT_SIZE)
axs_low.plot(lowpass_signal)
# label the axes
axs_low.set_ylabel("Amplitude", fontsize=FONT_SIZE)
axs_low.set_xlabel("Time", fontsize=FONT_SIZE)
# set the title
axs_low.set_title("Audio sample with low pass filter", fontsize=FONT_SIZE)
f_raw, periodogram_raw = signal.periodogram(wav, fs)
f_raw, periodogram_low = signal.periodogram(lowpass_signal, fs)
fig, (axs_periodogram_raw, axs_periodogram_low) = plt.subplots(2)
fig.tight_layout(pad=3.0)
fig.set_figheight(FIG_HEIGHT)
fig.set_figwidth(FIG_WIDTH)
axs_periodogram_raw.semilogy(f_raw, periodogram_raw)
axs_periodogram_raw.set_xlabel('frequency [Hz]', fontsize=FONT_SIZE)
axs_periodogram_raw.set_ylabel('PSD [V**2/Hz]', fontsize=FONT_SIZE)
axs_periodogram_raw.set_title("Periodogram raw signal", fontsize=FONT_SIZE)
axs_periodogram_low.semilogy(f_raw, periodogram_low)
axs_periodogram_low.set_xlabel('frequency [Hz]', fontsize=FONT_SIZE)
axs_periodogram_low.set_ylabel('PSD [V**2/Hz]', fontsize=FONT_SIZE)
axs_periodogram_low.set_title("Periodogram low pass filtered signal", fontsize=FONT_SIZE)
def main(args):
if args.wavfile:
        fs, wav = wavfile.read(args.wavfile)  # "wb" removed: wavfile.read takes no file-mode argument
lowpass_signal = low_pass_filter(wav, sample_rate=fs, cutoff_frequency=1000)
display_lowpass_normal(wav, lowpass_signal, fs)
plt.show()
elif args.indir:
data_dict = get_data(args.indir)
word_samples = data_dict[TARGET_WORD]
mean_lowpass_array, normal_array = mean_low_pass_filter(word_samples, SAMPLE_RATE, CUTOFF_FREQ)
display_lowpass_normal(normal_array, mean_lowpass_array, SAMPLE_RATE, TARGET_WORD)
plt.show()
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--wavfile',
help='Path to the .wav files',
required=False
)
parser.add_argument(
'--indir',
help='Absolute path to data directory containing .wav files',
required=False
)
args = parser.parse_args()
main(args)
| en | 0.501985 | # target word # display the plot # label the axes # set the title # label the axes # set the title | 2.603808 | 3 |
qnarre/base/proof.py | quantapix/qnarre.com | 0 | 10724 | # Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from .claim import Claim
from .narrative import Node
from .author import Authority
class Proof(Node):
sign = '!p'
authority = None
def __init__(self,
text=None,
author=None,
agent=None,
authority=None,
factor=2,
**kw):
super().__init__(factor=factor, **kw)
if text:
for k in ('factor', 'bias', 'weight'):
kw.pop(k, None)
self.claim = Claim(text=text, **kw)
if not authority:
if agent:
authority = 'agent'
elif author:
authority = 'self'
if authority:
self.authority = Authority.create(name=authority)
@property
def weight(self):
p = self.partial(self.authority.weight, self.claim.weight)
return p + self.bias
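    # weight blends the authority's weight with the claim's weight through the
    # inherited partial() combinator (presumably provided by Node) and then
    # shifts the result by this node's bias.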
@property
def credibility(self):
return self.weight
@property
def value(self):
a = self.authority.agency
return '{} {}: {}'.format(super().value, a, self.claim.value)
@property
def fields(self):
fs = self.authority.fields
fs.update(self.claim.fields)
fs.update(super().fields)
fs['Credibility'] = self.credibility
return fs
| # Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from .claim import Claim
from .narrative import Node
from .author import Authority
class Proof(Node):
sign = '!p'
authority = None
def __init__(self,
text=None,
author=None,
agent=None,
authority=None,
factor=2,
**kw):
super().__init__(factor=factor, **kw)
if text:
for k in ('factor', 'bias', 'weight'):
kw.pop(k, None)
self.claim = Claim(text=text, **kw)
if not authority:
if agent:
authority = 'agent'
elif author:
authority = 'self'
if authority:
self.authority = Authority.create(name=authority)
@property
def weight(self):
p = self.partial(self.authority.weight, self.claim.weight)
return p + self.bias
@property
def credibility(self):
return self.weight
@property
def value(self):
a = self.authority.agency
return '{} {}: {}'.format(super().value, a, self.claim.value)
@property
def fields(self):
fs = self.authority.fields
fs.update(self.claim.fields)
fs.update(super().fields)
fs['Credibility'] = self.credibility
return fs
| en | 0.813792 | # Copyright 2019 Quantapix Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= | 2.273734 | 2 |
learningPygame/Dave/06-SpaceInvaders/space_invaders.py | Rosebotics/catapult2019 | 0 | 10725 | import pygame, sys, random, time
from pygame.locals import *
class Missile:
def __init__(self, screen, x):
# Store the data. Initialize: y to 591 and exploded to False.
self.screen = screen
self.x = x
self.y = 591
self.exploded = False
def move(self):
# Make self.y 5 smaller than it was (which will cause the Missile to move UP).
self.y = self.y - 5
def draw(self):
# Draw a vertical, 4 pixels thick, 8 pixels long, red (or green) line on the screen,
# where the line starts at the current position of this Missile.
pygame.draw.line(self.screen, (0, 255, 0), (self.x, self.y), (self.x, self.y - 8), 4)
class Fighter:
def __init__(self, screen, x, y):
# Store the data.
# Set self.missiles to the empty list.
# Load the file "fighter.png" as the image
# Set the colorkey to white (it has a white background that needs removed)
self.screen = screen
self.x = x
self.y = y
self.missiles = []
self.image = pygame.image.load("fighter.png")
self.image.set_colorkey(pygame.Color("White"))
def draw(self):
# Draw this Fighter, using its image at its current (x, y) position.
self.screen.blit(self.image, (self.x, self.y))
def fire(self):
# Construct a new Missile 50 pixels to the right of this Fighter.
# Append that Missile to this Fighter's list of Missile objects.
new_missile = Missile(self.screen, self.x + 50)
self.missiles.append(new_missile)
def remove_exploded_missiles(self):
# Already complete
for k in range(len(self.missiles) - 1, -1, -1):
if self.missiles[k].exploded or self.missiles[k].y < 0:
del self.missiles[k]
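# Note: remove_exploded_missiles above (and remove_dead_badguys further down)
# walk the indices in reverse so that deleting an entry never shifts items that
# are still waiting to be checked.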
class Badguy:
def __init__(self, screen, x, y):
# Store the data.
# Set dead to False and original_x to x and move_right to True.
# Load the file "badguy.png" as the image. and set its colorkey to black.
self.screen = screen
self.x = x
self.y = y
self.dead = False
self.original_x = x
self.move_right = True
self.image = pygame.image.load("badguy.png")
self.image.set_colorkey(pygame.Color("Black"))
def move(self):
        # Move 8 pixels in the current horizontal direction.
        # Drop down 15 pixels and switch direction once this Badguy is more than 100 pixels from its original x position.
if self.move_right:
self.x = self.x + 8
if self.x > self.original_x + 100:
self.move_right = False
self.y = self.y + 15
else:
self.x = self.x - 8
if self.x < self.original_x - 100:
self.move_right = True
self.y = self.y + 15
def draw(self):
# Draw this Badguy, using its image at its current (x, y) position.
self.screen.blit(self.image, (self.x, self.y))
def hit_by(self, missile):
# Return True if a 70x45 rectangle at this Badguy's current position
# collides with the xy point of the given missile.
# Return False otherwise.
return pygame.Rect(self.x, self.y, 70, 45).collidepoint(missile.x, missile.y)
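# The 70x45 box above presumably mirrors the drawn size of badguy.png;
# collidepoint() then treats the missile as a single point inside that box.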
class EnemyFleet:
def __init__(self, screen, enemy_rows):
# Already done. Prepares the list of Badguys.
self.badguys = []
for j in range(enemy_rows):
for k in range(8):
self.badguys.append(Badguy(screen, 80 * k, 50 * j + 20))
@property
def is_defeated(self):
# Return True if the number of badguys in this Enemy Fleet is 0,
# otherwise return False.
return len(self.badguys) == 0
def move(self):
# Make each badguy in this EnemyFleet move.
for badguy in self.badguys:
badguy.move()
def draw(self):
# Make each badguy in this EnemyFleet draw itself.
for badguy in self.badguys:
badguy.draw()
def remove_dead_badguys(self):
for k in range(len(self.badguys) - 1, -1, -1):
if self.badguys[k].dead:
del self.badguys[k]
# Create a Scoreboard class (from scratch)
# Instance variables: screen, x, y, score, and font (size 30)
# Methods: draw (and __init__)
# Create a scoreboard at location 5, 5
# Draw the scoreboard in the game loop
class Scoreboard:
def __init__(self, screen):
self.screen = screen
self.score = 0
self.font = pygame.font.Font(None, 30)
def draw(self):
text_as_image = self.font.render("Score: " + str(self.score), True, (255, 255, 255))
self.screen.blit(text_as_image, (5, 5))
def main():
pygame.init()
clock = pygame.time.Clock()
pygame.display.set_caption("SPACE INVADERS!")
screen = pygame.display.set_mode((640, 650))
enemy_rows = 3
enemy = EnemyFleet(screen, enemy_rows)
fighter = Fighter(screen, 320, 590)
scoreboard = Scoreboard(screen)
gameover_image = pygame.image.load("gameover.png")
is_game_over = False
while True:
clock.tick(60)
for event in pygame.event.get():
pressed_keys = pygame.key.get_pressed()
if event.type == KEYDOWN and pressed_keys[K_SPACE]:
fighter.fire()
if event.type == QUIT:
sys.exit()
screen.fill((0, 0, 0))
pressed_keys = pygame.key.get_pressed()
if pressed_keys[K_LEFT] and fighter.x > -50:
fighter.x = fighter.x - 5
if pressed_keys[K_RIGHT] and fighter.x < 590:
fighter.x = fighter.x + 5
fighter.draw()
enemy.move()
enemy.draw()
for missile in fighter.missiles:
missile.move()
missile.draw()
for badguy in enemy.badguys:
for missile in fighter.missiles:
if badguy.hit_by(missile):
scoreboard.score = scoreboard.score + 100
badguy.dead = True
missile.exploded = True
fighter.remove_exploded_missiles()
enemy.remove_dead_badguys()
if enemy.is_defeated:
enemy_rows = enemy_rows + 1
enemy = EnemyFleet(screen, enemy_rows)
scoreboard.draw()
if not is_game_over:
pygame.display.update()
for badguy in enemy.badguys:
if badguy.y > 545:
screen.blit(gameover_image, (170, 200))
pygame.display.update()
is_game_over = True
main()
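# Run directly with `python space_invaders.py`; fighter.png, badguy.png and
# gameover.png are expected alongside this script.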
| import pygame, sys, random, time
from pygame.locals import *
class Missile:
def __init__(self, screen, x):
# Store the data. Initialize: y to 591 and exploded to False.
self.screen = screen
self.x = x
self.y = 591
self.exploded = False
def move(self):
# Make self.y 5 smaller than it was (which will cause the Missile to move UP).
self.y = self.y - 5
def draw(self):
# Draw a vertical, 4 pixels thick, 8 pixels long, red (or green) line on the screen,
# where the line starts at the current position of this Missile.
pygame.draw.line(self.screen, (0, 255, 0), (self.x, self.y), (self.x, self.y - 8), 4)
class Fighter:
def __init__(self, screen, x, y):
# Store the data.
# Set self.missiles to the empty list.
# Load the file "fighter.png" as the image
# Set the colorkey to white (it has a white background that needs removed)
self.screen = screen
self.x = x
self.y = y
self.missiles = []
self.image = pygame.image.load("fighter.png")
self.image.set_colorkey(pygame.Color("White"))
def draw(self):
# Draw this Fighter, using its image at its current (x, y) position.
self.screen.blit(self.image, (self.x, self.y))
def fire(self):
# Construct a new Missile 50 pixels to the right of this Fighter.
# Append that Missile to this Fighter's list of Missile objects.
new_missile = Missile(self.screen, self.x + 50)
self.missiles.append(new_missile)
def remove_exploded_missiles(self):
# Already complete
for k in range(len(self.missiles) - 1, -1, -1):
if self.missiles[k].exploded or self.missiles[k].y < 0:
del self.missiles[k]
class Badguy:
def __init__(self, screen, x, y):
# Store the data.
# Set dead to False and original_x to x and move_right to True.
# Load the file "badguy.png" as the image. and set its colorkey to black.
self.screen = screen
self.x = x
self.y = y
self.dead = False
self.original_x = x
self.move_right = True
self.image = pygame.image.load("badguy.png")
self.image.set_colorkey(pygame.Color("Black"))
def move(self):
        # Move 8 pixels in the current horizontal direction.
        # Drop down 15 pixels and switch direction once this Badguy is more than 100 pixels from its original x position.
if self.move_right:
self.x = self.x + 8
if self.x > self.original_x + 100:
self.move_right = False
self.y = self.y + 15
else:
self.x = self.x - 8
if self.x < self.original_x - 100:
self.move_right = True
self.y = self.y + 15
def draw(self):
# Draw this Badguy, using its image at its current (x, y) position.
self.screen.blit(self.image, (self.x, self.y))
def hit_by(self, missile):
# Return True if a 70x45 rectangle at this Badguy's current position
# collides with the xy point of the given missile.
# Return False otherwise.
return pygame.Rect(self.x, self.y, 70, 45).collidepoint(missile.x, missile.y)
class EnemyFleet:
def __init__(self, screen, enemy_rows):
# Already done. Prepares the list of Badguys.
self.badguys = []
for j in range(enemy_rows):
for k in range(8):
self.badguys.append(Badguy(screen, 80 * k, 50 * j + 20))
@property
def is_defeated(self):
# Return True if the number of badguys in this Enemy Fleet is 0,
# otherwise return False.
return len(self.badguys) == 0
def move(self):
# Make each badguy in this EnemyFleet move.
for badguy in self.badguys:
badguy.move()
def draw(self):
# Make each badguy in this EnemyFleet draw itself.
for badguy in self.badguys:
badguy.draw()
def remove_dead_badguys(self):
for k in range(len(self.badguys) - 1, -1, -1):
if self.badguys[k].dead:
del self.badguys[k]
# Create a Scoreboard class (from scratch)
# Instance variables: screen, x, y, score, and font (size 30)
# Methods: draw (and __init__)
# Create a scoreboard at location 5, 5
# Draw the scoreboard in the game loop
class Scoreboard:
def __init__(self, screen):
self.screen = screen
self.score = 0
self.font = pygame.font.Font(None, 30)
def draw(self):
text_as_image = self.font.render("Score: " + str(self.score), True, (255, 255, 255))
self.screen.blit(text_as_image, (5, 5))
def main():
pygame.init()
clock = pygame.time.Clock()
pygame.display.set_caption("SPACE INVADERS!")
screen = pygame.display.set_mode((640, 650))
enemy_rows = 3
enemy = EnemyFleet(screen, enemy_rows)
fighter = Fighter(screen, 320, 590)
scoreboard = Scoreboard(screen)
gameover_image = pygame.image.load("gameover.png")
is_game_over = False
while True:
clock.tick(60)
for event in pygame.event.get():
pressed_keys = pygame.key.get_pressed()
if event.type == KEYDOWN and pressed_keys[K_SPACE]:
fighter.fire()
if event.type == QUIT:
sys.exit()
screen.fill((0, 0, 0))
pressed_keys = pygame.key.get_pressed()
if pressed_keys[K_LEFT] and fighter.x > -50:
fighter.x = fighter.x - 5
if pressed_keys[K_RIGHT] and fighter.x < 590:
fighter.x = fighter.x + 5
fighter.draw()
enemy.move()
enemy.draw()
for missile in fighter.missiles:
missile.move()
missile.draw()
for badguy in enemy.badguys:
for missile in fighter.missiles:
if badguy.hit_by(missile):
scoreboard.score = scoreboard.score + 100
badguy.dead = True
missile.exploded = True
fighter.remove_exploded_missiles()
enemy.remove_dead_badguys()
if enemy.is_defeated:
enemy_rows = enemy_rows + 1
enemy = EnemyFleet(screen, enemy_rows)
scoreboard.draw()
if not is_game_over:
pygame.display.update()
for badguy in enemy.badguys:
if badguy.y > 545:
screen.blit(gameover_image, (170, 200))
pygame.display.update()
is_game_over = True
main()
| en | 0.865241 | # Store the data. Initialize: y to 591 and exploded to False. # Make self.y 5 smaller than it was (which will cause the Missile to move UP). # Draw a vertical, 4 pixels thick, 8 pixels long, red (or green) line on the screen, # where the line starts at the current position of this Missile. # Store the data. # Set self.missiles to the empty list. # Load the file "fighter.png" as the image # Set the colorkey to white (it has a white background that needs removed) # Draw this Fighter, using its image at its current (x, y) position. # Construct a new Missile 50 pixels to the right of this Fighter. # Append that Missile to this Fighter's list of Missile objects. # Already complete # Store the data. # Set dead to False and original_x to x and move_right to True. # Load the file "badguy.png" as the image. and set its colorkey to black. # Move 2 units in the current direction. # Switch direction if this Badguy's position is more than 100 pixels from its original position. # Draw this Badguy, using its image at its current (x, y) position. # Return True if a 70x45 rectangle at this Badguy's current position # collides with the xy point of the given missile. # Return False otherwise. # Already done. Prepares the list of Badguys. # Return True if the number of badguys in this Enemy Fleet is 0, # otherwise return False. # Make each badguy in this EnemyFleet move. # Make each badguy in this EnemyFleet draw itself. # Create a Scoreboard class (from scratch) # Instance variables: screen, x, y, score, and font (size 30) # Methods: draw (and __init__) # Create a scoreboard at location 5, 5 # Draw the scoreboard in the game loop | 3.283297 | 3 |
tests/test_app/rest_app/rest_app/controllers/config_controller.py | jadbin/guniflask | 12 | 10726 | from guniflask.config import settings
from guniflask.web import blueprint, get_route
@blueprint
class ConfigController:
def __init__(self):
pass
@get_route('/settings/<name>')
def get_setting(self, name):
return {name: settings[name]}
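# Illustrative request (setting name is hypothetical):
#   GET /settings/debug -> {"debug": <current value of settings["debug"]>}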
| from guniflask.config import settings
from guniflask.web import blueprint, get_route
@blueprint
class ConfigController:
def __init__(self):
pass
@get_route('/settings/<name>')
def get_setting(self, name):
return {name: settings[name]}
| none | 1 | 1.895714 | 2 |
|
model/_UNet_trainer.py | yasahi-hpc/AMRNet | 0 | 10727 | <gh_stars>0
from ._base_trainer import _BaseTrainer, MeasureMemory
import pathlib
import torch.multiprocessing as mp
import torch
from torch import nn
import horovod.torch as hvd
import numpy as np
import xarray as xr
import itertools
from .flow_dataset import FlowDataset
from .unet import UNet
import sys
from .visualization import save_flows
from .converter import save_as_netcdf
class UNetTrainer(_BaseTrainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.model_name = 'UNet'
def _initialize(self, **kwargs):
# Horovod: Initialize library
hvd.init()
torch.manual_seed(self.seed)
if self.device == 'cuda':
# Horovod: Pin GPU to be used to process local rank (one GPU per process)
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(self.seed)
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(1)
self.rank, self.size = hvd.rank(), hvd.size()
self.master = self.rank == 0
super()._prepare_dirs()
self.train_loader, self.val_loader, self.test_loader = super()._dataloaders()
self.model = self._get_model(self.run_number)
self.model = self.model.to(self.device)
## Optimizers
        # By default, Adasum doesn't need scaling up the learning rate
lr_scaler = hvd.size() if not self.use_adasum else 1
if self.device == 'cuda' and self.use_adasum and hvd.nccl_built():
lr_scaler = hvd.local_size()
lr = self.lr * lr_scaler
self.opt = torch.optim.Adam(self.model.parameters(), lr=lr, betas=(self.beta_1, self.beta_2))
# Horovod: broadcast parameters & optimizer state.
hvd.broadcast_parameters(self.model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(self.opt, root_rank=0)
# Horovod: (optional) compression algorithm.
compression = hvd.Compression.fp16 if self.fp16_allreduce else hvd.Compression.none
# Horovod: wrap optimizer with DistributedOptimizer.
self.opt = hvd.DistributedOptimizer(self.opt,
named_parameters=self.model.named_parameters(),
compression=compression,
op=hvd.Adasum if self.use_adasum else hvd.Average,
gradient_predivide_factor=self.gradient_predivide_factor)
self.criterion = nn.L1Loss() if self.loss_type == 'mae_loss' else nn.MSELoss(reduction='mean')
# Set normalization coefficients
super()._set_normalization_coefs(shape=[1,-1,1,1])
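        # shape [1, -1, 1, 1] presumably lets per-channel statistics broadcast
        # over (batch, channel, height, width) tensors during (de)normalization.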
# Memory measurement
device_name = 'cpu'
if self.device == 'cuda':
local_rank = hvd.local_rank()
device_name = f'{self.device}:{local_rank}'
self.memory = MeasureMemory(device=device_name)
# Synchronize
if self.device == 'cuda':
torch.cuda.synchronize() # Waits for everything to finish running
def _initialize_for_inference(self, **kwargs):
# Set output directory
super()._prepare_dirs()
self.train_loader, self.val_loader, self.test_loader = super()._dataloaders()
self.model = self._get_model(self.run_number)
self.model = self.model.to(self.device)
# Set normalization coefficients
super()._set_normalization_coefs(shape=[1,-1,1,1])
# Memory measurement
self.memory = MeasureMemory(device=self.device)
# Synchronize
if self.device == 'cuda':
torch.cuda.synchronize() # Waits for everything to finish running
def _get_model(self, run_number):
model = UNet(n_layers=8, hidden_dim=8, dim=self.dim, padding_mode=self.padding_mode)
if self.inference_mode:
self.epoch_start = self.load_nth_state_file
# To load the state file for inference
rank = 0
model.load_state_dict( torch.load(f'{self.state_file_dir}/model_{rank}_{self.epoch_start:03}.pt') )
else:
self.epoch_start = 0
if run_number > 0:
if self.master:
print(f'restart, {run_number}')
# Load model states from previous run
prev_run_number = run_number - 1
prev_result_filename = self.out_dir / f'flow_cnn_result_rank{self.rank}_rst{prev_run_number:03}.h5'
if not prev_result_filename.is_file():
raise IOError(f'prev_result_filename')
ds_prev = xr.open_dataset(prev_result_filename, engine='netcdf4')
# To load the previous files
epoch_end = ds_prev.attrs['epoch_end']
model.load_state_dict( torch.load(f'{self.model_dir}/model_{self.rank}_{epoch_end:03}.pt') )
# Next epoch should start from epoch_end + 1
self.epoch_start = int(epoch_end) + 1
return model
def _save_models(self, total_epoch):
torch.save(self.model.state_dict(), f'{self.model_dir}/model_{self.rank}_{total_epoch:03}.pt')
########### Main scripts
def _train(self, data_loader, epoch):
name = 'train'
self.model.train()
log_loss = 0
nb_samples = len(data_loader.sampler)
level = 2
# Timers
for i, (sdf, flows) in enumerate(data_loader):
# Load data and meta-data
*_, sdf_Lv2 = sdf
*_, flows_Lv2 = flows
batch_len = len(sdf_Lv2)
## To device
self.timer.start()
sdf_Lv2 = sdf_Lv2.to(self.device)
flows_Lv2 = flows_Lv2.to(self.device)
self.timer.stop()
self.elapsed_times[f'MemcpyH2D_{name}'].append(self.timer.elapsed_seconds())
# Keep sdfs on CPUs
sdf_Lv2_cpu = sdf_Lv2.to('cpu')
## Normalization or standardization
sdf_Lv2 = super()._preprocess(sdf_Lv2, self.sdf_Lv2_var0, self.sdf_Lv2_var1)
flows_Lv2 = super()._preprocess(flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
# Objectives: construct pred_flows_Lv2
pred_flows_Lv2_ = torch.zeros_like(flows_Lv2, device='cpu')
#### Train Lv2
self.timer.start()
### Update weights
pred_flows_Lv2 = self.model(sdf_Lv2)
loss_mae = self.criterion(pred_flows_Lv2, flows_Lv2)
self.opt.zero_grad()
### Measure memory usage before backward
self.memory.measure()
if 'reserved' not in self.memory_consumption:
self.memory_consumption['reserved'] = self.memory.reserved()
self.memory_consumption['alloc'] = self.memory.alloc()
loss_mae.backward()
self.opt.step()
### Log losses
log_loss += loss_mae.item() / nb_samples
### Destandardization and save
pred_flows_Lv2 = super()._postprocess(pred_flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
pred_flows_Lv2_ = pred_flows_Lv2.detach().cpu()
self.timer.stop()
self.elapsed_times[f'{name}_Lv{level}'].append(self.timer.elapsed_seconds())
# Saving figures
if i == 0:
self.timer.start()
flows_Lv2 = super()._postprocess(flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
### Zeros inside objects
pred_flows_Lv2_ = super()._zeros_inside_objects(pred_flows_Lv2_, sdf_Lv2_cpu)
### Lv2 figures
level = 2
save_flows(flows_Lv2, name=name, img_dir = self.sub_img_dir, type_name = 'ref', level = level, epoch=epoch)
save_flows(pred_flows_Lv2_, name=name, img_dir = self.sub_img_dir, type_name = 'pred', level = level, epoch=epoch)
# Check errors
save_flows(pred_flows_Lv2_-flows_Lv2.cpu(), name=name, img_dir = self.sub_img_dir, type_name = 'error', level = level, epoch=epoch)
self.timer.stop()
self.elapsed_times[f'save_figs_{name}'].append(self.timer.elapsed_seconds())
# Horovod: average metric values across workers.
losses = {}
losses[f'log_loss_{name}_{self.loss_type}_Lv{level}'] = log_loss
for key, value in losses.items():
loss = super()._metric_average(value, key)
self.loss_dict[key].append(loss)
def _validation(self, data_loader, epoch, name):
self.model.eval()
log_loss = 0
nb_samples = len(data_loader.sampler)
level = 2
for i, (sdf, flows) in enumerate(data_loader):
# Load data and meta-data
*_, sdf_Lv2 = sdf
*_, flows_Lv2 = flows
batch_len = len(sdf_Lv2)
## To device
self.timer.start()
sdf_Lv2 = sdf_Lv2.to(self.device)
flows_Lv2 = flows_Lv2.to(self.device)
self.timer.stop()
self.elapsed_times[f'MemcpyH2D_{name}'].append(self.timer.elapsed_seconds())
# Keep sdfs on CPUs
sdf_Lv2_cpu = sdf_Lv2.to('cpu')
## Normalization or standardization
sdf_Lv2 = super()._preprocess(sdf_Lv2, self.sdf_Lv2_var0, self.sdf_Lv2_var1)
flows_Lv2 = super()._preprocess(flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
# Objectives: construct pred_flows_Lv2
pred_flows_Lv2_ = torch.zeros_like(flows_Lv2, device='cpu')
            #### Validate Lv2
self.timer.start()
            ### Forward pass (no weight update during validation)
pred_flows_Lv2 = self.model(sdf_Lv2)
loss_mae = self.criterion(pred_flows_Lv2, flows_Lv2)
### Log losses
log_loss += loss_mae.item() / nb_samples
### Destandardization and save
pred_flows_Lv2 = super()._postprocess(pred_flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
pred_flows_Lv2_ = pred_flows_Lv2.detach().cpu()
self.timer.stop()
self.elapsed_times[f'{name}_Lv{level}'].append(self.timer.elapsed_seconds())
# Saving figures
if i == 0:
self.timer.start()
flows_Lv2 = super()._postprocess(flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
### Zeros inside objects
pred_flows_Lv2_ = super()._zeros_inside_objects(pred_flows_Lv2_, sdf_Lv2_cpu)
### Lv2 figures
level = 2
save_flows(flows_Lv2, name=name, img_dir = self.sub_img_dir, type_name = 'ref', level = level, epoch=epoch)
save_flows(pred_flows_Lv2_, name=name, img_dir = self.sub_img_dir, type_name = 'pred', level = level, epoch=epoch)
# Check errors
save_flows(pred_flows_Lv2_-flows_Lv2.cpu(), name=name, img_dir = self.sub_img_dir, type_name = 'error', level = level, epoch=epoch)
self.timer.stop()
self.elapsed_times[f'save_figs_{name}'].append(self.timer.elapsed_seconds())
# Horovod: average metric values across workers.
losses = {}
losses[f'log_loss_{name}_{self.loss_type}_Lv{level}'] = log_loss
for key, value in losses.items():
loss = super()._metric_average(value, key)
self.loss_dict[key].append(loss)
### For inference
def _infer(self):
with torch.no_grad():
self._convert(data_loader=self.val_loader, name='validation')
self._convert(data_loader=self.test_loader, name='test')
def _convert(self, data_loader, name):
self.model.eval()
level = 2
for indices, sdf, flows in data_loader:
# Load data and meta-data
*_, sdf_Lv2 = sdf
*_, flows_Lv2 = flows
batch_len = len(sdf_Lv2)
## To device
self.timer.start()
sdf_Lv2 = sdf_Lv2.to(self.device)
flows_Lv2 = flows_Lv2.to(self.device)
self.timer.stop()
self.elapsed_times[f'MemcpyH2D_{name}'].append(self.timer.elapsed_seconds())
# Keep sdfs on CPUs
sdf_Lv2_cpu = sdf_Lv2.to('cpu')
## Normalization or standardization
sdf_Lv2 = super()._preprocess(sdf_Lv2, self.sdf_Lv2_var0, self.sdf_Lv2_var1)
flows_Lv2 = super()._preprocess(flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
# Objectives: construct pred_flows_Lv2
pred_flows_Lv2_ = torch.zeros_like(flows_Lv2, device='cpu')
#### Infer Lv2
self.timer.start()
            ### Forward pass (inference only)
pred_flows_Lv2 = self.model(sdf_Lv2)
### Destandardization and save
pred_flows_Lv2 = super()._postprocess(pred_flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
pred_flows_Lv2_ = pred_flows_Lv2.detach().cpu()
self.timer.stop()
self.elapsed_times[f'{name}_Lv{level}'].append(self.timer.elapsed_seconds())
# Save the data in netcdf format
self.timer.start()
flows_Lv2 = super()._postprocess(flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
### Zeros inside objects
pred_flows_Lv2_ = super()._zeros_inside_objects(pred_flows_Lv2_, sdf_Lv2_cpu)
### Lv2 data
save_as_netcdf(sdf=sdf_Lv2_cpu, real_flows=flows_Lv2.cpu(), pred_flows=pred_flows_Lv2_,
indices=indices, epoch=self.epoch_start, level=level, name=name, data_dir=self.inference_dir)
self.timer.stop()
self.elapsed_times[f'save_data_{name}'].append(self.timer.elapsed_seconds())
| from ._base_trainer import _BaseTrainer, MeasureMemory
import pathlib
import torch.multiprocessing as mp
import torch
from torch import nn
import horovod.torch as hvd
import numpy as np
import xarray as xr
import itertools
from .flow_dataset import FlowDataset
from .unet import UNet
import sys
from .visualization import save_flows
from .converter import save_as_netcdf
class UNetTrainer(_BaseTrainer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.model_name = 'UNet'
def _initialize(self, **kwargs):
# Horovod: Initialize library
hvd.init()
torch.manual_seed(self.seed)
if self.device == 'cuda':
# Horovod: Pin GPU to be used to process local rank (one GPU per process)
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(self.seed)
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(1)
self.rank, self.size = hvd.rank(), hvd.size()
self.master = self.rank == 0
super()._prepare_dirs()
self.train_loader, self.val_loader, self.test_loader = super()._dataloaders()
self.model = self._get_model(self.run_number)
self.model = self.model.to(self.device)
## Optimizers
        # By default, Adasum doesn't need scaling up the learning rate
lr_scaler = hvd.size() if not self.use_adasum else 1
if self.device == 'cuda' and self.use_adasum and hvd.nccl_built():
lr_scaler = hvd.local_size()
lr = self.lr * lr_scaler
self.opt = torch.optim.Adam(self.model.parameters(), lr=lr, betas=(self.beta_1, self.beta_2))
# Horovod: broadcast parameters & optimizer state.
hvd.broadcast_parameters(self.model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(self.opt, root_rank=0)
# Horovod: (optional) compression algorithm.
compression = hvd.Compression.fp16 if self.fp16_allreduce else hvd.Compression.none
# Horovod: wrap optimizer with DistributedOptimizer.
self.opt = hvd.DistributedOptimizer(self.opt,
named_parameters=self.model.named_parameters(),
compression=compression,
op=hvd.Adasum if self.use_adasum else hvd.Average,
gradient_predivide_factor=self.gradient_predivide_factor)
self.criterion = nn.L1Loss() if self.loss_type == 'mae_loss' else nn.MSELoss(reduction='mean')
# Set normalization coefficients
super()._set_normalization_coefs(shape=[1,-1,1,1])
# Memory measurement
device_name = 'cpu'
if self.device == 'cuda':
local_rank = hvd.local_rank()
device_name = f'{self.device}:{local_rank}'
self.memory = MeasureMemory(device=device_name)
# Synchronize
if self.device == 'cuda':
torch.cuda.synchronize() # Waits for everything to finish running
def _initialize_for_inference(self, **kwargs):
# Set output directory
super()._prepare_dirs()
self.train_loader, self.val_loader, self.test_loader = super()._dataloaders()
self.model = self._get_model(self.run_number)
self.model = self.model.to(self.device)
# Set normalization coefficients
super()._set_normalization_coefs(shape=[1,-1,1,1])
# Memory measurement
self.memory = MeasureMemory(device=self.device)
# Synchronize
if self.device == 'cuda':
torch.cuda.synchronize() # Waits for everything to finish running
def _get_model(self, run_number):
model = UNet(n_layers=8, hidden_dim=8, dim=self.dim, padding_mode=self.padding_mode)
if self.inference_mode:
self.epoch_start = self.load_nth_state_file
# To load the state file for inference
rank = 0
model.load_state_dict( torch.load(f'{self.state_file_dir}/model_{rank}_{self.epoch_start:03}.pt') )
else:
self.epoch_start = 0
if run_number > 0:
if self.master:
print(f'restart, {run_number}')
# Load model states from previous run
prev_run_number = run_number - 1
prev_result_filename = self.out_dir / f'flow_cnn_result_rank{self.rank}_rst{prev_run_number:03}.h5'
if not prev_result_filename.is_file():
raise IOError(f'prev_result_filename')
ds_prev = xr.open_dataset(prev_result_filename, engine='netcdf4')
# To load the previous files
epoch_end = ds_prev.attrs['epoch_end']
model.load_state_dict( torch.load(f'{self.model_dir}/model_{self.rank}_{epoch_end:03}.pt') )
# Next epoch should start from epoch_end + 1
self.epoch_start = int(epoch_end) + 1
return model
def _save_models(self, total_epoch):
torch.save(self.model.state_dict(), f'{self.model_dir}/model_{self.rank}_{total_epoch:03}.pt')
########### Main scripts
def _train(self, data_loader, epoch):
name = 'train'
self.model.train()
log_loss = 0
nb_samples = len(data_loader.sampler)
level = 2
# Timers
for i, (sdf, flows) in enumerate(data_loader):
# Load data and meta-data
*_, sdf_Lv2 = sdf
*_, flows_Lv2 = flows
batch_len = len(sdf_Lv2)
## To device
self.timer.start()
sdf_Lv2 = sdf_Lv2.to(self.device)
flows_Lv2 = flows_Lv2.to(self.device)
self.timer.stop()
self.elapsed_times[f'MemcpyH2D_{name}'].append(self.timer.elapsed_seconds())
# Keep sdfs on CPUs
sdf_Lv2_cpu = sdf_Lv2.to('cpu')
## Normalization or standardization
sdf_Lv2 = super()._preprocess(sdf_Lv2, self.sdf_Lv2_var0, self.sdf_Lv2_var1)
flows_Lv2 = super()._preprocess(flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
# Objectives: construct pred_flows_Lv2
pred_flows_Lv2_ = torch.zeros_like(flows_Lv2, device='cpu')
#### Train Lv2
self.timer.start()
### Update weights
pred_flows_Lv2 = self.model(sdf_Lv2)
loss_mae = self.criterion(pred_flows_Lv2, flows_Lv2)
self.opt.zero_grad()
### Measure memory usage before backward
self.memory.measure()
if 'reserved' not in self.memory_consumption:
self.memory_consumption['reserved'] = self.memory.reserved()
self.memory_consumption['alloc'] = self.memory.alloc()
loss_mae.backward()
self.opt.step()
### Log losses
log_loss += loss_mae.item() / nb_samples
### Destandardization and save
pred_flows_Lv2 = super()._postprocess(pred_flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
pred_flows_Lv2_ = pred_flows_Lv2.detach().cpu()
self.timer.stop()
self.elapsed_times[f'{name}_Lv{level}'].append(self.timer.elapsed_seconds())
# Saving figures
if i == 0:
self.timer.start()
flows_Lv2 = super()._postprocess(flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
### Zeros inside objects
pred_flows_Lv2_ = super()._zeros_inside_objects(pred_flows_Lv2_, sdf_Lv2_cpu)
### Lv2 figures
level = 2
save_flows(flows_Lv2, name=name, img_dir = self.sub_img_dir, type_name = 'ref', level = level, epoch=epoch)
save_flows(pred_flows_Lv2_, name=name, img_dir = self.sub_img_dir, type_name = 'pred', level = level, epoch=epoch)
# Check errors
save_flows(pred_flows_Lv2_-flows_Lv2.cpu(), name=name, img_dir = self.sub_img_dir, type_name = 'error', level = level, epoch=epoch)
self.timer.stop()
self.elapsed_times[f'save_figs_{name}'].append(self.timer.elapsed_seconds())
# Horovod: average metric values across workers.
losses = {}
losses[f'log_loss_{name}_{self.loss_type}_Lv{level}'] = log_loss
for key, value in losses.items():
loss = super()._metric_average(value, key)
self.loss_dict[key].append(loss)
def _validation(self, data_loader, epoch, name):
self.model.eval()
log_loss = 0
nb_samples = len(data_loader.sampler)
level = 2
for i, (sdf, flows) in enumerate(data_loader):
# Load data and meta-data
*_, sdf_Lv2 = sdf
*_, flows_Lv2 = flows
batch_len = len(sdf_Lv2)
## To device
self.timer.start()
sdf_Lv2 = sdf_Lv2.to(self.device)
flows_Lv2 = flows_Lv2.to(self.device)
self.timer.stop()
self.elapsed_times[f'MemcpyH2D_{name}'].append(self.timer.elapsed_seconds())
# Keep sdfs on CPUs
sdf_Lv2_cpu = sdf_Lv2.to('cpu')
## Normalization or standardization
sdf_Lv2 = super()._preprocess(sdf_Lv2, self.sdf_Lv2_var0, self.sdf_Lv2_var1)
flows_Lv2 = super()._preprocess(flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
# Objectives: construct pred_flows_Lv2
pred_flows_Lv2_ = torch.zeros_like(flows_Lv2, device='cpu')
            #### Validate Lv2
self.timer.start()
            ### Forward pass (no weight update during validation)
pred_flows_Lv2 = self.model(sdf_Lv2)
loss_mae = self.criterion(pred_flows_Lv2, flows_Lv2)
### Log losses
log_loss += loss_mae.item() / nb_samples
### Destandardization and save
pred_flows_Lv2 = super()._postprocess(pred_flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
pred_flows_Lv2_ = pred_flows_Lv2.detach().cpu()
self.timer.stop()
self.elapsed_times[f'{name}_Lv{level}'].append(self.timer.elapsed_seconds())
# Saving figures
if i == 0:
self.timer.start()
flows_Lv2 = super()._postprocess(flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
### Zeros inside objects
pred_flows_Lv2_ = super()._zeros_inside_objects(pred_flows_Lv2_, sdf_Lv2_cpu)
### Lv2 figures
level = 2
save_flows(flows_Lv2, name=name, img_dir = self.sub_img_dir, type_name = 'ref', level = level, epoch=epoch)
save_flows(pred_flows_Lv2_, name=name, img_dir = self.sub_img_dir, type_name = 'pred', level = level, epoch=epoch)
# Check errors
save_flows(pred_flows_Lv2_-flows_Lv2.cpu(), name=name, img_dir = self.sub_img_dir, type_name = 'error', level = level, epoch=epoch)
self.timer.stop()
self.elapsed_times[f'save_figs_{name}'].append(self.timer.elapsed_seconds())
# Horovod: average metric values across workers.
losses = {}
losses[f'log_loss_{name}_{self.loss_type}_Lv{level}'] = log_loss
for key, value in losses.items():
loss = super()._metric_average(value, key)
self.loss_dict[key].append(loss)
### For inference
def _infer(self):
with torch.no_grad():
self._convert(data_loader=self.val_loader, name='validation')
self._convert(data_loader=self.test_loader, name='test')
def _convert(self, data_loader, name):
self.model.eval()
level = 2
for indices, sdf, flows in data_loader:
# Load data and meta-data
*_, sdf_Lv2 = sdf
*_, flows_Lv2 = flows
batch_len = len(sdf_Lv2)
## To device
self.timer.start()
sdf_Lv2 = sdf_Lv2.to(self.device)
flows_Lv2 = flows_Lv2.to(self.device)
self.timer.stop()
self.elapsed_times[f'MemcpyH2D_{name}'].append(self.timer.elapsed_seconds())
# Keep sdfs on CPUs
sdf_Lv2_cpu = sdf_Lv2.to('cpu')
## Normalization or standardization
sdf_Lv2 = super()._preprocess(sdf_Lv2, self.sdf_Lv2_var0, self.sdf_Lv2_var1)
flows_Lv2 = super()._preprocess(flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
# Objectives: construct pred_flows_Lv2
pred_flows_Lv2_ = torch.zeros_like(flows_Lv2, device='cpu')
#### Infer Lv2
self.timer.start()
            ### Forward pass (inference only)
pred_flows_Lv2 = self.model(sdf_Lv2)
### Destandardization and save
pred_flows_Lv2 = super()._postprocess(pred_flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
pred_flows_Lv2_ = pred_flows_Lv2.detach().cpu()
self.timer.stop()
self.elapsed_times[f'{name}_Lv{level}'].append(self.timer.elapsed_seconds())
# Save the data in netcdf format
self.timer.start()
flows_Lv2 = super()._postprocess(flows_Lv2, self.flows_Lv2_var0, self.flows_Lv2_var1)
### Zeros inside objects
pred_flows_Lv2_ = super()._zeros_inside_objects(pred_flows_Lv2_, sdf_Lv2_cpu)
### Lv2 data
save_as_netcdf(sdf=sdf_Lv2_cpu, real_flows=flows_Lv2.cpu(), pred_flows=pred_flows_Lv2_,
indices=indices, epoch=self.epoch_start, level=level, name=name, data_dir=self.inference_dir)
self.timer.stop()
self.elapsed_times[f'save_data_{name}'].append(self.timer.elapsed_seconds()) | en | 0.600346 | # Horovod: Initialize library # Horovod: Pin GPU to be used to process local rank (one GPU per process) # Horovod: limit # of CPU threads to be used per worker. ## Optimizers # By default, Adasum doesn't need scaling up leraning rate # Horovod: broadcast parameters & optimizer state. # Horovod: (optional) compression algorithm. # Horovod: wrap optimizer with DistributedOptimizer. # Set normalization coefficients # Memory measurement # Synchronize # Waits for everything to finish running # Set output directory # Set normalization coefficients # Memory measurement # Synchronize # Waits for everything to finish running # To load the state file for inference # Load model states from previous run # To load the previous files # Next epoch should start from epoch_end + 1 ########### Main scripts # Timers # Load data and meta-data ## To device # Keep sdfs on CPUs ## Normalization or standardization # Objectives: construct pred_flows_Lv2 #### Train Lv2 ### Update weights ### Measure memory usage before backward ### Log losses ### Destandardization and save # Saving figures ### Zeros inside objects ### Lv2 figures # Check errors # Horovod: average metric values across workers. # Load data and meta-data ## To device # Keep sdfs on CPUs ## Normalization or standardization # Objectives: construct pred_flows_Lv2 #### Train Lv0 ### Update weights ### Log losses ### Destandardization and save # Saving figures ### Zeros inside objects ### Lv2 figures # Check errors # Horovod: average metric values across workers. ### For inference # Load data and meta-data ## To device # Keep sdfs on CPUs ## Normalization or standardization # Objectives: construct pred_flows_Lv2 #### Infer Lv2 ### Update weights ### Destandardization and save # Save the data in netcdf format ### Zeros inside objects ### Lv2 data | 2.024745 | 2 |
agents/vpg_policy_translation_with_dislocation.py | pjarosik/rlus | 3 | 10728 | <reponame>pjarosik/rlus
from spinup import vpg
import tensorflow as tf
import numpy as np
from gym.spaces import Box, Discrete
from envs.focal_point_task_us_env import FocalPointTaskUsEnv
from envs.phantom import (
ScatterersPhantom,
Ball,
Teddy
)
from envs.imaging import ImagingSystem, Probe
from envs.generator import ConstPhantomGenerator, RandomProbeGenerator
import envs.logger
import matplotlib
import argparse
N_STEPS_PER_EPISODE = 16
N_STEPS_PER_EPOCH = 64
EPOCHS = 251 # NO_EPISODES = (NSTEPS_PER_EPOCH/NSTEPS_PER_EPISODE)*EPOCHS
N_WORKERS = 4
def env_fn(trajectory_logger):
probe = Probe(
pos=np.array([-20 / 1000, 0]), # only X and Y
angle=0,
width=40 / 1000,
height=10 / 1000,
focal_depth=10 / 1000
)
teddy = Teddy(
belly_pos=np.array([0 / 1000, 0, 50 / 1000]),
scale=12 / 1000,
head_offset=.9
)
phantom = ScatterersPhantom(
objects=[teddy],
x_border=(-40 / 1000, 40 / 1000),
y_border=(-40 / 1000, 40 / 1000),
z_border=(0, 90 / 1000),
n_scatterers=int(1e4),
n_bck_scatterers=int(1e3),
seed=42,
)
imaging = ImagingSystem(
c=1540,
fs=100e6,
image_width=40 / 1000,
image_height=90 / 1000,
image_resolution=(40, 90), # [pixels]
median_filter_size=5,
dr_threshold=-200,
dec=1,
no_lines=64
)
env = FocalPointTaskUsEnv(
dx_reward_coeff=1,
dz_reward_coeff=1,
imaging=imaging,
phantom_generator=ConstPhantomGenerator(phantom),
probe_generator=RandomProbeGenerator(
ref_probe=probe,
object_to_align=teddy,
seed=42,
x_pos=np.arange(-20/1000, 24/1000, step=5/1000),
focal_pos=[10/1000]
),
max_steps=N_STEPS_PER_EPISODE,
no_workers=N_WORKERS,
use_cache=True,
trajectory_logger=trajectory_logger,
step_size=5/1000,
# probe_dislocation_prob=0,
# max_probe_dislocation=2,
# dislocation_seed=42
)
return env
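# env_fn above builds a single-Teddy phantom imaged over a 40 x 90 mm window;
# the probe generator randomizes the starting x position between -20 mm and
# +20 mm in 5 mm steps with a fixed 10 mm focal depth, and each episode is
# capped at N_STEPS_PER_EPISODE steps.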
AC_KWARGS = dict(
hidden_sizes=[16, 32],
activation=tf.nn.relu
)
# Below functions base on openai.spinup's A-C scheme implementation.
def cnn(x,
training_ph,
hidden_sizes=(32,),
kernel_size=(3, 3),
pool_size=(2, 2),
output_activation=None
):
x = tf.layers.batch_normalization(x, training=training_ph)
for h in hidden_sizes[:-1]:
x = tf.layers.conv2d(x, filters=h, kernel_size=kernel_size)
x = tf.layers.batch_normalization(x, training=training_ph)
x = tf.nn.relu(x)
# x = tf.nn.tanh(x)
x = tf.layers.max_pooling2d(x, pool_size=pool_size, strides=pool_size)
x = tf.layers.flatten(x)
return tf.layers.dense(x, units=hidden_sizes[-1],
activation=output_activation)
def cnn_categorical_policy(x, a, training_ph, hidden_sizes, output_activation,
action_space):
act_dim = action_space.n
logits = cnn(x, training_ph, hidden_sizes=list(hidden_sizes) + [act_dim],
output_activation=None)
logp_all = tf.nn.log_softmax(logits)
pi = tf.squeeze(tf.multinomial(logits, 1),
axis=1) # action drawn from current policy
logp = tf.reduce_sum(tf.one_hot(a, depth=act_dim) * logp_all,
axis=1) # log probability of given actions
logp_pi = tf.reduce_sum(tf.one_hot(pi, depth=act_dim) * logp_all,
axis=1) # log probability of actions of given pi
return pi, logp, logp_pi, logp_all
def cnn_actor_critic(x, a, training_ph, hidden_sizes=(64, 64),
activation=tf.tanh,
output_activation=None, policy=None, action_space=None):
# default policy builder depends on action space
if policy is None and isinstance(action_space, Box):
policy = cnn_gaussian_policy
elif policy is None and isinstance(action_space, Discrete):
policy = cnn_categorical_policy
with tf.variable_scope('pi'):
pi, logp, logp_pi, logp_all = policy(x, a, training_ph, hidden_sizes,
output_activation, action_space)
with tf.variable_scope('v'):
v = tf.squeeze(
cnn(x, training_ph, hidden_sizes=list(hidden_sizes) + [1],
output_activation=None), axis=1)
return pi, logp, logp_pi, v, logp_all
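# NOTE: cnn_gaussian_policy (selected above for Box action spaces) is not defined in
# this file and is assumed to come from elsewhere; taking that branch as written
# would raise a NameError. Only the categorical (Discrete) path is expected to be
# exercised here.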
def main():
matplotlib.use('agg')
np.random.seed(2442)
parser = argparse.ArgumentParser(description="Train agent in env: %s" %
FocalPointTaskUsEnv.__name__)
parser.add_argument("--exp_dir", dest="exp_dir",
help="Where to put all information about the experiment",
required=True)
args = parser.parse_args()
    trajectory_logger = envs.logger.TrajectoryLogger(
        log_dir=".",
        log_action_csv_freq=1,
        log_state_csv_freq=1,
        log_state_render_freq=200
    )
    spinup_logger_kwargs = dict(output_dir=".", exp_name='log_files')
    env_builder = lambda: env_fn(trajectory_logger)
vpg(env_fn=env_builder,
actor_critic=cnn_actor_critic,
ac_kwargs=AC_KWARGS,
steps_per_epoch=N_STEPS_PER_EPOCH,
epochs=EPOCHS,
max_ep_len=N_STEPS_PER_EPISODE,
logger_kwargs=spinup_logger_kwargs,
save_freq=200,
lam=0.95
)
if __name__ == "__main__":
    main()
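# Typical invocation (assumed, based on the required --exp_dir flag):
#   python agents/vpg_policy_translation_with_dislocation.py --exp_dir ./runs/vpg
# Note that args.exp_dir is parsed but not used above; the loggers write to ".".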
lib/losses/dice.py | zongdaoming/CMT | 3 | 10729 | import sys,os
sys.path.append('/home/zongdaoming/cv/multi-organ/multi-organ-ijcai')
from lib.losses.BaseClass import _AbstractDiceLoss
from lib.losses.basic import *
class DiceLoss(_AbstractDiceLoss):
"""Computes Dice Loss according to https://arxiv.org/abs/1606.04797.
For multi-class segmentation `weight` parameter can be used to assign different weights per class.
"""
    def __init__(self, classes=4, skip_index_after=None, weight=None, sigmoid_normalization=True):
super().__init__(weight, sigmoid_normalization)
self.classes = classes
if skip_index_after is not None:
self.skip_index_after = skip_index_after
def dice(self, input, target, weight):
        return compute_per_channel_dice(input, target, weights=self.weight)
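# Minimal usage sketch (assumes _AbstractDiceLoss provides the usual forward pass and
# that compute_per_channel_dice comes from lib.losses.basic via the star import):
#   criterion = DiceLoss(classes=4)
#   loss = criterion(prediction, target)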
icons.py | jasantunes/alfred-golinks | 312 | 10730 | <gh_stars>100-1000
# encoding: utf-8
#
# Copyright (c) 2019 <NAME> <<EMAIL>>
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2019-09-06
#
"""Overlay check mark on icons."""
from __future__ import print_function, absolute_import
from Cocoa import (
NSBitmapImageRep,
NSPNGFileType,
NSImage,
NSMakeSize,
NSCompositeCopy,
NSSizeToCGSize,
NSZeroPoint,
)
from CoreGraphics import CGRectZero
def overlay(src, overlay, dest):
"""Create image ``dest`` by putting ``overlay`` on top of ``src``.
Args:
src (str): Path to source image.
overlay (str): Path to overlay image.
dest (str): Path to save combined image to.
"""
src = NSImage.alloc().initWithContentsOfFile_(src)
overlay = NSImage.alloc().initWithContentsOfFile_(overlay)
img = NSImage.alloc().initWithSize_(src.size())
img.lockFocus()
rect = (0, 0), src.size()
src.drawInRect_(rect)
overlay.drawInRect_(rect)
img.unlockFocus()
rep = NSBitmapImageRep.imageRepWithData_(img.TIFFRepresentation())
data = rep.representationUsingType_properties_(NSPNGFileType,{})
    data.writeToFile_atomically_(dest, False)
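# Example (hypothetical file names):
#   overlay("icon.png", "check.png", "icon-checked.png")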
project/python/Main/CTRL/tracker.py | warak/IOT-GrannyWarden | 0 | 10731 | import datetime
from threading import Thread
from time import sleep
import DBC.dbcreate as dbc
class Tracker(Thread):
max_idle_time = 720 # minutes
default_sleep = 3600 # secs
def track(self):
dbcl = dbc.DBClient()
# print(dbcl.getlasttime())
print("Tracker activated")
while True:
date = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d-%H:%M')
string = date.rsplit("-", 1)
yearmonthday = (string[0].rsplit("-", 3))
hoursminutes = (string[1].rsplit(":", 2))
# print(yearmonthday)
# print(hoursminutes)
year = int(yearmonthday[0])
month = int(yearmonthday[1])
day = int(yearmonthday[2])
hour = int(hoursminutes[0])
minute = int(hoursminutes[1])
date = dbcl.getlasttime()
string = date.rsplit("-", 1)
yearmonthday = (string[0].rsplit("-", 3))
hoursminutes = (string[1].rsplit(":", 2))
#print(yearmonthday)
#print(hoursminutes)
yeard = int(yearmonthday[0])
monthd = int(yearmonthday[1])
dayd = int(yearmonthday[2])
hourd = int(hoursminutes[0])
minuted = int(hoursminutes[1])
            # this looping is clumsy; a ready-made date/time library could be used instead
if year == yeard:
if month == monthd:
if day == dayd:
if hour == hourd:
away = minute - minuted
else:
away = ((hour*60) + minute) - ((hourd*60) + minuted)
else:
if hour == hourd:
away = ((hourd + (day-dayd)*24 - hour) * 60) + minute - minuted
else:
away = ((day*hour*60) + minute) - ((dayd*hourd*60) + minuted)
else:
                # incomplete
away = 3
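            # Sketch of a simpler alternative using the standard library (assumes
            # dbcl.getlasttime() always returns the '%Y-%m-%d-%H:%M' format):
            #   last = datetime.datetime.strptime(dbcl.getlasttime(), '%Y-%m-%d-%H:%M')
            #   away = int((datetime.datetime.now() - last).total_seconds() // 60)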
#print(away)
self.actions(away, dbcl.getlastaway())
sleep(self.default_sleep)
def run(self):
self.track()
def actions(self, time, away):
if time < self.max_idle_time:
print("Everything ok")
else:
away = (int(away) * 60)
if time > away:
print("Contacting users")
else:
print("Holiday mode") | import datetime
from threading import Thread
from time import sleep
import DBC.dbcreate as dbc
class Tracker(Thread):
max_idle_time = 720 # minutes
default_sleep = 3600 # secs
def track(self):
dbcl = dbc.DBClient()
# print(dbcl.getlasttime())
print("Tracker activated")
while True:
date = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d-%H:%M')
string = date.rsplit("-", 1)
yearmonthday = (string[0].rsplit("-", 3))
hoursminutes = (string[1].rsplit(":", 2))
# print(yearmonthday)
# print(hoursminutes)
year = int(yearmonthday[0])
month = int(yearmonthday[1])
day = int(yearmonthday[2])
hour = int(hoursminutes[0])
minute = int(hoursminutes[1])
date = dbcl.getlasttime()
string = date.rsplit("-", 1)
yearmonthday = (string[0].rsplit("-", 3))
hoursminutes = (string[1].rsplit(":", 2))
#print(yearmonthday)
#print(hoursminutes)
yeard = int(yearmonthday[0])
monthd = int(yearmonthday[1])
dayd = int(yearmonthday[2])
hourd = int(hoursminutes[0])
minuted = int(hoursminutes[1])
# tämä loopitus tyhmää, voisi käyttää valmista kirjastoa
if year == yeard:
if month == monthd:
if day == dayd:
if hour == hourd:
away = minute - minuted
else:
away = ((hour*60) + minute) - ((hourd*60) + minuted)
else:
if hour == hourd:
away = ((hourd + (day-dayd)*24 - hour) * 60) + minute - minuted
else:
away = ((day*hour*60) + minute) - ((dayd*hourd*60) + minuted)
else:
# puutteellinen
away = 3
#print(away)
self.actions(away, dbcl.getlastaway())
sleep(self.default_sleep)
def run(self):
self.track()
def actions(self, time, away):
if time < self.max_idle_time:
print("Everything ok")
else:
away = (int(away) * 60)
if time > away:
print("Contacting users")
else:
print("Holiday mode") | fi | 0.849457 | # minutes # secs # print(dbcl.getlasttime()) # print(yearmonthday) # print(hoursminutes) #print(yearmonthday) #print(hoursminutes) # tämä loopitus tyhmää, voisi käyttää valmista kirjastoa # puutteellinen #print(away) | 3.099842 | 3 |
tests/unit/test_snapshot.py | cnnradams/python-spanner | 0 | 10732 | # Copyright 2016 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import google.api_core.gapic_v1.method
import mock
TABLE_NAME = "citizens"
COLUMNS = ["email", "first_name", "last_name", "age"]
SQL_QUERY = """\
SELECT first_name, last_name, age FROM citizens ORDER BY age"""
SQL_QUERY_WITH_PARAM = """
SELECT first_name, last_name, email FROM citizens WHERE age <= @max_age"""
PARAMS = {"max_age": 30}
PARAM_TYPES = {"max_age": "INT64"}
SQL_QUERY_WITH_BYTES_PARAM = """\
SELECT image_name FROM images WHERE @bytes IN image_data"""
PARAMS_WITH_BYTES = {"bytes": b"FACEDACE"}
RESUME_TOKEN = b"<PASSWORD>"
TXN_ID = b"DEAFBEAD"
SECONDS = 3
MICROS = 123456
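# NOTE: RESUME_TOKEN (and the key/e-mail values further down) appear redacted in this
# copy; the tests only need stable, non-empty placeholder values, so any literal works.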
class Test_restart_on_unavailable(unittest.TestCase):
def _call_fut(self, restart):
from google.cloud.spanner_v1.snapshot import _restart_on_unavailable
return _restart_on_unavailable(restart)
def _make_item(self, value, resume_token=b""):
return mock.Mock(
value=value, resume_token=resume_token, spec=["value", "resume_token"]
)
def test_iteration_w_empty_raw(self):
raw = _MockIterator()
restart = mock.Mock(spec=[], return_value=raw)
resumable = self._call_fut(restart)
self.assertEqual(list(resumable), [])
def test_iteration_w_non_empty_raw(self):
ITEMS = (self._make_item(0), self._make_item(1))
raw = _MockIterator(*ITEMS)
restart = mock.Mock(spec=[], return_value=raw)
resumable = self._call_fut(restart)
self.assertEqual(list(resumable), list(ITEMS))
restart.assert_called_once_with()
    def test_iteration_w_raw_w_resume_token(self):
ITEMS = (
self._make_item(0),
self._make_item(1, resume_token=RESUME_TOKEN),
self._make_item(2),
self._make_item(3),
)
raw = _MockIterator(*ITEMS)
restart = mock.Mock(spec=[], return_value=raw)
resumable = self._call_fut(restart)
self.assertEqual(list(resumable), list(ITEMS))
restart.assert_called_once_with()
def test_iteration_w_raw_raising_unavailable_no_token(self):
ITEMS = (
self._make_item(0),
self._make_item(1, resume_token=RESUME_TOKEN),
self._make_item(2),
)
before = _MockIterator(fail_after=True)
after = _MockIterator(*ITEMS)
restart = mock.Mock(spec=[], side_effect=[before, after])
resumable = self._call_fut(restart)
self.assertEqual(list(resumable), list(ITEMS))
self.assertEqual(restart.mock_calls, [mock.call(), mock.call(resume_token=b"")])
def test_iteration_w_raw_raising_unavailable(self):
FIRST = (self._make_item(0), self._make_item(1, resume_token=RESUME_TOKEN))
SECOND = (self._make_item(2),) # discarded after 503
LAST = (self._make_item(3),)
before = _MockIterator(*(FIRST + SECOND), fail_after=True)
after = _MockIterator(*LAST)
restart = mock.Mock(spec=[], side_effect=[before, after])
resumable = self._call_fut(restart)
self.assertEqual(list(resumable), list(FIRST + LAST))
self.assertEqual(
restart.mock_calls, [mock.call(), mock.call(resume_token=RESUME_TOKEN)]
)
def test_iteration_w_raw_raising_unavailable_after_token(self):
FIRST = (self._make_item(0), self._make_item(1, resume_token=RESUME_TOKEN))
SECOND = (self._make_item(2), self._make_item(3))
before = _MockIterator(*FIRST, fail_after=True)
after = _MockIterator(*SECOND)
restart = mock.Mock(spec=[], side_effect=[before, after])
resumable = self._call_fut(restart)
self.assertEqual(list(resumable), list(FIRST + SECOND))
self.assertEqual(
restart.mock_calls, [mock.call(), mock.call(resume_token=RESUME_TOKEN)]
)
class Test_SnapshotBase(unittest.TestCase):
PROJECT_ID = "project-id"
INSTANCE_ID = "instance-id"
INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
DATABASE_ID = "database-id"
DATABASE_NAME = INSTANCE_NAME + "/databases/" + DATABASE_ID
SESSION_ID = "session-id"
SESSION_NAME = DATABASE_NAME + "/sessions/" + SESSION_ID
def _getTargetClass(self):
from google.cloud.spanner_v1.snapshot import _SnapshotBase
return _SnapshotBase
def _make_one(self, session):
return self._getTargetClass()(session)
def _makeDerived(self, session):
class _Derived(self._getTargetClass()):
_transaction_id = None
_multi_use = False
def _make_txn_selector(self):
from google.cloud.spanner_v1.proto.transaction_pb2 import (
TransactionOptions,
TransactionSelector,
)
if self._transaction_id:
return TransactionSelector(id=self._transaction_id)
options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(strong=True)
)
if self._multi_use:
return TransactionSelector(begin=options)
return TransactionSelector(single_use=options)
return _Derived(session)
def _make_spanner_api(self):
import google.cloud.spanner_v1.gapic.spanner_client
return mock.create_autospec(
google.cloud.spanner_v1.gapic.spanner_client.SpannerClient, instance=True
)
def test_ctor(self):
session = _Session()
base = self._make_one(session)
self.assertIs(base._session, session)
self.assertEqual(base._execute_sql_count, 0)
def test__make_txn_selector_virtual(self):
session = _Session()
base = self._make_one(session)
with self.assertRaises(NotImplementedError):
base._make_txn_selector()
def test_read_other_error(self):
from google.cloud.spanner_v1.keyset import KeySet
keyset = KeySet(all_=True)
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.streaming_read.side_effect = RuntimeError()
session = _Session(database)
derived = self._makeDerived(session)
with self.assertRaises(RuntimeError):
list(derived.read(TABLE_NAME, COLUMNS, keyset))
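    # _read_helper drives a full streaming read against the mocked Spanner API and
    # checks the TransactionSelector, index, limit and partition_token that are sent.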
def _read_helper(self, multi_use, first=True, count=0, partition=None):
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner_v1.proto.result_set_pb2 import (
PartialResultSet,
ResultSetMetadata,
ResultSetStats,
)
from google.cloud.spanner_v1.proto.transaction_pb2 import (
TransactionSelector,
TransactionOptions,
)
from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType
from google.cloud.spanner_v1.proto.type_pb2 import STRING, INT64
from google.cloud.spanner_v1.keyset import KeySet
from google.cloud.spanner_v1._helpers import _make_value_pb
VALUES = [[u"bharney", 31], [u"phred", 32]]
VALUE_PBS = [[_make_value_pb(item) for item in row] for row in VALUES]
struct_type_pb = StructType(
fields=[
StructType.Field(name="name", type=Type(code=STRING)),
StructType.Field(name="age", type=Type(code=INT64)),
]
)
metadata_pb = ResultSetMetadata(row_type=struct_type_pb)
stats_pb = ResultSetStats(
query_stats=Struct(fields={"rows_returned": _make_value_pb(2)})
)
result_sets = [
PartialResultSet(values=VALUE_PBS[0], metadata=metadata_pb),
PartialResultSet(values=VALUE_PBS[1], stats=stats_pb),
]
KEYS = [["<EMAIL>"], ["<EMAIL>"]]
keyset = KeySet(keys=KEYS)
INDEX = "email-address-index"
LIMIT = 20
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.streaming_read.return_value = _MockIterator(*result_sets)
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = multi_use
derived._read_request_count = count
if not first:
derived._transaction_id = TXN_ID
if partition is not None: # 'limit' and 'partition' incompatible
result_set = derived.read(
TABLE_NAME, COLUMNS, keyset, index=INDEX, partition=partition
)
else:
result_set = derived.read(
TABLE_NAME, COLUMNS, keyset, index=INDEX, limit=LIMIT
)
self.assertEqual(derived._read_request_count, count + 1)
if multi_use:
self.assertIs(result_set._source, derived)
else:
self.assertIsNone(result_set._source)
self.assertEqual(list(result_set), VALUES)
self.assertEqual(result_set.metadata, metadata_pb)
self.assertEqual(result_set.stats, stats_pb)
txn_options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(strong=True)
)
if multi_use:
if first:
expected_transaction = TransactionSelector(begin=txn_options)
else:
expected_transaction = TransactionSelector(id=TXN_ID)
else:
expected_transaction = TransactionSelector(single_use=txn_options)
if partition is not None:
expected_limit = 0
else:
expected_limit = LIMIT
api.streaming_read.assert_called_once_with(
self.SESSION_NAME,
TABLE_NAME,
COLUMNS,
keyset._to_pb(),
transaction=expected_transaction,
index=INDEX,
limit=expected_limit,
partition_token=partition,
metadata=[("google-cloud-resource-prefix", database.name)],
)
def test_read_wo_multi_use(self):
self._read_helper(multi_use=False)
def test_read_wo_multi_use_w_read_request_count_gt_0(self):
with self.assertRaises(ValueError):
self._read_helper(multi_use=False, count=1)
def test_read_w_multi_use_wo_first(self):
self._read_helper(multi_use=True, first=False)
def test_read_w_multi_use_wo_first_w_count_gt_0(self):
self._read_helper(multi_use=True, first=False, count=1)
def test_read_w_multi_use_w_first_w_partition(self):
PARTITION = b"FADEABED"
self._read_helper(multi_use=True, first=True, partition=PARTITION)
def test_read_w_multi_use_w_first_w_count_gt_0(self):
with self.assertRaises(ValueError):
self._read_helper(multi_use=True, first=True, count=1)
def test_execute_sql_other_error(self):
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.execute_streaming_sql.side_effect = RuntimeError()
session = _Session(database)
derived = self._makeDerived(session)
with self.assertRaises(RuntimeError):
list(derived.execute_sql(SQL_QUERY))
self.assertEqual(derived._execute_sql_count, 1)
def test_execute_sql_w_params_wo_param_types(self):
database = _Database()
session = _Session(database)
derived = self._makeDerived(session)
with self.assertRaises(ValueError):
derived.execute_sql(SQL_QUERY_WITH_PARAM, PARAMS)
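    # _execute_sql_helper mirrors _read_helper for execute_sql, additionally checking
    # the query mode, merged query options, seqno and retry/timeout pass-through.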
def _execute_sql_helper(
self,
multi_use,
first=True,
count=0,
partition=None,
sql_count=0,
query_options=None,
timeout=google.api_core.gapic_v1.method.DEFAULT,
retry=google.api_core.gapic_v1.method.DEFAULT,
):
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner_v1.proto.result_set_pb2 import (
PartialResultSet,
ResultSetMetadata,
ResultSetStats,
)
from google.cloud.spanner_v1.proto.transaction_pb2 import (
TransactionSelector,
TransactionOptions,
)
from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType
from google.cloud.spanner_v1.proto.type_pb2 import STRING, INT64
from google.cloud.spanner_v1._helpers import (
_make_value_pb,
_merge_query_options,
)
VALUES = [[u"bharney", u"rhubbyl", 31], [u"phred", u"phlyntstone", 32]]
VALUE_PBS = [[_make_value_pb(item) for item in row] for row in VALUES]
MODE = 2 # PROFILE
struct_type_pb = StructType(
fields=[
StructType.Field(name="first_name", type=Type(code=STRING)),
StructType.Field(name="last_name", type=Type(code=STRING)),
StructType.Field(name="age", type=Type(code=INT64)),
]
)
metadata_pb = ResultSetMetadata(row_type=struct_type_pb)
stats_pb = ResultSetStats(
query_stats=Struct(fields={"rows_returned": _make_value_pb(2)})
)
result_sets = [
PartialResultSet(values=VALUE_PBS[0], metadata=metadata_pb),
PartialResultSet(values=VALUE_PBS[1], stats=stats_pb),
]
iterator = _MockIterator(*result_sets)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.execute_streaming_sql.return_value = iterator
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = multi_use
derived._read_request_count = count
derived._execute_sql_count = sql_count
if not first:
derived._transaction_id = TXN_ID
result_set = derived.execute_sql(
SQL_QUERY_WITH_PARAM,
PARAMS,
PARAM_TYPES,
query_mode=MODE,
query_options=query_options,
partition=partition,
retry=retry,
timeout=timeout,
)
self.assertEqual(derived._read_request_count, count + 1)
if multi_use:
self.assertIs(result_set._source, derived)
else:
self.assertIsNone(result_set._source)
self.assertEqual(list(result_set), VALUES)
self.assertEqual(result_set.metadata, metadata_pb)
self.assertEqual(result_set.stats, stats_pb)
txn_options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(strong=True)
)
if multi_use:
if first:
expected_transaction = TransactionSelector(begin=txn_options)
else:
expected_transaction = TransactionSelector(id=TXN_ID)
else:
expected_transaction = TransactionSelector(single_use=txn_options)
expected_params = Struct(
fields={key: _make_value_pb(value) for (key, value) in PARAMS.items()}
)
expected_query_options = database._instance._client._query_options
if query_options:
expected_query_options = _merge_query_options(
expected_query_options, query_options
)
api.execute_streaming_sql.assert_called_once_with(
self.SESSION_NAME,
SQL_QUERY_WITH_PARAM,
transaction=expected_transaction,
params=expected_params,
param_types=PARAM_TYPES,
query_mode=MODE,
query_options=expected_query_options,
partition_token=partition,
seqno=sql_count,
metadata=[("google-cloud-resource-prefix", database.name)],
timeout=timeout,
retry=retry,
)
self.assertEqual(derived._execute_sql_count, sql_count + 1)
def test_execute_sql_wo_multi_use(self):
self._execute_sql_helper(multi_use=False)
def test_execute_sql_wo_multi_use_w_read_request_count_gt_0(self):
with self.assertRaises(ValueError):
self._execute_sql_helper(multi_use=False, count=1)
def test_execute_sql_w_multi_use_wo_first(self):
self._execute_sql_helper(multi_use=True, first=False, sql_count=1)
def test_execute_sql_w_multi_use_wo_first_w_count_gt_0(self):
self._execute_sql_helper(multi_use=True, first=False, count=1)
def test_execute_sql_w_multi_use_w_first(self):
self._execute_sql_helper(multi_use=True, first=True)
def test_execute_sql_w_multi_use_w_first_w_count_gt_0(self):
with self.assertRaises(ValueError):
self._execute_sql_helper(multi_use=True, first=True, count=1)
def test_execute_sql_w_retry(self):
self._execute_sql_helper(multi_use=False, retry=None)
def test_execute_sql_w_timeout(self):
self._execute_sql_helper(multi_use=False, timeout=None)
def test_execute_sql_w_query_options(self):
from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest
self._execute_sql_helper(
multi_use=False,
query_options=ExecuteSqlRequest.QueryOptions(optimizer_version="3"),
)
def _partition_read_helper(
self, multi_use, w_txn, size=None, max_partitions=None, index=None
):
from google.cloud.spanner_v1.keyset import KeySet
from google.cloud.spanner_v1.types import Partition
from google.cloud.spanner_v1.types import PartitionOptions
from google.cloud.spanner_v1.types import PartitionResponse
from google.cloud.spanner_v1.types import Transaction
from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector
keyset = KeySet(all_=True)
new_txn_id = b"ABECAB91"
token_1 = b"<PASSWORD>"
token_2 = b"<PASSWORD>"
response = PartitionResponse(
partitions=[
Partition(partition_token=token_1),
Partition(partition_token=token_2),
],
transaction=Transaction(id=new_txn_id),
)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.partition_read.return_value = response
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = multi_use
if w_txn:
derived._transaction_id = TXN_ID
tokens = list(
derived.partition_read(
TABLE_NAME,
COLUMNS,
keyset,
index=index,
partition_size_bytes=size,
max_partitions=max_partitions,
)
)
self.assertEqual(tokens, [token_1, token_2])
expected_txn_selector = TransactionSelector(id=TXN_ID)
expected_partition_options = PartitionOptions(
partition_size_bytes=size, max_partitions=max_partitions
)
api.partition_read.assert_called_once_with(
session=self.SESSION_NAME,
table=TABLE_NAME,
columns=COLUMNS,
key_set=keyset._to_pb(),
transaction=expected_txn_selector,
index=index,
partition_options=expected_partition_options,
metadata=[("google-cloud-resource-prefix", database.name)],
)
def test_partition_read_single_use_raises(self):
with self.assertRaises(ValueError):
self._partition_read_helper(multi_use=False, w_txn=True)
def test_partition_read_wo_existing_transaction_raises(self):
with self.assertRaises(ValueError):
self._partition_read_helper(multi_use=True, w_txn=False)
def test_partition_read_other_error(self):
from google.cloud.spanner_v1.keyset import KeySet
keyset = KeySet(all_=True)
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.partition_read.side_effect = RuntimeError()
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = True
derived._transaction_id = TXN_ID
with self.assertRaises(RuntimeError):
list(derived.partition_read(TABLE_NAME, COLUMNS, keyset))
def test_partition_read_ok_w_index_no_options(self):
self._partition_read_helper(multi_use=True, w_txn=True, index="index")
def test_partition_read_ok_w_size(self):
self._partition_read_helper(multi_use=True, w_txn=True, size=2000)
def test_partition_read_ok_w_max_partitions(self):
self._partition_read_helper(multi_use=True, w_txn=True, max_partitions=4)
def _partition_query_helper(self, multi_use, w_txn, size=None, max_partitions=None):
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner_v1.types import Partition
from google.cloud.spanner_v1.types import PartitionOptions
from google.cloud.spanner_v1.types import PartitionResponse
from google.cloud.spanner_v1.types import Transaction
from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector
from google.cloud.spanner_v1._helpers import _make_value_pb
new_txn_id = b"ABECAB91"
token_1 = b"FACE0FFF"
token_2 = b"BADE8CAF"
response = PartitionResponse(
partitions=[
Partition(partition_token=token_1),
Partition(partition_token=token_2),
],
transaction=Transaction(id=new_txn_id),
)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.partition_query.return_value = response
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = multi_use
if w_txn:
derived._transaction_id = TXN_ID
tokens = list(
derived.partition_query(
SQL_QUERY_WITH_PARAM,
PARAMS,
PARAM_TYPES,
partition_size_bytes=size,
max_partitions=max_partitions,
)
)
self.assertEqual(tokens, [token_1, token_2])
expected_params = Struct(
fields={key: _make_value_pb(value) for (key, value) in PARAMS.items()}
)
expected_txn_selector = TransactionSelector(id=TXN_ID)
expected_partition_options = PartitionOptions(
partition_size_bytes=size, max_partitions=max_partitions
)
api.partition_query.assert_called_once_with(
session=self.SESSION_NAME,
sql=SQL_QUERY_WITH_PARAM,
transaction=expected_txn_selector,
params=expected_params,
param_types=PARAM_TYPES,
partition_options=expected_partition_options,
metadata=[("google-cloud-resource-prefix", database.name)],
)
def test_partition_query_other_error(self):
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.partition_query.side_effect = RuntimeError()
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = True
derived._transaction_id = TXN_ID
with self.assertRaises(RuntimeError):
list(derived.partition_query(SQL_QUERY))
def test_partition_query_w_params_wo_param_types(self):
database = _Database()
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = True
derived._transaction_id = TXN_ID
with self.assertRaises(ValueError):
list(derived.partition_query(SQL_QUERY_WITH_PARAM, PARAMS))
def test_partition_query_single_use_raises(self):
with self.assertRaises(ValueError):
self._partition_query_helper(multi_use=False, w_txn=True)
def test_partition_query_wo_transaction_raises(self):
with self.assertRaises(ValueError):
self._partition_query_helper(multi_use=True, w_txn=False)
def test_partition_query_ok_w_index_no_options(self):
self._partition_query_helper(multi_use=True, w_txn=True)
def test_partition_query_ok_w_size(self):
self._partition_query_helper(multi_use=True, w_txn=True, size=2000)
def test_partition_query_ok_w_max_partitions(self):
self._partition_query_helper(multi_use=True, w_txn=True, max_partitions=4)
class TestSnapshot(unittest.TestCase):
PROJECT_ID = "project-id"
INSTANCE_ID = "instance-id"
INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
DATABASE_ID = "database-id"
DATABASE_NAME = INSTANCE_NAME + "/databases/" + DATABASE_ID
SESSION_ID = "session-id"
SESSION_NAME = DATABASE_NAME + "/sessions/" + SESSION_ID
def _getTargetClass(self):
from google.cloud.spanner_v1.snapshot import Snapshot
return Snapshot
def _make_one(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def _make_spanner_api(self):
import google.cloud.spanner_v1.gapic.spanner_client
return mock.create_autospec(
google.cloud.spanner_v1.gapic.spanner_client.SpannerClient, instance=True
)
def _makeTimestamp(self):
import datetime
from google.cloud._helpers import UTC
return datetime.datetime.utcnow().replace(tzinfo=UTC)
def _makeDuration(self, seconds=1, microseconds=0):
import datetime
return datetime.timedelta(seconds=seconds, microseconds=microseconds)
def test_ctor_defaults(self):
session = _Session()
snapshot = self._make_one(session)
self.assertIs(snapshot._session, session)
self.assertTrue(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_multiple_options(self):
timestamp = self._makeTimestamp()
duration = self._makeDuration()
session = _Session()
with self.assertRaises(ValueError):
self._make_one(session, read_timestamp=timestamp, max_staleness=duration)
def test_ctor_w_read_timestamp(self):
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, read_timestamp=timestamp)
self.assertIs(snapshot._session, session)
self.assertFalse(snapshot._strong)
self.assertEqual(snapshot._read_timestamp, timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_min_read_timestamp(self):
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, min_read_timestamp=timestamp)
self.assertIs(snapshot._session, session)
self.assertFalse(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertEqual(snapshot._min_read_timestamp, timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_max_staleness(self):
duration = self._makeDuration()
session = _Session()
snapshot = self._make_one(session, max_staleness=duration)
self.assertIs(snapshot._session, session)
self.assertFalse(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertEqual(snapshot._max_staleness, duration)
self.assertIsNone(snapshot._exact_staleness)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_exact_staleness(self):
duration = self._makeDuration()
session = _Session()
snapshot = self._make_one(session, exact_staleness=duration)
self.assertIs(snapshot._session, session)
self.assertFalse(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertEqual(snapshot._exact_staleness, duration)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_multi_use(self):
session = _Session()
snapshot = self._make_one(session, multi_use=True)
self.assertTrue(snapshot._session is session)
self.assertTrue(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertTrue(snapshot._multi_use)
def test_ctor_w_multi_use_and_read_timestamp(self):
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True)
self.assertTrue(snapshot._session is session)
self.assertFalse(snapshot._strong)
self.assertEqual(snapshot._read_timestamp, timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertTrue(snapshot._multi_use)
def test_ctor_w_multi_use_and_min_read_timestamp(self):
timestamp = self._makeTimestamp()
session = _Session()
with self.assertRaises(ValueError):
self._make_one(session, min_read_timestamp=timestamp, multi_use=True)
def test_ctor_w_multi_use_and_max_staleness(self):
duration = self._makeDuration()
session = _Session()
with self.assertRaises(ValueError):
self._make_one(session, max_staleness=duration, multi_use=True)
def test_ctor_w_multi_use_and_exact_staleness(self):
duration = self._makeDuration()
session = _Session()
snapshot = self._make_one(session, exact_staleness=duration, multi_use=True)
self.assertTrue(snapshot._session is session)
self.assertFalse(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertEqual(snapshot._exact_staleness, duration)
self.assertTrue(snapshot._multi_use)
def test__make_txn_selector_w_transaction_id(self):
session = _Session()
snapshot = self._make_one(session)
snapshot._transaction_id = TXN_ID
selector = snapshot._make_txn_selector()
self.assertEqual(selector.id, TXN_ID)
def test__make_txn_selector_strong(self):
session = _Session()
snapshot = self._make_one(session)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertTrue(options.read_only.strong)
def test__make_txn_selector_w_read_timestamp(self):
from google.cloud._helpers import _pb_timestamp_to_datetime
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, read_timestamp=timestamp)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertEqual(
_pb_timestamp_to_datetime(options.read_only.read_timestamp), timestamp
)
def test__make_txn_selector_w_min_read_timestamp(self):
from google.cloud._helpers import _pb_timestamp_to_datetime
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, min_read_timestamp=timestamp)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertEqual(
_pb_timestamp_to_datetime(options.read_only.min_read_timestamp), timestamp
)
def test__make_txn_selector_w_max_staleness(self):
duration = self._makeDuration(seconds=3, microseconds=123456)
session = _Session()
snapshot = self._make_one(session, max_staleness=duration)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertEqual(options.read_only.max_staleness.seconds, 3)
self.assertEqual(options.read_only.max_staleness.nanos, 123456000)
def test__make_txn_selector_w_exact_staleness(self):
duration = self._makeDuration(seconds=3, microseconds=123456)
session = _Session()
snapshot = self._make_one(session, exact_staleness=duration)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertEqual(options.read_only.exact_staleness.seconds, 3)
self.assertEqual(options.read_only.exact_staleness.nanos, 123456000)
def test__make_txn_selector_strong_w_multi_use(self):
session = _Session()
snapshot = self._make_one(session, multi_use=True)
selector = snapshot._make_txn_selector()
options = selector.begin
self.assertTrue(options.read_only.strong)
def test__make_txn_selector_w_read_timestamp_w_multi_use(self):
from google.cloud._helpers import _pb_timestamp_to_datetime
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True)
selector = snapshot._make_txn_selector()
options = selector.begin
self.assertEqual(
_pb_timestamp_to_datetime(options.read_only.read_timestamp), timestamp
)
def test__make_txn_selector_w_exact_staleness_w_multi_use(self):
duration = self._makeDuration(seconds=3, microseconds=123456)
session = _Session()
snapshot = self._make_one(session, exact_staleness=duration, multi_use=True)
selector = snapshot._make_txn_selector()
options = selector.begin
self.assertEqual(options.read_only.exact_staleness.seconds, 3)
self.assertEqual(options.read_only.exact_staleness.nanos, 123456000)
def test_begin_wo_multi_use(self):
session = _Session()
snapshot = self._make_one(session)
with self.assertRaises(ValueError):
snapshot.begin()
def test_begin_w_read_request_count_gt_0(self):
session = _Session()
snapshot = self._make_one(session, multi_use=True)
snapshot._read_request_count = 1
with self.assertRaises(ValueError):
snapshot.begin()
def test_begin_w_existing_txn_id(self):
session = _Session()
snapshot = self._make_one(session, multi_use=True)
snapshot._transaction_id = TXN_ID
with self.assertRaises(ValueError):
snapshot.begin()
def test_begin_w_other_error(self):
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.begin_transaction.side_effect = RuntimeError()
timestamp = self._makeTimestamp()
session = _Session(database)
snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True)
with self.assertRaises(RuntimeError):
snapshot.begin()
def test_begin_ok_exact_staleness(self):
from google.protobuf.duration_pb2 import Duration
from google.cloud.spanner_v1.proto.transaction_pb2 import (
Transaction as TransactionPB,
TransactionOptions,
)
transaction_pb = TransactionPB(id=TXN_ID)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.begin_transaction.return_value = transaction_pb
duration = self._makeDuration(seconds=SECONDS, microseconds=MICROS)
session = _Session(database)
snapshot = self._make_one(session, exact_staleness=duration, multi_use=True)
txn_id = snapshot.begin()
self.assertEqual(txn_id, TXN_ID)
self.assertEqual(snapshot._transaction_id, TXN_ID)
expected_duration = Duration(seconds=SECONDS, nanos=MICROS * 1000)
expected_txn_options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(exact_staleness=expected_duration)
)
api.begin_transaction.assert_called_once_with(
session.name,
expected_txn_options,
metadata=[("google-cloud-resource-prefix", database.name)],
)
def test_begin_ok_exact_strong(self):
from google.cloud.spanner_v1.proto.transaction_pb2 import (
Transaction as TransactionPB,
TransactionOptions,
)
transaction_pb = TransactionPB(id=TXN_ID)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.begin_transaction.return_value = transaction_pb
session = _Session(database)
snapshot = self._make_one(session, multi_use=True)
txn_id = snapshot.begin()
self.assertEqual(txn_id, TXN_ID)
self.assertEqual(snapshot._transaction_id, TXN_ID)
expected_txn_options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(strong=True)
)
api.begin_transaction.assert_called_once_with(
session.name,
expected_txn_options,
metadata=[("google-cloud-resource-prefix", database.name)],
)
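# Lightweight stand-ins for client/instance/database/session objects; they expose
# only the attributes the snapshot code paths touch.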
class _Client(object):
def __init__(self):
from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest
self._query_options = ExecuteSqlRequest.QueryOptions(optimizer_version="1")
class _Instance(object):
def __init__(self):
self._client = _Client()
class _Database(object):
def __init__(self):
self.name = "testing"
self._instance = _Instance()
class _Session(object):
def __init__(self, database=None, name=TestSnapshot.SESSION_NAME):
self._database = database
self.name = name
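# Fake gRPC streaming iterator: yields the canned result sets and, when
# fail_after=True, raises ServiceUnavailable once exhausted so the
# restart-on-UNAVAILABLE logic can be exercised.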
class _MockIterator(object):
def __init__(self, *values, **kw):
self._iter_values = iter(values)
self._fail_after = kw.pop("fail_after", False)
def __iter__(self):
return self
def __next__(self):
from google.api_core.exceptions import ServiceUnavailable
try:
return next(self._iter_values)
except StopIteration:
if self._fail_after:
raise ServiceUnavailable("testing")
raise
    next = __next__
def test_execute_sql_w_multi_use_wo_first_w_count_gt_0(self):
self._execute_sql_helper(multi_use=True, first=False, count=1)
def test_execute_sql_w_multi_use_w_first(self):
self._execute_sql_helper(multi_use=True, first=True)
def test_execute_sql_w_multi_use_w_first_w_count_gt_0(self):
with self.assertRaises(ValueError):
self._execute_sql_helper(multi_use=True, first=True, count=1)
def test_execute_sql_w_retry(self):
self._execute_sql_helper(multi_use=False, retry=None)
def test_execute_sql_w_timeout(self):
self._execute_sql_helper(multi_use=False, timeout=None)
def test_execute_sql_w_query_options(self):
from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest
self._execute_sql_helper(
multi_use=False,
query_options=ExecuteSqlRequest.QueryOptions(optimizer_version="3"),
)
def _partition_read_helper(
self, multi_use, w_txn, size=None, max_partitions=None, index=None
):
from google.cloud.spanner_v1.keyset import KeySet
from google.cloud.spanner_v1.types import Partition
from google.cloud.spanner_v1.types import PartitionOptions
from google.cloud.spanner_v1.types import PartitionResponse
from google.cloud.spanner_v1.types import Transaction
from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector
keyset = KeySet(all_=True)
new_txn_id = b"ABECAB91"
token_1 = b"<PASSWORD>"
token_2 = b"<PASSWORD>"
response = PartitionResponse(
partitions=[
Partition(partition_token=token_1),
Partition(partition_token=token_2),
],
transaction=Transaction(id=new_txn_id),
)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.partition_read.return_value = response
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = multi_use
if w_txn:
derived._transaction_id = TXN_ID
tokens = list(
derived.partition_read(
TABLE_NAME,
COLUMNS,
keyset,
index=index,
partition_size_bytes=size,
max_partitions=max_partitions,
)
)
self.assertEqual(tokens, [token_1, token_2])
expected_txn_selector = TransactionSelector(id=TXN_ID)
expected_partition_options = PartitionOptions(
partition_size_bytes=size, max_partitions=max_partitions
)
api.partition_read.assert_called_once_with(
session=self.SESSION_NAME,
table=TABLE_NAME,
columns=COLUMNS,
key_set=keyset._to_pb(),
transaction=expected_txn_selector,
index=index,
partition_options=expected_partition_options,
metadata=[("google-cloud-resource-prefix", database.name)],
)
def test_partition_read_single_use_raises(self):
with self.assertRaises(ValueError):
self._partition_read_helper(multi_use=False, w_txn=True)
def test_partition_read_wo_existing_transaction_raises(self):
with self.assertRaises(ValueError):
self._partition_read_helper(multi_use=True, w_txn=False)
def test_partition_read_other_error(self):
from google.cloud.spanner_v1.keyset import KeySet
keyset = KeySet(all_=True)
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.partition_read.side_effect = RuntimeError()
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = True
derived._transaction_id = TXN_ID
with self.assertRaises(RuntimeError):
list(derived.partition_read(TABLE_NAME, COLUMNS, keyset))
def test_partition_read_ok_w_index_no_options(self):
self._partition_read_helper(multi_use=True, w_txn=True, index="index")
def test_partition_read_ok_w_size(self):
self._partition_read_helper(multi_use=True, w_txn=True, size=2000)
def test_partition_read_ok_w_max_partitions(self):
self._partition_read_helper(multi_use=True, w_txn=True, max_partitions=4)
def _partition_query_helper(self, multi_use, w_txn, size=None, max_partitions=None):
from google.protobuf.struct_pb2 import Struct
from google.cloud.spanner_v1.types import Partition
from google.cloud.spanner_v1.types import PartitionOptions
from google.cloud.spanner_v1.types import PartitionResponse
from google.cloud.spanner_v1.types import Transaction
from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector
from google.cloud.spanner_v1._helpers import _make_value_pb
new_txn_id = b"ABECAB91"
token_1 = b"FACE0FFF"
token_2 = b"BADE8CAF"
response = PartitionResponse(
partitions=[
Partition(partition_token=token_1),
Partition(partition_token=token_2),
],
transaction=Transaction(id=new_txn_id),
)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.partition_query.return_value = response
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = multi_use
if w_txn:
derived._transaction_id = TXN_ID
tokens = list(
derived.partition_query(
SQL_QUERY_WITH_PARAM,
PARAMS,
PARAM_TYPES,
partition_size_bytes=size,
max_partitions=max_partitions,
)
)
self.assertEqual(tokens, [token_1, token_2])
expected_params = Struct(
fields={key: _make_value_pb(value) for (key, value) in PARAMS.items()}
)
expected_txn_selector = TransactionSelector(id=TXN_ID)
expected_partition_options = PartitionOptions(
partition_size_bytes=size, max_partitions=max_partitions
)
api.partition_query.assert_called_once_with(
session=self.SESSION_NAME,
sql=SQL_QUERY_WITH_PARAM,
transaction=expected_txn_selector,
params=expected_params,
param_types=PARAM_TYPES,
partition_options=expected_partition_options,
metadata=[("google-cloud-resource-prefix", database.name)],
)
def test_partition_query_other_error(self):
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.partition_query.side_effect = RuntimeError()
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = True
derived._transaction_id = TXN_ID
with self.assertRaises(RuntimeError):
list(derived.partition_query(SQL_QUERY))
def test_partition_query_w_params_wo_param_types(self):
database = _Database()
session = _Session(database)
derived = self._makeDerived(session)
derived._multi_use = True
derived._transaction_id = TXN_ID
with self.assertRaises(ValueError):
list(derived.partition_query(SQL_QUERY_WITH_PARAM, PARAMS))
def test_partition_query_single_use_raises(self):
with self.assertRaises(ValueError):
self._partition_query_helper(multi_use=False, w_txn=True)
def test_partition_query_wo_transaction_raises(self):
with self.assertRaises(ValueError):
self._partition_query_helper(multi_use=True, w_txn=False)
def test_partition_query_ok_w_index_no_options(self):
self._partition_query_helper(multi_use=True, w_txn=True)
def test_partition_query_ok_w_size(self):
self._partition_query_helper(multi_use=True, w_txn=True, size=2000)
def test_partition_query_ok_w_max_partitions(self):
self._partition_query_helper(multi_use=True, w_txn=True, max_partitions=4)
class TestSnapshot(unittest.TestCase):
PROJECT_ID = "project-id"
INSTANCE_ID = "instance-id"
INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
DATABASE_ID = "database-id"
DATABASE_NAME = INSTANCE_NAME + "/databases/" + DATABASE_ID
SESSION_ID = "session-id"
SESSION_NAME = DATABASE_NAME + "/sessions/" + SESSION_ID
def _getTargetClass(self):
from google.cloud.spanner_v1.snapshot import Snapshot
return Snapshot
def _make_one(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def _make_spanner_api(self):
import google.cloud.spanner_v1.gapic.spanner_client
return mock.create_autospec(
google.cloud.spanner_v1.gapic.spanner_client.SpannerClient, instance=True
)
def _makeTimestamp(self):
import datetime
from google.cloud._helpers import UTC
return datetime.datetime.utcnow().replace(tzinfo=UTC)
def _makeDuration(self, seconds=1, microseconds=0):
import datetime
return datetime.timedelta(seconds=seconds, microseconds=microseconds)
def test_ctor_defaults(self):
session = _Session()
snapshot = self._make_one(session)
self.assertIs(snapshot._session, session)
self.assertTrue(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_multiple_options(self):
timestamp = self._makeTimestamp()
duration = self._makeDuration()
session = _Session()
with self.assertRaises(ValueError):
self._make_one(session, read_timestamp=timestamp, max_staleness=duration)
def test_ctor_w_read_timestamp(self):
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, read_timestamp=timestamp)
self.assertIs(snapshot._session, session)
self.assertFalse(snapshot._strong)
self.assertEqual(snapshot._read_timestamp, timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_min_read_timestamp(self):
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, min_read_timestamp=timestamp)
self.assertIs(snapshot._session, session)
self.assertFalse(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertEqual(snapshot._min_read_timestamp, timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_max_staleness(self):
duration = self._makeDuration()
session = _Session()
snapshot = self._make_one(session, max_staleness=duration)
self.assertIs(snapshot._session, session)
self.assertFalse(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertEqual(snapshot._max_staleness, duration)
self.assertIsNone(snapshot._exact_staleness)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_exact_staleness(self):
duration = self._makeDuration()
session = _Session()
snapshot = self._make_one(session, exact_staleness=duration)
self.assertIs(snapshot._session, session)
self.assertFalse(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertEqual(snapshot._exact_staleness, duration)
self.assertFalse(snapshot._multi_use)
def test_ctor_w_multi_use(self):
session = _Session()
snapshot = self._make_one(session, multi_use=True)
self.assertTrue(snapshot._session is session)
self.assertTrue(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertTrue(snapshot._multi_use)
def test_ctor_w_multi_use_and_read_timestamp(self):
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True)
self.assertTrue(snapshot._session is session)
self.assertFalse(snapshot._strong)
self.assertEqual(snapshot._read_timestamp, timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertIsNone(snapshot._exact_staleness)
self.assertTrue(snapshot._multi_use)
def test_ctor_w_multi_use_and_min_read_timestamp(self):
timestamp = self._makeTimestamp()
session = _Session()
with self.assertRaises(ValueError):
self._make_one(session, min_read_timestamp=timestamp, multi_use=True)
def test_ctor_w_multi_use_and_max_staleness(self):
duration = self._makeDuration()
session = _Session()
with self.assertRaises(ValueError):
self._make_one(session, max_staleness=duration, multi_use=True)
def test_ctor_w_multi_use_and_exact_staleness(self):
duration = self._makeDuration()
session = _Session()
snapshot = self._make_one(session, exact_staleness=duration, multi_use=True)
self.assertTrue(snapshot._session is session)
self.assertFalse(snapshot._strong)
self.assertIsNone(snapshot._read_timestamp)
self.assertIsNone(snapshot._min_read_timestamp)
self.assertIsNone(snapshot._max_staleness)
self.assertEqual(snapshot._exact_staleness, duration)
self.assertTrue(snapshot._multi_use)
def test__make_txn_selector_w_transaction_id(self):
session = _Session()
snapshot = self._make_one(session)
snapshot._transaction_id = TXN_ID
selector = snapshot._make_txn_selector()
self.assertEqual(selector.id, TXN_ID)
def test__make_txn_selector_strong(self):
session = _Session()
snapshot = self._make_one(session)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertTrue(options.read_only.strong)
def test__make_txn_selector_w_read_timestamp(self):
from google.cloud._helpers import _pb_timestamp_to_datetime
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, read_timestamp=timestamp)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertEqual(
_pb_timestamp_to_datetime(options.read_only.read_timestamp), timestamp
)
def test__make_txn_selector_w_min_read_timestamp(self):
from google.cloud._helpers import _pb_timestamp_to_datetime
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, min_read_timestamp=timestamp)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertEqual(
_pb_timestamp_to_datetime(options.read_only.min_read_timestamp), timestamp
)
def test__make_txn_selector_w_max_staleness(self):
duration = self._makeDuration(seconds=3, microseconds=123456)
session = _Session()
snapshot = self._make_one(session, max_staleness=duration)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertEqual(options.read_only.max_staleness.seconds, 3)
self.assertEqual(options.read_only.max_staleness.nanos, 123456000)
def test__make_txn_selector_w_exact_staleness(self):
duration = self._makeDuration(seconds=3, microseconds=123456)
session = _Session()
snapshot = self._make_one(session, exact_staleness=duration)
selector = snapshot._make_txn_selector()
options = selector.single_use
self.assertEqual(options.read_only.exact_staleness.seconds, 3)
self.assertEqual(options.read_only.exact_staleness.nanos, 123456000)
def test__make_txn_selector_strong_w_multi_use(self):
session = _Session()
snapshot = self._make_one(session, multi_use=True)
selector = snapshot._make_txn_selector()
options = selector.begin
self.assertTrue(options.read_only.strong)
def test__make_txn_selector_w_read_timestamp_w_multi_use(self):
from google.cloud._helpers import _pb_timestamp_to_datetime
timestamp = self._makeTimestamp()
session = _Session()
snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True)
selector = snapshot._make_txn_selector()
options = selector.begin
self.assertEqual(
_pb_timestamp_to_datetime(options.read_only.read_timestamp), timestamp
)
def test__make_txn_selector_w_exact_staleness_w_multi_use(self):
duration = self._makeDuration(seconds=3, microseconds=123456)
session = _Session()
snapshot = self._make_one(session, exact_staleness=duration, multi_use=True)
selector = snapshot._make_txn_selector()
options = selector.begin
self.assertEqual(options.read_only.exact_staleness.seconds, 3)
self.assertEqual(options.read_only.exact_staleness.nanos, 123456000)
def test_begin_wo_multi_use(self):
session = _Session()
snapshot = self._make_one(session)
with self.assertRaises(ValueError):
snapshot.begin()
def test_begin_w_read_request_count_gt_0(self):
session = _Session()
snapshot = self._make_one(session, multi_use=True)
snapshot._read_request_count = 1
with self.assertRaises(ValueError):
snapshot.begin()
def test_begin_w_existing_txn_id(self):
session = _Session()
snapshot = self._make_one(session, multi_use=True)
snapshot._transaction_id = TXN_ID
with self.assertRaises(ValueError):
snapshot.begin()
def test_begin_w_other_error(self):
database = _Database()
database.spanner_api = self._make_spanner_api()
database.spanner_api.begin_transaction.side_effect = RuntimeError()
timestamp = self._makeTimestamp()
session = _Session(database)
snapshot = self._make_one(session, read_timestamp=timestamp, multi_use=True)
with self.assertRaises(RuntimeError):
snapshot.begin()
def test_begin_ok_exact_staleness(self):
from google.protobuf.duration_pb2 import Duration
from google.cloud.spanner_v1.proto.transaction_pb2 import (
Transaction as TransactionPB,
TransactionOptions,
)
transaction_pb = TransactionPB(id=TXN_ID)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.begin_transaction.return_value = transaction_pb
duration = self._makeDuration(seconds=SECONDS, microseconds=MICROS)
session = _Session(database)
snapshot = self._make_one(session, exact_staleness=duration, multi_use=True)
txn_id = snapshot.begin()
self.assertEqual(txn_id, TXN_ID)
self.assertEqual(snapshot._transaction_id, TXN_ID)
expected_duration = Duration(seconds=SECONDS, nanos=MICROS * 1000)
expected_txn_options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(exact_staleness=expected_duration)
)
api.begin_transaction.assert_called_once_with(
session.name,
expected_txn_options,
metadata=[("google-cloud-resource-prefix", database.name)],
)
def test_begin_ok_exact_strong(self):
from google.cloud.spanner_v1.proto.transaction_pb2 import (
Transaction as TransactionPB,
TransactionOptions,
)
transaction_pb = TransactionPB(id=TXN_ID)
database = _Database()
api = database.spanner_api = self._make_spanner_api()
api.begin_transaction.return_value = transaction_pb
session = _Session(database)
snapshot = self._make_one(session, multi_use=True)
txn_id = snapshot.begin()
self.assertEqual(txn_id, TXN_ID)
self.assertEqual(snapshot._transaction_id, TXN_ID)
expected_txn_options = TransactionOptions(
read_only=TransactionOptions.ReadOnly(strong=True)
)
api.begin_transaction.assert_called_once_with(
session.name,
expected_txn_options,
metadata=[("google-cloud-resource-prefix", database.name)],
)
class _Client(object):
def __init__(self):
from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest
self._query_options = ExecuteSqlRequest.QueryOptions(optimizer_version="1")
class _Instance(object):
def __init__(self):
self._client = _Client()
class _Database(object):
def __init__(self):
self.name = "testing"
self._instance = _Instance()
class _Session(object):
def __init__(self, database=None, name=TestSnapshot.SESSION_NAME):
self._database = database
self.name = name
class _MockIterator(object):
def __init__(self, *values, **kw):
self._iter_values = iter(values)
self._fail_after = kw.pop("fail_after", False)
def __iter__(self):
return self
def __next__(self):
from google.api_core.exceptions import ServiceUnavailable
try:
return next(self._iter_values)
except StopIteration:
if self._fail_after:
raise ServiceUnavailable("testing")
raise
next = __next__
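# A hedged usage sketch (not part of the upstream test suite): with fail_after=True the
# iterator yields its values and then raises ServiceUnavailable instead of StopIteration,
# which is how a dropped stream that must be resumed is simulated in these tests.
#
#     chunks = _MockIterator("a", "b", fail_after=True)
#     next(chunks)  # returns "a"
#     next(chunks)  # returns "b"
#     next(chunks)  # raises google.api_core.exceptions.ServiceUnavailable("testing")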
| en | 0.77779 | # Copyright 2016 Google LLC All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. \ SELECT first_name, last_name, age FROM citizens ORDER BY age SELECT first_name, last_name, email FROM citizens WHERE age <= @max_age \ SELECT image_name FROM images WHERE @bytes IN image_data # discarded after 503 # 'limit' and 'partition' incompatible # PROFILE | 1.983594 | 2 |
hashtable.py | quake0day/oj | 0 | 10733 | A = ['a','b']
B = ['c','b','a']
def generatehash(A):
hashA = {}
for item in A:
if item not in hashA:
hashA[item] = 1
else:
hashA[item] += 1
return hashA
def compareHash(A, B):
lenA = len(A)
lenB = len(B)
hashA = generatehash(A)
if lenB < lenA:
return False
elif lenB == lenA:
return hashA == generatehash(B)
else:
        for i in range(lenB-lenA+1):
newB = B[i:i+lenA]
if hashA == generatehash(newB):
return True
return False
print(compareHash(A, B))
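# What the check above does: it slides a window of len(A) across B and compares letter
# counts, i.e. it asks whether B contains an anagram of A as a contiguous block.
# For A = ['a','b'] and B = ['c','b','a'] the window ['b','a'] matches, so True is printed.
# A hedged extra example (inputs invented for illustration only):
print(compareHash(['a', 'b'], ['a', 'c', 'b', 'd']))  # False: no window is an anagram of ['a','b']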
| none | 1 | 3.627682 | 4 |
|
Manipulation of PDF Files/pandf_gui.py | clair513/DIY | 1 | 10734 | <filename>Manipulation of PDF Files/pandf_gui.py
# Importing required packages:
import pandas as pd
from tkinter import *
from tkinter.ttk import *
root = Tk()
# To visualize input DataFrame:
def generate_plot(gui_root, df, x_axis, y_axis=None,
plot={'type':None, 'hue':None},
aesthetics={'style':'whitegrid', 'palette':'hsv',
'size':(10,7), 'dpi':100}):
"""
DESCRIPTION: Reads input Pandas DataFrame and returns a plot based on selected parameters.
PARAMETERS:
    > gui_root : [Required] Accepts Tkinter application base class (Tk) initialized variable/instance.
    > df : [Required] Accepts Pandas DataFrame.
    > x_axis : [Required] Column of `df` to plot along the x-axis.
    > y_axis : [Optional] Column of `df` to plot along the y-axis (ignored by 'distplot').
    > plot : [Optional] Dict with 'type' in {'lineplot', 'regplot', 'distplot', 'barplot'} and 'hue' (grouping column used by 'barplot').
    > aesthetics : [Optional] Dict with Seaborn 'style'/'palette' plus figure 'size' and 'dpi'.
"""
# Importing external dependencies:
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import seaborn as sns
sns.set(style=aesthetics['style'], palette=aesthetics['palette'])
import warnings
warnings.filterwarnings('ignore')
# Defining Tableau colors:
tableau_20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199,
199),(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scaling over RGB values to [0,1] range (Matplotlib acceptable format):
for i in range(len(tableau_20)):
r,g,b = tableau_20[i]
tableau_20[i] = (r/255., g/255., b/255.)
# Setting up Tkinter Frame:
lf = Labelframe(gui_root)
lf.grid(row=0, column=0, sticky='nwes', padx=3, pady=3)
# Setting up Canvas backed by Matplotlib:
fig = Figure(figsize=aesthetics['size'], dpi=aesthetics['dpi'])
ax = fig.add_subplot(111)
# Drawing various plots with Seaborn:
if plot['type']=='lineplot': # Lineplot
g = sns.lineplot(x=x_axis, y=y_axis, data=df, ax=ax)
elif plot['type']=='regplot': # Regplot
g = sns.regplot(x=x_axis, y=y_axis, data=df, color=tableau_20[16], ax=ax)
elif plot['type']=='distplot': # Distplot
g = sns.distplot(a=df[x_axis].dropna(), color=tableau_20[7],
hist_kws=dict(edgecolor='k', linewidth=0.5), ax=ax)
    elif plot['type']=='barplot': # Grouped Barplot
        # sns.catplot is figure-level and does not draw on an existing Axes, so the
        # axes-level barplot is used here to keep the plot on the embedded Figure.
        g = sns.barplot(x=x_axis, y=y_axis, hue=plot['hue'], data=df,
                        palette='rocket', ax=ax)
        sns.despine(ax=ax, left=True)
else:
# More to be added later
pass
# Displaying plot on Canvas:
canvas = FigureCanvasTkAgg(fig, master=lf)
canvas.draw()
canvas.get_tk_widget().grid(row=0, column=0)
# NOTE: the original bare call `generate_plot()` would raise a TypeError because the
# function requires at least gui_root, df and x_axis; a hedged demo call follows below.
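# Hedged demo (not part of the original script): build a tiny DataFrame and draw a
# lineplot into the Tk window created above. The column names 'day' and 'sales' are
# invented for illustration only.
demo_df = pd.DataFrame({'day': [1, 2, 3, 4], 'sales': [10, 12, 9, 15]})
generate_plot(root, demo_df, x_axis='day', y_axis='sales',
              plot={'type': 'lineplot', 'hue': None})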
root.mainloop()
| en | 0.649799 | # Importing required packages: # To visualize input DataFrame: DESCRIPTION: Reads input Pandas DataFrame and returns a plot based on selected parameters. PARAMETERS: > gui_root : [Required] Accepts Tkinter application base class (Tk) initialized variable/instance. > df : [Required] Accepts Pandas DataFrame. # Importing external dependencies: # Defining Tableau colors: # Scaling over RGB values to [0,1] range (Matplotlib acceptable format): # Setting up Tkinter Frame: # Setting up Canvas backed by Matplotlib: # Drawing various plots with Seaborn: # Lineplot # Regplot # Distplot # Grouped Barplot # More to be added later # Displaying plot on Canvas: | 3.394244 | 3 |
utils/get_dataset.py | gautierdag/pytorch-attentive-lm | 16 | 10735 | <gh_stars>10-100
import os
import torch
from torch.utils.data import DataLoader, TensorDataset
import requests
import io
import zipfile
from .data_reader import read_vocabulary, read_lm_data, lm_data_producer
from .pre_process_wikitext import pre_process
def get_dataset(dataset, batch_size, device):
"""
Returns data iterator for each set and vocabulary
"""
download_dataset(dataset) # downloads and preprocess dataset if needed
if dataset == "wiki-02":
data_files = [".data/wikitext-2/wikitext-2/wiki.train.tokens.sents",
".data/wikitext-2/wikitext-2/wiki.valid.tokens.sents",
".data/wikitext-2/wikitext-2/wiki.test.tokens.sents"]
vocab_size = 33278 + 1 # add 1 to account for PAD
if dataset == 'ptb':
data_files = [".data/penn-treebank/ptb.train.txt",
".data/penn-treebank/ptb.valid.txt",
".data/penn-treebank/ptb.test.txt"]
vocab_size = 10000 + 1 # add 1 to account for PAD
vocabulary = read_vocabulary(data_files, vocab_size)
train_data, valid_data, test_data = read_lm_data(data_files,
vocabulary)
# Convert numpy to datasets and obtain iterators for each
train_data = lm_data_producer(train_data)
train_x = torch.tensor(train_data[0], dtype=torch.long, device=device)
train_y = torch.tensor(train_data[1], dtype=torch.long, device=device)
train_lengths = torch.tensor(
train_data[2], dtype=torch.float, device=device)
train_dataset = TensorDataset(train_x, train_y, train_lengths)
valid_data = lm_data_producer(valid_data)
valid_x = torch.tensor(valid_data[0], dtype=torch.long, device=device)
valid_y = torch.tensor(valid_data[1], dtype=torch.long, device=device)
valid_lengths = torch.tensor(
valid_data[2], dtype=torch.float, device=device)
valid_dataset = TensorDataset(valid_x, valid_y, valid_lengths)
test_data = lm_data_producer(test_data)
test_x = torch.tensor(test_data[0], dtype=torch.long, device=device)
test_y = torch.tensor(test_data[1], dtype=torch.long, device=device)
test_lengths = torch.tensor(test_data[2], dtype=torch.float, device=device)
test_dataset = TensorDataset(test_x, test_y, test_lengths)
train_iter = DataLoader(train_dataset, batch_size=batch_size)
valid_iter = DataLoader(valid_dataset, batch_size=batch_size)
test_iter = DataLoader(test_dataset, batch_size=batch_size)
return train_iter, valid_iter, test_iter, vocabulary
# downloading/preprocessing functions
def download_dataset(dataset):
if not os.path.exists('.data'):
os.makedirs('.data')
if dataset == 'ptb':
folder_name = 'penn-treebank'
filename = 'ptb.test.txt'
if dataset == 'wiki-02':
folder_name = 'wikitext-2'
filename = 'wiki.test.tokens'
dataset_path = '.data/' + folder_name
if not os.path.exists(dataset_path):
os.makedirs(dataset_path)
if dataset == 'ptb':
filepath = dataset_path + '/' + filename
if not os.path.exists(filepath):
download_ptb(dataset_path)
if dataset == 'wiki-02':
filepath = dataset_path + '/'+folder_name + '/'+filename
if not os.path.exists(filepath):
download_and_preproc_wiki(dataset_path)
return
def download_ptb(dataset_path):
urls = ['https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.train.txt',
'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.valid.txt',
'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.test.txt']
# To save to a relative path.
r = requests.get(urls[0])
with open(dataset_path+'/ptb.train.txt', 'wb') as f:
f.write(r.content)
r = requests.get(urls[1])
with open(dataset_path+'/ptb.valid.txt', 'wb') as f:
f.write(r.content)
r = requests.get(urls[2])
with open(dataset_path+'/ptb.test.txt', 'wb') as f:
f.write(r.content)
def download_and_preproc_wiki(dataset_path):
print("Downloading wikitext")
url = 'https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip'
r = requests.get(url)
z = zipfile.ZipFile(io.BytesIO(r.content))
z.extractall(dataset_path)
train = ".data/wikitext-2/wikitext-2/wiki.train.tokens"
valid = ".data/wikitext-2/wikitext-2/wiki.valid.tokens"
test = ".data/wikitext-2/wikitext-2/wiki.test.tokens"
print("Pre-processing wikitext-02 training set...")
pre_process(train)
print("Pre-processing wikitext-02 validation set...")
pre_process(valid)
print("Pre-processing wikitext-02 test set...")
pre_process(test)
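# Hedged usage sketch (kept as comments so nothing runs on import; the batch size and
# device choice are illustrative). Running it would download PTB on first use:
#
#     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#     train_iter, valid_iter, test_iter, vocab = get_dataset("ptb", batch_size=32, device=device)
#     x, y, lengths = next(iter(train_iter))  # padded input/target tensors plus sequence lengths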
| en | 0.713527 | Returns data iterator for each set and vocabulary # downloads and preprocess dataset if needed # add 1 to account for PAD # add 1 to account for PAD # Convert numpy to datasets and obtain iterators for each # downloading/preprocessing functions # To save to a relative path. | 2.584593 | 3
Conteudo das Aulas/087/calc_est.py | cerberus707/lab-python | 0 | 10736 | from tkinter import *
#Create our window
instancia = Tk()
#Give the window a title
instancia.title('Calculadora para Estatística')
#Set the window size
instancia.geometry("800x600")
#Give the application an icon
#instancia.wm_iconbitmap('icone.ico')
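#Hedged example (not in the original file): widgets must be created before mainloop(),
#so a placeholder label is attached to the window here purely as an illustration.
title_label = Label(instancia, text='Calculadora para Estatística')
title_label.pack()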
#Start the program
instancia.mainloop()
| pt | 0.895104 | #Cria a nossa tela #Dá um título a tela #Dá um tamanho a tela #Dá um ícone ao aplicativo #instancia.wm_iconbitmap('icone.ico') #Inicia o programa | 3.448497 | 3 |
property_scraper.py | iplaughlin/property_scraping | 0 | 10737 | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 19 09:42:09 2022
@author: iaala
"""
import requests
import sql_configs
import datetime
import os
from bs4 import BeautifulSoup
import time
from find_tables import (
table_information_one,
table_information_two,
table_information_three,
table_information_four,
)
from create_connection import create_sql_connection
import columns
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
def main():
for ppin in range(200001, 18600, -1):
try:
conn = create_sql_connection(
user=sql_configs.USER,
                password=sql_configs.PASSWORD,  # credential was redacted in this snippet; assumed to live in sql_configs
host=sql_configs.HOST,
database=sql_configs.DATABASE,
)
temp_dict = dict()
print(ppin)
with open(os.path.join(__location__, 'status.txt'), 'w') as f:
f.write(f"currently starting {ppin}")
c = conn.cursor()
c.execute('select pin from parcel;')
items_collected = [int(''.join(map(str, item))) for item in c.fetchall()]
if ppin not in items_collected:
url = f"https://madisonproperty.countygovservices.com/Property/Property/Summary?taxyear=2022&ppin={ppin}"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:98.0) Gecko/20100101 Firefox/98.0"
}
resp = requests.get(url, headers=headers)
time.sleep(0.25)
soup = BeautifulSoup(resp.text, "html.parser")
parcel_info = table_information_one(soup, "collapseParcelInfo")
if parcel_info == {}:
parcel_info = {column: "" for column in columns.PARCEL_INFO_COLUMNS}
parcel_info['PIN']= ppin
property_values = table_information_one(soup, "collapseSummaryPropertyValues")
if property_values == {}:
property_values = {column: "" for column in columns.PROPERTY_COLUMNS}
subdivision = table_information_one(soup, "collapseSummarySubdivision")
if subdivision == {}:
subdivision = {column: "" for column in columns.SUBDIVISION_COLUMNS}
tax = table_information_two(soup, "collapseTaxInfo")
if tax == {}:
tax = {column: "" for column in columns.TAX_COLUMNS}
tax_history = table_information_three(soup, "collapseTaxHistory")
details = table_information_three(soup, "collapseSummaryDetailInfo")
building_components = table_information_four(
soup, "collapseSummaryBuildingComponents"
)
improvement = building_components.get("improvement")
computations = building_components.get("computations")
materials = building_components.get("materials")
gis_url = f"https://isv.kcsgis.com/al.Madison_revenue/?fips={ppin}"
temp_dict[ppin] = {
"ppin": ppin,
"date": str(datetime.datetime.now()),
"parcel": parcel_info,
"property_values": property_values,
"subdivision": subdivision,
"tax": tax,
"tax_history": tax_history,
"details": details,
"improvement": improvement,
"computations": computations,
"materials": materials,
"gis_url": f"https://isv.kcsgis.com/al.Madison_revenue/?fips={ppin}",
}
ppin = [ppin]
conn = create_sql_connection(
user=configs.USER,
password=configs.PASSWORD,
host=configs.HOST,
database=configs.DATABASE,
)
c = conn.cursor()
date = [str(datetime.datetime.now())]
parcel_values = list(parcel_info.values()) + date
c.execute(configs.PARCEL_STATEMENT, parcel_values)
property_values = list(property_values.values()) + date + ppin
c.execute(configs.PROPERTY_VALUES_STATEMENT, property_values)
subdivision_values = list(subdivision.values()) + date + ppin
c.execute(configs.SUBDIVISION_STATEMENT, subdivision_values)
tax_values = [str(item) for item in tax.values()] + date + ppin
tax_values = tuple(tax_values)
c.execute(configs.TAX_STATEMENT, tax_values)
for row in zip(*list(tax_history.values())):
c.execute(configs.TAX_HISTORY_STATEMENT, row + tuple(date) + tuple(ppin))
for row in zip(*list(details.values())):
c.execute(configs.DETAILS_STATEMENT, row + tuple(date)+ tuple(ppin))
improvement_values = list(improvement.values()) + date + ppin
improvement_values = tuple(improvement_values)
c.execute(configs.IMPROVEMENTS_STATEMENT, improvement_values)
computations_values = list(computations.values()) + date + ppin
computations_values = tuple(computations_values)
c.execute(configs.COMPUTATION_STATEMENT, computations_values)
for row in zip(*list(materials.values())):
row_length = len(row)
if row_length != 0:
c.execute(configs.MATERIALS_STATEMENT, row + tuple(date)+ tuple(ppin))
urls_values = (url, gis_url) + tuple(date) + tuple(ppin)
c.execute(configs.URLS_STATEMENT, urls_values)
conn.commit()
except Exception as e:
# raise Exception
new_line = '\n'
if isinstance(ppin, int):
ppin = [ppin]
with open(os.path.join(__location__, 'errors.txt'), 'a') as f:
f.write(f"error in {ppin[0]} occurred. error was {e}{new_line}")
if __name__ == "__main__":
main()
| en | 0.55106 | # -*- coding: utf-8 -*- Created on Sat Mar 19 09:42:09 2022 @author: iaala # raise Exception | 2.556087 | 3 |
Chapter03/Activity11/fibonacci.py | vumaasha/python-workshop | 0 | 10738 | def fibonacci_iterative(n):
    # fibonacci_recursive(0) returns 0, so guard n == 0 here to keep the two
    # implementations consistent.
    if n == 0:
        return 0
    previous = 0
current = 1
for i in range(n - 1):
current_old = current
current = previous + current
previous = current_old
return current
def fibonacci_recursive(n):
if n == 0 or n == 1:
return n
else:
return fibonacci_recursive(n - 2) + fibonacci_recursive(n - 1)
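# Quick self-check (not part of the original activity file): both implementations should
# agree on the first few Fibonacci numbers once n == 0 is handled consistently.
if __name__ == "__main__":
    assert [fibonacci_iterative(n) for n in range(10)] == \
        [fibonacci_recursive(n) for n in range(10)]
    print([fibonacci_recursive(n) for n in range(10)])  # [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]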
| none | 1 | 4.248877 | 4 |
|
matches/tests/test_view_index.py | ToxaZ/nostradamus | 0 | 10739 | <filename>matches/tests/test_view_index.py
from django.urls import resolve, reverse
from django.test import TestCase
from matches.views import matches_index
from matches.models import Match
class AllMatchesTests(TestCase):
def setUp(self):
self.match = Match.objects.create(
match_id=1,
home_team='Netherlands',
guest_team='Russia',
start_time='2008-06-21 19:45Z'
)
url = reverse('matches:matches_index')
self.response = self.client.get(url)
def test_index_view_status_code(self):
        self.assertEqual(self.response.status_code, 200)
def test_index_url_resolves_index_view(self):
view = resolve('/matches/')
        self.assertEqual(view.func, matches_index)
def test_index_view_contains_link_to_single_match_page(self):
single_match_url = reverse(
'matches:single_match', kwargs={'match_id': self.match.match_id})
self.assertContains(
self.response, 'href="{0}"'.format(single_match_url))
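    def test_index_view_lists_created_match(self):
        # Hedged extra check (not in the original suite): it assumes the index template
        # renders the home team name of each listed match.
        self.assertContains(self.response, self.match.home_team)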
| none | 1 | 2.339883 | 2 |
|
hmc/applications/banana/banana.py | JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo | 1 | 10740 | from typing import Callable, Tuple
import numpy as np
def posterior_factory(y: np.ndarray, sigma_y: float, sigma_theta: float) -> Tuple[Callable]:
"""The banana distribution is a distribution that exhibits a characteristic
banana-shaped ridge that resembles the posterior that can emerge from
models that are not identifiable. The distribution is the posterior of the
following generative model.
y ~ Normal(theta[0] + theta[1]**2, sigma_sq_y)
theta[i] ~ Normal(0, sigma_sq_theta)
Args:
y: Observations of the banana model.
sigma_y: Standard deviation of the observations.
sigma_theta: Standard deviation of prior over linear coefficients.
Returns:
log_posterior: Function to compute the log-posterior.
metric: Function to compute the Fisher information metric.
euclidean_auxiliaries: Function to compute the log-posterior and its
gradient.
riemannian_auxiliaries: Function to compute the log-posterior, the
gradient of the log-posterior, the Fisher information metric, and the
derivatives of the Fisher information metric.
"""
sigma_sq_y = np.square(sigma_y)
sigma_sq_theta = np.square(sigma_theta)
def log_posterior(theta: np.ndarray) -> float:
"""The banana-shaped distribution posterior.
Args:
theta: Linear coefficients.
Returns:
out: The log-posterior of the banana-shaped distribution.
"""
p = theta[0] + np.square(theta[1])
ll = -0.5 / sigma_sq_y * np.square(y - p).sum()
lp = -0.5 / sigma_sq_theta * np.square(theta).sum()
return ll + lp
def grad_log_posterior(theta: np.ndarray) -> np.ndarray:
"""Gradient of the banana-shaped distribution with respect to the linear
coefficients.
Args:
theta: Linear coefficients.
Returns:
out: The gradient of the log-posterior of the banana-shaped
distribution with respect to the linear coefficients.
"""
p = theta[0] + np.square(theta[1])
d = np.sum(y - p)
ga = d / sigma_sq_y - theta[0] / sigma_sq_theta
gb = 2.0*d / sigma_sq_y * theta[1] - theta[1] / sigma_sq_theta
return np.hstack((ga, gb))
def metric(theta: np.ndarray) -> np.ndarray:
"""The Fisher information is the negative expected outer product of the
gradient of the posterior.
Args:
theta: Linear coefficients.
Returns:
G: The Fisher information metric of the banana-shaped distribution.
"""
n = y.size
s = 2.0*n*theta[1] / sigma_sq_y
G = np.array([[n / sigma_sq_y + 1.0 / sigma_sq_theta, s],
[s, 4.0*n*np.square(theta[1]) / sigma_sq_y + 1.0 / sigma_sq_theta]])
return G
def grad_metric(theta: np.ndarray) -> np.ndarray:
"""The gradient of the Fisher information metric with respect to the linear
coefficients.
Args:
theta: Linear coefficients.
Returns:
dG: The gradient of the Fisher information metric with respect to the
linear coefficients.
"""
n = y.size
dG = np.array([
[[0.0, 0.0], [0.0, 2.0*n / sigma_sq_y]],
[[0.0, 2.0*n / sigma_sq_y], [0.0, 8.0*n*theta[1] / sigma_sq_y]]
])
return dG
def euclidean_auxiliaries(theta: np.ndarray) -> Tuple[np.ndarray]:
"""Function to compute the log-posterior and the gradient of the
log-posterior.
Args:
theta: Linear coefficients.
Returns:
lp: The log-posterior of the banana-shaped distribution.
glp: The gradient of the log-posterior of the banana-shaped
distribution with respect to the linear coefficients.
"""
lp = log_posterior(theta)
glp = grad_log_posterior(theta)
return lp, glp
def riemannnian_auxiliaries(theta: np.ndarray) -> Tuple[np.ndarray]:
"""Function to compute the log-posterior, the gradient of the log-posterior,
the Fisher information metric and the derivatives of the Fisher
information metric.
Args:
theta: Linear coefficients.
Returns:
lp: The log-posterior of the banana-shaped distribution.
glp: The gradient of the log-posterior of the banana-shaped
distribution with respect to the linear coefficients.
G: The Fisher information metric of the banana-shaped distribution.
dG: The gradient of the Fisher information metric with respect to the
linear coefficients.
"""
lp = log_posterior(theta)
glp = grad_log_posterior(theta)
G = metric(theta)
dG = grad_metric(theta)
return lp, glp, G, dG
def log_posterior_and_metric(theta: np.ndarray) -> Tuple[np.ndarray]:
lp = log_posterior(theta)
G = metric(theta)
return lp, G
return log_posterior, metric, log_posterior_and_metric, euclidean_auxiliaries, riemannnian_auxiliaries
def generate_data(t: float, sigma_y: float, sigma_theta: float, num_obs: int) -> np.ndarray:
"""Generate data from the banana-shaped posterior distribution.
Args:
t: Free-parameter determining the thetas.
sigma_y: Noise standard deviation.
sigma_theta: Prior standard deviation over the thetas.
num_obs: Number of observations to generate.
Returns:
theta: Linear coefficients of the banana-shaped distribution.
y: Observations from the unidentifiable model.
"""
theta = np.array([t, np.sqrt(1.0 - t)])
y = theta[0] + np.square(theta[1]) + sigma_y * np.random.normal(size=(num_obs, ))
return theta, y
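# A minimal usage sketch (not part of the original module): generate synthetic data,
# build the posterior callables, and sanity-check the analytic gradient against a
# central finite difference. The parameter values below are illustrative only.
if __name__ == "__main__":
    np.random.seed(0)
    sigma_y, sigma_theta = 2.0, 2.0
    theta, y = generate_data(t=0.5, sigma_y=sigma_y, sigma_theta=sigma_theta, num_obs=100)
    log_posterior, _, _, euclidean_auxiliaries, _ = posterior_factory(y, sigma_y, sigma_theta)
    lp, glp = euclidean_auxiliaries(theta)
    eps = 1.0e-5
    fd = np.array([
        (log_posterior(theta + eps*np.eye(2)[i]) - log_posterior(theta - eps*np.eye(2)[i])) / (2.0*eps)
        for i in range(2)])
    print(lp, np.max(np.abs(glp - fd)))  # the finite-difference error should be tiny (~1e-6)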
| from typing import Callable, Tuple
import numpy as np
def posterior_factory(y: np.ndarray, sigma_y: float, sigma_theta: float) -> Tuple[Callable]:
"""The banana distribution is a distribution that exhibits a characteristic
banana-shaped ridge that resembles the posterior that can emerge from
models that are not identifiable. The distribution is the posterior of the
following generative model.
y ~ Normal(theta[0] + theta[1]**2, sigma_sq_y)
theta[i] ~ Normal(0, sigma_sq_theta)
Args:
y: Observations of the banana model.
sigma_y: Standard deviation of the observations.
sigma_theta: Standard deviation of prior over linear coefficients.
Returns:
log_posterior: Function to compute the log-posterior.
metric: Function to compute the Fisher information metric.
euclidean_auxiliaries: Function to compute the log-posterior and its
gradient.
riemannian_auxiliaries: Function to compute the log-posterior, the
gradient of the log-posterior, the Fisher information metric, and the
derivatives of the Fisher information metric.
"""
sigma_sq_y = np.square(sigma_y)
sigma_sq_theta = np.square(sigma_theta)
def log_posterior(theta: np.ndarray) -> float:
"""The banana-shaped distribution posterior.
Args:
theta: Linear coefficients.
Returns:
out: The log-posterior of the banana-shaped distribution.
"""
p = theta[0] + np.square(theta[1])
ll = -0.5 / sigma_sq_y * np.square(y - p).sum()
lp = -0.5 / sigma_sq_theta * np.square(theta).sum()
return ll + lp
def grad_log_posterior(theta: np.ndarray) -> np.ndarray:
"""Gradient of the banana-shaped distribution with respect to the linear
coefficients.
Args:
theta: Linear coefficients.
Returns:
out: The gradient of the log-posterior of the banana-shaped
distribution with respect to the linear coefficients.
"""
p = theta[0] + np.square(theta[1])
d = np.sum(y - p)
ga = d / sigma_sq_y - theta[0] / sigma_sq_theta
gb = 2.0*d / sigma_sq_y * theta[1] - theta[1] / sigma_sq_theta
return np.hstack((ga, gb))
def metric(theta: np.ndarray) -> np.ndarray:
"""The Fisher information is the negative expected outer product of the
gradient of the posterior.
Args:
theta: Linear coefficients.
Returns:
G: The Fisher information metric of the banana-shaped distribution.
"""
n = y.size
s = 2.0*n*theta[1] / sigma_sq_y
G = np.array([[n / sigma_sq_y + 1.0 / sigma_sq_theta, s],
[s, 4.0*n*np.square(theta[1]) / sigma_sq_y + 1.0 / sigma_sq_theta]])
return G
def grad_metric(theta: np.ndarray) -> np.ndarray:
"""The gradient of the Fisher information metric with respect to the linear
coefficients.
Args:
theta: Linear coefficients.
Returns:
dG: The gradient of the Fisher information metric with respect to the
linear coefficients.
"""
n = y.size
dG = np.array([
[[0.0, 0.0], [0.0, 2.0*n / sigma_sq_y]],
[[0.0, 2.0*n / sigma_sq_y], [0.0, 8.0*n*theta[1] / sigma_sq_y]]
])
return dG
def euclidean_auxiliaries(theta: np.ndarray) -> Tuple[np.ndarray]:
"""Function to compute the log-posterior and the gradient of the
log-posterior.
Args:
theta: Linear coefficients.
Returns:
lp: The log-posterior of the banana-shaped distribution.
glp: The gradient of the log-posterior of the banana-shaped
distribution with respect to the linear coefficients.
"""
lp = log_posterior(theta)
glp = grad_log_posterior(theta)
return lp, glp
    def riemannian_auxiliaries(theta: np.ndarray) -> Tuple[np.ndarray]:
"""Function to compute the log-posterior, the gradient of the log-posterior,
the Fisher information metric and the derivatives of the Fisher
information metric.
Args:
theta: Linear coefficients.
Returns:
lp: The log-posterior of the banana-shaped distribution.
glp: The gradient of the log-posterior of the banana-shaped
distribution with respect to the linear coefficients.
G: The Fisher information metric of the banana-shaped distribution.
dG: The gradient of the Fisher information metric with respect to the
linear coefficients.
"""
lp = log_posterior(theta)
glp = grad_log_posterior(theta)
G = metric(theta)
dG = grad_metric(theta)
return lp, glp, G, dG
def log_posterior_and_metric(theta: np.ndarray) -> Tuple[np.ndarray]:
lp = log_posterior(theta)
G = metric(theta)
return lp, G
    return log_posterior, metric, log_posterior_and_metric, euclidean_auxiliaries, riemannian_auxiliaries
def generate_data(t: float, sigma_y: float, sigma_theta: float, num_obs: int) -> np.ndarray:
"""Generate data from the banana-shaped posterior distribution.
Args:
t: Free-parameter determining the thetas.
sigma_y: Noise standard deviation.
sigma_theta: Prior standard deviation over the thetas.
num_obs: Number of observations to generate.
Returns:
theta: Linear coefficients of the banana-shaped distribution.
y: Observations from the unidentifiable model.
"""
theta = np.array([t, np.sqrt(1.0 - t)])
y = theta[0] + np.square(theta[1]) + sigma_y * np.random.normal(size=(num_obs, ))
return theta, y
| en | 0.747928 | The banana distribution is a distribution that exhibits a characteristic banana-shaped ridge that resembles the posterior that can emerge from models that are not identifiable. The distribution is the posterior of the following generative model. y ~ Normal(theta[0] + theta[1]**2, sigma_sq_y) theta[i] ~ Normal(0, sigma_sq_theta) Args: y: Observations of the banana model. sigma_y: Standard deviation of the observations. sigma_theta: Standard deviation of prior over linear coefficients. Returns: log_posterior: Function to compute the log-posterior. metric: Function to compute the Fisher information metric. euclidean_auxiliaries: Function to compute the log-posterior and its gradient. riemannian_auxiliaries: Function to compute the log-posterior, the gradient of the log-posterior, the Fisher information metric, and the derivatives of the Fisher information metric. The banana-shaped distribution posterior. Args: theta: Linear coefficients. Returns: out: The log-posterior of the banana-shaped distribution. Gradient of the banana-shaped distribution with respect to the linear coefficients. Args: theta: Linear coefficients. Returns: out: The gradient of the log-posterior of the banana-shaped distribution with respect to the linear coefficients. The Fisher information is the negative expected outer product of the gradient of the posterior. Args: theta: Linear coefficients. Returns: G: The Fisher information metric of the banana-shaped distribution. The gradient of the Fisher information metric with respect to the linear coefficients. Args: theta: Linear coefficients. Returns: dG: The gradient of the Fisher information metric with respect to the linear coefficients. Function to compute the log-posterior and the gradient of the log-posterior. Args: theta: Linear coefficients. Returns: lp: The log-posterior of the banana-shaped distribution. glp: The gradient of the log-posterior of the banana-shaped distribution with respect to the linear coefficients. Function to compute the log-posterior, the gradient of the log-posterior, the Fisher information metric and the derivatives of the Fisher information metric. Args: theta: Linear coefficients. Returns: lp: The log-posterior of the banana-shaped distribution. glp: The gradient of the log-posterior of the banana-shaped distribution with respect to the linear coefficients. G: The Fisher information metric of the banana-shaped distribution. dG: The gradient of the Fisher information metric with respect to the linear coefficients. Generate data from the banana-shaped posterior distribution. Args: t: Free-parameter determining the thetas. sigma_y: Noise standard deviation. sigma_theta: Prior standard deviation over the thetas. num_obs: Number of observations to generate. Returns: theta: Linear coefficients of the banana-shaped distribution. y: Observations from the unidentifiable model. | 3.15785 | 3 |
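A minimal usage sketch for the posterior factory and data generator above (illustrative only, not part of the source file; the seed, noise levels and sample size are arbitrary):

import numpy as np

np.random.seed(0)
sigma_y, sigma_theta = 2.0, 2.0
theta_true, y = generate_data(t=0.5, sigma_y=sigma_y, sigma_theta=sigma_theta, num_obs=100)

(log_posterior, metric, log_posterior_and_metric,
 euclidean_auxiliaries, riemannian_auxiliaries) = posterior_factory(y, sigma_y, sigma_theta)

theta = np.array([0.3, 0.8])
lp, glp = euclidean_auxiliaries(theta)        # log-posterior and its gradient
_, _, G, dG = riemannian_auxiliaries(theta)   # Fisher information and its derivatives

# Central-difference check of the analytic gradient (sanity check only).
eps = 1e-6
num_grad = np.array([
    (log_posterior(theta + eps * e) - log_posterior(theta - eps * e)) / (2 * eps)
    for e in np.eye(2)
])
print(np.allclose(glp, num_grad, atol=1e-4))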
msp430.py | sprout42/binaryninja-msp430 | 0 | 10741 | <reponame>sprout42/binaryninja-msp430
from binaryninja import (
Architecture,
BranchType,
FlagRole,
InstructionInfo,
LowLevelILFlagCondition,
RegisterInfo,
)
from .instructions import TYPE3_INSTRUCTIONS, Instruction, Registers
from .lifter import Lifter
class MSP430(Architecture):
name = "msp430"
address_size = 2
default_int_size = 2
global_regs = ["sr"]
stack_pointer = "sp"
regs = {r: RegisterInfo(r, 2) for r in Registers}
flags = ["v", "n", "c", "z"]
# The first flag write type is ignored currently.
# See: https://github.com/Vector35/binaryninja-api/issues/513
flag_write_types = ["", "*", "cnv", "cnz"]
flags_written_by_flag_write_type = {
"*": ["v", "n", "c", "z"],
"cnv": ["v", "n", "c"],
"cnz": ["c", "n", "z"],
}
flag_roles = {
"c": FlagRole.CarryFlagRole,
"n": FlagRole.NegativeSignFlagRole,
"z": FlagRole.ZeroFlagRole,
"v": FlagRole.OverflowFlagRole,
}
flags_required_for_flag_condition = {
LowLevelILFlagCondition.LLFC_UGE: ['c'],
LowLevelILFlagCondition.LLFC_UGT: ['c'],
LowLevelILFlagCondition.LLFC_ULT: ['c'],
LowLevelILFlagCondition.LLFC_ULE: ['c'],
LowLevelILFlagCondition.LLFC_SGE: ['n', 'v'],
LowLevelILFlagCondition.LLFC_SLT: ['n', 'v'],
LowLevelILFlagCondition.LLFC_E: ['z'],
LowLevelILFlagCondition.LLFC_NE: ['z'],
LowLevelILFlagCondition.LLFC_NEG: ['n'],
LowLevelILFlagCondition.LLFC_POS: ['n']
}
def get_instruction_info(self, data, addr):
instr = Instruction.decode(data, addr)
if instr is None:
return None
result = InstructionInfo()
result.length = instr.length
# Add branches
if instr.mnemonic in ["ret", "reti"]:
result.add_branch(BranchType.FunctionReturn)
elif instr.mnemonic in ["jmp", "br"] and instr.src.value is not None:
result.add_branch(BranchType.UnconditionalBranch, instr.src.value)
elif instr.type == 3:
result.add_branch(BranchType.TrueBranch, instr.src.value)
result.add_branch(BranchType.FalseBranch, addr + 2)
elif instr.mnemonic == "call" and instr.src.value is not None:
result.add_branch(BranchType.CallDestination, instr.src.value)
return result
def get_instruction_text(self, data, addr):
instr = Instruction.decode(data, addr)
if instr is None:
return None
tokens = instr.generate_tokens()
return tokens, instr.length
def get_instruction_low_level_il(self, data, addr, il):
instr = Instruction.decode(data, addr)
if instr is None:
return None
# Halting the system means turning off interrupts and just looping
# indefinitely
if instr.mnemonic == "dint":
next_instr = Instruction.decode(data[instr.length :], addr + instr.length)
if next_instr.mnemonic == "jmp" and next_instr.src.value == addr:
instr.mnemonic = "hlt"
Lifter.lift(il, instr)
return instr.length
| from binaryninja import (
Architecture,
BranchType,
FlagRole,
InstructionInfo,
LowLevelILFlagCondition,
RegisterInfo,
)
from .instructions import TYPE3_INSTRUCTIONS, Instruction, Registers
from .lifter import Lifter
class MSP430(Architecture):
name = "msp430"
address_size = 2
default_int_size = 2
global_regs = ["sr"]
stack_pointer = "sp"
regs = {r: RegisterInfo(r, 2) for r in Registers}
flags = ["v", "n", "c", "z"]
# The first flag write type is ignored currently.
# See: https://github.com/Vector35/binaryninja-api/issues/513
flag_write_types = ["", "*", "cnv", "cnz"]
flags_written_by_flag_write_type = {
"*": ["v", "n", "c", "z"],
"cnv": ["v", "n", "c"],
"cnz": ["c", "n", "z"],
}
flag_roles = {
"c": FlagRole.CarryFlagRole,
"n": FlagRole.NegativeSignFlagRole,
"z": FlagRole.ZeroFlagRole,
"v": FlagRole.OverflowFlagRole,
}
flags_required_for_flag_condition = {
LowLevelILFlagCondition.LLFC_UGE: ['c'],
LowLevelILFlagCondition.LLFC_UGT: ['c'],
LowLevelILFlagCondition.LLFC_ULT: ['c'],
LowLevelILFlagCondition.LLFC_ULE: ['c'],
LowLevelILFlagCondition.LLFC_SGE: ['n', 'v'],
LowLevelILFlagCondition.LLFC_SLT: ['n', 'v'],
LowLevelILFlagCondition.LLFC_E: ['z'],
LowLevelILFlagCondition.LLFC_NE: ['z'],
LowLevelILFlagCondition.LLFC_NEG: ['n'],
LowLevelILFlagCondition.LLFC_POS: ['n']
}
def get_instruction_info(self, data, addr):
instr = Instruction.decode(data, addr)
if instr is None:
return None
result = InstructionInfo()
result.length = instr.length
# Add branches
if instr.mnemonic in ["ret", "reti"]:
result.add_branch(BranchType.FunctionReturn)
elif instr.mnemonic in ["jmp", "br"] and instr.src.value is not None:
result.add_branch(BranchType.UnconditionalBranch, instr.src.value)
elif instr.type == 3:
result.add_branch(BranchType.TrueBranch, instr.src.value)
result.add_branch(BranchType.FalseBranch, addr + 2)
elif instr.mnemonic == "call" and instr.src.value is not None:
result.add_branch(BranchType.CallDestination, instr.src.value)
return result
def get_instruction_text(self, data, addr):
instr = Instruction.decode(data, addr)
if instr is None:
return None
tokens = instr.generate_tokens()
return tokens, instr.length
def get_instruction_low_level_il(self, data, addr, il):
instr = Instruction.decode(data, addr)
if instr is None:
return None
# Halting the system means turning off interrupts and just looping
# indefinitely
if instr.mnemonic == "dint":
next_instr = Instruction.decode(data[instr.length :], addr + instr.length)
if next_instr.mnemonic == "jmp" and next_instr.src.value == addr:
instr.mnemonic = "hlt"
Lifter.lift(il, instr)
return instr.length | en | 0.828199 | # The first flag write type is ignored currently. # See: https://github.com/Vector35/binaryninja-api/issues/513 # Add branches # Halting the system means turning off interrupts and just looping # indefinitely | 1.994128 | 2 |
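The architecture above only becomes visible to Binary Ninja once it is registered; the repository's plugin entry point is not shown in this row, so the following is the conventional registration pattern, stated as an assumption rather than taken from the repo:

# Hypothetical plugin __init__.py (assumed, not part of this row).
from .msp430 import MSP430

MSP430.register()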
app/domain/__init__.py | emge1/tracardi | 0 | 10742 | __all__ = [
'session',
'event',
'profile',
'consent',
'segment',
'source',
'rule',
'entity'
]
| __all__ = [
'session',
'event',
'profile',
'consent',
'segment',
'source',
'rule',
'entity'
]
| none | 1 | 1.02159 | 1 |
|
metric_calculation/faster_metrics.py | imatge-upc/saliency-2018-videosalgan | 10 | 10743 | <reponame>imatge-upc/saliency-2018-videosalgan<filename>metric_calculation/faster_metrics.py<gh_stars>1-10
from salience_metrics import auc_judd, auc_shuff, cc, nss, similarity, normalize_map
"""
DHF1K paper: "we employ five classic metrics, namely Normalized Scanpath Saliency (NSS), Similarity Metric (SIM), Linear Correlation Coefficient (CC), AUC-Judd (AUC-J), and shuffled AUC (s-AUC)."
"""
import cv2
import os
import numpy as np
import time
import pickle
gt_directory = "/imatge/lpanagiotis/work/DHF1K_extracted/maps"
sm_directory = "/imatge/lpanagiotis/work/DHF1K_extracted/predictions"
final_metric_list = []
# The directories are named 1-1000 so it should be easy to iterate over them
def inner_worker(i, packed, gt_path, sm_path): #packed should be a list of tuples (annotation, prediction)
gt, sm = packed
ground_truth = cv2.imread(os.path.join(gt_path, gt),cv2.IMREAD_GRAYSCALE)
saliency_map = cv2.imread(os.path.join(sm_path, sm),cv2.IMREAD_GRAYSCALE)
saliency_map_norm = normalize_map(saliency_map) # The functions are a bit haphazard. Some have normalization within and some do not.
# Calculate metrics
AUC_JUDD = auc_judd(saliency_map_norm, ground_truth)
AUC_SHUF = auc_shuff(saliency_map_norm, ground_truth, ground_truth)
NSS = nss(saliency_map_norm, ground_truth)
# the other ones have normalization within:
CC = cc(saliency_map, ground_truth)
SIM = similarity(saliency_map, ground_truth)
return ( AUC_JUDD,
AUC_SHUF,
NSS,
CC,
SIM )
for i in range(1,701):
start = time.clock()
gt_path = os.path.join(gt_directory, str(i))
sm_path = os.path.join(sm_directory, str(i))
gt_files = os.listdir(gt_path)
sm_files = os.listdir(sm_path)
#Now to sort based on their file number. The "key" parameter in sorted is a function based on which the sorting will happen (I use split to exclude the jpg/png from the).
gt_files_sorted = sorted(gt_files, key = lambda x: int(x.split(".")[0]) )
sm_files_sorted = sorted(sm_files, key = lambda x: int(x.split(".")[0]) )
pack = zip(gt_files_sorted, sm_files_sorted)
print("Files related to video {} sorted.".format(i))
##
##https://stackoverflow.com/questions/35663498/how-do-i-return-a-matrix-with-joblib-python
from joblib import Parallel, delayed
start = time.clock()
metric_list = Parallel(n_jobs=8)(delayed(inner_worker)(n, packed, gt_path, sm_path) for n, packed in enumerate(pack)) #run 8 frames simultaneously
aucj_mean = np.mean([x[0] for x in metric_list])
aucs_mean = np.mean([x[1] for x in metric_list])
nss_mean = np.mean([x[2] for x in metric_list])
cc_mean = np.mean([x[3] for x in metric_list])
sim_mean = np.mean([x[4] for x in metric_list])
print("For video number {} the metrics are:".format(i))
print("AUC-JUDD is {}".format(aucj_mean))
print("AUC-SHUFFLED is {}".format(aucs_mean))
print("NSS is {}".format(nss_mean))
print("CC is {}".format(cc_mean))
print("SIM is {}".format(sim_mean))
print("Time elapsed: {}".format(time.clock()-start))
print("==============================")
final_metric_list.append(( aucj_mean,
aucs_mean,
nss_mean,
cc_mean,
sim_mean ))
with open('metrics.txt', 'wb') as handle:
pickle.dump(final_metric_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
Aucj = np.mean([y[0] for y in final_metric_list])
Aucs = np.mean([y[1] for y in final_metric_list])
Nss = np.mean([y[2] for y in final_metric_list])
Cc = np.mean([y[3] for y in final_metric_list])
Sim = np.mean([y[4] for y in final_metric_list])
print("Final average of metrics is:")
print("AUC-JUDD is {}".format(Aucj))
print("AUC-SHUFFLED is {}".format(Aucs))
print("NSS is {}".format(Nss))
print("CC is {}".format(Cc))
print("SIM is {}".format(Sim))
| from salience_metrics import auc_judd, auc_shuff, cc, nss, similarity, normalize_map
"""
DHF1K paper: "we employ five classic metrics, namely Normalized Scanpath Saliency (NSS), Similarity Metric (SIM), Linear Correlation Coefficient (CC), AUC-Judd (AUC-J), and shuffled AUC (s-AUC)."
"""
import cv2
import os
import numpy as np
import time
import pickle
gt_directory = "/imatge/lpanagiotis/work/DHF1K_extracted/maps"
sm_directory = "/imatge/lpanagiotis/work/DHF1K_extracted/predictions"
final_metric_list = []
# The directories are named 1-1000 so it should be easy to iterate over them
def inner_worker(i, packed, gt_path, sm_path): #packed should be a list of tuples (annotation, prediction)
gt, sm = packed
ground_truth = cv2.imread(os.path.join(gt_path, gt),cv2.IMREAD_GRAYSCALE)
saliency_map = cv2.imread(os.path.join(sm_path, sm),cv2.IMREAD_GRAYSCALE)
saliency_map_norm = normalize_map(saliency_map) # The functions are a bit haphazard. Some have normalization within and some do not.
# Calculate metrics
AUC_JUDD = auc_judd(saliency_map_norm, ground_truth)
AUC_SHUF = auc_shuff(saliency_map_norm, ground_truth, ground_truth)
NSS = nss(saliency_map_norm, ground_truth)
# the other ones have normalization within:
CC = cc(saliency_map, ground_truth)
SIM = similarity(saliency_map, ground_truth)
return ( AUC_JUDD,
AUC_SHUF,
NSS,
CC,
SIM )
for i in range(1,701):
start = time.clock()
gt_path = os.path.join(gt_directory, str(i))
sm_path = os.path.join(sm_directory, str(i))
gt_files = os.listdir(gt_path)
sm_files = os.listdir(sm_path)
#Now to sort based on their file number. The "key" parameter in sorted is a function based on which the sorting will happen (I use split to exclude the jpg/png from the).
gt_files_sorted = sorted(gt_files, key = lambda x: int(x.split(".")[0]) )
sm_files_sorted = sorted(sm_files, key = lambda x: int(x.split(".")[0]) )
pack = zip(gt_files_sorted, sm_files_sorted)
print("Files related to video {} sorted.".format(i))
##
##https://stackoverflow.com/questions/35663498/how-do-i-return-a-matrix-with-joblib-python
from joblib import Parallel, delayed
start = time.clock()
metric_list = Parallel(n_jobs=8)(delayed(inner_worker)(n, packed, gt_path, sm_path) for n, packed in enumerate(pack)) #run 8 frames simultaneously
aucj_mean = np.mean([x[0] for x in metric_list])
aucs_mean = np.mean([x[1] for x in metric_list])
nss_mean = np.mean([x[2] for x in metric_list])
cc_mean = np.mean([x[3] for x in metric_list])
sim_mean = np.mean([x[4] for x in metric_list])
print("For video number {} the metrics are:".format(i))
print("AUC-JUDD is {}".format(aucj_mean))
print("AUC-SHUFFLED is {}".format(aucs_mean))
print("NSS is {}".format(nss_mean))
print("CC is {}".format(cc_mean))
print("SIM is {}".format(sim_mean))
print("Time elapsed: {}".format(time.clock()-start))
print("==============================")
final_metric_list.append(( aucj_mean,
aucs_mean,
nss_mean,
cc_mean,
sim_mean ))
with open('metrics.txt', 'wb') as handle:
pickle.dump(final_metric_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
Aucj = np.mean([y[0] for y in final_metric_list])
Aucs = np.mean([y[1] for y in final_metric_list])
Nss = np.mean([y[2] for y in final_metric_list])
Cc = np.mean([y[3] for y in final_metric_list])
Sim = np.mean([y[4] for y in final_metric_list])
print("Final average of metrics is:")
print("AUC-JUDD is {}".format(Aucj))
print("AUC-SHUFFLED is {}".format(Aucs))
print("NSS is {}".format(Nss))
print("CC is {}".format(Cc))
print("SIM is {}".format(Sim)) | en | 0.85873 | DHF1K paper: "we employ five classic met-rics, namely Normalized Scanpath Saliency (NSS), Sim-ilarity Metric (SIM), Linear Correlation Coefficient (CC),AUC-Judd (AUC-J), and shuffled AUC (s-AUC)."" # The directories are named 1-1000 so it should be easy to iterate over them #packed should be a list of tuples (annotation, prediction) # The functions are a bit haphazard. Some have normalization within and some do not. # Calculate metrics # the other ones have normalization within: #Now to sort based on their file number. The "key" parameter in sorted is a function based on which the sorting will happen (I use split to exclude the jpg/png from the). ## ##https://stackoverflow.com/questions/35663498/how-do-i-return-a-matrix-with-joblib-python #run 8 frames simultaneously | 2.297996 | 2 |
tests/rules/test_pacman_invalid_option.py | RogueScholar/thefuck-termux | 0 | 10744 | import pytest
from thefuck.rules.pacman_invalid_option import get_new_command
from thefuck.rules.pacman_invalid_option import match
from thefuck.types import Command
good_output = """community/shared_meataxe 1.0-3
A set of programs for working with matrix representations over finite fields
"""
bad_output = "error: invalid option '-"
@pytest.mark.parametrize("option", "SURQFDVT")
def test_not_match_good_output(option):
assert not match(Command("pacman -{}s meat".format(option), good_output))
@pytest.mark.parametrize("option", "azxcbnm")
def test_not_match_bad_output(option):
assert not match(Command("pacman -{}v meat".format(option), bad_output))
@pytest.mark.parametrize("option", "surqfdvt")
def test_match(option):
assert match(Command("pacman -{}v meat".format(option), bad_output))
@pytest.mark.parametrize("option", "surqfdvt")
def test_get_new_command(option):
new_command = get_new_command(
Command("pacman -{}v meat".format(option), ""))
assert new_command == "pacman -{}v meat".format(option.upper())
| import pytest
from thefuck.rules.pacman_invalid_option import get_new_command
from thefuck.rules.pacman_invalid_option import match
from thefuck.types import Command
good_output = """community/shared_meataxe 1.0-3
A set of programs for working with matrix representations over finite fields
"""
bad_output = "error: invalid option '-"
@pytest.mark.parametrize("option", "SURQFDVT")
def test_not_match_good_output(option):
assert not match(Command("pacman -{}s meat".format(option), good_output))
@pytest.mark.parametrize("option", "azxcbnm")
def test_not_match_bad_output(option):
assert not match(Command("pacman -{}v meat".format(option), bad_output))
@pytest.mark.parametrize("option", "surqfdvt")
def test_match(option):
assert match(Command("pacman -{}v meat".format(option), bad_output))
@pytest.mark.parametrize("option", "surqfdvt")
def test_get_new_command(option):
new_command = get_new_command(
Command("pacman -{}v meat".format(option), ""))
assert new_command == "pacman -{}v meat".format(option.upper())
| en | 0.888282 | community/shared_meataxe 1.0-3 A set of programs for working with matrix representations over finite fields | 2.825345 | 3 |
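For context, a hypothetical sketch of the rule these tests exercise (not the actual thefuck implementation; it only mirrors the behaviour the assertions above require):

def match(command):
    # Fire when pacman rejected an option and a lowercase operation flag is present.
    return ("invalid option '-" in command.output
            and any('-' + op in command.script for op in 'surqfdvt'))

def get_new_command(command):
    # Upper-case the first lowercase operation flag found in the script.
    for op in 'surqfdvt':
        if '-' + op in command.script:
            return command.script.replace('-' + op, '-' + op.upper(), 1)
    return command.script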
dimod/reference/composites/scalecomposite.py | joseppinilla/dimod | 1 | 10745 | <reponame>joseppinilla/dimod<filename>dimod/reference/composites/scalecomposite.py
# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
"""
A composite that scales problem variables as directed. if scalar is not given
calculates it based on quadratic and bias ranges.
"""
try:
import collections.abc as abc
except ImportError:
import collections as abc
from numbers import Number
import numpy as np
from dimod.binary_quadratic_model import BinaryQuadraticModel
from dimod.core.composite import ComposedSampler
__all__ = 'ScaleComposite',
class ScaleComposite(ComposedSampler):
"""Composite to scale variables of a problem
Scales the variables of a bqm and modifies linear and quadratic terms
accordingly.
Args:
sampler (:obj:`dimod.Sampler`):
A dimod sampler
Examples:
This example uses :class:`.ScaleComposite` to instantiate a
composed sampler that submits a simple Ising problem to a sampler.
The composed sampler scales linear, quadratic biases and offset as
indicated by options.
>>> h = {'a': -4.0, 'b': -4.0}
>>> J = {('a', 'b'): 3.2}
>>> sampler = dimod.ScaleComposite(dimod.ExactSolver())
>>> response = sampler.sample_ising(h, J, scalar=0.5,
... ignored_interactions=[('a','b')])
"""
def __init__(self, child_sampler):
self._children = [child_sampler]
@property
def children(self):
return self._children
@property
def parameters(self):
param = self.child.parameters.copy()
param.update({'scalar': [],
'bias_range': [],
'quadratic_range': [],
'ignored_variables': [],
'ignored_interactions': [],
'ignore_offset': []})
return param
@property
def properties(self):
return {'child_properties': self.child.properties.copy()}
def sample(self, bqm, scalar=None, bias_range=1, quadratic_range=None,
ignored_variables=None, ignored_interactions=None,
ignore_offset=False, **parameters):
""" Scale and sample from the provided binary quadratic model.
if scalar is not given, problem is scaled based on bias and quadratic
ranges. See :meth:`.BinaryQuadraticModel.scale` and
:meth:`.BinaryQuadraticModel.normalize`
Args:
bqm (:obj:`dimod.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
scalar (number):
Value by which to scale the energy range of the binary quadratic model.
bias_range (number/pair):
                Value/range by which to normalize all the biases, or if
`quadratic_range` is provided, just the linear biases.
quadratic_range (number/pair):
Value/range by which to normalize the quadratic biases.
ignored_variables (iterable, optional):
Biases associated with these variables are not scaled.
ignored_interactions (iterable[tuple], optional):
As an iterable of 2-tuples. Biases associated with these interactions are not scaled.
ignore_offset (bool, default=False):
If True, the offset is not scaled.
**parameters:
Parameters for the sampling method, specified by the child sampler.
Returns:
:obj:`dimod.SampleSet`
"""
ignored_variables, ignored_interactions = _check_params(
ignored_variables, ignored_interactions)
child = self.child
bqm_copy = _scaled_bqm(bqm, scalar, bias_range, quadratic_range,
ignored_variables, ignored_interactions,
ignore_offset)
response = child.sample(bqm_copy, **parameters)
return _scale_back_response(bqm, response, bqm_copy.info['scalar'],
ignored_variables, ignored_interactions,
ignore_offset)
def sample_ising(self, h, J, offset=0, scalar=None,
bias_range=1, quadratic_range=None,
ignored_variables=None, ignored_interactions=None,
ignore_offset=False, **parameters):
""" Scale and sample from the problem provided by h, J, offset
if scalar is not given, problem is scaled based on bias and quadratic
ranges.
Args:
h (dict): linear biases
J (dict): quadratic or higher order biases
offset (float, optional): constant energy offset
scalar (number):
Value by which to scale the energy range of the binary quadratic model.
bias_range (number/pair):
                Value/range by which to normalize all the biases, or if
`quadratic_range` is provided, just the linear biases.
quadratic_range (number/pair):
Value/range by which to normalize the quadratic biases.
ignored_variables (iterable, optional):
Biases associated with these variables are not scaled.
ignored_interactions (iterable[tuple], optional):
As an iterable of 2-tuples. Biases associated with these interactions are not scaled.
ignore_offset (bool, default=False):
If True, the offset is not scaled.
**parameters:
Parameters for the sampling method, specified by the child sampler.
Returns:
:obj:`dimod.SampleSet`
"""
if any(len(inter) > 2 for inter in J):
# handle HUBO
import warnings
msg = ("Support for higher order Ising models in ScaleComposite is "
"deprecated and will be removed in dimod 0.9.0. Please use "
"PolyScaleComposite.sample_hising instead.")
warnings.warn(msg, DeprecationWarning)
from dimod.reference.composites.higherordercomposites import PolyScaleComposite
from dimod.higherorder.polynomial import BinaryPolynomial
poly = BinaryPolynomial.from_hising(h, J, offset=offset)
ignored_terms = set()
if ignored_variables is not None:
ignored_terms.update(frozenset(v) for v in ignored_variables)
if ignored_interactions is not None:
ignored_terms.update(frozenset(inter) for inter in ignored_interactions)
if ignore_offset:
ignored_terms.add(frozenset())
return PolyScaleComposite(self.child).sample_poly(poly, scalar=scalar,
bias_range=bias_range,
poly_range=quadratic_range,
ignored_terms=ignored_terms,
**parameters)
bqm = BinaryQuadraticModel.from_ising(h, J, offset=offset)
return self.sample(bqm, scalar=scalar,
bias_range=bias_range,
quadratic_range=quadratic_range,
ignored_variables=ignored_variables,
ignored_interactions=ignored_interactions,
ignore_offset=ignore_offset, **parameters)
def _scale_back_response(bqm, response, scalar, ignored_interactions,
ignored_variables, ignore_offset):
"""Helper function to scale back the response of sample method"""
if len(ignored_interactions) + len(
ignored_variables) + ignore_offset == 0:
response.record.energy = np.divide(response.record.energy, scalar)
else:
response.record.energy = bqm.energies((response.record.sample,
response.variables))
return response
def _check_params(ignored_variables, ignored_interactions):
"""Helper for sample methods"""
if ignored_variables is None:
ignored_variables = set()
elif not isinstance(ignored_variables, abc.Container):
ignored_variables = set(ignored_variables)
if ignored_interactions is None:
ignored_interactions = set()
elif not isinstance(ignored_interactions, abc.Container):
ignored_interactions = set(ignored_interactions)
return ignored_variables, ignored_interactions
def _calc_norm_coeff(h, J, bias_range, quadratic_range, ignored_variables,
ignored_interactions):
"""Helper function to calculate normalization coefficient"""
if ignored_variables is None or ignored_interactions is None:
raise ValueError('ignored interactions or variables cannot be None')
def parse_range(r):
if isinstance(r, Number):
return -abs(r), abs(r)
return r
def min_and_max(iterable):
if not iterable:
return 0, 0
return min(iterable), max(iterable)
if quadratic_range is None:
linear_range, quadratic_range = bias_range, bias_range
else:
linear_range = bias_range
lin_range, quad_range = map(parse_range, (linear_range,
quadratic_range))
lin_min, lin_max = min_and_max([v for k, v in h.items()
if k not in ignored_variables])
quad_min, quad_max = min_and_max([v for k, v in J.items()
if not check_isin(k,
ignored_interactions)])
inv_scalar = max(lin_min / lin_range[0], lin_max / lin_range[1],
quad_min / quad_range[0], quad_max / quad_range[1])
if inv_scalar != 0:
return 1. / inv_scalar
else:
return 1.
def _scaled_bqm(bqm, scalar, bias_range, quadratic_range,
ignored_variables, ignored_interactions,
ignore_offset):
"""Helper function of sample for scaling"""
bqm_copy = bqm.copy()
if scalar is None:
scalar = _calc_norm_coeff(bqm_copy.linear, bqm_copy.quadratic,
bias_range, quadratic_range,
ignored_variables, ignored_interactions)
bqm_copy.scale(scalar, ignored_variables=ignored_variables,
ignored_interactions=ignored_interactions,
ignore_offset=ignore_offset)
bqm_copy.info.update({'scalar': scalar})
return bqm_copy
def check_isin(key, key_list):
return sum(set(key) == set(key_tmp) for key_tmp in key_list)
| # Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
"""
A composite that scales problem variables as directed. if scalar is not given
calculates it based on quadratic and bias ranges.
"""
try:
import collections.abc as abc
except ImportError:
import collections as abc
from numbers import Number
import numpy as np
from dimod.binary_quadratic_model import BinaryQuadraticModel
from dimod.core.composite import ComposedSampler
__all__ = 'ScaleComposite',
class ScaleComposite(ComposedSampler):
"""Composite to scale variables of a problem
Scales the variables of a bqm and modifies linear and quadratic terms
accordingly.
Args:
sampler (:obj:`dimod.Sampler`):
A dimod sampler
Examples:
This example uses :class:`.ScaleComposite` to instantiate a
composed sampler that submits a simple Ising problem to a sampler.
The composed sampler scales linear, quadratic biases and offset as
indicated by options.
>>> h = {'a': -4.0, 'b': -4.0}
>>> J = {('a', 'b'): 3.2}
>>> sampler = dimod.ScaleComposite(dimod.ExactSolver())
>>> response = sampler.sample_ising(h, J, scalar=0.5,
... ignored_interactions=[('a','b')])
"""
def __init__(self, child_sampler):
self._children = [child_sampler]
@property
def children(self):
return self._children
@property
def parameters(self):
param = self.child.parameters.copy()
param.update({'scalar': [],
'bias_range': [],
'quadratic_range': [],
'ignored_variables': [],
'ignored_interactions': [],
'ignore_offset': []})
return param
@property
def properties(self):
return {'child_properties': self.child.properties.copy()}
def sample(self, bqm, scalar=None, bias_range=1, quadratic_range=None,
ignored_variables=None, ignored_interactions=None,
ignore_offset=False, **parameters):
""" Scale and sample from the provided binary quadratic model.
if scalar is not given, problem is scaled based on bias and quadratic
ranges. See :meth:`.BinaryQuadraticModel.scale` and
:meth:`.BinaryQuadraticModel.normalize`
Args:
bqm (:obj:`dimod.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
scalar (number):
Value by which to scale the energy range of the binary quadratic model.
bias_range (number/pair):
                Value/range by which to normalize all the biases, or if
`quadratic_range` is provided, just the linear biases.
quadratic_range (number/pair):
Value/range by which to normalize the quadratic biases.
ignored_variables (iterable, optional):
Biases associated with these variables are not scaled.
ignored_interactions (iterable[tuple], optional):
As an iterable of 2-tuples. Biases associated with these interactions are not scaled.
ignore_offset (bool, default=False):
If True, the offset is not scaled.
**parameters:
Parameters for the sampling method, specified by the child sampler.
Returns:
:obj:`dimod.SampleSet`
"""
ignored_variables, ignored_interactions = _check_params(
ignored_variables, ignored_interactions)
child = self.child
bqm_copy = _scaled_bqm(bqm, scalar, bias_range, quadratic_range,
ignored_variables, ignored_interactions,
ignore_offset)
response = child.sample(bqm_copy, **parameters)
return _scale_back_response(bqm, response, bqm_copy.info['scalar'],
ignored_variables, ignored_interactions,
ignore_offset)
def sample_ising(self, h, J, offset=0, scalar=None,
bias_range=1, quadratic_range=None,
ignored_variables=None, ignored_interactions=None,
ignore_offset=False, **parameters):
""" Scale and sample from the problem provided by h, J, offset
if scalar is not given, problem is scaled based on bias and quadratic
ranges.
Args:
h (dict): linear biases
J (dict): quadratic or higher order biases
offset (float, optional): constant energy offset
scalar (number):
Value by which to scale the energy range of the binary quadratic model.
bias_range (number/pair):
                Value/range by which to normalize all the biases, or if
`quadratic_range` is provided, just the linear biases.
quadratic_range (number/pair):
Value/range by which to normalize the quadratic biases.
ignored_variables (iterable, optional):
Biases associated with these variables are not scaled.
ignored_interactions (iterable[tuple], optional):
As an iterable of 2-tuples. Biases associated with these interactions are not scaled.
ignore_offset (bool, default=False):
If True, the offset is not scaled.
**parameters:
Parameters for the sampling method, specified by the child sampler.
Returns:
:obj:`dimod.SampleSet`
"""
if any(len(inter) > 2 for inter in J):
# handle HUBO
import warnings
msg = ("Support for higher order Ising models in ScaleComposite is "
"deprecated and will be removed in dimod 0.9.0. Please use "
"PolyScaleComposite.sample_hising instead.")
warnings.warn(msg, DeprecationWarning)
from dimod.reference.composites.higherordercomposites import PolyScaleComposite
from dimod.higherorder.polynomial import BinaryPolynomial
poly = BinaryPolynomial.from_hising(h, J, offset=offset)
ignored_terms = set()
if ignored_variables is not None:
ignored_terms.update(frozenset(v) for v in ignored_variables)
if ignored_interactions is not None:
ignored_terms.update(frozenset(inter) for inter in ignored_interactions)
if ignore_offset:
ignored_terms.add(frozenset())
return PolyScaleComposite(self.child).sample_poly(poly, scalar=scalar,
bias_range=bias_range,
poly_range=quadratic_range,
ignored_terms=ignored_terms,
**parameters)
bqm = BinaryQuadraticModel.from_ising(h, J, offset=offset)
return self.sample(bqm, scalar=scalar,
bias_range=bias_range,
quadratic_range=quadratic_range,
ignored_variables=ignored_variables,
ignored_interactions=ignored_interactions,
ignore_offset=ignore_offset, **parameters)
def _scale_back_response(bqm, response, scalar, ignored_interactions,
ignored_variables, ignore_offset):
"""Helper function to scale back the response of sample method"""
if len(ignored_interactions) + len(
ignored_variables) + ignore_offset == 0:
response.record.energy = np.divide(response.record.energy, scalar)
else:
response.record.energy = bqm.energies((response.record.sample,
response.variables))
return response
def _check_params(ignored_variables, ignored_interactions):
"""Helper for sample methods"""
if ignored_variables is None:
ignored_variables = set()
elif not isinstance(ignored_variables, abc.Container):
ignored_variables = set(ignored_variables)
if ignored_interactions is None:
ignored_interactions = set()
elif not isinstance(ignored_interactions, abc.Container):
ignored_interactions = set(ignored_interactions)
return ignored_variables, ignored_interactions
def _calc_norm_coeff(h, J, bias_range, quadratic_range, ignored_variables,
ignored_interactions):
"""Helper function to calculate normalization coefficient"""
if ignored_variables is None or ignored_interactions is None:
raise ValueError('ignored interactions or variables cannot be None')
def parse_range(r):
if isinstance(r, Number):
return -abs(r), abs(r)
return r
def min_and_max(iterable):
if not iterable:
return 0, 0
return min(iterable), max(iterable)
if quadratic_range is None:
linear_range, quadratic_range = bias_range, bias_range
else:
linear_range = bias_range
lin_range, quad_range = map(parse_range, (linear_range,
quadratic_range))
lin_min, lin_max = min_and_max([v for k, v in h.items()
if k not in ignored_variables])
quad_min, quad_max = min_and_max([v for k, v in J.items()
if not check_isin(k,
ignored_interactions)])
inv_scalar = max(lin_min / lin_range[0], lin_max / lin_range[1],
quad_min / quad_range[0], quad_max / quad_range[1])
if inv_scalar != 0:
return 1. / inv_scalar
else:
return 1.
def _scaled_bqm(bqm, scalar, bias_range, quadratic_range,
ignored_variables, ignored_interactions,
ignore_offset):
"""Helper function of sample for scaling"""
bqm_copy = bqm.copy()
if scalar is None:
scalar = _calc_norm_coeff(bqm_copy.linear, bqm_copy.quadratic,
bias_range, quadratic_range,
ignored_variables, ignored_interactions)
bqm_copy.scale(scalar, ignored_variables=ignored_variables,
ignored_interactions=ignored_interactions,
ignore_offset=ignore_offset)
bqm_copy.info.update({'scalar': scalar})
return bqm_copy
def check_isin(key, key_list):
return sum(set(key) == set(key_tmp) for key_tmp in key_list) | en | 0.774857 | # Copyright 2019 D-Wave Systems Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ============================================================================= A composite that scales problem variables as directed. if scalar is not given calculates it based on quadratic and bias ranges. Composite to scale variables of a problem Scales the variables of a bqm and modifies linear and quadratic terms accordingly. Args: sampler (:obj:`dimod.Sampler`): A dimod sampler Examples: This example uses :class:`.ScaleComposite` to instantiate a composed sampler that submits a simple Ising problem to a sampler. The composed sampler scales linear, quadratic biases and offset as indicated by options. >>> h = {'a': -4.0, 'b': -4.0} >>> J = {('a', 'b'): 3.2} >>> sampler = dimod.ScaleComposite(dimod.ExactSolver()) >>> response = sampler.sample_ising(h, J, scalar=0.5, ... ignored_interactions=[('a','b')]) Scale and sample from the provided binary quadratic model. if scalar is not given, problem is scaled based on bias and quadratic ranges. See :meth:`.BinaryQuadraticModel.scale` and :meth:`.BinaryQuadraticModel.normalize` Args: bqm (:obj:`dimod.BinaryQuadraticModel`): Binary quadratic model to be sampled from. scalar (number): Value by which to scale the energy range of the binary quadratic model. bias_range (number/pair): Value/range by which to normalize the all the biases, or if `quadratic_range` is provided, just the linear biases. quadratic_range (number/pair): Value/range by which to normalize the quadratic biases. ignored_variables (iterable, optional): Biases associated with these variables are not scaled. ignored_interactions (iterable[tuple], optional): As an iterable of 2-tuples. Biases associated with these interactions are not scaled. ignore_offset (bool, default=False): If True, the offset is not scaled. **parameters: Parameters for the sampling method, specified by the child sampler. Returns: :obj:`dimod.SampleSet` Scale and sample from the problem provided by h, J, offset if scalar is not given, problem is scaled based on bias and quadratic ranges. Args: h (dict): linear biases J (dict): quadratic or higher order biases offset (float, optional): constant energy offset scalar (number): Value by which to scale the energy range of the binary quadratic model. bias_range (number/pair): Value/range by which to normalize the all the biases, or if `quadratic_range` is provided, just the linear biases. quadratic_range (number/pair): Value/range by which to normalize the quadratic biases. ignored_variables (iterable, optional): Biases associated with these variables are not scaled. ignored_interactions (iterable[tuple], optional): As an iterable of 2-tuples. Biases associated with these interactions are not scaled. ignore_offset (bool, default=False): If True, the offset is not scaled. **parameters: Parameters for the sampling method, specified by the child sampler. 
Returns: :obj:`dimod.SampleSet` # handle HUBO Helper function to scale back the response of sample method Helper for sample methods Helper function to calculate normalization coefficient Helper function of sample for scaling | 2.267074 | 2 |
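An end-to-end usage sketch for the composite above (assumes a dimod release that still ships ScaleComposite with this interface):

import dimod

h = {'a': -4.0, 'b': -4.0}
J = {('a', 'b'): 3.2}
sampler = dimod.ScaleComposite(dimod.ExactSolver())

# Explicit scalar: biases are multiplied by 0.5 before sampling, energies scaled back after.
sampleset = sampler.sample_ising(h, J, scalar=0.5)

# No scalar: one is derived from the requested bias/quadratic ranges (_calc_norm_coeff).
sampleset = sampler.sample_ising(h, J, bias_range=2.0, quadratic_range=1.0)
print(sampleset.first.sample, sampleset.first.energy)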
tests/test_config.py | savilard/flask-ecom-api | 1 | 10746 | import os
def test_development_config(test_app):
test_app.config.from_object('flask_ecom_api.config.DevelopmentConfig')
assert not test_app.config['TESTING']
assert test_app.config['SQLALCHEMY_DATABASE_URI'] == os.environ.get('DATABASE_URL')
def test_testing_config(test_app):
test_app.config.from_object('flask_ecom_api.config.TestingConfig')
assert test_app.config['TESTING']
assert test_app.config['SQLALCHEMY_DATABASE_URI'] == os.environ.get('DATABASE_TEST_URL')
def test_production_config(test_app):
test_app.config.from_object('flask_ecom_api.config.ProductionConfig')
assert not test_app.config['TESTING']
assert test_app.config['SQLALCHEMY_DATABASE_URI'] == os.environ.get('DATABASE_URL')
| import os
def test_development_config(test_app):
test_app.config.from_object('flask_ecom_api.config.DevelopmentConfig')
assert not test_app.config['TESTING']
assert test_app.config['SQLALCHEMY_DATABASE_URI'] == os.environ.get('DATABASE_URL')
def test_testing_config(test_app):
test_app.config.from_object('flask_ecom_api.config.TestingConfig')
assert test_app.config['TESTING']
assert test_app.config['SQLALCHEMY_DATABASE_URI'] == os.environ.get('DATABASE_TEST_URL')
def test_production_config(test_app):
test_app.config.from_object('flask_ecom_api.config.ProductionConfig')
assert not test_app.config['TESTING']
assert test_app.config['SQLALCHEMY_DATABASE_URI'] == os.environ.get('DATABASE_URL')
| none | 1 | 2.309867 | 2 |
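These tests rely on a test_app fixture that is not part of this row; a hypothetical conftest.py sketch (the factory name and import path are assumptions):

import pytest
from flask_ecom_api import create_app  # assumed application-factory location

@pytest.fixture(scope='module')
def test_app():
    app = create_app()
    yield app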
|
leasing/forms.py | suutari-ai/mvj | 1 | 10747 | from django import forms
from django.core import validators
from django.core.exceptions import ValidationError
from leasing.enums import (
InfillDevelopmentCompensationState,
LeaseState,
TenantContactType,
)
from leasing.models import Contact, DecisionMaker, District, LeaseType, Municipality
from leasing.validators import validate_business_id
class CommaSeparatedChoiceField(forms.ChoiceField):
def to_python(self, value):
if value in validators.EMPTY_VALUES:
return []
value = [item.strip() for item in str(value).split(",") if item.strip()]
return value
def validate(self, value):
if self.required and not value:
raise ValidationError(self.error_messages["required"], code="required")
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise ValidationError(
self.error_messages["invalid_choice"],
code="invalid_choice",
params={"value": val},
)
class LeaseSearchForm(forms.Form):
succinct = forms.BooleanField(label="Succinct", required=False)
identifier = forms.CharField(
label="Lease identifier", max_length=255, required=False, empty_value=None
)
tenant_name = forms.CharField(label="Tenant name", max_length=255, required=False)
tenantcontact_type = CommaSeparatedChoiceField(
label="Tenant role",
required=False,
choices=tuple((x.value, str(x)) for x in TenantContactType),
)
only_past_tenants = forms.BooleanField(label="Only past tenants", required=False)
tenant_activity = forms.ChoiceField(
label="Tenants",
required=False,
choices=(
("all", "All"),
("past", "Only past tenants"),
("active", "Only active tenants"),
),
)
lease_start_date_start = forms.DateField(required=False)
lease_start_date_end = forms.DateField(required=False)
lease_end_date_start = forms.DateField(required=False)
lease_end_date_end = forms.DateField(required=False)
only_active_leases = forms.BooleanField(label="Active", required=False)
only_expired_leases = forms.BooleanField(label="Expired", required=False)
has_geometry = forms.NullBooleanField(label="Has geometry", required=False)
property_identifier = forms.CharField(
label="Real property identifier",
max_length=255,
required=False,
empty_value=None,
)
address = forms.CharField(
label="Address", max_length=255, required=False, empty_value=None
)
lease_type = forms.ModelChoiceField(
label="Lease type", queryset=LeaseType.objects.all(), required=False
)
municipality = forms.ModelChoiceField(
label="Municipality", queryset=Municipality.objects.all(), required=False
)
district = forms.ModelChoiceField(
label="District", queryset=District.objects.all(), required=False
)
sequence = forms.IntegerField(label="Sequence", required=False)
lease_state = CommaSeparatedChoiceField(
label="Lease state",
required=False,
choices=tuple((x.value, str(x)) for x in LeaseState),
)
business_id = forms.CharField(
label="Business id",
max_length=255,
required=False,
empty_value=None,
validators=[validate_business_id],
)
national_identification_number = forms.CharField(
label="National identification number",
max_length=255,
required=False,
empty_value=None,
)
lessor = forms.ModelChoiceField(
label="Lessor", queryset=Contact.objects.filter(is_lessor=True), required=False
)
contract_number = forms.CharField(
label="Contract number", max_length=255, required=False, empty_value=None
)
decision_maker = forms.ModelChoiceField(
label="Decision maker", queryset=DecisionMaker.objects.all(), required=False
)
decision_date = forms.DateField(required=False)
decision_section = forms.CharField(
label="Decision section", max_length=255, required=False, empty_value=None
)
reference_number = forms.CharField(
label="Reference number", max_length=255, required=False, empty_value=None
)
invoice_number = forms.CharField(
label="Invoice number", max_length=255, required=False, empty_value=None
)
class BasisOfRentSearchForm(forms.Form):
search = forms.CharField(
label="Search", max_length=255, required=False, empty_value=None
)
decision_maker = forms.ModelChoiceField(
label="Decision maker", queryset=DecisionMaker.objects.all(), required=False
)
decision_date = forms.DateField(required=False)
decision_section = forms.CharField(
label="Decision section", max_length=255, required=False, empty_value=None
)
reference_number = forms.CharField(
label="Reference number", max_length=255, required=False, empty_value=None
)
class InfillDevelopmentCompensationSearchForm(forms.Form):
search = forms.CharField(
label="Search", max_length=255, required=False, empty_value=None
)
state = CommaSeparatedChoiceField(
label="State",
required=False,
choices=tuple((x.value, str(x)) for x in InfillDevelopmentCompensationState),
)
decision_maker = forms.ModelChoiceField(
label="Decision maker", queryset=DecisionMaker.objects.all(), required=False
)
decision_date = forms.DateField(required=False)
decision_section = forms.CharField(
label="Decision section", max_length=255, required=False, empty_value=None
)
reference_number = forms.CharField(
label="Reference number", max_length=255, required=False, empty_value=None
)
class AuditLogSearchForm(forms.Form):
type = forms.ChoiceField(
label="Type",
required=True,
choices=(("lease", "Lease"), ("contact", "Contact")),
)
id = forms.IntegerField(label="Id", required=False)
| from django import forms
from django.core import validators
from django.core.exceptions import ValidationError
from leasing.enums import (
InfillDevelopmentCompensationState,
LeaseState,
TenantContactType,
)
from leasing.models import Contact, DecisionMaker, District, LeaseType, Municipality
from leasing.validators import validate_business_id
class CommaSeparatedChoiceField(forms.ChoiceField):
def to_python(self, value):
if value in validators.EMPTY_VALUES:
return []
value = [item.strip() for item in str(value).split(",") if item.strip()]
return value
def validate(self, value):
if self.required and not value:
raise ValidationError(self.error_messages["required"], code="required")
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise ValidationError(
self.error_messages["invalid_choice"],
code="invalid_choice",
params={"value": val},
)
class LeaseSearchForm(forms.Form):
succinct = forms.BooleanField(label="Succinct", required=False)
identifier = forms.CharField(
label="Lease identifier", max_length=255, required=False, empty_value=None
)
tenant_name = forms.CharField(label="Tenant name", max_length=255, required=False)
tenantcontact_type = CommaSeparatedChoiceField(
label="Tenant role",
required=False,
choices=tuple((x.value, str(x)) for x in TenantContactType),
)
only_past_tenants = forms.BooleanField(label="Only past tenants", required=False)
tenant_activity = forms.ChoiceField(
label="Tenants",
required=False,
choices=(
("all", "All"),
("past", "Only past tenants"),
("active", "Only active tenants"),
),
)
lease_start_date_start = forms.DateField(required=False)
lease_start_date_end = forms.DateField(required=False)
lease_end_date_start = forms.DateField(required=False)
lease_end_date_end = forms.DateField(required=False)
only_active_leases = forms.BooleanField(label="Active", required=False)
only_expired_leases = forms.BooleanField(label="Expired", required=False)
has_geometry = forms.NullBooleanField(label="Has geometry", required=False)
property_identifier = forms.CharField(
label="Real property identifier",
max_length=255,
required=False,
empty_value=None,
)
address = forms.CharField(
label="Address", max_length=255, required=False, empty_value=None
)
lease_type = forms.ModelChoiceField(
label="Lease type", queryset=LeaseType.objects.all(), required=False
)
municipality = forms.ModelChoiceField(
label="Municipality", queryset=Municipality.objects.all(), required=False
)
district = forms.ModelChoiceField(
label="District", queryset=District.objects.all(), required=False
)
sequence = forms.IntegerField(label="Sequence", required=False)
lease_state = CommaSeparatedChoiceField(
label="Lease state",
required=False,
choices=tuple((x.value, str(x)) for x in LeaseState),
)
business_id = forms.CharField(
label="Business id",
max_length=255,
required=False,
empty_value=None,
validators=[validate_business_id],
)
national_identification_number = forms.CharField(
label="National identification number",
max_length=255,
required=False,
empty_value=None,
)
lessor = forms.ModelChoiceField(
label="Lessor", queryset=Contact.objects.filter(is_lessor=True), required=False
)
contract_number = forms.CharField(
label="Contract number", max_length=255, required=False, empty_value=None
)
decision_maker = forms.ModelChoiceField(
label="Decision maker", queryset=DecisionMaker.objects.all(), required=False
)
decision_date = forms.DateField(required=False)
decision_section = forms.CharField(
label="Decision section", max_length=255, required=False, empty_value=None
)
reference_number = forms.CharField(
label="Reference number", max_length=255, required=False, empty_value=None
)
invoice_number = forms.CharField(
label="Invoice number", max_length=255, required=False, empty_value=None
)
class BasisOfRentSearchForm(forms.Form):
search = forms.CharField(
label="Search", max_length=255, required=False, empty_value=None
)
decision_maker = forms.ModelChoiceField(
label="Decision maker", queryset=DecisionMaker.objects.all(), required=False
)
decision_date = forms.DateField(required=False)
decision_section = forms.CharField(
label="Decision section", max_length=255, required=False, empty_value=None
)
reference_number = forms.CharField(
label="Reference number", max_length=255, required=False, empty_value=None
)
class InfillDevelopmentCompensationSearchForm(forms.Form):
search = forms.CharField(
label="Search", max_length=255, required=False, empty_value=None
)
state = CommaSeparatedChoiceField(
label="State",
required=False,
choices=tuple((x.value, str(x)) for x in InfillDevelopmentCompensationState),
)
decision_maker = forms.ModelChoiceField(
label="Decision maker", queryset=DecisionMaker.objects.all(), required=False
)
decision_date = forms.DateField(required=False)
decision_section = forms.CharField(
label="Decision section", max_length=255, required=False, empty_value=None
)
reference_number = forms.CharField(
label="Reference number", max_length=255, required=False, empty_value=None
)
class AuditLogSearchForm(forms.Form):
type = forms.ChoiceField(
label="Type",
required=True,
choices=(("lease", "Lease"), ("contact", "Contact")),
)
id = forms.IntegerField(label="Id", required=False)
| en | 0.882929 | # Validate that each value in the value list is in self.choices. | 2.165782 | 2 |
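A framework-free sketch of the parse-then-validate flow that CommaSeparatedChoiceField implements above (simplified illustration, not the Django field itself):

EMPTY_VALUES = (None, '', [], (), {})

def to_python(value):
    if value in EMPTY_VALUES:
        return []
    return [item.strip() for item in str(value).split(',') if item.strip()]

def validate(values, valid_choices):
    bad = [v for v in values if v not in valid_choices]
    if bad:
        raise ValueError('invalid choice(s): %s' % ', '.join(bad))

states = to_python(' lease , reservation ,')
validate(states, {'lease', 'reservation', 'application'})
print(states)  # ['lease', 'reservation']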
src/main/management/commands/create_admin_user.py | LokotamaTheMastermind/website-portfolio-django-project | 0 | 10748 | # polls/management/commands/create_admin_user.py
import sys
import logging
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.conf import settings
class Command(BaseCommand):
help = 'Creates the initial admin user'
def handle(self, *args, **options):
if User.objects.filter(username="admin").exists():
print("admin exists")
else:
u = User(username='admin')
u.set_password('<PASSWORD>')
u.is_superuser = True
u.is_staff = True
u.save()
print("admin created")
sys.exit()
| # polls/management/commands/create_admin_user.py
import sys
import logging
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.conf import settings
class Command(BaseCommand):
help = 'Creates the initial admin user'
def handle(self, *args, **options):
if User.objects.filter(username="admin").exists():
print("admin exists")
else:
u = User(username='admin')
u.set_password('<PASSWORD>')
u.is_superuser = True
u.is_staff = True
u.save()
print("admin created")
sys.exit()
| en | 0.594959 | # polls/management/commands/create_admin_user.py | 2.291732 | 2 |
app.py | jdanper/incredipaper | 0 | 10749 | import unirest
import json
import requests
import os
import subprocess
import time
import argparse
rootUrl = "https://api.unsplash.com/"
unirest.default_header("Accept", "application/json")
unirest.default_header("Accept-Version", "v1")
unirest.default_header("Authorization","<CLIENT-ID>")
def downloadPic(randomPic_response):
content = randomPic_response.body
print 'getting an amazing photo from Unsplash by %s ' % content["user"]["username"]
picData = requests.get(randomPic_response.body["urls"]["regular"]).content#, callback=applyWallpaper)#.body["urls"]["regular"]
applyWallpaper(picData)
def applyWallpaper(picStream):
path = os.path.expanduser('~')+'/.tempWallpaper.jpg'
with open(path, 'wb') as handler:
print "saving"
handler.write(picStream)
print "enjoy your new wallpaper."
if os.environ.get('DESKTOP_SESSION') == "xubuntu":
os.system('xfconf-query -c xfce4-desktop -p /backdrop/screen0/monitor0/workspace0/last-image && xfconf-query -c xfce4-desktop -p /backdrop/screen0/monitor0/workspace0/last-image -s %s' %path)
else:
os.system('gsettings set org.gnome.desktop.background picture-uri file:///%s' % path)
while True:
parser = argparse.ArgumentParser()
parser.add_argument('integers', metavar='int', type=int, help='time between wallpaper change (in seconds)')
args = parser.parse_args()
print "waiting for %s seconds" % args.integers
time.sleep(args.integers)
downloadPic(unirest.get(rootUrl + "photos/random", params={"orientation":"landscape"}))#.body["id"]
| import unirest
import json
import requests
import os
import subprocess
import time
import argparse
rootUrl = "https://api.unsplash.com/"
unirest.default_header("Accept", "application/json")
unirest.default_header("Accept-Version", "v1")
unirest.default_header("Authorization","<CLIENT-ID>")
def downloadPic(randomPic_response):
content = randomPic_response.body
print 'getting an amazing photo from Unsplash by %s ' % content["user"]["username"]
picData = requests.get(randomPic_response.body["urls"]["regular"]).content#, callback=applyWallpaper)#.body["urls"]["regular"]
applyWallpaper(picData)
def applyWallpaper(picStream):
path = os.path.expanduser('~')+'/.tempWallpaper.jpg'
with open(path, 'wb') as handler:
print "saving"
handler.write(picStream)
print "enjoy your new wallpaper."
if os.environ.get('DESKTOP_SESSION') == "xubuntu":
os.system('xfconf-query -c xfce4-desktop -p /backdrop/screen0/monitor0/workspace0/last-image && xfconf-query -c xfce4-desktop -p /backdrop/screen0/monitor0/workspace0/last-image -s %s' %path)
else:
os.system('gsettings set org.gnome.desktop.background picture-uri file:///%s' % path)
while True:
parser = argparse.ArgumentParser()
parser.add_argument('integers', metavar='int', type=int, help='time between wallpaper change (in seconds)')
args = parser.parse_args()
print "waiting for %s seconds" % args.integers
time.sleep(args.integers)
downloadPic(unirest.get(rootUrl + "photos/random", params={"orientation":"landscape"}))#.body["id"]
| zh | 0.431891 | #, callback=applyWallpaper)#.body["urls"]["regular"] #.body["id"] | 2.627622 | 3 |
tests/client_asyncio_test.py | ninchat/ninchat-python | 0 | 10750 | <gh_stars>0
# Copyright (c) 2017, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import asyncio
import logging
from functools import partial
from ninchat.client.asyncio import Session
log = logging.getLogger(__name__)
async def async_test():
def on_session_event(params):
pass
def on_event(params, payload, last_reply):
if params["event"] == "message_received":
log.debug("received %s", payload[0].decode())
s = Session()
s.on_session_event = on_session_event
s.on_event = on_event
s.set_params({"user_attrs": {"name": "ninchat-python"}, "message_types": ["*"]})
async with s as params:
log.debug("opened params = %s", params)
user_id = params["user_id"]
params, _ = await s.call({"action": "describe_conn"})
log.debug("called params = %s", params)
await s.call({"action": "send_message", "message_type": "ninchat.com/text", "user_id": user_id}, [b'{"text": "Hello, me!"}'])
log.info("ok")
def test_client_asyncio():
asyncio.get_event_loop().run_until_complete(async_test())
| # Copyright (c) 2017, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import asyncio
import logging
from functools import partial
from ninchat.client.asyncio import Session
log = logging.getLogger(__name__)
async def async_test():
def on_session_event(params):
pass
def on_event(params, payload, last_reply):
if params["event"] == "message_received":
log.debug("received %s", payload[0].decode())
s = Session()
s.on_session_event = on_session_event
s.on_event = on_event
s.set_params({"user_attrs": {"name": "ninchat-python"}, "message_types": ["*"]})
async with s as params:
log.debug("opened params = %s", params)
user_id = params["user_id"]
params, _ = await s.call({"action": "describe_conn"})
log.debug("called params = %s", params)
await s.call({"action": "send_message", "message_type": "ninchat.com/text", "user_id": user_id}, [b'{"text": "Hello, me!"}'])
log.info("ok")
def test_client_asyncio():
asyncio.get_event_loop().run_until_complete(async_test()) | en | 0.684785 | # Copyright (c) 2017, <NAME> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. | 1.643574 | 2 |
msgraph/base.py | jstacoder/python-msgraph | 2 | 10751 | <reponame>jstacoder/python-msgraph
from datetime import datetime
class Base(object):
date_format = '%Y-%m-%d'
time_format = '%H:%M:%S'
datetime_format = date_format + 'T%s' % time_format
full_datetime_format = date_format + 'T' + time_format + '.%f'
iso_format = date_format + 'T%sZ' % time_format
standard_datetime_format = date_format + ' ' + time_format
extended_datetime_format = date_format + 'T' + time_format +'.%fZ'
@classmethod
def parse_date_time(cls, text):
instance = None
formats = [cls.extended_datetime_format, cls.full_datetime_format, cls.datetime_format, cls.standard_datetime_format, cls.iso_format, cls.date_format]
for format in formats:
try:
instance = datetime.strptime(text, format)
except Exception:
pass
else:
break
return instance
| from datetime import datetime
class Base(object):
date_format = '%Y-%m-%d'
time_format = '%H:%M:%S'
datetime_format = date_format + 'T%s' % time_format
full_datetime_format = date_format + 'T' + time_format + '.%f'
iso_format = date_format + 'T%sZ' % time_format
standard_datetime_format = date_format + ' ' + time_format
extended_datetime_format = date_format + 'T' + time_format +'.%fZ'
@classmethod
def parse_date_time(cls, text):
instance = None
formats = [cls.extended_datetime_format, cls.full_datetime_format, cls.datetime_format, cls.standard_datetime_format, cls.iso_format, cls.date_format]
for format in formats:
try:
instance = datetime.strptime(text, format)
except Exception:
pass
else:
break
return instance | none | 1 | 3.420516 | 3 |
|
nc/models.py | caktus/Traffic-Stops | 1 | 10752 | from caching.base import CachingManager, CachingMixin
from django.db import models
from tsdata.models import CensusProfile
PURPOSE_CHOICES = (
(1, "Speed Limit Violation"),
(2, "Stop Light/Sign Violation"),
(3, "Driving While Impaired"),
(4, "Safe Movement Violation"),
(5, "Vehicle Equipment Violation"),
(6, "Vehicle Regulatory Violation"),
(7, "Seat Belt Violation"),
(8, "Investigation"),
(9, "Other Motor Vehicle Violation"),
(10, "Checkpoint"),
)
ACTION_CHOICES = (
(1, "Verbal Warning"),
(2, "Written Warning"),
(3, "Citation Issued"),
(4, "On-View Arrest"),
(5, "No Action Taken"),
)
PERSON_TYPE_CHOICES = (("D", "Driver"), ("P", "Passenger"))
GENDER_CHOICES = (("M", "Male"), ("F", "Female"))
ETHNICITY_CHOICES = (("H", "Hispanic"), ("N", "Non-Hispanic"))
RACE_CHOICES = (
("A", "Asian"),
("B", "Black"),
("I", "Native American"),
("U", "Other"),
("W", "White"),
)
SEARCH_TYPE_CHOICES = (
(1, "Consent"),
(2, "Search Warrant"),
(3, "Probable Cause"),
(4, "Search Incident to Arrest"),
(5, "Protective Frisk"),
)
SEARCH_BASIS_CHOICES = (
("ER", "Erratic/Suspicious Behavior"),
("OB", "Observation of Suspected Contraband"),
("OI", "Other Official Information"),
("SM", "Suspicious Movement"),
("TIP", "Informant Tip"),
("WTNS", "Witness Observation"),
)
class Stop(CachingMixin, models.Model):
stop_id = models.PositiveIntegerField(primary_key=True)
agency_description = models.CharField(max_length=100)
agency = models.ForeignKey("Agency", null=True, related_name="stops", on_delete=models.CASCADE)
date = models.DateTimeField(db_index=True)
purpose = models.PositiveSmallIntegerField(choices=PURPOSE_CHOICES)
action = models.PositiveSmallIntegerField(choices=ACTION_CHOICES)
driver_arrest = models.BooleanField(default=False)
passenger_arrest = models.BooleanField(default=False)
encounter_force = models.BooleanField(default=False)
engage_force = models.BooleanField(default=False)
officer_injury = models.BooleanField(default=False)
driver_injury = models.BooleanField(default=False)
passenger_injury = models.BooleanField(default=False)
officer_id = models.CharField(max_length=15) # todo: keys
stop_location = models.CharField(max_length=15) # todo: keys
stop_city = models.CharField(max_length=20)
objects = CachingManager()
class Person(CachingMixin, models.Model):
person_id = models.IntegerField(primary_key=True)
stop = models.ForeignKey(Stop, on_delete=models.CASCADE)
type = models.CharField(max_length=2, choices=PERSON_TYPE_CHOICES)
age = models.PositiveSmallIntegerField()
gender = models.CharField(max_length=2, choices=GENDER_CHOICES)
ethnicity = models.CharField(max_length=2, choices=ETHNICITY_CHOICES)
race = models.CharField(max_length=2, choices=RACE_CHOICES)
objects = CachingManager()
class Search(CachingMixin, models.Model):
search_id = models.IntegerField(primary_key=True)
stop = models.ForeignKey(Stop, on_delete=models.CASCADE)
person = models.ForeignKey(Person, on_delete=models.CASCADE)
type = models.PositiveSmallIntegerField(choices=SEARCH_TYPE_CHOICES)
vehicle_search = models.BooleanField(default=False)
driver_search = models.BooleanField(default=False)
passenger_search = models.BooleanField(default=False)
property_search = models.BooleanField(default=False)
vehicle_siezed = models.BooleanField(default=False)
personal_property_siezed = models.BooleanField(default=False)
other_property_sized = models.BooleanField(default=False)
objects = CachingManager()
class Contraband(CachingMixin, models.Model):
contraband_id = models.IntegerField(primary_key=True)
search = models.ForeignKey(Search, on_delete=models.CASCADE)
person = models.ForeignKey(Person, on_delete=models.CASCADE)
stop = models.ForeignKey(Stop, on_delete=models.CASCADE)
ounces = models.FloatField(default=0, null=True)
pounds = models.FloatField(default=0, null=True)
pints = models.FloatField(default=0, null=True)
gallons = models.FloatField(default=0, null=True)
dosages = models.FloatField(default=0, null=True)
grams = models.FloatField(default=0, null=True)
kilos = models.FloatField(default=0, null=True)
money = models.FloatField(default=0, null=True)
weapons = models.FloatField(default=0, null=True)
dollar_amount = models.FloatField(default=0, null=True)
objects = CachingManager()
class SearchBasis(CachingMixin, models.Model):
search_basis_id = models.IntegerField(primary_key=True)
search = models.ForeignKey(Search, on_delete=models.CASCADE)
person = models.ForeignKey(Person, on_delete=models.CASCADE)
stop = models.ForeignKey(Stop, on_delete=models.CASCADE)
basis = models.CharField(max_length=4, choices=SEARCH_BASIS_CHOICES)
objects = CachingManager()
class Agency(CachingMixin, models.Model):
name = models.CharField(max_length=255)
# link to CensusProfile (no cross-database foreign key)
census_profile_id = models.CharField(max_length=16, blank=True, default="")
last_reported_stop = models.DateField(null=True)
objects = CachingManager()
class Meta(object):
verbose_name_plural = "Agencies"
def __str__(self):
return self.name
@property
def census_profile(self):
if self.census_profile_id:
profile = CensusProfile.objects.get(id=self.census_profile_id)
return profile.get_census_dict()
else:
return dict()
| from caching.base import CachingManager, CachingMixin
from django.db import models
from tsdata.models import CensusProfile
PURPOSE_CHOICES = (
(1, "Speed Limit Violation"),
(2, "Stop Light/Sign Violation"),
(3, "Driving While Impaired"),
(4, "Safe Movement Violation"),
(5, "Vehicle Equipment Violation"),
(6, "Vehicle Regulatory Violation"),
(7, "Seat Belt Violation"),
(8, "Investigation"),
(9, "Other Motor Vehicle Violation"),
(10, "Checkpoint"),
)
ACTION_CHOICES = (
(1, "Verbal Warning"),
(2, "Written Warning"),
(3, "Citation Issued"),
(4, "On-View Arrest"),
(5, "No Action Taken"),
)
PERSON_TYPE_CHOICES = (("D", "Driver"), ("P", "Passenger"))
GENDER_CHOICES = (("M", "Male"), ("F", "Female"))
ETHNICITY_CHOICES = (("H", "Hispanic"), ("N", "Non-Hispanic"))
RACE_CHOICES = (
("A", "Asian"),
("B", "Black"),
("I", "Native American"),
("U", "Other"),
("W", "White"),
)
SEARCH_TYPE_CHOICES = (
(1, "Consent"),
(2, "Search Warrant"),
(3, "Probable Cause"),
(4, "Search Incident to Arrest"),
(5, "Protective Frisk"),
)
SEARCH_BASIS_CHOICES = (
("ER", "Erratic/Suspicious Behavior"),
("OB", "Observation of Suspected Contraband"),
("OI", "Other Official Information"),
("SM", "Suspicious Movement"),
("TIP", "Informant Tip"),
("WTNS", "Witness Observation"),
)
class Stop(CachingMixin, models.Model):
stop_id = models.PositiveIntegerField(primary_key=True)
agency_description = models.CharField(max_length=100)
agency = models.ForeignKey("Agency", null=True, related_name="stops", on_delete=models.CASCADE)
date = models.DateTimeField(db_index=True)
purpose = models.PositiveSmallIntegerField(choices=PURPOSE_CHOICES)
action = models.PositiveSmallIntegerField(choices=ACTION_CHOICES)
driver_arrest = models.BooleanField(default=False)
passenger_arrest = models.BooleanField(default=False)
encounter_force = models.BooleanField(default=False)
engage_force = models.BooleanField(default=False)
officer_injury = models.BooleanField(default=False)
driver_injury = models.BooleanField(default=False)
passenger_injury = models.BooleanField(default=False)
officer_id = models.CharField(max_length=15) # todo: keys
stop_location = models.CharField(max_length=15) # todo: keys
stop_city = models.CharField(max_length=20)
objects = CachingManager()
class Person(CachingMixin, models.Model):
person_id = models.IntegerField(primary_key=True)
stop = models.ForeignKey(Stop, on_delete=models.CASCADE)
type = models.CharField(max_length=2, choices=PERSON_TYPE_CHOICES)
age = models.PositiveSmallIntegerField()
gender = models.CharField(max_length=2, choices=GENDER_CHOICES)
ethnicity = models.CharField(max_length=2, choices=ETHNICITY_CHOICES)
race = models.CharField(max_length=2, choices=RACE_CHOICES)
objects = CachingManager()
class Search(CachingMixin, models.Model):
search_id = models.IntegerField(primary_key=True)
stop = models.ForeignKey(Stop, on_delete=models.CASCADE)
person = models.ForeignKey(Person, on_delete=models.CASCADE)
type = models.PositiveSmallIntegerField(choices=SEARCH_TYPE_CHOICES)
vehicle_search = models.BooleanField(default=False)
driver_search = models.BooleanField(default=False)
passenger_search = models.BooleanField(default=False)
property_search = models.BooleanField(default=False)
vehicle_siezed = models.BooleanField(default=False)
personal_property_siezed = models.BooleanField(default=False)
other_property_sized = models.BooleanField(default=False)
objects = CachingManager()
class Contraband(CachingMixin, models.Model):
contraband_id = models.IntegerField(primary_key=True)
search = models.ForeignKey(Search, on_delete=models.CASCADE)
person = models.ForeignKey(Person, on_delete=models.CASCADE)
stop = models.ForeignKey(Stop, on_delete=models.CASCADE)
ounces = models.FloatField(default=0, null=True)
pounds = models.FloatField(default=0, null=True)
pints = models.FloatField(default=0, null=True)
gallons = models.FloatField(default=0, null=True)
dosages = models.FloatField(default=0, null=True)
grams = models.FloatField(default=0, null=True)
kilos = models.FloatField(default=0, null=True)
money = models.FloatField(default=0, null=True)
weapons = models.FloatField(default=0, null=True)
dollar_amount = models.FloatField(default=0, null=True)
objects = CachingManager()
class SearchBasis(CachingMixin, models.Model):
search_basis_id = models.IntegerField(primary_key=True)
search = models.ForeignKey(Search, on_delete=models.CASCADE)
person = models.ForeignKey(Person, on_delete=models.CASCADE)
stop = models.ForeignKey(Stop, on_delete=models.CASCADE)
basis = models.CharField(max_length=4, choices=SEARCH_BASIS_CHOICES)
objects = CachingManager()
class Agency(CachingMixin, models.Model):
name = models.CharField(max_length=255)
# link to CensusProfile (no cross-database foreign key)
census_profile_id = models.CharField(max_length=16, blank=True, default="")
last_reported_stop = models.DateField(null=True)
objects = CachingManager()
class Meta(object):
verbose_name_plural = "Agencies"
def __str__(self):
return self.name
@property
def census_profile(self):
if self.census_profile_id:
profile = CensusProfile.objects.get(id=self.census_profile_id)
return profile.get_census_dict()
else:
return dict()
| en | 0.443183 | # todo: keys # todo: keys # link to CensusProfile (no cross-database foreign key) | 1.987904 | 2 |
flasky/auth/forms/__init__.py | by46/fasky | 0 | 10753 | <reponame>by46/fasky<gh_stars>0
from .login import LoginForm
from .registration import RegistrationForm
| from .login import LoginForm
from .registration import RegistrationForm | none | 1 | 1.02854 | 1 |
|
API_SIMIT_Mail/multapp/urls.py | profefonso/Services-SM | 0 | 10754 | from django.urls import path
from django.contrib import admin
from rest_framework_swagger.views import get_swagger_view
from .views import notification
schema_view = get_swagger_view(title='MAIL API')
urlpatterns = [
path('front/betsy/irish/embargo/admin/', admin.site.urls),
# Swagger API
path(
'api/',
schema_view,
name='api'
),
# notification
path(
'notification/',
notification.NotificationServicesRest.as_view(),
name=notification.NotificationServicesRest.name
),
]
| from django.urls import path
from django.contrib import admin
from rest_framework_swagger.views import get_swagger_view
from .views import notification
schema_view = get_swagger_view(title='MAIL API')
urlpatterns = [
path('front/betsy/irish/embargo/admin/', admin.site.urls),
# Swagger API
path(
'api/',
schema_view,
name='api'
),
# notification
path(
'notification/',
notification.NotificationServicesRest.as_view(),
name=notification.NotificationServicesRest.name
),
]
| en | 0.476398 | # Swagger API # notification | 1.602737 | 2 |
tests/legacy_mocket.py | jepler/Adafruit_CircuitPython_Requests | 0 | 10755 | <filename>tests/legacy_mocket.py
from unittest import mock
SOCK_STREAM = 0
set_interface = mock.Mock()
interface = mock.MagicMock()
getaddrinfo = mock.Mock()
socket = mock.Mock()
class Mocket:
def __init__(self, response):
self.settimeout = mock.Mock()
self.close = mock.Mock()
self.connect = mock.Mock()
self.send = mock.Mock(side_effect=self._send)
self.readline = mock.Mock(side_effect=self._readline)
self.recv = mock.Mock(side_effect=self._recv)
self.fail_next_send = False
self._response = response
self._position = 0
def _send(self, data):
if self.fail_next_send:
self.fail_next_send = False
raise RuntimeError("Send failed")
return None
def _readline(self):
i = self._response.find(b"\r\n", self._position)
r = self._response[self._position : i + 2]
self._position = i + 2
return r
def _recv(self, count):
end = self._position + count
r = self._response[self._position : end]
self._position = end
print(r)
return r
| <filename>tests/legacy_mocket.py
from unittest import mock
SOCK_STREAM = 0
set_interface = mock.Mock()
interface = mock.MagicMock()
getaddrinfo = mock.Mock()
socket = mock.Mock()
class Mocket:
def __init__(self, response):
self.settimeout = mock.Mock()
self.close = mock.Mock()
self.connect = mock.Mock()
self.send = mock.Mock(side_effect=self._send)
self.readline = mock.Mock(side_effect=self._readline)
self.recv = mock.Mock(side_effect=self._recv)
self.fail_next_send = False
self._response = response
self._position = 0
def _send(self, data):
if self.fail_next_send:
self.fail_next_send = False
raise RuntimeError("Send failed")
return None
def _readline(self):
i = self._response.find(b"\r\n", self._position)
r = self._response[self._position : i + 2]
self._position = i + 2
return r
def _recv(self, count):
end = self._position + count
r = self._response[self._position : end]
self._position = end
print(r)
return r
| none | 1 | 2.849916 | 3 |
|
run.py | romeroyakovlev/ii | 1 | 10756 | # -*- coding: utf-8 -*-
import api,points
from api.bottle import *
II_PATH=os.path.dirname(__file__) or '.'
TEMPLATE_PATH.insert(0,II_PATH)
@route('/list.txt')
def list_txt():
response.set_header ('content-type','text/plain; charset=utf-8')
lst = api.load_echo(False)[1:]
if request.query.n:
return '\n'.join([t[0] for t in lst])
else:
return '\n'.join(['%s:%s:%s' % t for t in lst])
@route('/blacklist.txt')
def blacklist_txt():
response.set_header ('content-type','text/plain; charset=utf-8')
return api.ru('blacklist.txt')
@route('/u/m/<h:path>')
def jt_outmsg(h):
response.set_header ('content-type','text/plain; charset=iso-8859-1')
lst = [x for x in h.split('/') if len(x) == 20]
return '\n'.join( [api.mk_jt(x,api.raw_msg(x)) for x in lst] )
@route('/u/e/<names:path>')
def index_list(names):
response.set_header ('content-type','text/plain; charset=utf-8')
return api.echoareas(names.split('/'))
def _point_msg(pauth,tmsg):
msgfrom, addr = points.check_hash(pauth)
if not addr: return 'auth error!'
cfg = api.load_echo(False)
mo = api.toss(msgfrom,'%s,%s' % (cfg[0][1],addr),tmsg.strip())
if mo.msg.startswith('@repto:'):
tmpmsg = mo.msg.splitlines()
mo.repto = tmpmsg[0][7:]
mo.msg = '\n'.join(tmpmsg[1:])
# а ещё лучше - засунуть это в api.toss
if len(mo.msg.encode('utf-8')) < 64100:
h = api.point_newmsg(mo)
if h:
return 'msg ok:%s: <a href="/%s">%s</a>' % (h, mo.echoarea, mo.echoarea)
else:
return 'error:unknown'
else:
return 'msg big!'
@route('/u/point/<pauth>/<tmsg:path>')
def point_msg_get(pauth,tmsg):
return _point_msg(pauth,tmsg)
@post('/u/point')
def point_msg_get():
return _point_msg(request.POST['pauth'],request.POST['tmsg'])
@route('/m/<msg>')
def get_msg(msg):
response.set_header ('content-type','text/plain; charset=utf-8')
return api.raw_msg(msg)
@route('/e/<echoarea>')
def get_echolist(echoarea):
response.set_header ('content-type','text/plain; charset=utf-8')
return api.get_echoarea(echoarea,True)
import iitpl
iitpl.II_PATH=II_PATH
run(host='127.0.0.1',port=62220,debug=False)
| # -*- coding: utf-8 -*-
import api,points
from api.bottle import *
II_PATH=os.path.dirname(__file__) or '.'
TEMPLATE_PATH.insert(0,II_PATH)
@route('/list.txt')
def list_txt():
response.set_header ('content-type','text/plain; charset=utf-8')
lst = api.load_echo(False)[1:]
if request.query.n:
return '\n'.join([t[0] for t in lst])
else:
return '\n'.join(['%s:%s:%s' % t for t in lst])
@route('/blacklist.txt')
def blacklist_txt():
response.set_header ('content-type','text/plain; charset=utf-8')
return api.ru('blacklist.txt')
@route('/u/m/<h:path>')
def jt_outmsg(h):
response.set_header ('content-type','text/plain; charset=iso-8859-1')
lst = [x for x in h.split('/') if len(x) == 20]
return '\n'.join( [api.mk_jt(x,api.raw_msg(x)) for x in lst] )
@route('/u/e/<names:path>')
def index_list(names):
response.set_header ('content-type','text/plain; charset=utf-8')
return api.echoareas(names.split('/'))
def _point_msg(pauth,tmsg):
msgfrom, addr = points.check_hash(pauth)
if not addr: return 'auth error!'
cfg = api.load_echo(False)
mo = api.toss(msgfrom,'%s,%s' % (cfg[0][1],addr),tmsg.strip())
if mo.msg.startswith('@repto:'):
tmpmsg = mo.msg.splitlines()
mo.repto = tmpmsg[0][7:]
mo.msg = '\n'.join(tmpmsg[1:])
# а ещё лучше - засунуть это в api.toss
if len(mo.msg.encode('utf-8')) < 64100:
h = api.point_newmsg(mo)
if h:
return 'msg ok:%s: <a href="/%s">%s</a>' % (h, mo.echoarea, mo.echoarea)
else:
return 'error:unknown'
else:
return 'msg big!'
@route('/u/point/<pauth>/<tmsg:path>')
def point_msg_get(pauth,tmsg):
return _point_msg(pauth,tmsg)
@post('/u/point')
def point_msg_get():
return _point_msg(request.POST['pauth'],request.POST['tmsg'])
@route('/m/<msg>')
def get_msg(msg):
response.set_header ('content-type','text/plain; charset=utf-8')
return api.raw_msg(msg)
@route('/e/<echoarea>')
def get_echolist(echoarea):
response.set_header ('content-type','text/plain; charset=utf-8')
return api.get_echoarea(echoarea,True)
import iitpl
iitpl.II_PATH=II_PATH
run(host='127.0.0.1',port=62220,debug=False)
| ru | 0.983673 | # -*- coding: utf-8 -*- # а ещё лучше - засунуть это в api.toss | 2.218755 | 2 |
learning_labs/yang/01-yang/add_loopback_ip.py | hpreston/sbx_nxos | 33 | 10757 | #!/usr/bin/env python
from ncclient import manager
import sys
from lxml import etree
# Set the device variables
DEVICES = ['172.16.30.101', '172.16.30.102']
USER = 'admin'
PASS = '<PASSWORD>'
PORT = 830
LOOPBACK_IP = {
'172.16.30.101': '10.99.99.1/24',
'172.16.30.102': '10.99.99.2/24'
}
DEVICE_NAMES = {'172.16.30.101': '(nx-osv9000-1)',
'172.16.30.102': '(nx-osv9000-2)' }
# create a main() method
def main():
"""
Main method that adds an IP address to interface loopback 99 to
both the spine switches.
"""
loopback_ip_add = """
<config>
<System xmlns="http://cisco.com/ns/yang/cisco-nx-os-device">
<ipv4-items>
<inst-items>
<dom-items>
<Dom-list>
<name>default</name>
<if-items>
<If-list>
<id>lo99</id>
<addr-items>
<Addr-list>
<addr>{}</addr>
</Addr-list>
</addr-items>
</If-list>
</if-items>
</Dom-list>
</dom-items>
</inst-items>
</ipv4-items>
</System>
</config>"""
for device in DEVICES:
with manager.connect(host=device, port=PORT, username=USER,
password=<PASSWORD>, hostkey_verify=False,
device_params={'name': 'nexus'},
look_for_keys=False, allow_agent=False) as m:
# Add the loopback interface
print("\nNow adding IP address {} to device {} {}...\n".format(LOOPBACK_IP[device], DEVICE_NAMES[device],
device))
new_ip = loopback_ip_add.format(LOOPBACK_IP[device])
netconf_response = m.edit_config(target='running', config=new_ip)
# Parse the XML response
print(netconf_response)
if __name__ == '__main__':
sys.exit(main())
| #!/usr/bin/env python
from ncclient import manager
import sys
from lxml import etree
# Set the device variables
DEVICES = ['172.16.30.101', '172.16.30.102']
USER = 'admin'
PASS = '<PASSWORD>'
PORT = 830
LOOPBACK_IP = {
'172.16.30.101': '10.99.99.1/24',
'172.16.30.102': '10.99.99.2/24'
}
DEVICE_NAMES = {'172.16.30.101': '(nx-osv9000-1)',
'172.16.30.102': '(nx-osv9000-2)' }
# create a main() method
def main():
"""
Main method that adds an IP address to interface loopback 99 to
both the spine switches.
"""
loopback_ip_add = """
<config>
<System xmlns="http://cisco.com/ns/yang/cisco-nx-os-device">
<ipv4-items>
<inst-items>
<dom-items>
<Dom-list>
<name>default</name>
<if-items>
<If-list>
<id>lo99</id>
<addr-items>
<Addr-list>
<addr>{}</addr>
</Addr-list>
</addr-items>
</If-list>
</if-items>
</Dom-list>
</dom-items>
</inst-items>
</ipv4-items>
</System>
</config>"""
for device in DEVICES:
with manager.connect(host=device, port=PORT, username=USER,
password=<PASSWORD>, hostkey_verify=False,
device_params={'name': 'nexus'},
look_for_keys=False, allow_agent=False) as m:
# Add the loopback interface
print("\nNow adding IP address {} to device {} {}...\n".format(LOOPBACK_IP[device], DEVICE_NAMES[device],
device))
new_ip = loopback_ip_add.format(LOOPBACK_IP[device])
netconf_response = m.edit_config(target='running', config=new_ip)
# Parse the XML response
print(netconf_response)
if __name__ == '__main__':
sys.exit(main())
| en | 0.323542 | #!/usr/bin/env python # Set the device variables # create a main() method Main method that adds an IP address to interface loopback 99 to both the spine switches. <config> <System xmlns="http://cisco.com/ns/yang/cisco-nx-os-device"> <ipv4-items> <inst-items> <dom-items> <Dom-list> <name>default</name> <if-items> <If-list> <id>lo99</id> <addr-items> <Addr-list> <addr>{}</addr> </Addr-list> </addr-items> </If-list> </if-items> </Dom-list> </dom-items> </inst-items> </ipv4-items> </System> </config> # Add the loopback interface # Parse the XML response | 2.660932 | 3 |
Plotly_Dash/spacex_dash_app.py | AtypicalLogic/Coursera-IBM_DS-Applied_Data_Science_Capstone | 0 | 10758 | <reponame>AtypicalLogic/Coursera-IBM_DS-Applied_Data_Science_Capstone
# To run this file, Win Start > cmd > file dir > run: python spacex_dash_app.py
# Import required libraries
import pandas as pd
import dash
from dash import html
from dash import dcc
from dash.dependencies import Input, Output
import plotly.express as px
# Read the airline data into pandas dataframe
spacex_df = pd.read_csv("spacex_launch_dash.csv")
max_payload = spacex_df['Payload Mass (kg)'].max()
min_payload = spacex_df['Payload Mass (kg)'].min()
# Dropdown list(s)
launch_site_list = []
launch_site_list.append('ALL')
for index, row in spacex_df['Launch Site'].value_counts().to_frame().iterrows():
launch_site_list.append(row.name)
# Create a dash application
app = dash.Dash(__name__)
# Create an app layout
app.layout = html.Div(children=[html.H1('SpaceX Launch Records Dashboard',
style={'textAlign': 'center', 'color': '#503D36',
'font-size': 40}),
# TASK 1: Add a dropdown list to enable Launch Site selection
# The default select value is for ALL sites
# dcc.Dropdown(id='site-dropdown',...)
dcc.Dropdown(id='site-dropdown',
options=[{'label': i, 'value': i} for i in launch_site_list],
style={'width':'100%', 'padding':'3px', 'font-size': '20px', 'text-align-last': 'left'},
value='ALL'),
html.Br(),
# TASK 2: Add a pie chart to show the total successful launches count for all sites
# If a specific launch site was selected, show the Success vs. Failed counts for the site
html.Div(dcc.Graph(id='success-pie-chart')),
html.Br(),
html.P("Payload range (Kg):"),
# TASK 3: Add a slider to select payload range
#dcc.RangeSlider(id='payload-slider',...)
dcc.RangeSlider(id='payload-slider', min=min_payload, max=max_payload, step=1000, value=[min_payload, max_payload]),
# TASK 4: Add a scatter chart to show the correlation between payload and launch success
html.Div(dcc.Graph(id='success-payload-scatter-chart')),
])
# TASK 2:
# Add a callback function for `site-dropdown` as input, `success-pie-chart` as output
@app.callback(Output(component_id='success-pie-chart', component_property='figure'),
Input(component_id='site-dropdown', component_property='value'))
def get_pie_chart(entered_site):
if entered_site == 'ALL':
filtered_df = spacex_df[['Launch Site', 'class']].groupby(by=['Launch Site'], as_index=False).mean()
fig = px.pie(filtered_df, values='class',
names='Launch Site',
title='Total Success Launches by Site')
return fig
else:
# return the outcomes piechart for a selected site
filtered_df = spacex_df[['Launch Site', 'class']][spacex_df['Launch Site'] == entered_site]
mean = filtered_df.groupby(by='Launch Site', as_index=False).mean()
means = {}
means[1] = mean['class'][0]
means[0] = 1 - means[1]
fig = px.pie(values=means.values(), names=means.keys(),
title=f'Total Success Launches by Site: {entered_site}')
return fig
# TASK 4:
# Add a callback function for `site-dropdown` and `payload-slider` as inputs, `success-payload-scatter-chart` as output
@app.callback(Output(component_id='success-payload-scatter-chart', component_property='figure'),
[Input(component_id='site-dropdown', component_property='value'),
Input(component_id="payload-slider", component_property="value")])
def get_scatter_plot(entered_site, payload_range):
# print('min:', payload_range[0], '\tmax:', payload_range[1])
# print(entered_site)
if entered_site == 'ALL':
payload_filtered_df = spacex_df[['Payload Mass (kg)', 'Booster Version Category', 'Launch Site', 'class']][(spacex_df['Payload Mass (kg)'] <= payload_range[1]) & (spacex_df['Payload Mass (kg)'] >= payload_range[0])]
else:
payload_filtered_df = spacex_df[['Payload Mass (kg)', 'Booster Version Category', 'Launch Site', 'class']][(spacex_df['Payload Mass (kg)'] <= payload_range[1]) &
(spacex_df['Payload Mass (kg)'] >= payload_range[0]) &
(spacex_df['Launch Site'] == entered_site)]
fig = px.scatter(data_frame=payload_filtered_df, x='Payload Mass (kg)', y='class', color='Booster Version Category')
return fig
# Run the app
if __name__ == '__main__':
app.run_server(debug=True)
# Finding Insights Visually
# Now with the dashboard completed, you should be able to use it to analyze SpaceX launch data, and answer the following questions:
#
# Which site has the largest successful launches?
### KSC LC-39A
# Which site has the highest launch success rate?
### KSC LC-39A
# Which payload range(s) has the highest launch success rate?
### 2000 - 4000
# Which payload range(s) has the lowest launch success rate?
### 6000 - 9000
# Which F9 Booster version (v1.0, v1.1, FT, B4, B5, etc.) has the highest launch success rate?
### B5
| # To run this file, Win Start > cmd > file dir > run: python spacex_dash_app.py
# Import required libraries
import pandas as pd
import dash
from dash import html
from dash import dcc
from dash.dependencies import Input, Output
import plotly.express as px
# Read the airline data into pandas dataframe
spacex_df = pd.read_csv("spacex_launch_dash.csv")
max_payload = spacex_df['Payload Mass (kg)'].max()
min_payload = spacex_df['Payload Mass (kg)'].min()
# Dropdown list(s)
launch_site_list = []
launch_site_list.append('ALL')
for index, row in spacex_df['Launch Site'].value_counts().to_frame().iterrows():
launch_site_list.append(row.name)
# Create a dash application
app = dash.Dash(__name__)
# Create an app layout
app.layout = html.Div(children=[html.H1('SpaceX Launch Records Dashboard',
style={'textAlign': 'center', 'color': '#503D36',
'font-size': 40}),
# TASK 1: Add a dropdown list to enable Launch Site selection
# The default select value is for ALL sites
# dcc.Dropdown(id='site-dropdown',...)
dcc.Dropdown(id='site-dropdown',
options=[{'label': i, 'value': i} for i in launch_site_list],
style={'width':'100%', 'padding':'3px', 'font-size': '20px', 'text-align-last': 'left'},
value='ALL'),
html.Br(),
# TASK 2: Add a pie chart to show the total successful launches count for all sites
# If a specific launch site was selected, show the Success vs. Failed counts for the site
html.Div(dcc.Graph(id='success-pie-chart')),
html.Br(),
html.P("Payload range (Kg):"),
# TASK 3: Add a slider to select payload range
#dcc.RangeSlider(id='payload-slider',...)
dcc.RangeSlider(id='payload-slider', min=min_payload, max=max_payload, step=1000, value=[min_payload, max_payload]),
# TASK 4: Add a scatter chart to show the correlation between payload and launch success
html.Div(dcc.Graph(id='success-payload-scatter-chart')),
])
# TASK 2:
# Add a callback function for `site-dropdown` as input, `success-pie-chart` as output
@app.callback(Output(component_id='success-pie-chart', component_property='figure'),
Input(component_id='site-dropdown', component_property='value'))
def get_pie_chart(entered_site):
if entered_site == 'ALL':
filtered_df = spacex_df[['Launch Site', 'class']].groupby(by=['Launch Site'], as_index=False).mean()
fig = px.pie(filtered_df, values='class',
names='Launch Site',
title='Total Success Launches by Site')
return fig
else:
# return the outcomes piechart for a selected site
filtered_df = spacex_df[['Launch Site', 'class']][spacex_df['Launch Site'] == entered_site]
mean = filtered_df.groupby(by='Launch Site', as_index=False).mean()
means = {}
means[1] = mean['class'][0]
means[0] = 1 - means[1]
fig = px.pie(values=means.values(), names=means.keys(),
title=f'Total Success Launches by Site: {entered_site}')
return fig
# TASK 4:
# Add a callback function for `site-dropdown` and `payload-slider` as inputs, `success-payload-scatter-chart` as output
@app.callback(Output(component_id='success-payload-scatter-chart', component_property='figure'),
[Input(component_id='site-dropdown', component_property='value'),
Input(component_id="payload-slider", component_property="value")])
def get_scatter_plot(entered_site, payload_range):
# print('min:', payload_range[0], '\tmax:', payload_range[1])
# print(entered_site)
if entered_site == 'ALL':
payload_filtered_df = spacex_df[['Payload Mass (kg)', 'Booster Version Category', 'Launch Site', 'class']][(spacex_df['Payload Mass (kg)'] <= payload_range[1]) & (spacex_df['Payload Mass (kg)'] >= payload_range[0])]
else:
payload_filtered_df = spacex_df[['Payload Mass (kg)', 'Booster Version Category', 'Launch Site', 'class']][(spacex_df['Payload Mass (kg)'] <= payload_range[1]) &
(spacex_df['Payload Mass (kg)'] >= payload_range[0]) &
(spacex_df['Launch Site'] == entered_site)]
fig = px.scatter(data_frame=payload_filtered_df, x='Payload Mass (kg)', y='class', color='Booster Version Category')
return fig
# Run the app
if __name__ == '__main__':
app.run_server(debug=True)
# Finding Insights Visually
# Now with the dashboard completed, you should be able to use it to analyze SpaceX launch data, and answer the following questions:
#
# Which site has the largest successful launches?
### KSC LC-39A
# Which site has the highest launch success rate?
### KSC LC-39A
# Which payload range(s) has the highest launch success rate?
### 2000 - 4000
# Which payload range(s) has the lowest launch success rate?
### 6000 - 9000
# Which F9 Booster version (v1.0, v1.1, FT, B4, B5, etc.) has the highest launch success rate?
### B5 | en | 0.776066 | # To run this file, Win Start > cmd > file dir > run: python spacex_dash_app.py # Import required libraries # Read the airline data into pandas dataframe # Dropdown list(s) # Create a dash application # Create an app layout # TASK 1: Add a dropdown list to enable Launch Site selection # The default select value is for ALL sites # dcc.Dropdown(id='site-dropdown',...) # TASK 2: Add a pie chart to show the total successful launches count for all sites # If a specific launch site was selected, show the Success vs. Failed counts for the site # TASK 3: Add a slider to select payload range #dcc.RangeSlider(id='payload-slider',...) # TASK 4: Add a scatter chart to show the correlation between payload and launch success # TASK 2: # Add a callback function for `site-dropdown` as input, `success-pie-chart` as output # return the outcomes piechart for a selected site # TASK 4: # Add a callback function for `site-dropdown` and `payload-slider` as inputs, `success-payload-scatter-chart` as output # print('min:', payload_range[0], '\tmax:', payload_range[1]) # print(entered_site) # Run the app # Finding Insights Visually # Now with the dashboard completed, you should be able to use it to analyze SpaceX launch data, and answer the following questions: # # Which site has the largest successful launches? ### KSC LC-39A # Which site has the highest launch success rate? ### KSC LC-39A # Which payload range(s) has the highest launch success rate? ### 2000 - 4000 # Which payload range(s) has the lowest launch success rate? ### 6000 - 9000 # Which F9 Booster version (v1.0, v1.1, FT, B4, B5, etc.) has the highest launch success rate? ### B5 | 3.500594 | 4 |
configs/mnist_paper_residual_cnn_gp.py | rhaps0dy/cnn-gp | 23 | 10759 | <reponame>rhaps0dy/cnn-gp
"""
The best randomly-searched ResNet reported in the paper.
In the original paper there is a bug. This network sums together layers after
the ReLU nonlinearity, which are not Gaussian, and also do not have mean 0. As
a result, the overall network does not converge to a Gaussian process. The
defined kernel is still valid, even if it doesn't correspond to a NN.
In the interest of making the results replicable, we have replicated this bug
as well.
The correct way to use ResNets is to sum things after a Conv2d layer, see for
example the `resnet_block` in `cnn_gp/kernels.py`.
"""
import torchvision
from cnn_gp import Conv2d, ReLU, Sequential, Sum
train_range = range(5000, 55000)
validation_range = list(range(55000, 60000)) + list(range(0, 5000))
test_range = range(60000, 70000)
dataset_name = "MNIST"
model_name = "ResNet"
dataset = torchvision.datasets.MNIST
transforms = []
epochs = 0
in_channels = 1
out_channels = 10
var_bias = 4.69
var_weight = 7.27
initial_model = Sequential(
*(Sum([
Sequential(),
Sequential(
Conv2d(kernel_size=4, padding="same", var_weight=var_weight * 4**2,
var_bias=var_bias),
ReLU(),
)]) for _ in range(8)),
Conv2d(kernel_size=4, padding="same", var_weight=var_weight * 4**2,
var_bias=var_bias),
ReLU(),
Conv2d(kernel_size=28, padding=0, var_weight=var_weight,
var_bias=var_bias),
)
| """
The best randomly-searched ResNet reported in the paper.
In the original paper there is a bug. This network sums together layers after
the ReLU nonlinearity, which are not Gaussian, and also do not have mean 0. As
a result, the overall network does not converge to a Gaussian process. The
defined kernel is still valid, even if it doesn't correspond to a NN.
In the interest of making the results replicable, we have replicated this bug
as well.
The correct way to use ResNets is to sum things after a Conv2d layer, see for
example the `resnet_block` in `cnn_gp/kernels.py`.
"""
import torchvision
from cnn_gp import Conv2d, ReLU, Sequential, Sum
train_range = range(5000, 55000)
validation_range = list(range(55000, 60000)) + list(range(0, 5000))
test_range = range(60000, 70000)
dataset_name = "MNIST"
model_name = "ResNet"
dataset = torchvision.datasets.MNIST
transforms = []
epochs = 0
in_channels = 1
out_channels = 10
var_bias = 4.69
var_weight = 7.27
initial_model = Sequential(
*(Sum([
Sequential(),
Sequential(
Conv2d(kernel_size=4, padding="same", var_weight=var_weight * 4**2,
var_bias=var_bias),
ReLU(),
)]) for _ in range(8)),
Conv2d(kernel_size=4, padding="same", var_weight=var_weight * 4**2,
var_bias=var_bias),
ReLU(),
Conv2d(kernel_size=28, padding=0, var_weight=var_weight,
var_bias=var_bias),
) | en | 0.942041 | The best randomly-searched ResNet reported in the paper. In the original paper there is a bug. This network sums together layers after the ReLU nonlinearity, which are not Gaussian, and also do not have mean 0. As a result, the overall network does not converge to a Gaussian process. The defined kernel is still valid, even if it doesn't correspond to a NN. In the interest of making the results replicable, we have replicated this bug as well. The correct way to use ResNets is to sum things after a Conv2d layer, see for example the `resnet_block` in `cnn_gp/kernels.py`. | 3.108507 | 3 |
python/learn/PythonDataVisualizationCookbookSE_Code/Chapter 07/ch07_rec08_scatterplot.py | flyingwjw/Documentation | 0 | 10760 | <gh_stars>0
import matplotlib.pyplot as plt
import numpy as np
# daily search trend for keyword 'flowers' for a year
d = [
1.04, 1.04, 1.16, 1.22, 1.46, 2.34, 1.16, 1.12, 1.24, 1.30, 1.44, 1.22, 1.26,
1.34, 1.26, 1.40, 1.52, 2.56, 1.36, 1.30, 1.20, 1.12, 1.12, 1.12, 1.06, 1.06,
1.00, 1.02, 1.04, 1.02, 1.06, 1.02, 1.04, 0.98, 0.98, 0.98, 1.00, 1.02, 1.02,
1.00, 1.02, 0.96, 0.94, 0.94, 0.94, 0.96, 0.86, 0.92, 0.98, 1.08, 1.04, 0.74,
0.98, 1.02, 1.02, 1.12, 1.34, 2.02, 1.68, 1.12, 1.38, 1.14, 1.16, 1.22, 1.10,
1.14, 1.16, 1.28, 1.44, 2.58, 1.30, 1.20, 1.16, 1.06, 1.06, 1.08, 1.00, 1.00,
0.92, 1.00, 1.02, 1.00, 1.06, 1.10, 1.14, 1.08, 1.00, 1.04, 1.10, 1.06, 1.06,
1.06, 1.02, 1.04, 0.96, 0.96, 0.96, 0.92, 0.84, 0.88, 0.90, 1.00, 1.08, 0.80,
0.90, 0.98, 1.00, 1.10, 1.24, 1.66, 1.94, 1.02, 1.06, 1.08, 1.10, 1.30, 1.10,
1.12, 1.20, 1.16, 1.26, 1.42, 2.18, 1.26, 1.06, 1.00, 1.04, 1.00, 0.98, 0.94,
0.88, 0.98, 0.96, 0.92, 0.94, 0.96, 0.96, 0.94, 0.90, 0.92, 0.96, 0.96, 0.96,
0.98, 0.90, 0.90, 0.88, 0.88, 0.88, 0.90, 0.78, 0.84, 0.86, 0.92, 1.00, 0.68,
0.82, 0.90, 0.88, 0.98, 1.08, 1.36, 2.04, 0.98, 0.96, 1.02, 1.20, 0.98, 1.00,
1.08, 0.98, 1.02, 1.14, 1.28, 2.04, 1.16, 1.04, 0.96, 0.98, 0.92, 0.86, 0.88,
0.82, 0.92, 0.90, 0.86, 0.84, 0.86, 0.90, 0.84, 0.82, 0.82, 0.86, 0.86, 0.84,
0.84, 0.82, 0.80, 0.78, 0.78, 0.76, 0.74, 0.68, 0.74, 0.80, 0.80, 0.90, 0.60,
0.72, 0.80, 0.82, 0.86, 0.94, 1.24, 1.92, 0.92, 1.12, 0.90, 0.90, 0.94, 0.90,
0.90, 0.94, 0.98, 1.08, 1.24, 2.04, 1.04, 0.94, 0.86, 0.86, 0.86, 0.82, 0.84,
0.76, 0.80, 0.80, 0.80, 0.78, 0.80, 0.82, 0.76, 0.76, 0.76, 0.76, 0.78, 0.78,
0.76, 0.76, 0.72, 0.74, 0.70, 0.68, 0.72, 0.70, 0.64, 0.70, 0.72, 0.74, 0.64,
0.62, 0.74, 0.80, 0.82, 0.88, 1.02, 1.66, 0.94, 0.94, 0.96, 1.00, 1.16, 1.02,
1.04, 1.06, 1.02, 1.10, 1.22, 1.94, 1.18, 1.12, 1.06, 1.06, 1.04, 1.02, 0.94,
0.94, 0.98, 0.96, 0.96, 0.98, 1.00, 0.96, 0.92, 0.90, 0.86, 0.82, 0.90, 0.84,
0.84, 0.82, 0.80, 0.80, 0.76, 0.80, 0.82, 0.80, 0.72, 0.72, 0.76, 0.80, 0.76,
0.70, 0.74, 0.82, 0.84, 0.88, 0.98, 1.44, 0.96, 0.88, 0.92, 1.08, 0.90, 0.92,
0.96, 0.94, 1.04, 1.08, 1.14, 1.66, 1.08, 0.96, 0.90, 0.86, 0.84, 0.86, 0.82,
0.84, 0.82, 0.84, 0.84, 0.84, 0.84, 0.82, 0.86, 0.82, 0.82, 0.86, 0.90, 0.84,
0.82, 0.78, 0.80, 0.78, 0.74, 0.78, 0.76, 0.76, 0.70, 0.72, 0.76, 0.72, 0.70,
0.64]
# Now let's generate random data for the same period
d1 = np.random.random(365)
assert len(d) == len(d1)
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax1.scatter(d, d1, alpha=0.5)
ax1.set_title('No correlation')
ax1.grid(True)
ax2 = fig.add_subplot(222)
ax2.scatter(d1, d1, alpha=0.5)
ax2.set_title('Ideal positive correlation')
ax2.grid(True)
ax3 = fig.add_subplot(223)
ax3.scatter(d1, d1*-1, alpha=0.5)
ax3.set_title('Ideal negative correlation')
ax3.grid(True)
ax4 = fig.add_subplot(224)
ax4.scatter(d1, d1+d, alpha=0.5)
ax4.set_title('Non ideal positive correlation')
ax4.grid(True)
plt.tight_layout()
plt.show() | import matplotlib.pyplot as plt
import numpy as np
# daily search trend for keyword 'flowers' for a year
d = [
1.04, 1.04, 1.16, 1.22, 1.46, 2.34, 1.16, 1.12, 1.24, 1.30, 1.44, 1.22, 1.26,
1.34, 1.26, 1.40, 1.52, 2.56, 1.36, 1.30, 1.20, 1.12, 1.12, 1.12, 1.06, 1.06,
1.00, 1.02, 1.04, 1.02, 1.06, 1.02, 1.04, 0.98, 0.98, 0.98, 1.00, 1.02, 1.02,
1.00, 1.02, 0.96, 0.94, 0.94, 0.94, 0.96, 0.86, 0.92, 0.98, 1.08, 1.04, 0.74,
0.98, 1.02, 1.02, 1.12, 1.34, 2.02, 1.68, 1.12, 1.38, 1.14, 1.16, 1.22, 1.10,
1.14, 1.16, 1.28, 1.44, 2.58, 1.30, 1.20, 1.16, 1.06, 1.06, 1.08, 1.00, 1.00,
0.92, 1.00, 1.02, 1.00, 1.06, 1.10, 1.14, 1.08, 1.00, 1.04, 1.10, 1.06, 1.06,
1.06, 1.02, 1.04, 0.96, 0.96, 0.96, 0.92, 0.84, 0.88, 0.90, 1.00, 1.08, 0.80,
0.90, 0.98, 1.00, 1.10, 1.24, 1.66, 1.94, 1.02, 1.06, 1.08, 1.10, 1.30, 1.10,
1.12, 1.20, 1.16, 1.26, 1.42, 2.18, 1.26, 1.06, 1.00, 1.04, 1.00, 0.98, 0.94,
0.88, 0.98, 0.96, 0.92, 0.94, 0.96, 0.96, 0.94, 0.90, 0.92, 0.96, 0.96, 0.96,
0.98, 0.90, 0.90, 0.88, 0.88, 0.88, 0.90, 0.78, 0.84, 0.86, 0.92, 1.00, 0.68,
0.82, 0.90, 0.88, 0.98, 1.08, 1.36, 2.04, 0.98, 0.96, 1.02, 1.20, 0.98, 1.00,
1.08, 0.98, 1.02, 1.14, 1.28, 2.04, 1.16, 1.04, 0.96, 0.98, 0.92, 0.86, 0.88,
0.82, 0.92, 0.90, 0.86, 0.84, 0.86, 0.90, 0.84, 0.82, 0.82, 0.86, 0.86, 0.84,
0.84, 0.82, 0.80, 0.78, 0.78, 0.76, 0.74, 0.68, 0.74, 0.80, 0.80, 0.90, 0.60,
0.72, 0.80, 0.82, 0.86, 0.94, 1.24, 1.92, 0.92, 1.12, 0.90, 0.90, 0.94, 0.90,
0.90, 0.94, 0.98, 1.08, 1.24, 2.04, 1.04, 0.94, 0.86, 0.86, 0.86, 0.82, 0.84,
0.76, 0.80, 0.80, 0.80, 0.78, 0.80, 0.82, 0.76, 0.76, 0.76, 0.76, 0.78, 0.78,
0.76, 0.76, 0.72, 0.74, 0.70, 0.68, 0.72, 0.70, 0.64, 0.70, 0.72, 0.74, 0.64,
0.62, 0.74, 0.80, 0.82, 0.88, 1.02, 1.66, 0.94, 0.94, 0.96, 1.00, 1.16, 1.02,
1.04, 1.06, 1.02, 1.10, 1.22, 1.94, 1.18, 1.12, 1.06, 1.06, 1.04, 1.02, 0.94,
0.94, 0.98, 0.96, 0.96, 0.98, 1.00, 0.96, 0.92, 0.90, 0.86, 0.82, 0.90, 0.84,
0.84, 0.82, 0.80, 0.80, 0.76, 0.80, 0.82, 0.80, 0.72, 0.72, 0.76, 0.80, 0.76,
0.70, 0.74, 0.82, 0.84, 0.88, 0.98, 1.44, 0.96, 0.88, 0.92, 1.08, 0.90, 0.92,
0.96, 0.94, 1.04, 1.08, 1.14, 1.66, 1.08, 0.96, 0.90, 0.86, 0.84, 0.86, 0.82,
0.84, 0.82, 0.84, 0.84, 0.84, 0.84, 0.82, 0.86, 0.82, 0.82, 0.86, 0.90, 0.84,
0.82, 0.78, 0.80, 0.78, 0.74, 0.78, 0.76, 0.76, 0.70, 0.72, 0.76, 0.72, 0.70,
0.64]
# Now let's generate random data for the same period
d1 = np.random.random(365)
assert len(d) == len(d1)
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax1.scatter(d, d1, alpha=0.5)
ax1.set_title('No correlation')
ax1.grid(True)
ax2 = fig.add_subplot(222)
ax2.scatter(d1, d1, alpha=0.5)
ax2.set_title('Ideal positive correlation')
ax2.grid(True)
ax3 = fig.add_subplot(223)
ax3.scatter(d1, d1*-1, alpha=0.5)
ax3.set_title('Ideal negative correlation')
ax3.grid(True)
ax4 = fig.add_subplot(224)
ax4.scatter(d1, d1+d, alpha=0.5)
ax4.set_title('Non ideal positive correlation')
ax4.grid(True)
plt.tight_layout()
plt.show() | en | 0.668601 | # daily search trend for keyword 'flowers' for a year # Now let's generate random data for the same period | 2.276516 | 2 |
src/thespian/tweaks.py | mtttech/dndpersonae | 1 | 10761 | from dataclasses import dataclass
import logging
from attributes import get_ability_modifier
from sourcetree.utils import (
get_feats_list,
get_feat_perks,
get_feat_proficiencies,
get_feat_requirements,
)
from stdio import prompt
log = logging.getLogger("thespian.tweaks")
class AbilityScoreImprovementError(Exception):
"""Handles ability score improvement errors."""
class FlagParserError(Exception):
"""Handles an invalid flag format error."""
class FeatOptionParser:
"""Generates and parses feat characteristic flags by feat.
FLAG OPTION PARSER SYSTEM
PIPEBAR: Used to separate flags. i.e: ability=Strength|proficiency=skills
Two flag options are designated in the above example: 'ability', and 'proficiency'.
ALLOWED FLAG OPTIONS:
Designates certain instructions for applying feat related "perks" to a character.
- ability
- proficiency
- savingthrows
- speed
COMMA: Used to identify the number of occurences of a flag. i.e: languages,2
The example above means that a player can choose two languages.
EQUAL SIGN: Used to separate option parameters. i.e ability=Strength,0
The example above means Strength is a designated parameter for the ability option.
In this case the character would get an enhancement to Strength.
There is more to this and is explained further below.
DOUBLE AMPERSAND: Used to separater parameter options. i.e ability=Strength&&Dexerity,1
The example above means the player can choose a one time ehancement to Strength or Dexterity.
PLUS SIGN: Used to seperate parameter options. i.e ability=Strength+Dexterity
The example above means the player can gain an enhancement in both Strength and Dexterity.
"""
# Parser Option Separators
PARSER_OPTIONS = "|"
OPTION_INCREMENT = ","
OPTION_PARAMETER = "="
PARAM_SINGLE_SELECTION = "&&"
PARAM_MULTIPLE_SELECTION = "+"
def __init__(self, feat, prof):
self.feat = feat
self.profile = prof
self.perks = get_feat_perks(self.feat)
def _get_proficiency_options(self, prof_type: str) -> list:
"""Returns a list of bonus proficiencies for a feat by proficiency type."""
return get_feat_proficiencies(self.feat, prof_type)
def _get_sub_menu_options(self, available_options) -> dict | bool:
"""Creates a dictionary of sub menu options, if applicable."""
if self.is_sub_menu(available_options):
sub_options = dict()
for option in available_options:
sub_options[option] = self._get_proficiency_options(option)
return sub_options
return False
@staticmethod
def _is_sub_menu(available_options) -> bool:
"""Returns True if sub menu options are available. False otherwise."""
for option in available_options:
if not option.islower():
return False
return True
def _parse_flags(self) -> dict:
"""Generates the characteristics for the specified feat."""
parsed_flags = dict()
raw_flags = self.perks.get("flags")
if raw_flags is None:
return parsed_flags
flag_pairs = raw_flags.split(self.PARSER_OPTIONS)
for flag_pair in flag_pairs:
if self.OPTION_INCREMENT not in flag_pair:
raise FlagParserError("Pairs must be formatted in 'name,value' pairs.")
attribute_name, increment = flag_pair.split(self.OPTION_INCREMENT)
if self.OPTION_PARAMETER not in attribute_name:
parsed_flags[attribute_name] = {"increment": increment}
else:
flag_options = attribute_name.split(self.OPTION_PARAMETER)
# Allowable flags: ability, proficiency, savingthrows, speed
attribute_name = flag_options[0]
try:
if attribute_name not in (
"ability",
"proficiency",
"savingthrows",
"speed",
):
raise FlagParserError(
f"Illegal flag name '{attribute_name}' specified."
)
except FlagParserError:
# pass
return parsed_flags
if self.PARAM_SINGLE_SELECTION in flag_options[1]:
options = flag_options[1].split(self.PARAM_SINGLE_SELECTION)
else:
options = flag_options[1]
parsed_flags[attribute_name] = {
"increment": increment,
"options": options,
}
return parsed_flags
def parse(self) -> dict:
"""Parses the generated flags for the chosen feat."""
final_flag = self._parse_flags()
if len(final_flag) == 0:
return
parsed_flag = dict()
for flag, options in final_flag.items():
if flag in ("ability", "proficiency"):
increment = int(options["increment"])
menu_options = options["options"]
if len(menu_options) < 1:
raise FlagParserError("Malformed parser instructions error.")
if flag == "ability":
if increment == 0:
raise FlagParserError(
"Flag attribute 'ability' requires a positive integer value."
)
# For feats that use the 'savingthrows' flag.
# Limits choices based on current saving throw proficiencies.
if "savingthrows" in final_flag:
menu_options = [
x for x in menu_options if x not in self.profile["savingthrows"]
]
if isinstance(menu_options, str):
my_ability = menu_options
elif isinstance(menu_options, list):
for _ in range(increment):
my_ability = prompt(
"Choose the ability you would like to apply a bonus to.",
menu_options,
)
menu_options.remove(my_ability)
log.info(f"You selected the ability '{my_ability}'.")
# If 'savingthrows' flag specified, add proficiency for ability saving throw.
if "savingthrows" in final_flag:
self.profile["savingthrows"].append(my_ability)
log.info(
f"You gained proficiency in the '{my_ability}' saving throw."
)
bonus_value = self.perks[flag][my_ability]
parsed_flag[flag] = (my_ability, bonus_value)
elif flag == "proficiency":
# Increment value of 0 means append ALL listed bonuses.
# Increment values other than 0 means add # of bonuses == increment value.
chosen_options = dict()
submenu_options = None
if isinstance(menu_options, str) and increment == 0:
chosen_options[menu_options] = self._get_proficiency_options(
menu_options
)
elif isinstance(menu_options, list):
for _ in range(increment):
my_bonus = prompt(f"Choose your bonus: '{flag}'.", menu_options)
if not self._is_sub_menu(menu_options):
menu_options.remove(my_bonus)
else:
# Generate submenu options, if applicable.
if submenu_options is None:
submenu_options = self._get_sub_menu_options(
menu_options
)
submenu_options[my_bonus] = [
x
for x in submenu_options[my_bonus]
if x not in self.profile[my_bonus]
]
# Create storage handler for selections, if applicable.
if len(chosen_options) == 0:
for opt in submenu_options:
chosen_options[opt] = list()
submenu_choice = prompt(
f"Choose submenu option: '{my_bonus}'.",
submenu_options.get(my_bonus),
)
chosen_options[my_bonus].append(submenu_choice)
submenu_options[my_bonus].remove(submenu_choice)
# Reset the submenu options after use
submenu_options = None
log.info(
f"You selected the {flag} ({my_bonus}) bonus '{submenu_choice}'."
)
elif isinstance(menu_options, str):
for prof_type in menu_options.split(self.PARAM_MULTIPLE_SELECTION):
chosen_proficiencies = list()
# Pull full collection of bonus proficiencies,
proficiency_options = get_feat_proficiencies(
self.feat, prof_type
)
# If collection is dict, sort through sub categories,
# And choose only the unselected options in that category.
# Otherwise, simply sort out the unselected options
if isinstance(proficiency_options, dict):
temp = list()
for types in tuple(proficiency_options.keys()):
if types not in self.profile[prof_type]:
temp += proficiency_options[types]
proficiency_options = temp
else:
proficiency_options = [
x
for x in proficiency_options
if x not in self.profile[prof_type]
]
for _ in range(increment):
# Clear out the temporarily chosen options.
proficiency_options = [
x
for x in proficiency_options
if x not in chosen_proficiencies
]
my_bonus = prompt(
f"Choose your bonus: {flag}.", proficiency_options
)
chosen_proficiencies.append(my_bonus)
proficiency_options.remove(my_bonus)
log.info(
f"You selected the {flag} ({prof_type}) bonus '{my_bonus}'."
)
chosen_options[prof_type] = chosen_proficiencies
for k, v in chosen_options.items():
parsed_flag[k] = v
elif flag == "speed":
speed_value = self.perks[flag]
if speed_value != 0:
parsed_flag[flag] = speed_value
elif flag == "spells":
bonus_spells = self.perks[flag]
for index, spell in enumerate(bonus_spells):
if isinstance(spell, list):
spell_choice = prompt("Choose your bonus spell.", spell)
bonus_spells[index] = spell_choice
log.info(f"You selected the spell {spell_choice}.")
parsed_flag[flag] = bonus_spells
return parsed_flag
@dataclass
class AbilityScoreImprovement:
"""Used to apply ability and/or feat upgrades."""
character: dict
def _add_feat_perks(self, feat: str) -> None:
"""Applies feat related perks."""
parsed_attributes = FeatOptionParser(feat, self.character).parse()
if parsed_attributes is None:
return
for flag, options in parsed_attributes.items():
if flag == "ability":
ability, bonus = options
self._set_ability_score(ability, bonus)
else:
self.character[flag] += options
def _count_upgrades(self) -> int:
"""Returns the number of available upgrades."""
upgrade_count = 0
for x in range(1, self.character["level"] + 1):
if (x % 4) == 0 and x != 20:
upgrade_count += 1
if self.character["klass"] == "Fighter" and self.character["level"] >= 6:
upgrade_count += 1
if self.character["klass"] == "Rogue" and self.character["level"] >= 8:
upgrade_count += 1
if self.character["klass"] == "Fighter" and self.character["level"] >= 14:
upgrade_count += 1
if self.character["level"] >= 19:
upgrade_count += 1
return upgrade_count
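        # For example, a level-8 Fighter yields 3 upgrades here: the standard
        # ones at levels 4 and 8 plus the extra Fighter upgrade granted at level 6.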
def _has_requirements(self, feat: str) -> bool:
"""Checks if feat requirements have been met."""
# Character already has feat
if feat in self.character["feats"]:
return False
# If Heavily, Lightly, or Moderately Armored feat or a Monk.
# "Armor Related" or Weapon Master feat but already proficient.
if (
feat
in (
"Heavily Armored",
"Lightly Armored",
"Moderately Armored",
)
and self.character["klass"] == "Monk"
):
return False
elif feat in (
"Heavily Armored",
"Lightly Armored",
"Moderately Armored",
"Weapon Master",
):
# Heavily Armored: Character already has heavy armor proficiency.
# Lightly Armored: Character already has light armor proficiency.
# Moderately Armored: Character already has medium armor proficiency.
# Weapon Master: Character already has martial weapon proficiency.
if feat == "Heavily Armored" and "Heavy" in self.character["armors"]:
return False
elif feat == "Lightly Armored" and "Light" in self.character["armors"]:
return False
elif feat == "Moderately Armored" and "Medium" in self.character["armors"]:
return False
elif feat == "Weapon Master" and "Martial" in self.character["weapons"]:
return False
# Cycle through ALL prerequisites for the feat.
prerequisite = get_feat_requirements(feat)
for requirement, _ in prerequisite.items():
# Ignore requirements that are None
if prerequisite.get(requirement) is None:
continue
# Check ability requirements
if requirement == "ability":
for ability, required_score in prerequisite.get(requirement).items():
my_score = self.character["scores"][ability]
if my_score < required_score:
return False
# Check caster requirements
if requirement == "caster":
# If no spellcasting ability.
if prerequisite[requirement] and self.character["spellslots"] == "0":
return False
# Magic Initiative requirements check
if feat == "Magic Initiative" and self.character["klass"] not in (
"Bard",
"Cleric",
"Druid",
"Sorcerer",
"Warlock",
"Wizard",
):
return False
# Ritual Caster requirements check
if feat == "Ritual Caster":
                    # Assumes the character dict keeps its class ability priority list under "ability".
                    primary_ability = self.character["ability"][0]
                    my_score = self.character["scores"][primary_ability]
required_score = prerequisite["ability"][primary_ability]
if my_score < required_score:
return False
# Check proficiency requirements
if requirement == "proficiency":
if feat in (
"Heavy Armor Master",
"Heavily Armored",
"Medium Armor Master",
"Moderately Armored",
):
armors = prerequisite.get(requirement).get("armors")
for armor in armors:
if armor not in self.character["armors"]:
return False
# Check race requirements
if requirement == "race":
if self.character["race"] not in prerequisite.get(requirement):
return False
# Check subrace requirements
if requirement == "subrace":
if self.character["subrace"] not in prerequisite.get(requirement):
return False
return True
def _is_adjustable(self, ability: str, bonus: int = 1) -> bool:
"""Checks if ability is adjustable < 20."""
if not isinstance(ability, str):
raise AbilityScoreImprovementError(
"Argument 'ability' must be of type 'str'."
)
if not isinstance(bonus, int):
raise AbilityScoreImprovementError(
"Argument 'bonus' must be of type 'int'."
)
if ability not in self.character["scores"]:
raise AbilityScoreImprovementError(
f"Invalid ability '{ability}' specified."
)
if (self.character["scores"][ability] + bonus) > 20:
return False
return True
def run(self) -> None:
"""Executes the ability score improvement class."""
# Determine actual hp.
modifier = get_ability_modifier("Constitution", self.character["scores"])
log.info(f"You have a Constitution modifier of {modifier}.")
bonus_hit_points = modifier * self.character["level"]
log.info(f"Your modifier*level provide {bonus_hit_points} bonus hit points.")
total_hit_points = self.character["hit_points"] + bonus_hit_points
self.character["hit_points"] = total_hit_points
log.info(f"You have {total_hit_points} total hit points.")
if self.character["level"] < 4:
return
num_of_upgrades = self._count_upgrades()
while num_of_upgrades > 0:
if num_of_upgrades > 1:
log.info(f"You have {num_of_upgrades} upgrades available.")
else:
log.info("You have 1 upgrade available.")
my_path = prompt(
"Follow which upgrade path?", ["Upgrade Ability", "Choose Feat"]
)
# Path #1: Upgrade an Ability.
if my_path == "Upgrade Ability":
my_bonus = prompt("Apply how many points?", ["1", "2"])
log.info(f"You chose an ability bonus of: +{my_bonus}.")
my_bonus = int(my_bonus)
ability_options = [
a
for a in (
"Strength",
"Dexterity",
"Constitution",
"Intelligence",
"Wisdom",
"Charisma",
)
if self._is_adjustable(a, my_bonus)
]
# Apply +2 bonus to one ability.
# Apply +1 bonus to two abilities.
if my_bonus == 1:
for _ in range(2):
my_ability = prompt(
"Which ability?",
ability_options,
)
ability_options.remove(my_ability)
self._set_ability_score(my_ability, my_bonus)
elif my_bonus == 2:
my_ability = prompt(
"Which ability?",
ability_options,
)
self._set_ability_score(my_ability, my_bonus)
# Path #2: Add a new Feat.
elif my_path == "Choose Feat":
feat_options = [
x for x in get_feats_list() if x not in self.character["feats"]
]
my_feat = prompt(
"Which feat do you want to acquire?",
feat_options,
)
log.info(f"Checking requirements for the requested feat {my_feat}...")
while not self._has_requirements(my_feat):
feat_options.remove(my_feat)
                    log.warning(
f"You don't meet the requirements for '{my_feat}'.",
)
my_feat = prompt(
f"Which feat do you want to acquire?",
feat_options,
)
else:
self._add_feat_perks(my_feat)
self.character["feats"].append(my_feat)
log.info(f"You selected the feat {my_feat}.")
num_of_upgrades -= 1
def _set_ability_score(self, ability, bonus=1) -> None:
"""Applies a bonus to a specified ability."""
if not self._is_adjustable(ability, bonus):
log.warn(f"Ability '{ability}' is not adjustable.")
else:
new_score = self.character.get("scores").get(ability) + bonus
self.character["scores"][ability] = new_score
log.info(f"You applied a +{bonus} bonus to your {ability}.")
log.info(f"Your {ability} score is now a {new_score}.")
| en | 0.74451 | Handles ability score improvement errors. Handles an invalid flag format error. Generates and parses feat characteristic flags by feat. FLAG OPTION PARSER SYSTEM PIPEBAR: Used to separate flags. i.e: ability=Strength|proficiency=skills Two flag options are designated in the above example: 'ability', and 'proficiency'. ALLOWED FLAG OPTIONS: Designates certain instructions for applying feat related "perks" to a character. - ability - proficiency - savingthrows - speed COMMA: Used to identify the number of occurences of a flag. i.e: languages,2 The example above means that a player can choose two languages. EQUAL SIGN: Used to separate option parameters. i.e ability=Strength,0 The example above means Strength is a designated parameter for the ability option. In this case the character would get an enhancement to Strength. There is more to this and is explained further below. DOUBLE AMPERSAND: Used to separater parameter options. i.e ability=Strength&&Dexerity,1 The example above means the player can choose a one time ehancement to Strength or Dexterity. PLUS SIGN: Used to seperate parameter options. i.e ability=Strength+Dexterity The example above means the player can gain an enhancement in both Strength and Dexterity. # Parser Option Separators Returns a list of bonus proficiencies for a feat by proficiency type. Creates a dictionary of sub menu options, if applicable. Returns True if sub menu options are available. False otherwise. Generates the characteristics for the specified feat. # Allowable flags: ability, proficiency, savingthrows, speed # pass Parses the generated flags for the chosen feat. # For feats that use the 'savingthrows' flag. # Limits choices based on current saving throw proficiencies. # If 'savingthrows' flag specified, add proficiency for ability saving throw. # Increment value of 0 means append ALL listed bonuses. # Increment values other than 0 means add # of bonuses == increment value. # Generate submenu options, if applicable. # Create storage handler for selections, if applicable. # Reset the submenu options after use # Pull full collection of bonus proficiencies, # If collection is dict, sort through sub categories, # And choose only the unselected options in that category. # Otherwise, simply sort out the unselected options # Clear out the temporarily chosen options. Used to apply ability and/or feat upgrades. Applies feat related perks. Returns the number of available upgrades. Checks if feat requirements have been met. # Character already has feat # If Heavily, Lightly, or Moderately Armored feat or a Monk. # "Armor Related" or Weapon Master feat but already proficient. # Heavily Armored: Character already has heavy armor proficiency. # Lightly Armored: Character already has light armor proficiency. # Moderately Armored: Character already has medium armor proficiency. # Weapon Master: Character already has martial weapon proficiency. # Cycle through ALL prerequisites for the feat. # Ignore requirements that are None # Check ability requirements # Check caster requirements # If no spellcasting ability. # Magic Initiative requirements check # Ritual Caster requirements check # Check proficiency requirements # Check race requirements # Check subrace requirements Checks if ability is adjustable < 20. Executes the ability score improvement class. # Determine actual hp. # Path #1: Upgrade an Ability. # Apply +2 bonus to one ability. # Apply +1 bonus to two abilities. # Path #2: Add a new Feat. Applies a bonus to a specified ability. | 3.367597 | 3 |
atomic1D/ImpuritySpecies.py | TBody/atomic1D | 1 | 10762 | class ImpuritySpecies(object):
# For storing OpenADAS data related to a particular impurity species
# Loosely based on cfe316/atomic/atomic_data.py/AtomicData class (although with much less code since
	# all of the F77 importing is done in the separate <<make json_update>> code since BOUT++ protocol
# requires fortran code be isolated from main operation)
	def __init__(self, symbol, adas_files_dict=None, rate_coefficients=None, impurity_fraction=None):
# Searches for year, atomic_number, has_charge_exchange from user_input.json
#
# Default initialiser for class
# symbol : (str) | element symbol (e.g. 'C')
# name : (str) | full name of element (for printing only)
# year : (int) | year for which OpenADAS data was searched (1996)
# has_charge_exchange : (bool) | whether cx_power (prc) was found for this element-year combination (True)
# atomic_number : (int) | number of protons for impurity species (6)
# adas_files_dict : (str -> str) | dictionary of OpenADAS files, indexed by file-type ('ionisation': 'scd96_c', ...)
# rate_coefficients : (str -> RateCoefficient) | dictionary of RateCoefficient objects corresponding to adas files ('ionisation': <RateCoefficientObject>, ...)
import json
with open('user_input.json','r') as fp:
data_dict = json.load(fp)
element_dict = data_dict[symbol]
assert symbol == element_dict['symbol']
self.symbol = symbol
self.name = element_dict['name']
self.year = element_dict['year']
self.has_charge_exchange = element_dict['has_charge_exchange']
self.atomic_number = element_dict['atomic_number']
		# None defaults above avoid sharing mutable default dicts between instances.
		self.adas_files_dict = adas_files_dict if adas_files_dict is not None else {}
		self.rate_coefficients = rate_coefficients if rate_coefficients is not None else {}
def __str__(self):
# Printing method, for easier inspection of object data
_print_adas_dict = ''
if len(self.adas_files_dict) == 0:
_print_adas_check = 'Not initialised'
else:
_print_adas_check = 'Initialised'
for key, value in self.adas_files_dict.items():
_print_adas_dict = _print_adas_dict + '{:>25} -> {}\n'.format(key,value)
if len(self.rate_coefficients) == 0:
_print_rate_check = 'Not initialised'
else:
_print_rate_check = 'Initialised'
_printing_string = 'ImpuritySpecies object with attributes'+\
'\n{:>25} = {}'.format('symbol', self.symbol)+\
'\n{:>25} = {}'.format('year', self.year)+\
'\n{:>25} = {}'.format('has_charge_exchange', self.has_charge_exchange)+\
'\n{:>25} = {}'.format('atomic_number', self.atomic_number)+\
'\n{:>25} = {}'.format('adas_files_dict', _print_adas_check)+\
'\n{:>25} = {}'.format('rate_coefficients', _print_rate_check)
if len(self.adas_files_dict) != 0:
_printing_string += '\n--------------------------------------------------\n'+_print_adas_dict
return _printing_string
def addJSONFiles(self,physics_process,filetype_code,JSON_database_path):
# 1. Make the filename string expected for the json adas file
# 2. Check that this file exists in the JSON_database_path/json_data directory
# 3. Add this file to the atomic data .adas_files_dict attribute
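		# e.g. filetype_code='scd', year=1996, symbol='c' gives the filename 'scd96_c.json'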
import os.path
filename = '{}{}_{}.json'.format(filetype_code,str(self.year)[-2:],self.symbol)
full_path = '{}/json_data/{}'.format(JSON_database_path,filename)
if not(os.path.isfile(full_path)):
raise FileNotFoundError('File {} not found in {}/json_data'.format(filename,JSON_database_path))
self.adas_files_dict[physics_process] = filename
def makeRateCoefficients(self,JSON_database_path):
# Calls the RateCoefficient.__init__ method for each entry in the .adas_files_dict
# Generates a dictionary of RateCoefficient objects as .rate_coefficients
from atomic1D import RateCoefficient
for physics_process, filename in self.adas_files_dict.items():
full_path = '{}/json_data/{}'.format(JSON_database_path,filename)
self.rate_coefficients[physics_process] = RateCoefficient(full_path)
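# Minimal usage sketch (illustrative only -- assumes 'user_input.json' has an entry
# keyed 'c' for carbon and that JSON_database_path points at a local OpenADAS JSON store):
#
#   carbon = ImpuritySpecies('c')
#   carbon.addJSONFiles('ionisation', 'scd', JSON_database_path)
#   carbon.makeRateCoefficients(JSON_database_path)
#   print(carbon)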
| en | 0.706586 | # For storing OpenADAS data related to a particular impurity species # Loosely based on cfe316/atomic/atomic_data.py/AtomicData class (although with much less code since # all of the F77 importing is done in the seperate <<make json_update>> code since BOUT++ protocol # requires fortran code be isolated from main operation) # Searches for year, atomic_number, has_charge_exchange from user_input.json # # Default initialiser for class # symbol : (str) | element symbol (e.g. 'C') # name : (str) | full name of element (for printing only) # year : (int) | year for which OpenADAS data was searched (1996) # has_charge_exchange : (bool) | whether cx_power (prc) was found for this element-year combination (True) # atomic_number : (int) | number of protons for impurity species (6) # adas_files_dict : (str -> str) | dictionary of OpenADAS files, indexed by file-type ('ionisation': 'scd96_c', ...) # rate_coefficients : (str -> RateCoefficient) | dictionary of RateCoefficient objects corresponding to adas files ('ionisation': <RateCoefficientObject>, ...) # Printing method, for easier inspection of object data # 1. Make the filename string expected for the json adas file # 2. Check that this file exists in the JSON_database_path/json_data directory # 3. Add this file to the atomic data .adas_files_dict attribute # Calls the RateCoefficient.__init__ method for each entry in the .adas_files_dict # Generates a dictionary of RateCoefficient objects as .rate_coefficients | 2.7742 | 3 |
Utils/Matrix.py | valavanisleonidas/Machine_Learning_Toolkit | 0 | 10763 | <reponame>valavanisleonidas/Machine_Learning_Toolkit
import os
import platform
import numpy
class Matrix:
def __init__(self):
if platform.system() == "Windows":
self.delimiterForPath = "\\"
else:
self.delimiterForPath = "/"
self.labelsDType = numpy.int32
self.imagesDType = numpy.float32
def deleteRows(self, array, rows, axis):
return numpy.delete(array, rows, axis)
def swapAxes(self, array, axe1, axe2):
return numpy.swapaxes(array, axe1, axe2)
def getImageCategoryFromPath(self, imagePath):
# path in format : ..\\Category\\ImageName
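        # e.g. '...\\TrainSet\\3\\img001.jpg' (or '.../TrainSet/3/img001.jpg' on Linux)
        # yields the integer category label 3.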
return numpy.array(imagePath.split(self.delimiterForPath, len(imagePath))[
len(imagePath.split(self.delimiterForPath, len(imagePath))) - 2], dtype=self.labelsDType)
def getNumberOfClasses(self, array):
return len(numpy.unique(array))
def getImagesInDirectory(self, folderPath, extensions=('.jpg', '.jpeg', '.png', '.bmp', '.gif')):
imagesList = []
assert os.path.isdir(folderPath), 'No folder with that name exists : %r ' % folderPath
# for all images in folder path
for root, dirs, files in os.walk(folderPath):
for name in files:
if name.endswith(extensions):
imagesList.append(root + self.delimiterForPath + name)
return imagesList
def addDimension(self, array, axis):
return numpy.expand_dims(a=array, axis=axis)
def ExtractImages(self, folderPath, image_size=(256, 256), convertion=None, imageChannels=3,
preprocessImages=False ,normalize=True ,normalizeRange=(0,1) ):
from Images.ImageProcessing import ImageProcessing
imageList = self.getImagesInDirectory(folderPath=folderPath)
assert len(imageList) > 0, 'No images in folder : %r' % folderPath
if convertion != "Grayscale" and imageChannels != 3:
            if convertion is None:
convertion = "RGB"
raise ValueError(' %r supports only 3 image channels!' % convertion)
images_list = []
labels_list = []
# for all images in folder path
for imagePath in imageList:
# get category of image and add category to array
labels_list.append(
self.getImageCategoryFromPath(imagePath=imagePath))
# get image array and add image to array
images_list.append(
ImageProcessing().getImageArray(imagePath=imagePath, imageSize=image_size, convertion=convertion,
imageChannels=imageChannels,preprocessImages=preprocessImages,
Normalize=normalize,NormalizeRange=normalizeRange))
# convert lists to numpy array
allLabelsArray = numpy.array(labels_list).reshape(len(labels_list))
allImagesArray = numpy.array(images_list).reshape(len(imageList), imageChannels, image_size[0], image_size[1])
return [allImagesArray, allLabelsArray]
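    # Usage sketch (illustrative only, assuming the accompanying Images.ImageProcessing
    # helper is importable and images are laid out as <folderPath>/<integer category>/<file>):
    #
    #   images, labels = Matrix().ExtractImages('TrainSet', image_size=(64, 64))
    #   # images.shape -> (N, 3, 64, 64), labels.shape -> (N,)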
# returns batches from data with size batchSize
def chunker(self,data, batchSize):
        return (data[pos:pos + batchSize] for pos in range(0, len(data), batchSize))
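        # e.g. list(Matrix().chunker([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]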
def shuffleMatrix(self,array):
numpy.random.shuffle(array)
def shuffleMatrixAlongWithLabels(self, array1, array2):
# shuffle array1 (images) with corresponding labels array2
from random import shuffle
array1_shuf = []
array2_shuf = []
        index_shuf = list(range(len(array1)))
shuffle(index_shuf)
for i in index_shuf:
array1_shuf.append(array1[i])
array2_shuf.append(array2[i])
return [numpy.array(array1_shuf, dtype=self.imagesDType).astype('float32'), numpy.array(array2_shuf, dtype=self.labelsDType).astype('float32')]
def TakeExamplesFromEachCategory(self,features,labels,maxImagesPerCategory=10):
import gc
import os
validationArray = []
validation_labels=[]
# for 0 to number of output classes
for index in range(0,self.getNumberOfClasses(labels)):
print ('mpika 1')
# find indexes of category index
indexes = numpy.where(labels == index)
# if train has 1 instance don't take it for validation
if len(indexes[0]) in [ 0 , 1 ]:
continue
# if instances are less than max categories given
if len(indexes[0]) <= maxImagesPerCategory:
# take half for validation
maxImagesPerCategory= len(indexes[0])/2
print ('mpika 2')
assert len(indexes[0]) >= maxImagesPerCategory ,\
"Error : Validation examples per category more than train instances. Category: {0}" \
" validation pes category : {1} , training examples : {2} ".format(index,maxImagesPerCategory,len(indexes[0]),)
count = 0
# for indexes in category
for catIndex in indexes[0]:
print ('mpika 3')
count +=1
if count > maxImagesPerCategory:
print ('mpika 3.1')
break
print ('mpika 3.2')
validationArray.append(features[catIndex])
print ('mpika 3.3')
validation_labels.append(labels[catIndex ])
print ('mpika 3.4 catIndex' , catIndex)
features = numpy.delete(features,catIndex,axis=0)
print ('mpika 3.5')
labels = numpy.delete(labels,catIndex,axis=0)
print ('mpika 3.6')
gc.collect()
print ('mpika 4')
return [features, numpy.array(validationArray,dtype=self.imagesDType).astype('float32'), labels,
numpy.array(validation_labels,dtype=self.labelsDType).astype('int32')]
def takeLastExamples(self,trainArray, train_labels, validationPercentage=.2):
# take validationPercentage of training data for validation
validationExamples = int(validationPercentage * len(trainArray))
# We reserve the last validationExamples training examples for validation.
trainArray, validationArray = trainArray[:-validationExamples], trainArray[-validationExamples:]
train_labels, validation_labels = train_labels[:-validationExamples], train_labels[-validationExamples:]
return [trainArray, validationArray, train_labels, validation_labels]
def SplitTrainValidation(self, trainArray, train_labels, validationPercentage=.2,takeLastExamples=False,maxImagesPerCategory=10):
if takeLastExamples:
return self.takeLastExamples(trainArray, train_labels, validationPercentage)
else:
return self.TakeExamplesFromEachCategory(trainArray, train_labels,maxImagesPerCategory)
def moveFile(self, src, dest):
import shutil
shutil.move(src, dest)
if __name__ == '__main__':
trainFolder = 'C:\Users\l.valavanis\Desktop\Clef2013\TrainSet'
testFolder = 'C:\Users\l.valavanis\Desktop\Clef2013\TestSet'
#
# trainFolder = 'C:\Users\l.valavanis\Desktop\Clef2013\SampleImages - Copy'
# testFolder = 'C:\Users\l.valavanis\Desktop\Clef2013\SampleImages - Copy - Copy'
#
# # trainFolder = '/home/leonidas/Desktop/images/train'
# # testFolder = '/home/leonidas/Desktop/images/test'
#
# [trainArray, train_labels, testArray, test_labels, validationArray, validation_labels, outputClasses] = \
# load_dataset(trainFolder, testFolder,imageSize=(3,3),convertion='L',imageChannels=1)
#
# print trainArray.shape
# print trainArray
# # print validation_labels
# # print train_labels
# # print trainArray
#
# print trainArray.shape
# print train_labels.shape
# print testArray.shape
# print test_labels.shape
# print validationArray.shape
# print validation_labels.shape
#
# trainPath = 'C:\\Users\\l.valavanis\\Desktop\\Clef2013\\GBoC\Features\\train_2x2_CIELab_512.txt'
# testPath = 'C:\\Users\\l.valavanis\\Desktop\\Clef2013\\GBoC\Features\\test_2x2_CIELab_512.txt'
# trainLabelPath = 'C:\\Users\\l.valavanis\\Desktop\\Clef2013\\GBoC\Features\\train_2x2_CIELab_512_labels.txt'
# testLabelPath = 'C:\\Users\\l.valavanis\\Desktop\\Clef2013\\GBoC\Features\\test_2x2_CIELab_512_labels.txt'
# [trainArray, train_labels, testArray, test_labels, validationArray, validation_labels,
# outputClasses] = loadFeatures(trainPath=trainPath, trainLabels=trainLabelPath, testPath=testPath,
# testLabels=testLabelPath);
i=0;
for trainArray,train_labels in Matrix().getArrayOfImagesUsingMiniBatches(folderPath=trainFolder,image_size=(100,100),batch_size=15):
print (trainArray.shape)
print (train_labels.shape)
i+=len(trainArray)
print "aaasdasdas d : ",i
# # print validation_labels
# # print train_labels
# # print trainArray
#
# print trainArray.shape
# print train_labels.shape
# print testArray.shape
# print test_labels.shape
# print validationArray.shape
# print validation_labels.shape
# print validation_labels.shape | en | 0.561047 | # path in format : ..\\Category\\ImageName # for all images in folder path # for all images in folder path # get category of image and add category to array # get image array and add image to array # convert lists to numpy array # returns batches from data with size batchSize # shuffle array1 (images) with corresponding labels array2 # for 0 to number of output classes # find indexes of category index # if train has 1 instance don't take it for validation # if instances are less than max categories given # take half for validation # for indexes in category # take validationPercentage of training data for validation # We reserve the last validationExamples training examples for validation. # # trainFolder = 'C:\Users\l.valavanis\Desktop\Clef2013\SampleImages - Copy' # testFolder = 'C:\Users\l.valavanis\Desktop\Clef2013\SampleImages - Copy - Copy' # # # trainFolder = '/home/leonidas/Desktop/images/train' # # testFolder = '/home/leonidas/Desktop/images/test' # # [trainArray, train_labels, testArray, test_labels, validationArray, validation_labels, outputClasses] = \ # load_dataset(trainFolder, testFolder,imageSize=(3,3),convertion='L',imageChannels=1) # # print trainArray.shape # print trainArray # # print validation_labels # # print train_labels # # print trainArray # # print trainArray.shape # print train_labels.shape # print testArray.shape # print test_labels.shape # print validationArray.shape # print validation_labels.shape # # trainPath = 'C:\\Users\\l.valavanis\\Desktop\\Clef2013\\GBoC\Features\\train_2x2_CIELab_512.txt' # testPath = 'C:\\Users\\l.valavanis\\Desktop\\Clef2013\\GBoC\Features\\test_2x2_CIELab_512.txt' # trainLabelPath = 'C:\\Users\\l.valavanis\\Desktop\\Clef2013\\GBoC\Features\\train_2x2_CIELab_512_labels.txt' # testLabelPath = 'C:\\Users\\l.valavanis\\Desktop\\Clef2013\\GBoC\Features\\test_2x2_CIELab_512_labels.txt' # [trainArray, train_labels, testArray, test_labels, validationArray, validation_labels, # outputClasses] = loadFeatures(trainPath=trainPath, trainLabels=trainLabelPath, testPath=testPath, # testLabels=testLabelPath); # # print validation_labels # # print train_labels # # print trainArray # # print trainArray.shape # print train_labels.shape # print testArray.shape # print test_labels.shape # print validationArray.shape # print validation_labels.shape | 2.618847 | 3 |
networking_calico/plugins/ml2/drivers/calico/policy.py | manojcode/networking-calico | 0 | 10764 | <reponame>manojcode/networking-calico
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networking_calico.common import config as calico_config
from networking_calico.compat import log
from networking_calico import datamodel_v3
from networking_calico.plugins.ml2.drivers.calico.syncer import ResourceSyncer
LOG = log.getLogger(__name__)
# Each OpenStack security group is mapped to a Calico NetworkPolicy. A VM's
# security group membership is represented by the VM having a label for each
# security group that it belongs to; thus the selector
# 'has(<security-group-label>)' represents the VMs that belong to that security
# group.
#
# The label for each security group is 'sg.projectcalico.org/openstack-'
# followed by the security group ID, and the name of the NetworkPolicy for each
# security group is 'ossg.default.' followed by the security group ID.
SG_LABEL_PREFIX = 'sg.projectcalico.org/openstack-'
SG_NAME_LABEL_PREFIX = 'sg-name.projectcalico.org/openstack-'
SG_NAME_MAX_LENGTH = (datamodel_v3.SANITIZE_LABEL_MAX_LENGTH -
len(SG_NAME_LABEL_PREFIX))
SG_NAME_PREFIX = 'ossg.default.'
class PolicySyncer(ResourceSyncer):
def __init__(self, db, txn_from_context):
super(PolicySyncer, self).__init__(db,
txn_from_context,
"NetworkPolicy")
self.region_string = calico_config.get_region_string()
self.namespace = datamodel_v3.get_namespace(self.region_string)
def delete_legacy_etcd_data(self):
if self.namespace != datamodel_v3.NO_REGION_NAMESPACE:
datamodel_v3.delete_legacy(self.resource_kind, SG_NAME_PREFIX)
def get_all_from_etcd(self):
results = []
for r in datamodel_v3.get_all(self.resource_kind, self.namespace):
name, _, _ = r
if name.startswith(SG_NAME_PREFIX):
results.append(r)
return results
def create_in_etcd(self, name, spec):
return datamodel_v3.put(self.resource_kind,
self.namespace,
name,
spec,
mod_revision=0)
def update_in_etcd(self, name, spec, mod_revision=None):
return datamodel_v3.put(self.resource_kind,
self.namespace,
name,
spec,
mod_revision=mod_revision)
def delete_from_etcd(self, name, mod_revision):
return datamodel_v3.delete(self.resource_kind,
self.namespace,
name,
mod_revision=mod_revision)
def get_all_from_neutron(self, context):
return dict((SG_NAME_PREFIX + sg['id'], sg)
for sg in self.db.get_security_groups(context))
def neutron_to_etcd_write_data(self, sg, context, reread=False):
if reread:
# We don't need to reread the SG row itself here, because we don't
# use any information from it, apart from its ID as a key for the
# following rules.
pass
rules = self.db.get_security_group_rules(
context,
filters={'security_group_id': [sg['id']]}
)
return policy_spec(sg['id'], rules)
def write_sgs_to_etcd(self, sgids, context):
rules = self.db.get_security_group_rules(
context, filters={'security_group_id': sgids}
)
for sgid in sgids:
self.update_in_etcd(SG_NAME_PREFIX + sgid,
policy_spec(sgid, rules))
def policy_spec(sgid, rules):
"""Generate JSON NetworkPolicySpec for the given security group."""
# <rules> can include those for several security groups. Pick out the
# rules for the security group that we are translating right now.
sg_rules = (r for r in rules if r['security_group_id'] == sgid)
# Split the rules based on direction, and map to Calico form.
inbound_rules = []
outbound_rules = []
for rule in sg_rules:
if rule['direction'] == 'ingress':
inbound_rules.append(_neutron_rule_to_etcd_rule(rule))
else:
outbound_rules.append(_neutron_rule_to_etcd_rule(rule))
return {
'ingress': inbound_rules,
'egress': outbound_rules,
'selector': 'has(%s)' % (SG_LABEL_PREFIX + sgid),
}
def _neutron_rule_to_etcd_rule(rule):
"""_neutron_rule_to_etcd_rule
Translate a single Neutron rule dict to a single dict in our
etcd format.
"""
ethertype = rule['ethertype']
etcd_rule = {'action': 'Allow'}
# Map the ethertype field from Neutron to etcd format.
etcd_rule['ipVersion'] = {'IPv4': 4,
'IPv6': 6}[ethertype]
# Map the protocol field from Neutron to etcd format.
if rule['protocol'] is None or rule['protocol'] == -1:
pass
elif rule['protocol'] == 'ipv6-icmp':
etcd_rule['protocol'] = 'ICMPv6'
elif rule['protocol'] == 'icmp':
etcd_rule['protocol'] = {'IPv4': 'ICMP',
'IPv6': 'ICMPv6'}[ethertype]
elif isinstance(rule['protocol'], int):
etcd_rule['protocol'] = rule['protocol']
else:
etcd_rule['protocol'] = rule['protocol'].upper()
port_spec = None
if rule['protocol'] == 'icmp' or rule['protocol'] == 'ipv6-icmp':
# OpenStack stashes the ICMP match criteria in
# port_range_min/max.
icmp_fields = {}
icmp_type = rule['port_range_min']
if icmp_type is not None and icmp_type != -1:
icmp_fields['type'] = icmp_type
icmp_code = rule['port_range_max']
if icmp_code is not None and icmp_code != -1:
icmp_fields['code'] = icmp_code
if icmp_fields:
etcd_rule['icmp'] = icmp_fields
else:
# src/dst_ports is a list in which each entry can be a
# single number, or a string describing a port range.
if rule['port_range_min'] == -1:
port_spec = None
elif rule['port_range_min'] == rule['port_range_max']:
if rule['port_range_min'] is not None:
port_spec = [rule['port_range_min']]
else:
port_spec = ['%s:%s' % (rule['port_range_min'],
rule['port_range_max'])]
entity_rule = {}
if rule['remote_group_id'] is not None:
entity_rule['selector'] = 'has(%s)' % (SG_LABEL_PREFIX +
rule['remote_group_id'])
if rule['remote_ip_prefix'] is not None:
entity_rule['nets'] = [rule['remote_ip_prefix']]
LOG.debug("=> Entity rule %s" % entity_rule)
# Store in source or destination field of the overall rule.
if entity_rule:
if rule['direction'] == 'ingress':
etcd_rule['source'] = entity_rule
if port_spec is not None:
etcd_rule['destination'] = {'ports': port_spec}
else:
if port_spec is not None:
entity_rule['ports'] = port_spec
etcd_rule['destination'] = entity_rule
LOG.debug("=> %s Calico rule %s" % (rule['direction'], etcd_rule))
return etcd_rule
| # -*- coding: utf-8 -*-
# Copyright (c) 2018 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networking_calico.common import config as calico_config
from networking_calico.compat import log
from networking_calico import datamodel_v3
from networking_calico.plugins.ml2.drivers.calico.syncer import ResourceSyncer
LOG = log.getLogger(__name__)
# Each OpenStack security group is mapped to a Calico NetworkPolicy. A VM's
# security group membership is represented by the VM having a label for each
# security group that it belongs to; thus the selector
# 'has(<security-group-label>)' represents the VMs that belong to that security
# group.
#
# The label for each security group is 'sg.projectcalico.org/openstack-'
# followed by the security group ID, and the name of the NetworkPolicy for each
# security group is 'ossg.default.' followed by the security group ID.
SG_LABEL_PREFIX = 'sg.projectcalico.org/openstack-'
SG_NAME_LABEL_PREFIX = 'sg-name.projectcalico.org/openstack-'
SG_NAME_MAX_LENGTH = (datamodel_v3.SANITIZE_LABEL_MAX_LENGTH -
len(SG_NAME_LABEL_PREFIX))
SG_NAME_PREFIX = 'ossg.default.'
class PolicySyncer(ResourceSyncer):
def __init__(self, db, txn_from_context):
super(PolicySyncer, self).__init__(db,
txn_from_context,
"NetworkPolicy")
self.region_string = calico_config.get_region_string()
self.namespace = datamodel_v3.get_namespace(self.region_string)
def delete_legacy_etcd_data(self):
if self.namespace != datamodel_v3.NO_REGION_NAMESPACE:
datamodel_v3.delete_legacy(self.resource_kind, SG_NAME_PREFIX)
def get_all_from_etcd(self):
results = []
for r in datamodel_v3.get_all(self.resource_kind, self.namespace):
name, _, _ = r
if name.startswith(SG_NAME_PREFIX):
results.append(r)
return results
def create_in_etcd(self, name, spec):
return datamodel_v3.put(self.resource_kind,
self.namespace,
name,
spec,
mod_revision=0)
def update_in_etcd(self, name, spec, mod_revision=None):
return datamodel_v3.put(self.resource_kind,
self.namespace,
name,
spec,
mod_revision=mod_revision)
def delete_from_etcd(self, name, mod_revision):
return datamodel_v3.delete(self.resource_kind,
self.namespace,
name,
mod_revision=mod_revision)
def get_all_from_neutron(self, context):
return dict((SG_NAME_PREFIX + sg['id'], sg)
for sg in self.db.get_security_groups(context))
def neutron_to_etcd_write_data(self, sg, context, reread=False):
if reread:
# We don't need to reread the SG row itself here, because we don't
# use any information from it, apart from its ID as a key for the
# following rules.
pass
rules = self.db.get_security_group_rules(
context,
filters={'security_group_id': [sg['id']]}
)
return policy_spec(sg['id'], rules)
def write_sgs_to_etcd(self, sgids, context):
rules = self.db.get_security_group_rules(
context, filters={'security_group_id': sgids}
)
for sgid in sgids:
self.update_in_etcd(SG_NAME_PREFIX + sgid,
policy_spec(sgid, rules))
def policy_spec(sgid, rules):
"""Generate JSON NetworkPolicySpec for the given security group."""
# <rules> can include those for several security groups. Pick out the
# rules for the security group that we are translating right now.
sg_rules = (r for r in rules if r['security_group_id'] == sgid)
# Split the rules based on direction, and map to Calico form.
inbound_rules = []
outbound_rules = []
for rule in sg_rules:
if rule['direction'] == 'ingress':
inbound_rules.append(_neutron_rule_to_etcd_rule(rule))
else:
outbound_rules.append(_neutron_rule_to_etcd_rule(rule))
return {
'ingress': inbound_rules,
'egress': outbound_rules,
'selector': 'has(%s)' % (SG_LABEL_PREFIX + sgid),
}
def _neutron_rule_to_etcd_rule(rule):
"""_neutron_rule_to_etcd_rule
Translate a single Neutron rule dict to a single dict in our
etcd format.
"""
ethertype = rule['ethertype']
etcd_rule = {'action': 'Allow'}
# Map the ethertype field from Neutron to etcd format.
etcd_rule['ipVersion'] = {'IPv4': 4,
'IPv6': 6}[ethertype]
# Map the protocol field from Neutron to etcd format.
if rule['protocol'] is None or rule['protocol'] == -1:
pass
elif rule['protocol'] == 'ipv6-icmp':
etcd_rule['protocol'] = 'ICMPv6'
elif rule['protocol'] == 'icmp':
etcd_rule['protocol'] = {'IPv4': 'ICMP',
'IPv6': 'ICMPv6'}[ethertype]
elif isinstance(rule['protocol'], int):
etcd_rule['protocol'] = rule['protocol']
else:
etcd_rule['protocol'] = rule['protocol'].upper()
port_spec = None
if rule['protocol'] == 'icmp' or rule['protocol'] == 'ipv6-icmp':
# OpenStack stashes the ICMP match criteria in
# port_range_min/max.
icmp_fields = {}
icmp_type = rule['port_range_min']
if icmp_type is not None and icmp_type != -1:
icmp_fields['type'] = icmp_type
icmp_code = rule['port_range_max']
if icmp_code is not None and icmp_code != -1:
icmp_fields['code'] = icmp_code
if icmp_fields:
etcd_rule['icmp'] = icmp_fields
else:
# src/dst_ports is a list in which each entry can be a
# single number, or a string describing a port range.
if rule['port_range_min'] == -1:
port_spec = None
elif rule['port_range_min'] == rule['port_range_max']:
if rule['port_range_min'] is not None:
port_spec = [rule['port_range_min']]
else:
port_spec = ['%s:%s' % (rule['port_range_min'],
rule['port_range_max'])]
entity_rule = {}
if rule['remote_group_id'] is not None:
entity_rule['selector'] = 'has(%s)' % (SG_LABEL_PREFIX +
rule['remote_group_id'])
if rule['remote_ip_prefix'] is not None:
entity_rule['nets'] = [rule['remote_ip_prefix']]
LOG.debug("=> Entity rule %s" % entity_rule)
# Store in source or destination field of the overall rule.
if entity_rule:
if rule['direction'] == 'ingress':
etcd_rule['source'] = entity_rule
if port_spec is not None:
etcd_rule['destination'] = {'ports': port_spec}
else:
if port_spec is not None:
entity_rule['ports'] = port_spec
etcd_rule['destination'] = entity_rule
LOG.debug("=> %s Calico rule %s" % (rule['direction'], etcd_rule))
return etcd_rule | en | 0.87971 | # -*- coding: utf-8 -*- # Copyright (c) 2018 Tigera, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Each OpenStack security group is mapped to a Calico NetworkPolicy. A VM's # security group membership is represented by the VM having a label for each # security group that it belongs to; thus the selector # 'has(<security-group-label>)' represents the VMs that belong to that security # group. # # The label for each security group is 'sg.projectcalico.org/openstack-' # followed by the security group ID, and the name of the NetworkPolicy for each # security group is 'ossg.default.' followed by the security group ID. # We don't need to reread the SG row itself here, because we don't # use any information from it, apart from its ID as a key for the # following rules. Generate JSON NetworkPolicySpec for the given security group. # <rules> can include those for several security groups. Pick out the # rules for the security group that we are translating right now. # Split the rules based on direction, and map to Calico form. _neutron_rule_to_etcd_rule Translate a single Neutron rule dict to a single dict in our etcd format. # Map the ethertype field from Neutron to etcd format. # Map the protocol field from Neutron to etcd format. # OpenStack stashes the ICMP match criteria in # port_range_min/max. # src/dst_ports is a list in which each entry can be a # single number, or a string describing a port range. # Store in source or destination field of the overall rule. | 1.828248 | 2 |
25/main.py | gosha20777/mipt-bioinfo-2021 | 0 | 10765 | <filename>25/main.py<gh_stars>0
def global_alignment(seq1, seq2, score_matrix, penalty):
len1, len2 = len(seq1), len(seq2)
s = [[0] * (len2 + 1) for i in range(len1 + 1)]
backtrack = [[0] * (len2 + 1) for i in range(len1 + 1)]
for i in range(1, len1 + 1):
s[i][0] = - i * penalty
for j in range(1, len2 + 1):
s[0][j] = - j * penalty
for i in range(1, len1 + 1):
for j in range(1, len2 + 1):
score_list = [s[i - 1][j] - penalty, s[i][j - 1] - penalty,
s[i - 1][j - 1] + score_matrix[seq1[i - 1], seq2[j - 1]]]
s[i][j] = max(score_list)
backtrack[i][j] = score_list.index(s[i][j])
indel_insert = lambda seq, i: seq[:i] + '-' + seq[i:]
align1, align2 = seq1, seq2
a, b = len1, len2
max_score = str(s[a][b])
while a * b != 0:
if backtrack[a][b] == 0:
a -= 1
align2 = indel_insert(align2, b)
elif backtrack[a][b] == 1:
b -= 1
align1 = indel_insert(align1, a)
else:
a -= 1
b -= 1
for i in range(a):
align2 = indel_insert(align2, 0)
for j in range(b):
align1 = indel_insert(align1, 0)
return max_score, align1, align2
def mid_column_score(v, w, score_matrix, penalty):
s = [[i * j * penalty for i in range(-1, 1)] for j in range(len(v) + 1)]
s[0][1] = -penalty
backtrack = [0] * (len(v) + 1)
for j in range(1, len(w) // 2 + 1):
for i in range(0, len(v) + 1):
if i == 0:
s[i][1] = -j * penalty
else:
scores = [s[i - 1][0] + score_matrix[v[i - 1], w[j - 1]], s[i][0] - penalty, s[i - 1][1] - penalty]
s[i][1] = max(scores)
backtrack[i] = scores.index(s[i][1])
if j != len(w) // 2:
s = [[row[1]] * 2 for row in s]
return [i[1] for i in s], backtrack
def mid_edge(v, w, score_matrix, penalty):
source = mid_column_score(v, w, score_matrix, penalty)[0]
mid_to_sink, backtrack = list(map(lambda l: l[::-1], mid_column_score(v[::-1], w[::-1] + ['', '$'][
len(w) % 2 == 1 and len(w) > 1], score_matrix, penalty)))
scores = list(map(sum, zip(source, mid_to_sink)))
max_mid = max(range(len(scores)), key = lambda i: scores[i])
if max_mid == len(scores) - 1:
next_node = (max_mid, len(w) // 2 + 1)
else:
next_node = [(max_mid + 1, len(w) // 2 + 1), (max_mid, len(w) // 2 + 1), (max_mid + 1, len(w) // 2), ][
backtrack[max_mid]]
return (max_mid, len(w) // 2), next_node
def linear_space_alignment(top, bottom, left, right, score_matrix):
v = seq1
w = seq2
if left == right:
return [v[top:bottom], '-' * (bottom - top)]
elif top == bottom:
return ['-' * (right - left), w[left:right]]
elif bottom - top == 1 or right - left == 1:
return global_alignment(v[top:bottom], w[left:right], score_matrix, penalty)[1:]
else:
mid_node, next_node = mid_edge(v[top:bottom], w[left:right], score_matrix, penalty)
mid_node = tuple(map(sum, zip(mid_node, [top, left])))
next_node = tuple(map(sum, zip(next_node, [top, left])))
current = [['-', v[mid_node[0] % len(v)]][next_node[0] - mid_node[0]],
['-', w[mid_node[1] % len(w)]][next_node[1] - mid_node[1]]]
a = linear_space_alignment(top, mid_node[0], left, mid_node[1], score_matrix)
b = linear_space_alignment(next_node[0], bottom, next_node[1], right, score_matrix)
return [a[i] + current[i] + b[i] for i in range(2)]
def linear_space_global_alignment(v, w, score_matrix, penalty):
align1, align2 = linear_space_alignment(0, len(v), 0, len(w), score_matrix)
p = []
for i in zip(align1, align2):
if '-' in i:
p.append(-penalty)
else:
p.append(score_matrix[i])
score = sum(p)
return str(score), align1, align2
if __name__ == '__main__':
with open('input.txt') as f:
seq1 = f.readline().strip()
seq2 = f.readline().strip()
with open('BLOSUM62.txt') as f1:
lines = [line.strip().split() for line in f1.readlines()]
matrix = {(i[0], i[1]): int(i[2]) for i in lines}
penalty = 5
alignment = '\n'.join(linear_space_global_alignment(seq1, seq2, matrix, penalty))
print(alignment)
| <filename>25/main.py<gh_stars>0
def global_alignment(seq1, seq2, score_matrix, penalty):
len1, len2 = len(seq1), len(seq2)
s = [[0] * (len2 + 1) for i in range(len1 + 1)]
backtrack = [[0] * (len2 + 1) for i in range(len1 + 1)]
for i in range(1, len1 + 1):
s[i][0] = - i * penalty
for j in range(1, len2 + 1):
s[0][j] = - j * penalty
for i in range(1, len1 + 1):
for j in range(1, len2 + 1):
score_list = [s[i - 1][j] - penalty, s[i][j - 1] - penalty,
s[i - 1][j - 1] + score_matrix[seq1[i - 1], seq2[j - 1]]]
s[i][j] = max(score_list)
backtrack[i][j] = score_list.index(s[i][j])
indel_insert = lambda seq, i: seq[:i] + '-' + seq[i:]
align1, align2 = seq1, seq2
a, b = len1, len2
max_score = str(s[a][b])
while a * b != 0:
if backtrack[a][b] == 0:
a -= 1
align2 = indel_insert(align2, b)
elif backtrack[a][b] == 1:
b -= 1
align1 = indel_insert(align1, a)
else:
a -= 1
b -= 1
for i in range(a):
align2 = indel_insert(align2, 0)
for j in range(b):
align1 = indel_insert(align1, 0)
return max_score, align1, align2
def mid_column_score(v, w, score_matrix, penalty):
s = [[i * j * penalty for i in range(-1, 1)] for j in range(len(v) + 1)]
s[0][1] = -penalty
backtrack = [0] * (len(v) + 1)
for j in range(1, len(w) // 2 + 1):
for i in range(0, len(v) + 1):
if i == 0:
s[i][1] = -j * penalty
else:
scores = [s[i - 1][0] + score_matrix[v[i - 1], w[j - 1]], s[i][0] - penalty, s[i - 1][1] - penalty]
s[i][1] = max(scores)
backtrack[i] = scores.index(s[i][1])
if j != len(w) // 2:
s = [[row[1]] * 2 for row in s]
return [i[1] for i in s], backtrack
def mid_edge(v, w, score_matrix, penalty):
source = mid_column_score(v, w, score_matrix, penalty)[0]
mid_to_sink, backtrack = list(map(lambda l: l[::-1], mid_column_score(v[::-1], w[::-1] + ['', '$'][
len(w) % 2 == 1 and len(w) > 1], score_matrix, penalty)))
scores = list(map(sum, zip(source, mid_to_sink)))
max_mid = max(range(len(scores)), key = lambda i: scores[i])
if max_mid == len(scores) - 1:
next_node = (max_mid, len(w) // 2 + 1)
else:
next_node = [(max_mid + 1, len(w) // 2 + 1), (max_mid, len(w) // 2 + 1), (max_mid + 1, len(w) // 2), ][
backtrack[max_mid]]
return (max_mid, len(w) // 2), next_node
def linear_space_alignment(top, bottom, left, right, score_matrix):
v = seq1
w = seq2
if left == right:
return [v[top:bottom], '-' * (bottom - top)]
elif top == bottom:
return ['-' * (right - left), w[left:right]]
elif bottom - top == 1 or right - left == 1:
return global_alignment(v[top:bottom], w[left:right], score_matrix, penalty)[1:]
else:
mid_node, next_node = mid_edge(v[top:bottom], w[left:right], score_matrix, penalty)
mid_node = tuple(map(sum, zip(mid_node, [top, left])))
next_node = tuple(map(sum, zip(next_node, [top, left])))
current = [['-', v[mid_node[0] % len(v)]][next_node[0] - mid_node[0]],
['-', w[mid_node[1] % len(w)]][next_node[1] - mid_node[1]]]
a = linear_space_alignment(top, mid_node[0], left, mid_node[1], score_matrix)
b = linear_space_alignment(next_node[0], bottom, next_node[1], right, score_matrix)
return [a[i] + current[i] + b[i] for i in range(2)]
def linear_space_global_alignment(v, w, score_matrix, penalty):
align1, align2 = linear_space_alignment(0, len(v), 0, len(w), score_matrix)
p = []
for i in zip(align1, align2):
if '-' in i:
p.append(-penalty)
else:
p.append(score_matrix[i])
score = sum(p)
return str(score), align1, align2
if __name__ == '__main__':
with open('input.txt') as f:
seq1 = f.readline().strip()
seq2 = f.readline().strip()
with open('BLOSUM62.txt') as f1:
lines = [line.strip().split() for line in f1.readlines()]
matrix = {(i[0], i[1]): int(i[2]) for i in lines}
penalty = 5
alignment = '\n'.join(linear_space_global_alignment(seq1, seq2, matrix, penalty))
print(alignment)
| none | 1 | 2.90685 | 3 |
|
utils/visualize_tree.py | moyiming1/Retrosynthesis-pathway-ranking | 10 | 10766 | <gh_stars>1-10
import os, sys
project_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(project_path)
import pickle
def construct_tree_for_visual(tree, node_info_key, depth=0):
tree_for_visual = {'smiles': 'http://askcos.mit.edu/draw/smiles/' + str(tree['smiles']).replace('#', '%23'),
'depth': depth,
'children': []}
if node_info_key in tree.keys():
if type(tree[node_info_key]) is not str:
tree_for_visual['score'] = '{:.3f}'.format(tree[node_info_key])
else:
tree_for_visual['score'] = tree[node_info_key]
else:
tree_for_visual['score'] = ''
if tree['child']:
# tree_for_visual['children'] = []
for child in tree['child']:
tree_for_visual['children'].append(construct_tree_for_visual(child, node_info_key, depth+1))
return tree_for_visual
def construct_tree_for_d3_visualization(tree, depth, new_tree={}, max_children=0):
# if 'is_chemical' in tree.keys():
new_tree['smiles'] = 'http://askcos.mit.edu/draw/smiles/' + str(tree['smiles']).replace('#', '%23')
if 'score' in tree.keys():
new_tree['score'] = str(tree['score'])
else:
new_tree['score'] = ''
# new_tree['smiles'] = str(new_tree['smiles'])
new_tree['children'] = []
if tree['child']:
# print(len(tree['child']))
if max_children < len(tree['child']):
max_children = len(tree['child'])
for child in tree['child']:
new_tree['children'].append({})
_, max_children = construct_tree_for_d3_visualization(child, depth + 1, new_tree['children'][-1], max_children)
return new_tree, max_children
def count_tree_depth_children(tree, count):
count[tree['depth']] += 1
if tree['children']:
for child in tree['children']:
count = count_tree_depth_children(child, count)
return count
def create_tree_html(trees, file_name, tree_info=None, node_info_key='score',
width_factor=1, max_depth=10):
try:
outfile = file_name
except Exception as e:
print(e)
print('Need to specify file name to write results to')
trees_for_visualization = {'name': 'dummy_root', 'children': []}
max_children = 1
for i, tree in enumerate(trees):
output = construct_tree_for_visual(tree, node_info_key)
trees_for_visualization['children'].append(output)
# print()
current_children = max(count_tree_depth_children(output, count=[0]*20))
if current_children > max_children:
max_children = current_children
if tree_info:
# print(tree_info[i])
trees_for_visualization['children'][-1]['_id'] = tree_info[i]
else:
trees_for_visualization['children'][-1]['_id'] = ('T%d' % i)
# print(max_children)
max_children = max(max_children, 3)
height = 300 * len(trees) * max_children / 3 * width_factor
page_width = max_depth * 300
fid_out = open(outfile + '.html', 'w')
fid_out.write('<!DOCTYPE html>\n')
fid_out.write(' <head>\n')
fid_out.write(' <meta charset="utf-8">\n')
fid_out.write(' <title>{}</title>\n'.format(outfile))
fid_out.write(' <style>\n')
fid_out.write(' .node circle {\n')
fid_out.write(' fill: #fff;\n')
fid_out.write(' stroke: steelblue;\n')
fid_out.write(' stroke-width: 3px;\n')
fid_out.write(' }\n')
fid_out.write(' .node rect {\n')
fid_out.write(' fill: #fff;\n')
fid_out.write(' stroke: steelblue;\n')
fid_out.write(' stroke_width: 3px;\n')
fid_out.write(' }\n')
fid_out.write(' .node text { font: 12px sans-serif; }\n')
fid_out.write(' .link {\n')
fid_out.write(' fill: none;\n')
fid_out.write(' stroke: #ccc;\n')
fid_out.write(' stroke-width: 2px;\n')
fid_out.write(' }\n')
fid_out.write(' </style>\n')
fid_out.write(' </head>\n')
fid_out.write(' <body>\n')
fid_out.write('<!-- load the d3.js library --> \n')
fid_out.write('<script src="http://d3js.org/d3.v3.min.js"></script>\n')
fid_out.write('<script>\n')
fid_out.write('var treeData = [\n')
fid_out.write('{}\n'.format(trees_for_visualization))
fid_out.write('];\n')
fid_out.write('var margin = {top: 20, right: 120, bottom: 20, left: 0},\n')
fid_out.write(' width = {} - margin.right - margin.left,\n'.format(page_width))
fid_out.write(' height = {} - margin.top - margin.bottom;\n'.format(height))
fid_out.write('var i = 0;\n')
fid_out.write('var tree = d3.layout.tree()\n')
fid_out.write(' .size([height, width]);\n')
fid_out.write('var diagonal = d3.svg.diagonal()\n')
fid_out.write(' .projection(function(d) { return [d.y, d.x]; });\n')
fid_out.write('var svg = d3.select("body").append("svg")\n')
fid_out.write(' .attr("width", width + margin.right + margin.left)\n')
fid_out.write(' .attr("height", height + margin.top + margin.bottom)\n')
fid_out.write(' .append("g")\n')
fid_out.write(' .attr("transform", \n')
fid_out.write(' "translate(" + margin.left + "," + margin.top + ")");\n')
fid_out.write('root = treeData[0];\n')
fid_out.write('update(root);\n')
fid_out.write('function update(source) {\n')
fid_out.write(' // Compute the new tree layout.\n')
fid_out.write(' var nodes = tree.nodes(root).reverse(),\n')
fid_out.write(' links = tree.links(nodes);\n')
fid_out.write(' // Normalize for fixed-depth.\n')
fid_out.write(' nodes.forEach(function(d) { d.y = d.depth * 250; });\n')
fid_out.write(' // Declare the nodes…\n')
fid_out.write(' var node = svg.selectAll("g.node")\n')
fid_out.write(' .data(nodes, function(d) { return d.id || (d.id = ++i); });\n')
fid_out.write(' // Enter the nodes.\n')
fid_out.write(' var nodeEnter = node.enter().append("g")\n')
fid_out.write(' .attr("class", "node")\n')
fid_out.write(' .attr("transform", function(d) { \n')
fid_out.write(' return "translate(" + d.y + "," + d.x + ")"; });\n')
fid_out.write(' nodeEnter.append("image")\n')
fid_out.write(' .attr("xlink:href", function(d) { return d.smiles; })\n')
fid_out.write(' .attr("x", "-60px")\n')
fid_out.write(' .attr("y", "-60px")\n')
fid_out.write(' .attr("width", "120px")\n')
fid_out.write(' .attr("height", "120px");\n')
fid_out.write(' nodeEnter.append("path")\n')
fid_out.write(' .style("stroke", "black")\n')
fid_out.write(' .style("fill", function(d) { if (d.freq==1) { return "white"; }\n')
fid_out.write(' else if (d.freq==2) { return "yellow";}\n')
fid_out.write(' else if (d.freq==3) { return "orange"; }\n')
fid_out.write(' else if (d.freq>=4) { return "red"; }\n')
fid_out.write(' else {return "white";}\n')
fid_out.write(' })\n')
fid_out.write(' .attr("d", d3.svg.symbol()\n')
fid_out.write(' .size(0)\n')
fid_out.write(' .type(function(d) {if\n')
fid_out.write(' (d.rc_type == "chemical") {return "circle";} else if\n')
fid_out.write(' (d.rc_type == "reaction") {return "cross";}\n')
fid_out.write(' }));\n')
fid_out.write(' nodeEnter.append("text")\n')
fid_out.write(' .attr("x", 0)\n')
fid_out.write(' .attr("y", 35)\n')
fid_out.write(' .attr("text-anchor", function(d) { \n')
fid_out.write(' return d.children || d._children ? "end" : "start"; })\n')
fid_out.write(' .text(function(d) { return d.names; })\n')
fid_out.write(' .style("fill-opacity", 1);\n')
fid_out.write(' nodeEnter.append("text")\n')
fid_out.write(' .attr("x", 200)\n')
fid_out.write(' .attr("y", 120)\n')
fid_out.write(' .attr("text-anchor", function(d) { \n')
fid_out.write(' return d.children || d._children ? "end" : "start"; })\n')
fid_out.write(' .text(function(d) { return d._id; })\n')
fid_out.write(' .style("fill-opacity", 1);\n')
fid_out.write(' nodeEnter.append("text")\n')
fid_out.write(' .attr("x", 0)\n')
fid_out.write(' .attr("y", -30)\n')
fid_out.write(' .attr("text-anchor", function(d) { \n')
fid_out.write(' return d.children || d._children ? "end" : "start"; })\n')
fid_out.write(' .text(function(d) { return d.score; })\n')
fid_out.write(' .style("fill-opacity", 1);\n')
fid_out.write(' // Declare the links…\n')
fid_out.write(' var link = svg.selectAll("path.link")\n')
fid_out.write(' .data(links, function(d) { return d.target.id; });\n')
fid_out.write(' // Enter the links.\n')
fid_out.write(' link.enter().insert("path", "g")\n')
fid_out.write(' .attr("class", "link")\n')
fid_out.write(' .style("stroke", function(d) { return d.target.level; })\n')
fid_out.write(' .attr("d", diagonal);\n')
fid_out.write(' // remove the first level, leaving the targets as the first level\n')
fid_out.write(' node.each(function(d){\n')
fid_out.write(' if (d.name == "dummy_root")\n')
fid_out.write(' d3.select(this).remove();});\n')
fid_out.write(' link.each(function(d){\n')
fid_out.write(' if (d.source.name == "dummy_root") \n')
fid_out.write(' d3.select(this).remove();});\n')
fid_out.write('}\n')
fid_out.write('</script>\n')
fid_out.write(' </body>\n')
fid_out.write('</html>\n')
fid_out.close()
if __name__ == "__main__":
file_name = project_path + '/data/pathway_train_example.pkl'
with open(file_name, 'rb') as f:
data = pickle.load(f)
trees_to_plot = [d['tree'] for d in data['generated_paths'][0:10]]
create_tree_html(trees_to_plot, 'plotted_trees')
| import os, sys
project_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(project_path)
import pickle
def construct_tree_for_visual(tree, node_info_key, depth=0):
tree_for_visual = {'smiles': 'http://askcos.mit.edu/draw/smiles/' + str(tree['smiles']).replace('#', '%23'),
'depth': depth,
'children': []}
if node_info_key in tree.keys():
if type(tree[node_info_key]) is not str:
tree_for_visual['score'] = '{:.3f}'.format(tree[node_info_key])
else:
tree_for_visual['score'] = tree[node_info_key]
else:
tree_for_visual['score'] = ''
if tree['child']:
# tree_for_visual['children'] = []
for child in tree['child']:
tree_for_visual['children'].append(construct_tree_for_visual(child, node_info_key, depth+1))
return tree_for_visual
def construct_tree_for_d3_visualization(tree, depth, new_tree={}, max_children=0):
# if 'is_chemical' in tree.keys():
new_tree['smiles'] = 'http://askcos.mit.edu/draw/smiles/' + str(tree['smiles']).replace('#', '%23')
if 'score' in tree.keys():
new_tree['score'] = str(tree['score'])
else:
new_tree['score'] = ''
# new_tree['smiles'] = str(new_tree['smiles'])
new_tree['children'] = []
if tree['child']:
# print(len(tree['child']))
if max_children < len(tree['child']):
max_children = len(tree['child'])
for child in tree['child']:
new_tree['children'].append({})
_, max_children = construct_tree_for_d3_visualization(child, depth + 1, new_tree['children'][-1], max_children)
return new_tree, max_children
def count_tree_depth_children(tree, count):
count[tree['depth']] += 1
if tree['children']:
for child in tree['children']:
count = count_tree_depth_children(child, count)
return count
def create_tree_html(trees, file_name, tree_info=None, node_info_key='score',
width_factor=1, max_depth=10):
try:
outfile = file_name
except Exception as e:
print(e)
print('Need to specify file name to write results to')
trees_for_visualization = {'name': 'dummy_root', 'children': []}
max_children = 1
for i, tree in enumerate(trees):
output = construct_tree_for_visual(tree, node_info_key)
trees_for_visualization['children'].append(output)
# print()
current_children = max(count_tree_depth_children(output, count=[0]*20))
if current_children > max_children:
max_children = current_children
if tree_info:
# print(tree_info[i])
trees_for_visualization['children'][-1]['_id'] = tree_info[i]
else:
trees_for_visualization['children'][-1]['_id'] = ('T%d' % i)
# print(max_children)
max_children = max(max_children, 3)
height = 300 * len(trees) * max_children / 3 * width_factor
page_width = max_depth * 300
fid_out = open(outfile + '.html', 'w')
fid_out.write('<!DOCTYPE html>\n')
fid_out.write(' <head>\n')
fid_out.write(' <meta charset="utf-8">\n')
fid_out.write(' <title>{}</title>\n'.format(outfile))
fid_out.write(' <style>\n')
fid_out.write(' .node circle {\n')
fid_out.write(' fill: #fff;\n')
fid_out.write(' stroke: steelblue;\n')
fid_out.write(' stroke-width: 3px;\n')
fid_out.write(' }\n')
fid_out.write(' .node rect {\n')
fid_out.write(' fill: #fff;\n')
fid_out.write(' stroke: steelblue;\n')
fid_out.write(' stroke_width: 3px;\n')
fid_out.write(' }\n')
fid_out.write(' .node text { font: 12px sans-serif; }\n')
fid_out.write(' .link {\n')
fid_out.write(' fill: none;\n')
fid_out.write(' stroke: #ccc;\n')
fid_out.write(' stroke-width: 2px;\n')
fid_out.write(' }\n')
fid_out.write(' </style>\n')
fid_out.write(' </head>\n')
fid_out.write(' <body>\n')
fid_out.write('<!-- load the d3.js library --> \n')
fid_out.write('<script src="http://d3js.org/d3.v3.min.js"></script>\n')
fid_out.write('<script>\n')
fid_out.write('var treeData = [\n')
fid_out.write('{}\n'.format(trees_for_visualization))
fid_out.write('];\n')
fid_out.write('var margin = {top: 20, right: 120, bottom: 20, left: 0},\n')
fid_out.write(' width = {} - margin.right - margin.left,\n'.format(page_width))
fid_out.write(' height = {} - margin.top - margin.bottom;\n'.format(height))
fid_out.write('var i = 0;\n')
fid_out.write('var tree = d3.layout.tree()\n')
fid_out.write(' .size([height, width]);\n')
fid_out.write('var diagonal = d3.svg.diagonal()\n')
fid_out.write(' .projection(function(d) { return [d.y, d.x]; });\n')
fid_out.write('var svg = d3.select("body").append("svg")\n')
fid_out.write(' .attr("width", width + margin.right + margin.left)\n')
fid_out.write(' .attr("height", height + margin.top + margin.bottom)\n')
fid_out.write(' .append("g")\n')
fid_out.write(' .attr("transform", \n')
fid_out.write(' "translate(" + margin.left + "," + margin.top + ")");\n')
fid_out.write('root = treeData[0];\n')
fid_out.write('update(root);\n')
fid_out.write('function update(source) {\n')
fid_out.write(' // Compute the new tree layout.\n')
fid_out.write(' var nodes = tree.nodes(root).reverse(),\n')
fid_out.write(' links = tree.links(nodes);\n')
fid_out.write(' // Normalize for fixed-depth.\n')
fid_out.write(' nodes.forEach(function(d) { d.y = d.depth * 250; });\n')
fid_out.write(' // Declare the nodes…\n')
fid_out.write(' var node = svg.selectAll("g.node")\n')
fid_out.write(' .data(nodes, function(d) { return d.id || (d.id = ++i); });\n')
fid_out.write(' // Enter the nodes.\n')
fid_out.write(' var nodeEnter = node.enter().append("g")\n')
fid_out.write(' .attr("class", "node")\n')
fid_out.write(' .attr("transform", function(d) { \n')
fid_out.write(' return "translate(" + d.y + "," + d.x + ")"; });\n')
fid_out.write(' nodeEnter.append("image")\n')
fid_out.write(' .attr("xlink:href", function(d) { return d.smiles; })\n')
fid_out.write(' .attr("x", "-60px")\n')
fid_out.write(' .attr("y", "-60px")\n')
fid_out.write(' .attr("width", "120px")\n')
fid_out.write(' .attr("height", "120px");\n')
fid_out.write(' nodeEnter.append("path")\n')
fid_out.write(' .style("stroke", "black")\n')
fid_out.write(' .style("fill", function(d) { if (d.freq==1) { return "white"; }\n')
fid_out.write(' else if (d.freq==2) { return "yellow";}\n')
fid_out.write(' else if (d.freq==3) { return "orange"; }\n')
fid_out.write(' else if (d.freq>=4) { return "red"; }\n')
fid_out.write(' else {return "white";}\n')
fid_out.write(' })\n')
fid_out.write(' .attr("d", d3.svg.symbol()\n')
fid_out.write(' .size(0)\n')
fid_out.write(' .type(function(d) {if\n')
fid_out.write(' (d.rc_type == "chemical") {return "circle";} else if\n')
fid_out.write(' (d.rc_type == "reaction") {return "cross";}\n')
fid_out.write(' }));\n')
fid_out.write(' nodeEnter.append("text")\n')
fid_out.write(' .attr("x", 0)\n')
fid_out.write(' .attr("y", 35)\n')
fid_out.write(' .attr("text-anchor", function(d) { \n')
fid_out.write(' return d.children || d._children ? "end" : "start"; })\n')
fid_out.write(' .text(function(d) { return d.names; })\n')
fid_out.write(' .style("fill-opacity", 1);\n')
fid_out.write(' nodeEnter.append("text")\n')
fid_out.write(' .attr("x", 200)\n')
fid_out.write(' .attr("y", 120)\n')
fid_out.write(' .attr("text-anchor", function(d) { \n')
fid_out.write(' return d.children || d._children ? "end" : "start"; })\n')
fid_out.write(' .text(function(d) { return d._id; })\n')
fid_out.write(' .style("fill-opacity", 1);\n')
fid_out.write(' nodeEnter.append("text")\n')
fid_out.write(' .attr("x", 0)\n')
fid_out.write(' .attr("y", -30)\n')
fid_out.write(' .attr("text-anchor", function(d) { \n')
fid_out.write(' return d.children || d._children ? "end" : "start"; })\n')
fid_out.write(' .text(function(d) { return d.score; })\n')
fid_out.write(' .style("fill-opacity", 1);\n')
fid_out.write(' // Declare the links…\n')
fid_out.write(' var link = svg.selectAll("path.link")\n')
fid_out.write(' .data(links, function(d) { return d.target.id; });\n')
fid_out.write(' // Enter the links.\n')
fid_out.write(' link.enter().insert("path", "g")\n')
fid_out.write(' .attr("class", "link")\n')
fid_out.write(' .style("stroke", function(d) { return d.target.level; })\n')
fid_out.write(' .attr("d", diagonal);\n')
fid_out.write(' // remove the first level, leaving the targets as the first level\n')
fid_out.write(' node.each(function(d){\n')
fid_out.write(' if (d.name == "dummy_root")\n')
fid_out.write(' d3.select(this).remove();});\n')
fid_out.write(' link.each(function(d){\n')
fid_out.write(' if (d.source.name == "dummy_root") \n')
fid_out.write(' d3.select(this).remove();});\n')
fid_out.write('}\n')
fid_out.write('</script>\n')
fid_out.write(' </body>\n')
fid_out.write('</html>\n')
fid_out.close()
if __name__ == "__main__":
file_name = project_path + '/data/pathway_train_example.pkl'
with open(file_name, 'rb') as f:
data = pickle.load(f)
trees_to_plot = [d['tree'] for d in data['generated_paths'][0:10]]
create_tree_html(trees_to_plot, 'plotted_trees') | en | 0.525148 | # tree_for_visual['children'] = [] # if 'is_chemical' in tree.keys(): # new_tree['smiles'] = str(new_tree['smiles']) # print(len(tree['child'])) # print() # print(tree_info[i]) # print(max_children) #fff;\n') #fff;\n') #ccc;\n') | 2.675075 | 3 |
python-is-easy/assignments/snowman/main.py | eDyablo/pirple | 0 | 10767 | <filename>python-is-easy/assignments/snowman/main.py<gh_stars>0
'''
Homework assignment for the 'Python is easy' course by Pirple.
Written be <NAME>.
Snowman(Hangman) game.
'''
from os import (
name as os_name,
system as system_call,
)
from os.path import (
abspath,
dirname,
join as join_path,
)
'''
Screen displays game output
'''
class Screen:
def clear(self):
if os_name == 'nt':
system_call('cls')
else:
system_call('clear')
def draw(self, frame):
for line in frame:
print(line)
'''
Input represents game input device
'''
class Input:
def ask(self, message):
answer = ''
while answer == '':
answer = input(message)
return answer
'''
Art is a game art which is set of frames that get loaded from a text file.
Draws its current frame on a screen.
'''
class Art:
def __init__(self):
self.frames = []
self.current_frame = 0
def load(self, name):
frames = []
art_path = join_path(dirname(abspath(__file__)), join_path('arts', name))
with open(art_path, 'r') as art_file:
frame_height = int(art_file.readline())
frame = []
line_count = 0
for line in art_file:
frame.append(line.strip('\n\r'))
line_count += 1
if line_count % frame_height == 0:
frames.append(frame)
frame = []
self.frames = frames
self.current_frame = 0
def draw(self, screen):
screen.draw(self.frames[self.current_frame])
def frames_number(self):
return len(self.frames)
def next_frame(self):
self.current_frame = (self.current_frame + 1) % self.frames_number()
return self.current_frame
'''
Riddle holds secret word and gets solved by guesses
'''
class Riddle:
def __init__(self, key):
self.key = key
self.clue = ['_'] * len(key)
def length(self):
return len(self.key)
def range(self):
return range(0, self.length())
def guess(self, g):
guess_count = 0
for i in self.range():
if g == self.key[i]:
guess_count += 1
self.clue[i] = g
return guess_count
def solved(self):
for i in self.range():
if self.clue[i] != self.key[i]:
return False
return True
def unsolved(self):
return self.solved() == False
def draw(self, screen):
screen.draw([' '.join(self.clue)])
'''
Game is a game itself
'''
class Game:
def __init__(self):
self.screen = Screen()
self.input = Input()
self.art = Art()
self.riddle = Riddle('riddle')
def play(self):
self.start()
self.propose_riddle()
while self.in_progress():
self.play_round()
self.display_result()
def start(self):
self.art.load('snowman')
self.game_over = False
def propose_riddle(self):
self.riddle = Riddle(self.input.ask('Player 1 pick a word: ').lower())
def in_progress(self):
return self.riddle.unsolved() and self.game_over == False
def draw_frame(self):
self.screen.clear()
self.art.draw(self.screen)
self.riddle.draw(self.screen)
def play_round(self):
self.draw_frame()
clue = input('Player 2 guess a letter: ').lower()
if len(clue) > 0:
if clue[0] == '.':
self.stop()
elif self.riddle.guess(clue[0]) == 0:
self.art.next_frame()
if self.art.current_frame == self.art.frames_number() - 1:
self.stop()
def stop(self):
self.game_over = True
def display_result(self):
self.draw_frame()
if self.game_over:
self.screen.draw(['Player 2 lost'])
else:
self.screen.draw(['Player 2 wins'])
Game().play()
| <filename>python-is-easy/assignments/snowman/main.py<gh_stars>0
'''
Homework assignment for the 'Python is easy' course by Pirple.
Written be <NAME>.
Snowman(Hangman) game.
'''
from os import (
name as os_name,
system as system_call,
)
from os.path import (
abspath,
dirname,
join as join_path,
)
'''
Screen displays game output
'''
class Screen:
def clear(self):
if os_name == 'nt':
system_call('cls')
else:
system_call('clear')
def draw(self, frame):
for line in frame:
print(line)
'''
Input represents game input device
'''
class Input:
def ask(self, message):
answer = ''
while answer == '':
answer = input(message)
return answer
'''
Art is a game art which is set of frames that get loaded from a text file.
Draws its current frame on a screen.
'''
class Art:
def __init__(self):
self.frames = []
self.current_frame = 0
def load(self, name):
frames = []
art_path = join_path(dirname(abspath(__file__)), join_path('arts', name))
with open(art_path, 'r') as art_file:
frame_height = int(art_file.readline())
frame = []
line_count = 0
for line in art_file:
frame.append(line.strip('\n\r'))
line_count += 1
if line_count % frame_height == 0:
frames.append(frame)
frame = []
self.frames = frames
self.current_frame = 0
def draw(self, screen):
screen.draw(self.frames[self.current_frame])
def frames_number(self):
return len(self.frames)
def next_frame(self):
self.current_frame = (self.current_frame + 1) % self.frames_number()
return self.current_frame
'''
Riddle holds secret word and gets solved by guesses
'''
class Riddle:
def __init__(self, key):
self.key = key
self.clue = ['_'] * len(key)
def length(self):
return len(self.key)
def range(self):
return range(0, self.length())
def guess(self, g):
guess_count = 0
for i in self.range():
if g == self.key[i]:
guess_count += 1
self.clue[i] = g
return guess_count
def solved(self):
for i in self.range():
if self.clue[i] != self.key[i]:
return False
return True
def unsolved(self):
return self.solved() == False
def draw(self, screen):
screen.draw([' '.join(self.clue)])
'''
Game is a game itself
'''
class Game:
def __init__(self):
self.screen = Screen()
self.input = Input()
self.art = Art()
self.riddle = Riddle('riddle')
def play(self):
self.start()
self.propose_riddle()
while self.in_progress():
self.play_round()
self.display_result()
def start(self):
self.art.load('snowman')
self.game_over = False
def propose_riddle(self):
self.riddle = Riddle(self.input.ask('Player 1 pick a word: ').lower())
def in_progress(self):
return self.riddle.unsolved() and self.game_over == False
def draw_frame(self):
self.screen.clear()
self.art.draw(self.screen)
self.riddle.draw(self.screen)
def play_round(self):
self.draw_frame()
clue = input('Player 2 guess a letter: ').lower()
if len(clue) > 0:
if clue[0] == '.':
self.stop()
elif self.riddle.guess(clue[0]) == 0:
self.art.next_frame()
if self.art.current_frame == self.art.frames_number() - 1:
self.stop()
def stop(self):
self.game_over = True
def display_result(self):
self.draw_frame()
if self.game_over:
self.screen.draw(['Player 2 lost'])
else:
self.screen.draw(['Player 2 wins'])
Game().play()
| en | 0.940596 | Homework assignment for the 'Python is easy' course by Pirple. Written be <NAME>. Snowman(Hangman) game. Screen displays game output Input represents game input device Art is a game art which is set of frames that get loaded from a text file. Draws its current frame on a screen. Riddle holds secret word and gets solved by guesses Game is a game itself | 3.827163 | 4 |
src/GenericTsvReader.py | getzlab/ABSOLUTE | 0 | 10768 | <reponame>getzlab/ABSOLUTE
"""
Created on Jul 5, 2012
@author: lichtens
"""
import csv
import os
class GenericTsvReader(object):
"""
Read a TSV file.
This class wraps a DictReader, but handles comments, which are not handled gracefully in the python csv library.
The next() method assumes user is interested in the content, not the comments.
Get the comments using getComments or getCommentsAsList. The latter assumes each comment is a line of text.
Notes:
IMPORTANT: At this time, this class does not support comments below the header line.
This class will load all comment lines into RAM at one time. This could theoretically cause a bottleneck in some files.
"""
def __init__(self, filename, commentPrepend='#', fieldNames=None, delimiter='\t'):
"""
Constructor
"""
self.__dict__.update(locals())
self.inputContentFP = open(filename, 'r')
self.commentLines = ''
self.commentPrepend = commentPrepend
# The comment lines must be loaded before the dict reader is initialized.
self._loadCommentLines()
self.dictReader = csv.DictReader(self.inputContentFP, delimiter=delimiter, fieldnames=fieldNames)
def _loadCommentLines(self):
resetLocation = self.inputContentFP.tell()
nextChar = self.inputContentFP.read(1)
# Get rid of blank lines
while nextChar in ['\n', '\r']:
resetLocation = self.inputContentFP.tell()
nextChar = self.inputContentFP.read(1)
while nextChar == self.commentPrepend:
self.commentLines = self.commentLines + (self.commentPrepend + self.inputContentFP.readline())
resetLocation = self.inputContentFP.tell()
nextChar = self.inputContentFP.read(1)
# Go back one character to make sure that we have moved the file pointer to the
# beginning of the first non-comment line.
self.inputContentFP.seek(resetLocation, os.SEEK_SET)
# python3 needs __next__ instead of next
def __next__(self):
return self.dictReader.__next__()
def getFieldNames(self):
return self.dictReader.fieldnames
def getComments(self):
return self.commentLines
def getCommentsAsList(self):
""" Return each comment line as an entry in a list """
return self.commentLines.strip().split('\n')
def getInputContentFP(self):
return self.inputContentFP
def __iter__(self):
return self | """
Created on Jul 5, 2012
@author: lichtens
"""
import csv
import os
class GenericTsvReader(object):
"""
Read a TSV file.
This class wraps a DictReader, but handles comments, which are not handled gracefully in the python csv library.
The next() method assumes user is interested in the content, not the comments.
Get the comments using getComments or getCommentsAsList. The latter assumes each comment is a line of text.
Notes:
IMPORTANT: At this time, this class does not support comments below the header line.
This class will load all comment lines into RAM at one time. This could theoretically cause a bottleneck in some files.
"""
def __init__(self, filename, commentPrepend='#', fieldNames=None, delimiter='\t'):
"""
Constructor
"""
self.__dict__.update(locals())
self.inputContentFP = open(filename, 'r')
self.commentLines = ''
self.commentPrepend = commentPrepend
# The comment lines must be loaded before the dict reader is initialized.
self._loadCommentLines()
self.dictReader = csv.DictReader(self.inputContentFP, delimiter=delimiter, fieldnames=fieldNames)
def _loadCommentLines(self):
resetLocation = self.inputContentFP.tell()
nextChar = self.inputContentFP.read(1)
# Get rid of blank lines
while nextChar in ['\n', '\r']:
resetLocation = self.inputContentFP.tell()
nextChar = self.inputContentFP.read(1)
while nextChar == self.commentPrepend:
self.commentLines = self.commentLines + (self.commentPrepend + self.inputContentFP.readline())
resetLocation = self.inputContentFP.tell()
nextChar = self.inputContentFP.read(1)
# Go back one character to make sure that we have moved the file pointer to the
# beginning of the first non-comment line.
self.inputContentFP.seek(resetLocation, os.SEEK_SET)
# python3 needs __next__ instead of next
def __next__(self):
return self.dictReader.__next__()
def getFieldNames(self):
return self.dictReader.fieldnames
def getComments(self):
return self.commentLines
def getCommentsAsList(self):
""" Return each comment line as an entry in a list """
return self.commentLines.strip().split('\n')
def getInputContentFP(self):
return self.inputContentFP
def __iter__(self):
return self | en | 0.892763 | Created on Jul 5, 2012 @author: lichtens Read a TSV file. This class wraps a DictReader, but handles comments, which are not handled gracefully in the python csv library. The next() method assumes user is interested in the content, not the comments. Get the comments using getComments or getCommentsAsList. The latter assumes each comment is a line of text. Notes: IMPORTANT: At this time, this class does not support comments below the header line. This class will load all comment lines into RAM at one time. This could theoretically cause a bottleneck in some files. Constructor # The comment lines must be loaded before the dict reader is initialized. # Get rid of blank lines # Go back one character to make sure that we have moved the file pointer to the # beginning of the first non-comment line. # python3 needs __next__ instead of next Return each comment line as an entry in a list | 3.262261 | 3 |
examples/applications/plot_impact_imbalanced_classes.py | cdchushig/imbalanced-learn | 5,678 | 10769 | """
==========================================================
Fitting model on imbalanced datasets and how to fight bias
==========================================================
This example illustrates the problem induced by learning on datasets having
imbalanced classes. Subsequently, we compare different approaches alleviating
these negative effects.
"""
# Authors: <NAME> <<EMAIL>>
# License: MIT
# %%
print(__doc__)
# %% [markdown]
# Problem definition
# ------------------
#
# We are dropping the following features:
#
# - "fnlwgt": this feature was created while studying the "adult" dataset.
# Thus, we will not use this feature which is not acquired during the survey.
# - "education-num": it is encoding the same information than "education".
# Thus, we are removing one of these 2 features.
# %%
from sklearn.datasets import fetch_openml
df, y = fetch_openml("adult", version=2, as_frame=True, return_X_y=True)
df = df.drop(columns=["fnlwgt", "education-num"])
# %% [markdown]
# The "adult" dataset as a class ratio of about 3:1
# %%
classes_count = y.value_counts()
classes_count
# %% [markdown]
# This dataset is only slightly imbalanced. To better highlight the effect of
# learning from an imbalanced dataset, we will increase its ratio to 30:1.
# %%
from imblearn.datasets import make_imbalance
ratio = 30
df_res, y_res = make_imbalance(
df,
y,
sampling_strategy={classes_count.idxmin(): classes_count.max() // ratio},
)
y_res.value_counts()
# %% [markdown]
# We will perform a cross-validation evaluation to get an estimate of the test
# score.
#
# As a baseline, we could use a classifier which will always predict the
# majority class independently of the features provided.
# %%
from sklearn.model_selection import cross_validate
from sklearn.dummy import DummyClassifier
dummy_clf = DummyClassifier(strategy="most_frequent")
scoring = ["accuracy", "balanced_accuracy"]
cv_result = cross_validate(dummy_clf, df_res, y_res, scoring=scoring)
print(f"Accuracy score of a dummy classifier: {cv_result['test_accuracy'].mean():.3f}")
# %% [markdown]
# Instead of using the accuracy, we can use the balanced accuracy, which takes
# the class imbalance into account.
# %%
print(
f"Balanced accuracy score of a dummy classifier: "
f"{cv_result['test_balanced_accuracy'].mean():.3f}"
)
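# %% [markdown]
# As a quick check (this snippet is an addition to the example), the balanced
# accuracy is simply the macro-average of the recall obtained on each class, so
# both quantities match on the dummy predictions.

# %%
from sklearn.metrics import balanced_accuracy_score, recall_score

# Fit and predict on the full dataset only to illustrate the equivalence.
dummy_clf.fit(df_res, y_res)
y_pred_dummy = dummy_clf.predict(df_res)
print(
    f"Balanced accuracy: {balanced_accuracy_score(y_res, y_pred_dummy):.3f} -- "
    f"macro-averaged recall: "
    f"{recall_score(y_res, y_pred_dummy, average='macro'):.3f}"
)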
# %% [markdown]
# Strategies to learn from an imbalanced dataset
# ----------------------------------------------
# We will use a dictionary and a list to continuously store the results of
# our experiments and show them as a pandas dataframe.
# %%
index = []
scores = {"Accuracy": [], "Balanced accuracy": []}
# %% [markdown]
# Dummy baseline
# ..............
#
# Before training a real machine learning model, we can store the results
# obtained with our :class:`~sklearn.dummy.DummyClassifier`.
# %%
import pandas as pd
index += ["Dummy classifier"]
cv_result = cross_validate(dummy_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# Linear classifier baseline
# ..........................
#
# We will create a machine learning pipeline using a
# :class:`~sklearn.linear_model.LogisticRegression` classifier. In this regard,
# we will need to one-hot encode the categorical columns and standardize the
# numerical columns before injecting the data into the
# :class:`~sklearn.linear_model.LogisticRegression` classifier.
#
# First, we define our numerical and categorical pipelines.
# %%
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.pipeline import make_pipeline
num_pipe = make_pipeline(
StandardScaler(), SimpleImputer(strategy="mean", add_indicator=True)
)
cat_pipe = make_pipeline(
SimpleImputer(strategy="constant", fill_value="missing"),
OneHotEncoder(handle_unknown="ignore"),
)
# %% [markdown]
# Then, we can create a preprocessor which will dispatch the categorical
# columns to the categorical pipeline and the numerical columns to the
# numerical pipeline.
# %%
from sklearn.compose import make_column_transformer
from sklearn.compose import make_column_selector as selector
preprocessor_linear = make_column_transformer(
(num_pipe, selector(dtype_include="number")),
(cat_pipe, selector(dtype_include="category")),
n_jobs=2,
)
# %% [markdown]
# Finally, we connect our preprocessor with our
# :class:`~sklearn.linear_model.LogisticRegression`. We can then evaluate our
# model.
# %%
from sklearn.linear_model import LogisticRegression
lr_clf = make_pipeline(preprocessor_linear, LogisticRegression(max_iter=1000))
# %%
index += ["Logistic regression"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# We can see that our linear model is learning slightly better than our dummy
# baseline. However, it is impacted by the class imbalance.
#
# We can verify that something similar is happening with a tree-based model
# such as :class:`~sklearn.ensemble.RandomForestClassifier`. With this type of
# classifier, we will not need to scale the numerical data, and we will only
# need to ordinal encode the categorical data.
# %%
from sklearn.preprocessing import OrdinalEncoder
from sklearn.ensemble import RandomForestClassifier
num_pipe = SimpleImputer(strategy="mean", add_indicator=True)
cat_pipe = make_pipeline(
SimpleImputer(strategy="constant", fill_value="missing"),
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
)
preprocessor_tree = make_column_transformer(
(num_pipe, selector(dtype_include="number")),
(cat_pipe, selector(dtype_include="category")),
n_jobs=2,
)
rf_clf = make_pipeline(
preprocessor_tree, RandomForestClassifier(random_state=42, n_jobs=2)
)
# %%
index += ["Random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# The :class:`~sklearn.ensemble.RandomForestClassifier` is also affected by
# the class imbalance, slightly less than the linear model. Now, we will
# present different approaches to improve the performance of these 2 models.
#
# Use `class_weight`
# ..................
#
# Most of the models in `scikit-learn` have a parameter `class_weight`. This
# parameter will affect the computation of the loss in linear model or the
# criterion in the tree-based model to penalize differently a false
# classification from the minority and majority class. We can set
# `class_weight="balanced"` such that the weight applied is inversely
# proportional to the class frequency. We test this parametrization in both
# linear model and tree-based model.
# %%
lr_clf.set_params(logisticregression__class_weight="balanced")
index += ["Logistic regression with balanced class weights"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %%
rf_clf.set_params(randomforestclassifier__class_weight="balanced")
index += ["Random forest with balanced class weights"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# We can see that using `class_weight` was really effective for the linear
# model, alleviating the issue of learning from imbalanced classes. However,
# the :class:`~sklearn.ensemble.RandomForestClassifier` is still biased toward
# the majority class, mainly due to the criterion which is not suited enough to
# fight the class imbalance.
#
# Resample the training set during learning
# .........................................
#
# Another way is to resample the training set by under-sampling or
# over-sampling some of the samples. `imbalanced-learn` provides some samplers
# to do such processing.
# %%
from imblearn.pipeline import make_pipeline as make_pipeline_with_sampler
from imblearn.under_sampling import RandomUnderSampler
lr_clf = make_pipeline_with_sampler(
preprocessor_linear,
RandomUnderSampler(random_state=42),
LogisticRegression(max_iter=1000),
)
# %%
index += ["Under-sampling + Logistic regression"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %%
rf_clf = make_pipeline_with_sampler(
preprocessor_tree,
RandomUnderSampler(random_state=42),
RandomForestClassifier(random_state=42, n_jobs=2),
)
# %%
index += ["Under-sampling + Random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# Applying a random under-sampler before training the linear model or random
# forest allows the model to not focus on the majority class, at the cost of
# making more mistakes for samples in the majority class (i.e. decreased
# accuracy).
#
# We could apply any type of sampler and find which one works best
# on the current dataset.
#
# Instead, we will present another way by using classifiers which will apply
# sampling internally.
#
# Use of specific balanced algorithms from imbalanced-learn
# .........................................................
#
# We already showed that random under-sampling can be effective on a decision
# tree. However, instead of under-sampling the dataset once, one could
# under-sample the original dataset before taking each bootstrap sample. This is
# the base of the :class:`imblearn.ensemble.BalancedRandomForestClassifier` and
# :class:`~imblearn.ensemble.BalancedBaggingClassifier`.
# %%
from imblearn.ensemble import BalancedRandomForestClassifier
rf_clf = make_pipeline(
preprocessor_tree,
BalancedRandomForestClassifier(random_state=42, n_jobs=2),
)
# %%
index += ["Balanced random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# The performance with the
# :class:`~imblearn.ensemble.BalancedRandomForestClassifier` is better than
# applying a single random under-sampling. We will use a gradient-boosting
# classifier within a :class:`~imblearn.ensemble.BalancedBaggingClassifier`.
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
from imblearn.ensemble import BalancedBaggingClassifier
bag_clf = make_pipeline(
preprocessor_tree,
BalancedBaggingClassifier(
base_estimator=HistGradientBoostingClassifier(random_state=42),
n_estimators=10,
random_state=42,
n_jobs=2,
),
)
index += ["Balanced bag of histogram gradient boosting"]
cv_result = cross_validate(bag_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# This last approach is the most effective. The different under-samplings bring
# some diversity for the different GBDTs to learn from, so they do not all focus on the same
# portion of the majority class.
| """
==========================================================
Fitting model on imbalanced datasets and how to fight bias
==========================================================
This example illustrates the problem induced by learning on datasets having
imbalanced classes. Subsequently, we compare different approaches alleviating
these negative effects.
"""
# Authors: <NAME> <<EMAIL>>
# License: MIT
# %%
print(__doc__)
# %% [markdown]
# Problem definition
# ------------------
#
# We are dropping the following features:
#
# - "fnlwgt": this feature was created while studying the "adult" dataset.
# Thus, we will not use this feature which is not acquired during the survey.
# - "education-num": it is encoding the same information than "education".
# Thus, we are removing one of these 2 features.
# %%
from sklearn.datasets import fetch_openml
df, y = fetch_openml("adult", version=2, as_frame=True, return_X_y=True)
df = df.drop(columns=["fnlwgt", "education-num"])
# %% [markdown]
# The "adult" dataset as a class ratio of about 3:1
# %%
classes_count = y.value_counts()
classes_count
# %% [markdown]
# This dataset is only slightly imbalanced. To better highlight the effect of
# learning from an imbalanced dataset, we will increase its ratio to 30:1
# %%
from imblearn.datasets import make_imbalance
ratio = 30
df_res, y_res = make_imbalance(
df,
y,
sampling_strategy={classes_count.idxmin(): classes_count.max() // ratio},
)
y_res.value_counts()
# %% [markdown]
# We will perform a cross-validation evaluation to get an estimate of the test
# score.
#
# As a baseline, we could use a classifier which will always predict the
# majority class independently of the features provided.
# %%
from sklearn.model_selection import cross_validate
from sklearn.dummy import DummyClassifier
dummy_clf = DummyClassifier(strategy="most_frequent")
scoring = ["accuracy", "balanced_accuracy"]
cv_result = cross_validate(dummy_clf, df_res, y_res, scoring=scoring)
print(f"Accuracy score of a dummy classifier: {cv_result['test_accuracy'].mean():.3f}")
# %% [markdown]
# Instead of using the accuracy, we can use the balanced accuracy which will
# take into account the balancing issue.
# %%
print(
f"Balanced accuracy score of a dummy classifier: "
f"{cv_result['test_balanced_accuracy'].mean():.3f}"
)
# %% [markdown]
# Strategies to learn from an imbalanced dataset
# ----------------------------------------------
# We will use a dictionary and a list to continuously store the results of
# our experiments and show them as a pandas dataframe.
# %%
index = []
scores = {"Accuracy": [], "Balanced accuracy": []}
# %% [markdown]
# Dummy baseline
# ..............
#
# Before training a real machine learning model, we can store the results
# obtained with our :class:`~sklearn.dummy.DummyClassifier`.
# %%
import pandas as pd
index += ["Dummy classifier"]
cv_result = cross_validate(dummy_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# Linear classifier baseline
# ..........................
#
# We will create a machine learning pipeline using a
# :class:`~sklearn.linear_model.LogisticRegression` classifier. In this regard,
# we will need to one-hot encode the categorical columns and standardize the
# numerical columns before injecting the data into the
# :class:`~sklearn.linear_model.LogisticRegression` classifier.
#
# First, we define our numerical and categorical pipelines.
# %%
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.pipeline import make_pipeline
num_pipe = make_pipeline(
StandardScaler(), SimpleImputer(strategy="mean", add_indicator=True)
)
cat_pipe = make_pipeline(
SimpleImputer(strategy="constant", fill_value="missing"),
OneHotEncoder(handle_unknown="ignore"),
)
# %% [markdown]
# Then, we can create a preprocessor which will dispatch the categorical
# columns to the categorical pipeline and the numerical columns to the
# numerical pipeline
# %%
from sklearn.compose import make_column_transformer
from sklearn.compose import make_column_selector as selector
preprocessor_linear = make_column_transformer(
(num_pipe, selector(dtype_include="number")),
(cat_pipe, selector(dtype_include="category")),
n_jobs=2,
)
# %% [markdown]
# Finally, we connect our preprocessor with our
# :class:`~sklearn.linear_model.LogisticRegression`. We can then evaluate our
# model.
# %%
from sklearn.linear_model import LogisticRegression
lr_clf = make_pipeline(preprocessor_linear, LogisticRegression(max_iter=1000))
# %%
index += ["Logistic regression"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# We can see that our linear model is learning slightly better than our dummy
# baseline. However, it is impacted by the class imbalance.
#
# We can verify that something similar is happening with a tree-based model
# such as :class:`~sklearn.ensemble.RandomForestClassifier`. With this type of
# classifier, we will not need to scale the numerical data, and we will only
# need to ordinal encode the categorical data.
# %%
from sklearn.preprocessing import OrdinalEncoder
from sklearn.ensemble import RandomForestClassifier
num_pipe = SimpleImputer(strategy="mean", add_indicator=True)
cat_pipe = make_pipeline(
SimpleImputer(strategy="constant", fill_value="missing"),
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
)
preprocessor_tree = make_column_transformer(
(num_pipe, selector(dtype_include="number")),
(cat_pipe, selector(dtype_include="category")),
n_jobs=2,
)
rf_clf = make_pipeline(
preprocessor_tree, RandomForestClassifier(random_state=42, n_jobs=2)
)
# %%
index += ["Random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# The :class:`~sklearn.ensemble.RandomForestClassifier` is also affected by
# the class imbalance, slightly less than the linear model. Now, we will
# present different approaches to improve the performance of these 2 models.
#
# Use `class_weight`
# ..................
#
# Most of the models in `scikit-learn` have a parameter `class_weight`. This
# parameter will affect the computation of the loss in linear model or the
# criterion in the tree-based model to penalize differently a false
# classification from the minority and majority class. We can set
# `class_weight="balanced"` such that the weight applied is inversely
# proportional to the class frequency. We test this parametrization in both
# linear model and tree-based model.
# %%
lr_clf.set_params(logisticregression__class_weight="balanced")
index += ["Logistic regression with balanced class weights"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %%
rf_clf.set_params(randomforestclassifier__class_weight="balanced")
index += ["Random forest with balanced class weights"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# We can see that using `class_weight` was really effective for the linear
# model, alleviating the issue of learning from imbalanced classes. However,
# the :class:`~sklearn.ensemble.RandomForestClassifier` is still biased toward
# the majority class, mainly due to the criterion which is not suited enough to
# fight the class imbalance.
#
# Resample the training set during learning
# .........................................
#
# Another way is to resample the training set by under-sampling or
# over-sampling some of the samples. `imbalanced-learn` provides some samplers
# to do such processing.
# %%
from imblearn.pipeline import make_pipeline as make_pipeline_with_sampler
from imblearn.under_sampling import RandomUnderSampler
lr_clf = make_pipeline_with_sampler(
preprocessor_linear,
RandomUnderSampler(random_state=42),
LogisticRegression(max_iter=1000),
)
# %%
index += ["Under-sampling + Logistic regression"]
cv_result = cross_validate(lr_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %%
rf_clf = make_pipeline_with_sampler(
preprocessor_tree,
RandomUnderSampler(random_state=42),
RandomForestClassifier(random_state=42, n_jobs=2),
)
# %%
index += ["Under-sampling + Random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# Applying a random under-sampler before training the linear model or random
# forest allows the model to not focus on the majority class, at the cost of
# making more mistakes for samples in the majority class (i.e. decreased
# accuracy).
#
# We could apply any type of sampler and find which one works best
# on the current dataset.
#
# Instead, we will present another way by using classifiers which will apply
# sampling internally.
#
# Use of specific balanced algorithms from imbalanced-learn
# .........................................................
#
# We already showed that random under-sampling can be effective on a decision
# tree. However, instead of under-sampling the dataset once, one could
# under-sample the original dataset before taking each bootstrap sample. This is
# the base of the :class:`imblearn.ensemble.BalancedRandomForestClassifier` and
# :class:`~imblearn.ensemble.BalancedBaggingClassifier`.
# %%
from imblearn.ensemble import BalancedRandomForestClassifier
rf_clf = make_pipeline(
preprocessor_tree,
BalancedRandomForestClassifier(random_state=42, n_jobs=2),
)
# %%
index += ["Balanced random forest"]
cv_result = cross_validate(rf_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# The performance with the
# :class:`~imblearn.ensemble.BalancedRandomForestClassifier` is better than
# applying a single random under-sampling. We will use a gradient-boosting
# classifier within a :class:`~imblearn.ensemble.BalancedBaggingClassifier`.
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
from imblearn.ensemble import BalancedBaggingClassifier
bag_clf = make_pipeline(
preprocessor_tree,
BalancedBaggingClassifier(
base_estimator=HistGradientBoostingClassifier(random_state=42),
n_estimators=10,
random_state=42,
n_jobs=2,
),
)
index += ["Balanced bag of histogram gradient boosting"]
cv_result = cross_validate(bag_clf, df_res, y_res, scoring=scoring)
scores["Accuracy"].append(cv_result["test_accuracy"].mean())
scores["Balanced accuracy"].append(cv_result["test_balanced_accuracy"].mean())
df_scores = pd.DataFrame(scores, index=index)
df_scores
# %% [markdown]
# This last approach is the most effective. The different under-samplings bring
# some diversity for the different GBDTs to learn from, so they do not all focus on the same
# portion of the majority class.
| en | 0.831571 | ========================================================== Fitting model on imbalanced datasets and how to fight bias ========================================================== This example illustrates the problem induced by learning on datasets having imbalanced classes. Subsequently, we compare different approaches alleviating these negative effects. # Authors: <NAME> <<EMAIL>> # License: MIT # %% # %% [markdown] # Problem definition # ------------------ # # We are dropping the following features: # # - "fnlwgt": this feature was created while studying the "adult" dataset. # Thus, we will not use this feature which is not acquired during the survey. # - "education-num": it is encoding the same information than "education". # Thus, we are removing one of these 2 features. # %% # %% [markdown] # The "adult" dataset as a class ratio of about 3:1 # %% # %% [markdown] # This dataset is only slightly imbalanced. To better highlight the effect of # learning from an imbalanced dataset, we will increase its ratio to 30:1 # %% # %% [markdown] # We will perform a cross-validation evaluation to get an estimate of the test # score. # # As a baseline, we could use a classifier which will always predict the # majority class independently of the features provided. # %% # %% [markdown] # Instead of using the accuracy, we can use the balanced accuracy which will # take into account the balancing issue. # %% # %% [markdown] # Strategies to learn from an imbalanced dataset # ---------------------------------------------- # We will use a dictionary and a list to continuously store the results of # our experiments and show them as a pandas dataframe. # %% # %% [markdown] # Dummy baseline # .............. # # Before to train a real machine learning model, we can store the results # obtained with our :class:`~sklearn.dummy.DummyClassifier`. # %% # %% [markdown] # Linear classifier baseline # .......................... # # We will create a machine learning pipeline using a # :class:`~sklearn.linear_model.LogisticRegression` classifier. In this regard, # we will need to one-hot encode the categorical columns and standardized the # numerical columns before to inject the data into the # :class:`~sklearn.linear_model.LogisticRegression` classifier. # # First, we define our numerical and categorical pipelines. # %% # %% [markdown] # Then, we can create a preprocessor which will dispatch the categorical # columns to the categorical pipeline and the numerical columns to the # numerical pipeline # %% # %% [markdown] # Finally, we connect our preprocessor with our # :class:`~sklearn.linear_model.LogisticRegression`. We can then evaluate our # model. # %% # %% # %% [markdown] # We can see that our linear model is learning slightly better than our dummy # baseline. However, it is impacted by the class imbalance. # # We can verify that something similar is happening with a tree-based model # such as :class:`~sklearn.ensemble.RandomForestClassifier`. With this type of # classifier, we will not need to scale the numerical data, and we will only # need to ordinal encode the categorical data. # %% # %% # %% [markdown] # The :class:`~sklearn.ensemble.RandomForestClassifier` is as well affected by # the class imbalanced, slightly less than the linear model. Now, we will # present different approach to improve the performance of these 2 models. # # Use `class_weight` # .................. # # Most of the models in `scikit-learn` have a parameter `class_weight`. 
This # parameter will affect the computation of the loss in linear model or the # criterion in the tree-based model to penalize differently a false # classification from the minority and majority class. We can set # `class_weight="balanced"` such that the weight applied is inversely # proportional to the class frequency. We test this parametrization in both # linear model and tree-based model. # %% # %% # %% [markdown] # We can see that using `class_weight` was really effective for the linear # model, alleviating the issue of learning from imbalanced classes. However, # the :class:`~sklearn.ensemble.RandomForestClassifier` is still biased toward # the majority class, mainly due to the criterion which is not suited enough to # fight the class imbalance. # # Resample the training set during learning # ......................................... # # Another way is to resample the training set by under-sampling or # over-sampling some of the samples. `imbalanced-learn` provides some samplers # to do such processing. # %% # %% # %% # %% # %% [markdown] # Applying a random under-sampler before the training of the linear model or # random forest, allows to not focus on the majority class at the cost of # making more mistake for samples in the majority class (i.e. decreased # accuracy). # # We could apply any type of samplers and find which sampler is working best # on the current dataset. # # Instead, we will present another way by using classifiers which will apply # sampling internally. # # Use of specific balanced algorithms from imbalanced-learn # ......................................................... # # We already showed that random under-sampling can be effective on decision # tree. However, instead of under-sampling once the dataset, one could # under-sample the original dataset before to take a bootstrap sample. This is # the base of the :class:`imblearn.ensemble.BalancedRandomForestClassifier` and # :class:`~imblearn.ensemble.BalancedBaggingClassifier`. # %% # %% # %% [markdown] # The performance with the # :class:`~imblearn.ensemble.BalancedRandomForestClassifier` is better than # applying a single random under-sampling. We will use a gradient-boosting # classifier within a :class:`~imblearn.ensemble.BalancedBaggingClassifier`. # noqa # %% [markdown] # This last approach is the most effective. The different under-sampling allows # to bring some diversity for the different GBDT to learn and not focus on a # portion of the majority class. | 3.613719 | 4 |
python/fix-page-breaks.py | utcompling/GeoAnnotate | 9 | 10770 | <filename>python/fix-page-breaks.py
#!/usr/bin/python
import argparse
import re
parser = argparse.ArgumentParser(description='Fix page breaks in War of The Rebellion text')
parser.add_argument('files', nargs='*',
help='Files to process')
args = parser.parse_args()
for file in args.files:
outfile = open(file + ".joined-pagebreak", "w")
text = ''.join(open(file).readlines())
pages = re.split("PAGEBREAK\n", text)
# Remove empty pages
pages = [x for x in pages if x]
for i in xrange(0, len(pages) - 1):
# Remove extraneous blank lines
pages[i] = re.sub("\n\n\n+", "\n\n", pages[i])
# Undo HTML entities
pages[i] = re.sub("&", "&", pages[i])
pages[i] = re.sub("<", "<", pages[i])
pages[i] = re.sub(">", ">", pages[i])
# Do the following a second time to handle cases of
# &amp;, which are common
pages[i] = re.sub("&", "&", pages[i])
m = re.match(r"^( *\[*CHAP\. [A-Z]+\.\]* *\n\n?)(.*)", pages[i], re.S)
if m:
pages[i] = m.group(2)
print "Removed CHAP heading on page %s:\n[%s]\n" % (i, m.group(1))
m = re.match("(.*?)(\n?(?: *[0-9]+|S) *(?:R R(?: *[-_VY]+ *[^\n]*)?|R *-+ *[^\n]*)\n)(.*)$", pages[i], re.S)
if m:
pages[i] = m.group(1) + m.group(3)
print "Removed R R notation on page %s:\n[%s]\n" % (i, m.group(2))
m = re.match(r"(.*?\n)(\n* *------+\n( *(?:[*+#@$|^\\/&~=>!?]|[abc] |[abc][A-Z])[^\n]*\n|\n)* *-------+\n+(?:[*+#@$|^\\/&~=>!?] *[A-Z][^\n]*\n|\n)*)$", pages[i], re.S)
if m:
pages[i] = m.group(1)
print "Removed footnote on page %s:\n[%s]\n" % (i, m.group(2))
m = re.match("(.*?\n)(\n*[*]?MAP[^\n]*\n+)$", pages[i], re.S)
if m:
pages[i] = m.group(1)
print "Removed MAP notation on page %s:\n[%s]\n" % (i, m.group(2))
while pages[i] and pages[i][-1] == "\n":
pages[i] = pages[i][0:-1]
if "\n" not in pages[i]:
lastlinelen = len(pages[i])
else:
m = re.match(".*\n([^\n]*)$", pages[i], re.S)
assert m
lastlinelen = len(m.group(1))
shortline = lastlinelen < 60
join = False
hyphenjoin = False
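    # Decide whether this page's text should be joined with the next page: hyphenated or mid-sentence endings are joined unless the last line is short or clearly ends a sentence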
if not pages[i]:
continue
if len(pages[i]) >= 2 and pages[i][-1] == '-' and pages[i][-2].islower():
if shortline:
msg = "PAGEBREAK SHORT-LINE HYPHEN, NOT JOINED"
else:
msg = "PAGEBREAK HYPHEN-JOINED"
hyphenjoin = True
join = True
elif pages[i + 1] and pages[i + 1][0].islower():
if shortline:
msg = "PAGEBREAK SHORT-LINE NEXT PAGE STARTS LOWERCASE, NOT JOINED"
else:
msg = "PAGEBREAK NEXT PAGE STARTS LOWERCASE, JOINED"
join = True
elif len(pages[i]) >= 3 and pages[i][-1] == '.' and pages[i][-2].isupper() and pages[i][-3] in ['.', ' ']:
if shortline:
msg = "PAGEBREAK SHORT-LINE ENDS WITH ABBREVIATION PERIOD, NOT JOINED"
else:
msg = "PAGEBREAK ENDS ABBREV-PERIOD, JOINED"
join = True
elif pages[i][-1] == '.':
msg = "PAGEBREAK ENDS PERIOD, NOT JOINED"
elif len(pages[i]) >= 2 and pages[i][-1] == '*' and pages[i][-2] == '.':
msg = "PAGEBREAK ENDS PERIOD STAR, NOT JOINED"
elif len(pages[i]) >= 2 and pages[i][-1] == '"' and pages[i][-2] == '.':
msg = "PAGEBREAK ENDS PERIOD QUOTE, NOT JOINED"
elif pages[i][-1] == ':':
msg = "PAGEBREAK ENDS COLON, NOT JOINED"
elif pages[i][-1] == ',':
if shortline:
msg = "PAGEBREAK ENDS SHORT-LINE COMMA, NOT JOINED"
else:
msg = "PAGEBREAK ENDS COMMA, JOINED"
join = True
else:
if shortline:
msg = "PAGEBREAK ENDS SHORT-LINE OTHER, NOT JOINED"
else:
msg = "PAGEBREAK ENDS OTHER, JOINED"
join = True
print "Page %s: %s" % (i, msg)
if hyphenjoin:
outfile.write(pages[i][0:-1])
elif join:
outfile.write(pages[i] + " ")
else:
outfile.write(pages[i])
outfile.write("\n\n")
outfile.write("\n%s\n" % msg)
outfile.close()
| <filename>python/fix-page-breaks.py
#!/usr/bin/python
import argparse
import re
parser = argparse.ArgumentParser(description='Fix page breaks in War of The Rebellion text')
parser.add_argument('files', nargs='*',
help='Files to process')
args = parser.parse_args()
for file in args.files:
outfile = open(file + ".joined-pagebreak", "w")
text = ''.join(open(file).readlines())
pages = re.split("PAGEBREAK\n", text)
# Remove empty pages
pages = [x for x in pages if x]
for i in xrange(0, len(pages) - 1):
# Remove extraneous blank lines
pages[i] = re.sub("\n\n\n+", "\n\n", pages[i])
# Undo HTML entities
pages[i] = re.sub("&", "&", pages[i])
pages[i] = re.sub("<", "<", pages[i])
pages[i] = re.sub(">", ">", pages[i])
# Do the following a second time to handle cases of
# &amp;, which are common
pages[i] = re.sub("&", "&", pages[i])
m = re.match(r"^( *\[*CHAP\. [A-Z]+\.\]* *\n\n?)(.*)", pages[i], re.S)
if m:
pages[i] = m.group(2)
print "Removed CHAP heading on page %s:\n[%s]\n" % (i, m.group(1))
m = re.match("(.*?)(\n?(?: *[0-9]+|S) *(?:R R(?: *[-_VY]+ *[^\n]*)?|R *-+ *[^\n]*)\n)(.*)$", pages[i], re.S)
if m:
pages[i] = m.group(1) + m.group(3)
print "Removed R R notation on page %s:\n[%s]\n" % (i, m.group(2))
m = re.match(r"(.*?\n)(\n* *------+\n( *(?:[*+#@$|^\\/&~=>!?]|[abc] |[abc][A-Z])[^\n]*\n|\n)* *-------+\n+(?:[*+#@$|^\\/&~=>!?] *[A-Z][^\n]*\n|\n)*)$", pages[i], re.S)
if m:
pages[i] = m.group(1)
print "Removed footnote on page %s:\n[%s]\n" % (i, m.group(2))
m = re.match("(.*?\n)(\n*[*]?MAP[^\n]*\n+)$", pages[i], re.S)
if m:
pages[i] = m.group(1)
print "Removed MAP notation on page %s:\n[%s]\n" % (i, m.group(2))
while pages[i] and pages[i][-1] == "\n":
pages[i] = pages[i][0:-1]
if "\n" not in pages[i]:
lastlinelen = len(pages[i])
else:
m = re.match(".*\n([^\n]*)$", pages[i], re.S)
assert m
lastlinelen = len(m.group(1))
shortline = lastlinelen < 60
join = False
hyphenjoin = False
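    # Decide whether this page's text should be joined with the next page: hyphenated or mid-sentence endings are joined unless the last line is short or clearly ends a sentence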
if not pages[i]:
continue
if len(pages[i]) >= 2 and pages[i][-1] == '-' and pages[i][-2].islower():
if shortline:
msg = "PAGEBREAK SHORT-LINE HYPHEN, NOT JOINED"
else:
msg = "PAGEBREAK HYPHEN-JOINED"
hyphenjoin = True
join = True
elif pages[i + 1] and pages[i + 1][0].islower():
if shortline:
msg = "PAGEBREAK SHORT-LINE NEXT PAGE STARTS LOWERCASE, NOT JOINED"
else:
msg = "PAGEBREAK NEXT PAGE STARTS LOWERCASE, JOINED"
join = True
elif len(pages[i]) >= 3 and pages[i][-1] == '.' and pages[i][-2].isupper() and pages[i][-3] in ['.', ' ']:
if shortline:
msg = "PAGEBREAK SHORT-LINE ENDS WITH ABBREVIATION PERIOD, NOT JOINED"
else:
msg = "PAGEBREAK ENDS ABBREV-PERIOD, JOINED"
join = True
elif pages[i][-1] == '.':
msg = "PAGEBREAK ENDS PERIOD, NOT JOINED"
elif len(pages[i]) >= 2 and pages[i][-1] == '*' and pages[i][-2] == '.':
msg = "PAGEBREAK ENDS PERIOD STAR, NOT JOINED"
elif len(pages[i]) >= 2 and pages[i][-1] == '"' and pages[i][-2] == '.':
msg = "PAGEBREAK ENDS PERIOD QUOTE, NOT JOINED"
elif pages[i][-1] == ':':
msg = "PAGEBREAK ENDS COLON, NOT JOINED"
elif pages[i][-1] == ',':
if shortline:
msg = "PAGEBREAK ENDS SHORT-LINE COMMA, NOT JOINED"
else:
msg = "PAGEBREAK ENDS COMMA, JOINED"
join = True
else:
if shortline:
msg = "PAGEBREAK ENDS SHORT-LINE OTHER, NOT JOINED"
else:
msg = "PAGEBREAK ENDS OTHER, JOINED"
join = True
print "Page %s: %s" % (i, msg)
if hyphenjoin:
outfile.write(pages[i][0:-1])
elif join:
outfile.write(pages[i] + " ")
else:
outfile.write(pages[i])
outfile.write("\n\n")
outfile.write("\n%s\n" % msg)
outfile.close()
| en | 0.475347 | #!/usr/bin/python # Remove empty pages # Remove extraneous blank lines # Undo HTML entities # Do the following a second time to handle cases of # &amp;, which are common #@$|^\\/&~=>!?]|[abc] |[abc][A-Z])[^\n]*\n|\n)* *-------+\n+(?:[*+#@$|^\\/&~=>!?] *[A-Z][^\n]*\n|\n)*)$", pages[i], re.S) | 3.105685 | 3 |
2021/02/part2.py | FranciscoAT/advent-of-code | 0 | 10771 | <gh_stars>0
def main(file: str) -> None:
depth = 0
distance = 0
aim = 0
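    # Part 2: 'down'/'up' only adjust the aim; 'forward' moves horizontally and changes depth by aim * unit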
with open(f"{file}.in") as f:
for line in f.readlines():
line = line.rstrip().split(" ")
command = line[0]
unit = int(line[1])
if command == "forward":
distance += unit
depth += aim * unit
elif command == "down":
aim += unit
else:
aim -= unit
print(f"{file}: {depth * distance}")
if __name__ == "__main__":
main("test")
main("puzzle")
| def main(file: str) -> None:
depth = 0
distance = 0
aim = 0
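    # Part 2: 'down'/'up' only adjust the aim; 'forward' moves horizontally and changes depth by aim * unit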
with open(f"{file}.in") as f:
for line in f.readlines():
line = line.rstrip().split(" ")
command = line[0]
unit = int(line[1])
if command == "forward":
distance += unit
depth += aim * unit
elif command == "down":
aim += unit
else:
aim -= unit
print(f"{file}: {depth * distance}")
if __name__ == "__main__":
main("test")
main("puzzle") | none | 1 | 3.549312 | 4 |
|
tf_seal/python/tensor.py | karlhigley/tf-seal | 94 | 10772 | <reponame>karlhigley/tf-seal<filename>tf_seal/python/tensor.py
import numpy as np
import tensorflow as tf
import tf_seal.python.ops.seal_ops as ops
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops as tf_ops
class Tensor(object):
def __init__(self, value, secret_key, public_keys):
assert isinstance(value, tf.Tensor), type(value)
assert value.dtype is tf.variant, value.dtype
self._raw = value
self._public_keys = public_keys
self._secret_key = secret_key
@property
def shape(self):
return self._raw.shape
@property
def name(self):
return self._raw.name
@property
def dtype(self):
return tf.int32
# return tf.string
def eval(self, session=None, dtype=None):
tf_tensor = convert_from_tensor(self, dtype=dtype)
evaluated = tf_tensor.eval(session=session)
return evaluated
def __add__(self, other):
if isinstance(other, Tensor):
res = ops.seal_add(self._raw, other._raw)
else:
res = ops.seal_add_plain(self._raw, other)
return Tensor(res, self._secret_key, self._public_keys)
# def __sub__(self, other):
# other = convert_to_tensor(other)
# res = ops.big_sub(self._raw, other._raw)
# return Tensor(res)
def __mul__(self, other):
if isinstance(other, Tensor):
res = ops.seal_mul(self._raw, other._raw, self._public_keys)
else:
res = ops.seal_mul_plain(self._raw, other)
return Tensor(res, self._secret_key, self._public_keys)
def matmul(self, other):
if isinstance(other, Tensor):
res = ops.seal_mat_mul(self._raw, other._raw, self._public_keys)
else:
res = ops.seal_mat_mul_plain(self._raw, other, self._public_keys)
return Tensor(res, self._secret_key, self._public_keys)
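# fetching a Tensor in Session.run decrypts it and returns the values as float64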
def _fetch_function(seal_tensor):
unwrapped = [convert_from_tensor(seal_tensor, dtype=tf.float64)]
rewrapper = lambda components_fetched: components_fetched[0].astype(np.float64)
return unwrapped, rewrapper
def _feed_function(seal_tensor, feed_value):
return [(seal_tensor._raw, feed_value)]
def _feed_function_for_partial_run(seal_tensor):
return [seal_tensor._raw]
# this allows tf_seal.Tensor to be passed directly to tf.Session.run,
# unwrapping and converting the result as needed
tf_session.register_session_run_conversion_functions(
tensor_type=Tensor,
fetch_function=_fetch_function,
feed_function=_feed_function,
feed_function_for_partial_run=_feed_function_for_partial_run,
)
def _tensor_conversion_function(tensor, dtype=None, name=None, as_ref=False):
assert name is None, "Not implemented, name='{}'".format(name)
assert not as_ref, "Not implemented, as_ref={}".format(as_ref)
assert dtype in [tf.float32, tf.float64, None], dtype
return convert_from_tensor(tensor, dtype=dtype)
# TODO(Morten)
# this allows implicit conversion of tf_seal.Tensor to tf.Tensor,
# but since the output dtype is determined by the outer context
# we essentially have to export with the implied risk of data loss
tf_ops.register_tensor_conversion_function(Tensor, _tensor_conversion_function)
# this allows Tensor to pass the tf.is_tensor test
tf_ops.register_dense_tensor_like_type(Tensor)
# this allows tf_seal.Tensor to be plumbed through Keras layers
# but seems only truly useful when used in conjunction with
# `register_tensor_conversion_function`
tf_utils.register_symbolic_tensor_type(Tensor)
def constant(tensor, secret_key, public_keys):
assert isinstance(tensor, (np.ndarray, list, tuple)), type(tensor)
return convert_to_tensor(tensor, secret_key, public_keys)
def _convert_numpy_tensor(tensor, secret_key, public_keys):
if len(tensor.shape) > 2:
raise ValueError("Only matrices are supported for now.")
# make sure we have a full matrix
while len(tensor.shape) < 2:
tensor = np.expand_dims(tensor, 0)
if np.issubdtype(tensor.dtype, np.float32) \
or np.issubdtype(tensor.dtype, np.float64):
# supported as-is
return Tensor(ops.seal_encrypt(tensor, public_keys), secret_key, public_keys)
raise ValueError("Don't know how to convert NumPy tensor with dtype '{}'".format(tensor.dtype))
def _convert_tensorflow_tensor(tensor, secret_key, public_keys):
if len(tensor.shape) > 2:
raise ValueError("Only matrices are supported for now.")
# make sure we have a full matrix
while len(tensor.shape) < 2:
tensor = tf.expand_dims(tensor, 0)
if tensor.dtype in (tf.float32, tf.float64):
# supported as-is
return Tensor(ops.seal_encrypt(tensor, public_keys), secret_key, public_keys)
raise ValueError("Don't know how to convert TensorFlow tensor with dtype '{}'".format(tensor.dtype))
def convert_to_tensor(tensor, secret_key, public_keys):
if isinstance(tensor, Tensor):
return tensor
if tensor is None:
return None
if isinstance(tensor, (float)):
return _convert_numpy_tensor(np.array([tensor]), secret_key, public_keys)
if isinstance(tensor, (list, tuple)):
return _convert_numpy_tensor(np.array(tensor), secret_key, public_keys)
if isinstance(tensor, np.ndarray):
return _convert_numpy_tensor(tensor, secret_key, public_keys)
if isinstance(tensor, tf.Tensor):
return _convert_tensorflow_tensor(tensor, secret_key, public_keys)
raise ValueError("Don't know how to convert value of type {}".format(type(tensor)))
def convert_from_tensor(value, dtype=None):
assert isinstance(value, Tensor), type(value)
if dtype is None:
dtype = tf.float64
if dtype in [tf.float32, tf.float64]:
return ops.seal_decrypt(value._raw, value._secret_key, dtype=dtype)
raise ValueError("Don't know how to evaluate to dtype '{}'".format(dtype))
def add(x, y):
# TODO(Morten) lifting etc
return x + y
def sub(x, y):
# TODO(Morten) lifting etc
return x - y
def mul(x, y):
# TODO(Morten) lifting etc
return x * y
def matmul(x, y):
# TODO(Morten) lifting etc
return x.matmul(y)
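# evaluate a polynomial with plaintext coefficients at an encrypted tensor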
def poly_eval(x, coeffs):
res = ops.seal_poly_eval(x._raw, coeffs, x._public_keys)
return Tensor(res, x._secret_key, x._public_keys)
| import numpy as np
import tensorflow as tf
import tf_seal.python.ops.seal_ops as ops
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops as tf_ops
class Tensor(object):
def __init__(self, value, secret_key, public_keys):
assert isinstance(value, tf.Tensor), type(value)
assert value.dtype is tf.variant, value.dtype
self._raw = value
self._public_keys = public_keys
self._secret_key = secret_key
@property
def shape(self):
return self._raw.shape
@property
def name(self):
return self._raw.name
@property
def dtype(self):
return tf.int32
# return tf.string
def eval(self, session=None, dtype=None):
tf_tensor = convert_from_tensor(self, dtype=dtype)
evaluated = tf_tensor.eval(session=session)
return evaluated
def __add__(self, other):
if isinstance(other, Tensor):
res = ops.seal_add(self._raw, other._raw)
else:
res = ops.seal_add_plain(self._raw, other)
return Tensor(res, self._secret_key, self._public_keys)
# def __sub__(self, other):
# other = convert_to_tensor(other)
# res = ops.big_sub(self._raw, other._raw)
# return Tensor(res)
def __mul__(self, other):
if isinstance(other, Tensor):
res = ops.seal_mul(self._raw, other._raw, self._public_keys)
else:
res = ops.seal_mul_plain(self._raw, other)
return Tensor(res, self._secret_key, self._public_keys)
def matmul(self, other):
if isinstance(other, Tensor):
res = ops.seal_mat_mul(self._raw, other._raw, self._public_keys)
else:
res = ops.seal_mat_mul_plain(self._raw, other, self._public_keys)
return Tensor(res, self._secret_key, self._public_keys)
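# fetching a Tensor in Session.run decrypts it and returns the values as float64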
def _fetch_function(seal_tensor):
unwrapped = [convert_from_tensor(seal_tensor, dtype=tf.float64)]
rewrapper = lambda components_fetched: components_fetched[0].astype(np.float64)
return unwrapped, rewrapper
def _feed_function(seal_tensor, feed_value):
return [(seal_tensor._raw, feed_value)]
def _feed_function_for_partial_run(seal_tensor):
return [seal_tensor._raw]
# this allows tf_seal.Tensor to be passed directly to tf.Session.run,
# unwrapping and converting the result as needed
tf_session.register_session_run_conversion_functions(
tensor_type=Tensor,
fetch_function=_fetch_function,
feed_function=_feed_function,
feed_function_for_partial_run=_feed_function_for_partial_run,
)
def _tensor_conversion_function(tensor, dtype=None, name=None, as_ref=False):
assert name is None, "Not implemented, name='{}'".format(name)
assert not as_ref, "Not implemented, as_ref={}".format(as_ref)
assert dtype in [tf.float32, tf.float64, None], dtype
return convert_from_tensor(tensor, dtype=dtype)
# TODO(Morten)
# this allows implicit conversion of tf_seal.Tensor to tf.Tensor,
# but since the output dtype is determined by the outer context
# we essentially have to export with the implied risk of data loss
tf_ops.register_tensor_conversion_function(Tensor, _tensor_conversion_function)
# this allows Tensor to pass the tf.is_tensor test
tf_ops.register_dense_tensor_like_type(Tensor)
# this allows tf_seal.Tensor to be plumbed through Keras layers
# but seems only truly useful when used in conjunction with
# `register_tensor_conversion_function`
tf_utils.register_symbolic_tensor_type(Tensor)
def constant(tensor, secret_key, public_keys):
assert isinstance(tensor, (np.ndarray, list, tuple)), type(tensor)
return convert_to_tensor(tensor, secret_key, public_keys)
def _convert_numpy_tensor(tensor, secret_key, public_keys):
if len(tensor.shape) > 2:
raise ValueError("Only matrices are supported for now.")
# make sure we have a full matrix
while len(tensor.shape) < 2:
tensor = np.expand_dims(tensor, 0)
if np.issubdtype(tensor.dtype, np.float32) \
or np.issubdtype(tensor.dtype, np.float64):
# supported as-is
return Tensor(ops.seal_encrypt(tensor, public_keys), secret_key, public_keys)
raise ValueError("Don't know how to convert NumPy tensor with dtype '{}'".format(tensor.dtype))
def _convert_tensorflow_tensor(tensor, secret_key, public_keys):
if len(tensor.shape) > 2:
raise ValueError("Only matrices are supported for now.")
# make sure we have a full matrix
while len(tensor.shape) < 2:
tensor = tf.expand_dims(tensor, 0)
if tensor.dtype in (tf.float32, tf.float64):
# supported as-is
return Tensor(ops.seal_encrypt(tensor, public_keys), secret_key, public_keys)
raise ValueError("Don't know how to convert TensorFlow tensor with dtype '{}'".format(tensor.dtype))
def convert_to_tensor(tensor, secret_key, public_keys):
if isinstance(tensor, Tensor):
return tensor
if tensor is None:
return None
if isinstance(tensor, (float)):
return _convert_numpy_tensor(np.array([tensor]), secret_key, public_keys)
if isinstance(tensor, (list, tuple)):
return _convert_numpy_tensor(np.array(tensor), secret_key, public_keys)
if isinstance(tensor, np.ndarray):
return _convert_numpy_tensor(tensor, secret_key, public_keys)
if isinstance(tensor, tf.Tensor):
return _convert_tensorflow_tensor(tensor, secret_key, public_keys)
raise ValueError("Don't know how to convert value of type {}".format(type(tensor)))
def convert_from_tensor(value, dtype=None):
assert isinstance(value, Tensor), type(value)
if dtype is None:
dtype = tf.float64
if dtype in [tf.float32, tf.float64]:
return ops.seal_decrypt(value._raw, value._secret_key, dtype=dtype)
raise ValueError("Don't know how to evaluate to dtype '{}'".format(dtype))
def add(x, y):
# TODO(Morten) lifting etc
return x + y
def sub(x, y):
# TODO(Morten) lifting etc
return x - y
def mul(x, y):
# TODO(Morten) lifting etc
return x * y
def matmul(x, y):
# TODO(Morten) lifting etc
return x.matmul(y)
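# evaluate a polynomial with plaintext coefficients at an encrypted tensor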
def poly_eval(x, coeffs):
res = ops.seal_poly_eval(x._raw, coeffs, x._public_keys)
return Tensor(res, x._secret_key, x._public_keys) | en | 0.724403 | # return tf.string # def __sub__(self, other): # other = convert_to_tensor(other) # res = ops.big_sub(self._raw, other._raw) # return Tensor(res) # this allows tf_seal.Tensor to be passed directly to tf.Session.run, # unwrapping and converting the result as needed # TODO(Morten) # this allows implicit convertion of tf_seal.Tensor to tf.Tensor, # but since the output dtype is determined by the outer context # we essentially have to export with the implied risk of data loss # this allows Tensor to pass the tf.is_tensor test # this allows tf_big.Tensor to be plumbed through Keras layers # but seems only truly useful when used in conjunction with # `register_tensor_conversion_function` # make sure we have a full matrix # supported as-is # make sure we have a full matrix # supported as-is # TODO(Morten) lifting etc # TODO(Morten) lifting etc # TODO(Morten) lifting etc # TODO(Morten) lifting etc | 2.538666 | 3 |
program/admin.py | Dumbaz/autoradio-pv | 0 | 10773 | from django.core.exceptions import ObjectDoesNotExist
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import render
from django.conf import settings
from .models import Language, Type, MusicFocus, Category, Topic, RTRCategory, Host, Note, RRule, Schedule, Show, TimeSlot
from .forms import MusicFocusForm
from datetime import date, datetime, time, timedelta
class ActivityFilter(admin.SimpleListFilter):
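    """Base list filter separating active from inactive objects.
    Subclasses set parameter_name to choose the relation whose 'until' date is compared against now.
    """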
title = _("Activity")
def lookups(self, request, model_admin):
return (
('yes', _("active")),
('no', _("inactive"))
)
def queryset(self, request, queryset):
if self.parameter_name == 'has_timeslots': # active/inactive Schedules
if self.value() == 'yes':
return queryset.filter(until__gt=datetime.now()).distinct()
if self.value() == 'no':
return queryset.filter(until__lt=datetime.now()).distinct()
if self.parameter_name == 'has_schedules_timeslots': # active/inactive Shows
if self.value() == 'yes':
return queryset.filter(schedules__until__gt=datetime.now()).distinct()
if self.value() == 'no':
return queryset.filter(schedules__until__lt=datetime.now()).distinct()
if self.parameter_name == 'has_shows_schedules_timeslots': # active/inactive Hosts
if self.value() == 'yes':
return queryset.filter(shows__schedules__until__gt=datetime.now()).distinct()
if self.value() == 'no':
return queryset.filter(shows__schedules__until__lt=datetime.now()).distinct()
class ActiveSchedulesFilter(ActivityFilter):
parameter_name = 'has_timeslots'
class ActiveShowsFilter(ActivityFilter):
parameter_name = 'has_schedules_timeslots'
class ActiveHostsFilter(ActivityFilter):
parameter_name = 'has_shows_schedules_timeslots'
class TypeAdmin(admin.ModelAdmin):
list_display = ('type', 'admin_color', 'is_active')
list_filter = ('is_active',)
prepopulated_fields = {'slug': ('type',)}
class MusicFocusAdmin(admin.ModelAdmin):
form = MusicFocusForm
list_display = ('focus', 'abbrev', 'admin_buttons', 'is_active')
list_filter = ('is_active',)
prepopulated_fields = {'slug': ('focus',)}
class CategoryAdmin(admin.ModelAdmin):
list_display = ('category', 'abbrev', 'admin_buttons', 'is_active')
list_filter = ('is_active',)
prepopulated_fields = {'slug': ('category',)}
class LanguageAdmin(admin.ModelAdmin):
list_display = ('name', 'is_active')
list_filter = ('is_active',)
class TopicAdmin(admin.ModelAdmin):
list_display = ('topic', 'abbrev', 'admin_buttons', 'is_active')
list_filter = ('is_active',)
prepopulated_fields = {'slug': ('topic',)}
class RTRCategoryAdmin(admin.ModelAdmin):
list_display = ('rtrcategory', 'abbrev', 'is_active' )
list_filter = ('is_active',)
prepopulated_fields = {'slug': ('rtrcategory',)}
class HostAdmin(admin.ModelAdmin):
list_display = ('name', 'email', 'is_active')
list_filter = (ActiveHostsFilter, 'is_active',)
def get_queryset(self, request):
if request.user.is_superuser:
return Host.objects.all()
# Common users only see hosts of shows they own
return Host.objects.filter(shows__in=request.user.shows.all()).distinct()
class NoteAdmin(admin.ModelAdmin):
date_hierarchy = 'start'
list_display = ('title', 'show', 'start', 'status', 'user')
fields = (( 'show', 'timeslot'), 'title', 'slug', 'summary', 'content', 'image', 'host', 'status', 'cba_id')
prepopulated_fields = {'slug': ('title',)}
list_filter = ('status',)
ordering = ('timeslot',)
save_as = True
class Media:
js = [ settings.MEDIA_URL + 'js/calendar/lib/moment.min.js',
settings.MEDIA_URL + 'js/note_change.js', ]
def get_queryset(self, request):
if request.user.is_superuser:
shows = Show.objects.all()
else:
            # Common users only see notes of shows they own
shows = request.user.shows.all()
return super(NoteAdmin, self).get_queryset(request).filter(show__in=shows)
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
four_weeks_ago = datetime.now() - timedelta(weeks=4)
in_twelve_weeks = datetime.now() + timedelta(weeks=12)
if db_field.name == 'timeslot':
# Adding/Editing a note: load timeslots of the user's shows into the dropdown
# TODO: Don't show any timeslot in the select by default.
# User should first choose show, then timeslots are loaded into the select via ajax.
#
# How to do this while not constraining the queryset?
# Saving won't be possible otherwise, if queryset doesn't contain the selectable elements beforehand
#kwargs['queryset'] = TimeSlot.objects.filter(show=-1)
# Superusers see every timeslot for every show
if request.user.is_superuser:
kwargs['queryset'] = TimeSlot.objects.filter(start__gt=four_weeks_ago,
start__lt=in_twelve_weeks) # note__isnull=True
# Users see timeslots of shows they own
else:
kwargs['queryset'] = TimeSlot.objects.filter(show__in=request.user.shows.all(), start__gt=four_weeks_ago,
start__lt=in_twelve_weeks) # note__isnull=True
if db_field.name == 'show':
# Adding/Editing a note: load user's shows into the dropdown
# Common users only see shows they own
if not request.user.is_superuser:
kwargs['queryset'] = Show.objects.filter(pk__in=request.user.shows.all(), is_active=True)
if db_field.name == 'host':
# Common users only see hosts of shows they own
if not request.user.is_superuser:
kwargs['queryset'] = Host.objects.filter(shows__in=request.user.shows.all(), is_active=True).distinct()
return super(NoteAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def save_model(self, request, obj, form, change):
# Save the creator when adding a note
if not change:
obj.user = request.user
# Try to get direct audio URL from CBA
obj.audio_url = Note.get_audio_url(obj.cba_id)
obj.save()
class TimeSlotInline(admin.TabularInline):
model = TimeSlot
ordering = ('-end',)
class TimeSlotAdmin(admin.ModelAdmin):
model = TimeSlot
class ScheduleAdmin(admin.ModelAdmin):
actions = ('renew',)
inlines = (TimeSlotInline,)
fields = (('rrule', 'byweekday'), ('dstart', 'tstart', 'tend'), 'until', 'is_repetition', 'automation_id', 'fallback_id')
list_display = ('get_show_name', 'byweekday', 'rrule', 'tstart', 'tend', 'until')
list_filter = (ActiveSchedulesFilter, 'byweekday', 'rrule', 'is_repetition')
ordering = ('byweekday', 'dstart')
save_on_top = True
search_fields = ('show__name',)
def renew(self, request, queryset):
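        """Admin action: renew the selected schedules until December 31 of next year."""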
next_year = date.today().year + 1
until = date(next_year, 12, 31)
renewed = queryset.update(until=until)
if renewed == 1:
message = _("1 schedule was renewed until %s") % until
else:
message = _("%s schedule were renewed until %s") % (renewed, until)
self.message_user(request, message)
renew.short_description = _("Renew selected schedules")
def get_show_name(self, obj):
return obj.show.name
get_show_name.admin_order_field = 'show'
get_show_name.short_description = "Show"
class ScheduleInline(admin.TabularInline):
model = Schedule
ordering = ('pk', '-until', 'byweekday')
class ShowAdmin(admin.ModelAdmin):
filter_horizontal = ('hosts', 'owners', 'musicfocus', 'category', 'topic', 'language')
inlines = (ScheduleInline,)
list_display = ('name', 'short_description')
list_filter = (ActiveShowsFilter, 'type', 'category', 'topic', 'musicfocus', 'rtrcategory', 'language')
ordering = ('slug',)
prepopulated_fields = {'slug': ('name',)}
search_fields = ('name', 'short_description', 'description')
fields = (
'predecessor', 'type', 'name', 'slug', 'image', 'logo', 'short_description', 'description',
'email', 'website', 'hosts', 'owners', 'language', 'category', 'rtrcategory', 'topic',
'musicfocus', 'fallback_id', 'cba_series_id',
)
class Media:
js = [ settings.MEDIA_URL + 'js/calendar/lib/moment.min.js',
settings.MEDIA_URL + 'js/show_change.js', ]
css = { 'all': ('/program/styles.css',) }
def get_queryset(self, request):
if request.user.is_superuser:
# Superusers see all shows
shows = Show.objects.all()
else:
# Users only see shows they own
shows = request.user.shows.all()
return super(ShowAdmin, self).get_queryset(request).filter(pk__in=shows)
def get_readonly_fields(self, request, obj=None):
'''Limit field access for common users'''
if not request.user.is_superuser:
# TODO: how to set field 'name' readonly although it's required?
return ('predecessor', 'type', 'hosts', 'owners', 'language', 'category', 'topic', 'musicfocus', 'rtrcategory')
return list()
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
try:
show_id = int(request.get_full_path().split('/')[-2])
except ValueError:
show_id = None
print(db_field.name)
if db_field.name == 'predecessor' and show_id:
kwargs['queryset'] = Show.objects.exclude(pk=show_id)
if db_field.name == 'type':
kwargs['queryset'] = Type.objects.filter(is_active=True)
if db_field.name == 'rtrcategory':
kwargs['queryset'] = RTRCategory.objects.filter(is_active=True)
return super(ShowAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def formfield_for_manytomany(self, db_field, request, **kwargs):
if db_field.name == 'hosts':
kwargs["queryset"] = Host.objects.filter(is_active=True)
if db_field.name == 'language':
kwargs["queryset"] = Language.objects.filter(is_active=True)
if db_field.name == 'category':
kwargs["queryset"] = Category.objects.filter(is_active=True)
if db_field.name == 'topic':
kwargs["queryset"] = Topic.objects.filter(is_active=True)
if db_field.name == 'musicfocus':
kwargs["queryset"] = MusicFocus.objects.filter(is_active=True)
return super(ShowAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)
def save_formset(self, request, form, formset, change):
"""
Is called after the "save show"-form or collision-form were submitted
Saves the show after first submit
If any changes in schedules happened
* added/changed schedules are used to generate new timeslots and
matched against existing ones, which will be displayed in the collision form
If a collision form was submitted
* save the current schedule
* delete/create timeslots and relink notes after confirmation
Each step passes on to response_add or response_change which will
* either display the collision form for the next step
* or redirect to the original show-form if the resolving process has been finished
(= if either max_steps was surpassed or end_reached was True)
"""
self.end_reached = False
schedule_instances = formset.save(commit=False)
# If there are no schedules to save, do nothing
if schedule_instances:
show_id = schedule_instances[0].show.id
else:
self.end_reached = True
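        # Collision resolution runs one round per changed schedule; 'step' tracks the current round, 'max_steps' the total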
schedule = []
timeslots = []
max_steps = int(len(schedule_instances)) if len(schedule_instances) > 0 else 1
step = 1
if request.POST.get('step') == None:
# First save-show submit
# Generate thumbnails
if form.instance.image.name and settings.THUMBNAIL_SIZES:
for size in settings.THUMBNAIL_SIZES:
thumbnail = form.instance.image.crop[size].name
# Save show data only
            form.save()
# Delete schedules (as well as related timeslots and notes) if flagged as such
for obj in formset.deleted_objects:
obj.delete()
# If nothing else changed, do nothing and redirect to show-form
if not formset.changed_objects and not formset.new_objects:
self.end_reached = True
else:
# If a collision form was submitted
step = int(request.POST.get('step'))
if request.POST.get('num_inputs') != None and int(request.POST.get('num_inputs')) > 0:
print("Resolving conflicts...")
'''Declare and retrieve variables'''
# Either datetimes as string (e.g. '2017-01-01 00:00:00 - 2017-01-01 01:00:00') to create
# or ints of colliding timeslots to keep otherwise
resolved_timeslots = []
# IDs of colliding timeslots found in the db. If there's no corresponding collision to the
# same index in create_timeslot, value will be None
collisions = []
# Datetimes as string (e.g. '2017-01-01 00:00:00 - 2017-01-01 01:00:00') for timeslots to create
create_timeslots = []
# IDs of timeslots to delete
delete_timeslots = set()
# Number of timeslots to be generated
num_inputs = int(request.POST.get('num_inputs'))
# Numbers of notes to relink for existing timeslots and newly created ones
# each of them relating to one of these POST vars:
# POST.ntids[idx][id] and POST.ntids[idx][note_id] contain ids of existing timeslots and note_ids to link, while
# POST.ntind[idx][id] and POST.ntind[idx][note_id] contain indices of corresponding elements in create_timeslots
# and note_ids which will be linked after they're created and thus split into two lists beforehand
num_ntids = int(request.POST.get('num_ntids'))
num_ntind = int(request.POST.get('num_ntind'))
# Retrieve POST vars of current schedule
schedule_id = int(request.POST.get('ps_save_id')) if request.POST.get('ps_save_id') != 'None' else None
rrule = RRule.objects.get(pk=int(request.POST.get('ps_save_rrule_id')))
show = Show.objects.get(pk=show_id)
byweekday = int(request.POST.get('ps_save_byweekday'))
tstart = datetime.strptime(request.POST.get('ps_save_tstart'), '%H:%M').time()
tend = datetime.strptime(request.POST.get('ps_save_tend'), '%H:%M').time()
dstart = datetime.strptime(request.POST.get('ps_save_dstart'), '%Y-%m-%d').date()
if dstart < datetime.today().date(): # Create or delete upcoming timeslots only
dstart = datetime.today().date()
until = datetime.strptime(request.POST.get('ps_save_until'), '%Y-%m-%d').date()
is_repetition = request.POST.get('ps_save_is_repetition')
automation_id = int(request.POST.get('ps_save_automation_id')) if request.POST.get('ps_save_automation_id') != 'None' else 0
fallback_id = int(request.POST.get('ps_save_fallback_id')) if request.POST.get('ps_save_fallback_id') != 'None' else 0
# Put timeslot POST vars into lists with same indices
for i in range(num_inputs):
resolved_ts = request.POST.get('resolved_timeslots[' + str(i) + ']')
if resolved_ts != None:
resolved_timeslots.append( resolved_ts )
create_timeslots.append( request.POST.get('create_timeslots[' + str(i) + ']') ) # May contain None
collisions.append( request.POST.get('collisions[' + str(i) + ']') ) # May contain None
else:
num_inputs -= 1
'''Prepare resolved timeslots'''
# Separate timeslots to delete from those to create
keep_collisions = []
for x in range(num_inputs):
if resolved_timeslots[x] == None or resolved_timeslots[x].isdigit():
# If it's a digit, keep the existing timeslot by preventing the new one from being created
create_timeslots[x] = None
keep_collisions.append(int(collisions[x]))
else:
# Otherwise collect the timeslot ids to be deleted later
if len(collisions[x]) > 0:
delete_timeslots.add(int(collisions[x]))
# Collect IDs of upcoming timeslots of the same schedule to delete except those in keep_collision
if schedule_id != None:
for ts in TimeSlot.objects.filter(start__gte=dstart,end__lte=until,schedule_id=schedule_id).exclude(pk__in=keep_collisions).values_list('id', flat=True):
delete_timeslots.add(ts)
'''Save schedule'''
new_schedule = Schedule(pk=schedule_id,
rrule=rrule,
byweekday=byweekday,
show=show,
dstart=dstart,
tstart=tstart,
tend=tend,
until=until,
is_repetition=is_repetition,
automation_id=automation_id,
fallback_id=fallback_id)
# Only save schedule if any timeslots changed
if len(resolved_timeslots) > 0:
new_schedule.save()
'''Relink notes to existing timeslots and prepare those to be linked'''
# Relink notes with existing timeslot ids
for i in range(num_ntids):
try:
note = Note.objects.get(pk=int(request.POST.get('ntids[' + str(i) + '][note_id]')))
note.timeslot_id = int(request.POST.get('ntids[' + str(i) + '][id]'))
note.save(update_fields=["timeslot_id"])
print("Rewrote note " + str(note.id) + "...to timeslot_id " + str(note.timeslot_id))
except ObjectDoesNotExist:
pass
# Put list indices of yet to be created timeslots and note_ids in corresponding lists to relink them during creation
note_indices = []
note_ids = []
for i in range(num_ntind):
note_indices.append( int(request.POST.get('ntind[' + str(i) + '][id]')) )
note_ids.append( int(request.POST.get('ntind[' + str(i) + '][note_id]')) )
'''Database changes for resolved timeslots and relinked notes for newly created'''
for idx, ts in enumerate(create_timeslots):
if ts != None:
start_end = ts.split(' - ')
# Only create upcoming timeslots
if datetime.strptime(start_end[0], "%Y-%m-%d %H:%M:%S") > datetime.today():
timeslot_created = TimeSlot.objects.create(schedule=new_schedule, is_repetition=new_schedule.is_repetition, start=start_end[0], end=start_end[1])
# Link a note to the new timeslot
if idx in note_indices:
note_idx = note_indices.index( idx ) # Get the note_id's index...
note_id = note_ids[note_idx] # ...which contains the note_id to relate to
try:
note = Note.objects.get(pk=note_id)
note.timeslot_id = timeslot_created.id
note.save(update_fields=["timeslot_id"])
print("Timeslot " + str(timeslot_created.id) + " linked to note " + str(note_id))
except ObjectDoesNotExist:
pass
# Finally delete discarded timeslots
for timeslot_id in delete_timeslots:
TimeSlot.objects.filter(pk=timeslot_id).delete()
if step > max_steps:
self.end_reached = True
'''
Everything below here is called when a new collision is loaded before being handed over to the client
'''
# Generate timeslots from current schedule
k = 1
for instance in schedule_instances:
if isinstance(instance, Schedule):
if k == step:
timeslots = Schedule.generate_timeslots(instance)
schedule = instance
break
k += 1
# Get collisions for timeslots
collisions = Schedule.get_collisions(timeslots)
# Get notes of colliding timeslots
notes = []
for id in collisions:
try:
notes.append( Note.objects.get(timeslot_id=id) )
except ObjectDoesNotExist:
pass
self.schedule = schedule
self.timeslots = timeslots
self.collisions = collisions
self.num_collisions = len([ s for s in self.collisions if s != 'None']) # Number of real collisions displayed to the user
self.notes = notes
self.showform = form
self.schedulesform = formset
self.step = step + 1 # Becomes upcoming step
self.max_steps = max_steps
# Pass it on to response_add() or response_change()
return self
def response_add(self, request, obj):
return ShowAdmin.respond(self, request, obj)
def response_change(self, request, obj):
return ShowAdmin.respond(self, request, obj)
def respond(self, request, obj):
"""
        Redirects to the show-change-form if no schedules changed or resolving has been finished (or any other form validation error occurred)
Displays the collision form for the current schedule otherwise
"""
# Never check for collisions if not superuser
# Common users can't edit the formset, so save_formset() will never be called thus end_reached wasn't set yet
if not request.user.is_superuser:
self.end_reached = True
if self.end_reached:
return super(ShowAdmin, self).response_change(request, obj)
timeslots_to_collisions = list(zip(self.timeslots, self.collisions))
return render(request, 'collisions.html', {'self' : self, 'obj': obj, 'request': request,
'timeslots': self.timeslots,
'collisions': self.collisions,
'schedule': self.schedule,
'timeslots_to_collisions': timeslots_to_collisions,
'schedulesform': self.schedulesform,
'showform': self.showform,
'num_inputs': len(self.timeslots),
'step': self.step,
'max_steps': self.max_steps,
'now': datetime.now(),
'num_collisions': self.num_collisions})
admin.site.register(Language, LanguageAdmin)
admin.site.register(Type, TypeAdmin)
admin.site.register(MusicFocus, MusicFocusAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Topic, TopicAdmin)
admin.site.register(RTRCategory, RTRCategoryAdmin)
admin.site.register(Host, HostAdmin)
admin.site.register(Note, NoteAdmin)
#admin.site.register(Schedule, ScheduleAdmin)
admin.site.register(TimeSlot, TimeSlotAdmin)
admin.site.register(Show, ShowAdmin) | from django.core.exceptions import ObjectDoesNotExist
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import render
from django.conf import settings
from .models import Language, Type, MusicFocus, Category, Topic, RTRCategory, Host, Note, RRule, Schedule, Show, TimeSlot
from .forms import MusicFocusForm
from datetime import date, datetime, time, timedelta
class ActivityFilter(admin.SimpleListFilter):
title = _("Activity")
def lookups(self, request, model_admin):
return (
('yes', _("active")),
('no', _("inactive"))
)
def queryset(self, request, queryset):
if self.parameter_name == 'has_timeslots': # active/inactive Schedules
if self.value() == 'yes':
return queryset.filter(until__gt=datetime.now()).distinct()
if self.value() == 'no':
return queryset.filter(until__lt=datetime.now()).distinct()
if self.parameter_name == 'has_schedules_timeslots': # active/inactive Shows
if self.value() == 'yes':
return queryset.filter(schedules__until__gt=datetime.now()).distinct()
if self.value() == 'no':
return queryset.filter(schedules__until__lt=datetime.now()).distinct()
if self.parameter_name == 'has_shows_schedules_timeslots': # active/inactive Hosts
if self.value() == 'yes':
return queryset.filter(shows__schedules__until__gt=datetime.now()).distinct()
if self.value() == 'no':
return queryset.filter(shows__schedules__until__lt=datetime.now()).distinct()
class ActiveSchedulesFilter(ActivityFilter):
parameter_name = 'has_timeslots'
class ActiveShowsFilter(ActivityFilter):
parameter_name = 'has_schedules_timeslots'
class ActiveHostsFilter(ActivityFilter):
parameter_name = 'has_shows_schedules_timeslots'
class TypeAdmin(admin.ModelAdmin):
list_display = ('type', 'admin_color', 'is_active')
list_filter = ('is_active',)
prepopulated_fields = {'slug': ('type',)}
class MusicFocusAdmin(admin.ModelAdmin):
form = MusicFocusForm
list_display = ('focus', 'abbrev', 'admin_buttons', 'is_active')
list_filter = ('is_active',)
prepopulated_fields = {'slug': ('focus',)}
class CategoryAdmin(admin.ModelAdmin):
list_display = ('category', 'abbrev', 'admin_buttons', 'is_active')
list_filter = ('is_active',)
prepopulated_fields = {'slug': ('category',)}
class LanguageAdmin(admin.ModelAdmin):
list_display = ('name', 'is_active')
list_filter = ('is_active',)
class TopicAdmin(admin.ModelAdmin):
list_display = ('topic', 'abbrev', 'admin_buttons', 'is_active')
list_filter = ('is_active',)
prepopulated_fields = {'slug': ('topic',)}
class RTRCategoryAdmin(admin.ModelAdmin):
list_display = ('rtrcategory', 'abbrev', 'is_active' )
list_filter = ('is_active',)
prepopulated_fields = {'slug': ('rtrcategory',)}
class HostAdmin(admin.ModelAdmin):
list_display = ('name', 'email', 'is_active')
list_filter = (ActiveHostsFilter, 'is_active',)
def get_queryset(self, request):
if request.user.is_superuser:
return Host.objects.all()
# Common users only see hosts of shows they own
return Host.objects.filter(shows__in=request.user.shows.all()).distinct()
class NoteAdmin(admin.ModelAdmin):
date_hierarchy = 'start'
list_display = ('title', 'show', 'start', 'status', 'user')
fields = (( 'show', 'timeslot'), 'title', 'slug', 'summary', 'content', 'image', 'host', 'status', 'cba_id')
prepopulated_fields = {'slug': ('title',)}
list_filter = ('status',)
ordering = ('timeslot',)
save_as = True
class Media:
js = [ settings.MEDIA_URL + 'js/calendar/lib/moment.min.js',
settings.MEDIA_URL + 'js/note_change.js', ]
def get_queryset(self, request):
if request.user.is_superuser:
shows = Show.objects.all()
else:
            # Common users only see notes of shows they own
shows = request.user.shows.all()
return super(NoteAdmin, self).get_queryset(request).filter(show__in=shows)
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
four_weeks_ago = datetime.now() - timedelta(weeks=4)
in_twelve_weeks = datetime.now() + timedelta(weeks=12)
if db_field.name == 'timeslot':
# Adding/Editing a note: load timeslots of the user's shows into the dropdown
# TODO: Don't show any timeslot in the select by default.
# User should first choose show, then timeslots are loaded into the select via ajax.
#
# How to do this while not constraining the queryset?
# Saving won't be possible otherwise, if queryset doesn't contain the selectable elements beforehand
#kwargs['queryset'] = TimeSlot.objects.filter(show=-1)
# Superusers see every timeslot for every show
if request.user.is_superuser:
kwargs['queryset'] = TimeSlot.objects.filter(start__gt=four_weeks_ago,
start__lt=in_twelve_weeks) # note__isnull=True
# Users see timeslots of shows they own
else:
kwargs['queryset'] = TimeSlot.objects.filter(show__in=request.user.shows.all(), start__gt=four_weeks_ago,
start__lt=in_twelve_weeks) # note__isnull=True
if db_field.name == 'show':
# Adding/Editing a note: load user's shows into the dropdown
# Common users only see shows they own
if not request.user.is_superuser:
kwargs['queryset'] = Show.objects.filter(pk__in=request.user.shows.all(), is_active=True)
if db_field.name == 'host':
# Common users only see hosts of shows they own
if not request.user.is_superuser:
kwargs['queryset'] = Host.objects.filter(shows__in=request.user.shows.all(), is_active=True).distinct()
return super(NoteAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def save_model(self, request, obj, form, change):
# Save the creator when adding a note
if not change:
obj.user = request.user
# Try to get direct audio URL from CBA
obj.audio_url = Note.get_audio_url(obj.cba_id)
obj.save()
class TimeSlotInline(admin.TabularInline):
model = TimeSlot
ordering = ('-end',)
class TimeSlotAdmin(admin.ModelAdmin):
model = TimeSlot
class ScheduleAdmin(admin.ModelAdmin):
actions = ('renew',)
inlines = (TimeSlotInline,)
fields = (('rrule', 'byweekday'), ('dstart', 'tstart', 'tend'), 'until', 'is_repetition', 'automation_id', 'fallback_id')
list_display = ('get_show_name', 'byweekday', 'rrule', 'tstart', 'tend', 'until')
list_filter = (ActiveSchedulesFilter, 'byweekday', 'rrule', 'is_repetition')
ordering = ('byweekday', 'dstart')
save_on_top = True
search_fields = ('show__name',)
def renew(self, request, queryset):
next_year = date.today().year + 1
until = date(next_year, 12, 31)
renewed = queryset.update(until=until)
if renewed == 1:
message = _("1 schedule was renewed until %s") % until
else:
message = _("%s schedule were renewed until %s") % (renewed, until)
self.message_user(request, message)
renew.short_description = _("Renew selected schedules")
def get_show_name(self, obj):
return obj.show.name
get_show_name.admin_order_field = 'show'
get_show_name.short_description = "Show"
class ScheduleInline(admin.TabularInline):
model = Schedule
ordering = ('pk', '-until', 'byweekday')
class ShowAdmin(admin.ModelAdmin):
filter_horizontal = ('hosts', 'owners', 'musicfocus', 'category', 'topic', 'language')
inlines = (ScheduleInline,)
list_display = ('name', 'short_description')
list_filter = (ActiveShowsFilter, 'type', 'category', 'topic', 'musicfocus', 'rtrcategory', 'language')
ordering = ('slug',)
prepopulated_fields = {'slug': ('name',)}
search_fields = ('name', 'short_description', 'description')
fields = (
'predecessor', 'type', 'name', 'slug', 'image', 'logo', 'short_description', 'description',
'email', 'website', 'hosts', 'owners', 'language', 'category', 'rtrcategory', 'topic',
'musicfocus', 'fallback_id', 'cba_series_id',
)
class Media:
js = [ settings.MEDIA_URL + 'js/calendar/lib/moment.min.js',
settings.MEDIA_URL + 'js/show_change.js', ]
css = { 'all': ('/program/styles.css',) }
def get_queryset(self, request):
if request.user.is_superuser:
# Superusers see all shows
shows = Show.objects.all()
else:
# Users only see shows they own
shows = request.user.shows.all()
return super(ShowAdmin, self).get_queryset(request).filter(pk__in=shows)
def get_readonly_fields(self, request, obj=None):
'''Limit field access for common users'''
if not request.user.is_superuser:
# TODO: how to set field 'name' readonly although it's required?
return ('predecessor', 'type', 'hosts', 'owners', 'language', 'category', 'topic', 'musicfocus', 'rtrcategory')
return list()
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
try:
show_id = int(request.get_full_path().split('/')[-2])
except ValueError:
show_id = None
print(db_field.name)
if db_field.name == 'predecessor' and show_id:
kwargs['queryset'] = Show.objects.exclude(pk=show_id)
if db_field.name == 'type':
kwargs['queryset'] = Type.objects.filter(is_active=True)
if db_field.name == 'rtrcategory':
kwargs['queryset'] = RTRCategory.objects.filter(is_active=True)
return super(ShowAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def formfield_for_manytomany(self, db_field, request, **kwargs):
if db_field.name == 'hosts':
kwargs["queryset"] = Host.objects.filter(is_active=True)
if db_field.name == 'language':
kwargs["queryset"] = Language.objects.filter(is_active=True)
if db_field.name == 'category':
kwargs["queryset"] = Category.objects.filter(is_active=True)
if db_field.name == 'topic':
kwargs["queryset"] = Topic.objects.filter(is_active=True)
if db_field.name == 'musicfocus':
kwargs["queryset"] = MusicFocus.objects.filter(is_active=True)
return super(ShowAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)
def save_formset(self, request, form, formset, change):
"""
        Is called after the "save show"-form or collision-form was submitted
Saves the show after first submit
If any changes in schedules happened
* added/changed schedules are used to generate new timeslots and
matched against existing ones, which will be displayed in the collision form
If a collision form was submitted
* save the current schedule
* delete/create timeslots and relink notes after confirmation
Each step passes on to response_add or response_change which will
* either display the collision form for the next step
* or redirect to the original show-form if the resolving process has been finished
(= if either max_steps was surpassed or end_reached was True)
"""
self.end_reached = False
schedule_instances = formset.save(commit=False)
# If there are no schedules to save, do nothing
if schedule_instances:
show_id = schedule_instances[0].show.id
else:
self.end_reached = True
schedule = []
timeslots = []
max_steps = int(len(schedule_instances)) if len(schedule_instances) > 0 else 1
step = 1
if request.POST.get('step') == None:
# First save-show submit
# Generate thumbnails
if form.instance.image.name and settings.THUMBNAIL_SIZES:
for size in settings.THUMBNAIL_SIZES:
thumbnail = form.instance.image.crop[size].name
# Save show data only
            form.save()
# Delete schedules (as well as related timeslots and notes) if flagged as such
for obj in formset.deleted_objects:
obj.delete()
# If nothing else changed, do nothing and redirect to show-form
if not formset.changed_objects and not formset.new_objects:
self.end_reached = True
else:
# If a collision form was submitted
step = int(request.POST.get('step'))
if request.POST.get('num_inputs') != None and int(request.POST.get('num_inputs')) > 0:
print("Resolving conflicts...")
'''Declare and retrieve variables'''
# Either datetimes as string (e.g. '2017-01-01 00:00:00 - 2017-01-01 01:00:00') to create
# or ints of colliding timeslots to keep otherwise
resolved_timeslots = []
# IDs of colliding timeslots found in the db. If there's no corresponding collision to the
# same index in create_timeslot, value will be None
collisions = []
# Datetimes as string (e.g. '2017-01-01 00:00:00 - 2017-01-01 01:00:00') for timeslots to create
create_timeslots = []
# IDs of timeslots to delete
delete_timeslots = set()
# Number of timeslots to be generated
num_inputs = int(request.POST.get('num_inputs'))
# Numbers of notes to relink for existing timeslots and newly created ones
# each of them relating to one of these POST vars:
# POST.ntids[idx][id] and POST.ntids[idx][note_id] contain ids of existing timeslots and note_ids to link, while
# POST.ntind[idx][id] and POST.ntind[idx][note_id] contain indices of corresponding elements in create_timeslots
# and note_ids which will be linked after they're created and thus split into two lists beforehand
num_ntids = int(request.POST.get('num_ntids'))
num_ntind = int(request.POST.get('num_ntind'))
# Retrieve POST vars of current schedule
schedule_id = int(request.POST.get('ps_save_id')) if request.POST.get('ps_save_id') != 'None' else None
rrule = RRule.objects.get(pk=int(request.POST.get('ps_save_rrule_id')))
show = Show.objects.get(pk=show_id)
byweekday = int(request.POST.get('ps_save_byweekday'))
tstart = datetime.strptime(request.POST.get('ps_save_tstart'), '%H:%M').time()
tend = datetime.strptime(request.POST.get('ps_save_tend'), '%H:%M').time()
dstart = datetime.strptime(request.POST.get('ps_save_dstart'), '%Y-%m-%d').date()
if dstart < datetime.today().date(): # Create or delete upcoming timeslots only
dstart = datetime.today().date()
until = datetime.strptime(request.POST.get('ps_save_until'), '%Y-%m-%d').date()
is_repetition = request.POST.get('ps_save_is_repetition')
automation_id = int(request.POST.get('ps_save_automation_id')) if request.POST.get('ps_save_automation_id') != 'None' else 0
fallback_id = int(request.POST.get('ps_save_fallback_id')) if request.POST.get('ps_save_fallback_id') != 'None' else 0
# Put timeslot POST vars into lists with same indices
for i in range(num_inputs):
resolved_ts = request.POST.get('resolved_timeslots[' + str(i) + ']')
if resolved_ts != None:
resolved_timeslots.append( resolved_ts )
create_timeslots.append( request.POST.get('create_timeslots[' + str(i) + ']') ) # May contain None
collisions.append( request.POST.get('collisions[' + str(i) + ']') ) # May contain None
else:
num_inputs -= 1
'''Prepare resolved timeslots'''
# Separate timeslots to delete from those to create
keep_collisions = []
for x in range(num_inputs):
if resolved_timeslots[x] == None or resolved_timeslots[x].isdigit():
# If it's a digit, keep the existing timeslot by preventing the new one from being created
create_timeslots[x] = None
keep_collisions.append(int(collisions[x]))
else:
# Otherwise collect the timeslot ids to be deleted later
if len(collisions[x]) > 0:
delete_timeslots.add(int(collisions[x]))
# Collect IDs of upcoming timeslots of the same schedule to delete except those in keep_collision
if schedule_id != None:
for ts in TimeSlot.objects.filter(start__gte=dstart,end__lte=until,schedule_id=schedule_id).exclude(pk__in=keep_collisions).values_list('id', flat=True):
delete_timeslots.add(ts)
'''Save schedule'''
new_schedule = Schedule(pk=schedule_id,
rrule=rrule,
byweekday=byweekday,
show=show,
dstart=dstart,
tstart=tstart,
tend=tend,
until=until,
is_repetition=is_repetition,
automation_id=automation_id,
fallback_id=fallback_id)
# Only save schedule if any timeslots changed
if len(resolved_timeslots) > 0:
new_schedule.save()
'''Relink notes to existing timeslots and prepare those to be linked'''
# Relink notes with existing timeslot ids
for i in range(num_ntids):
try:
note = Note.objects.get(pk=int(request.POST.get('ntids[' + str(i) + '][note_id]')))
note.timeslot_id = int(request.POST.get('ntids[' + str(i) + '][id]'))
note.save(update_fields=["timeslot_id"])
print("Rewrote note " + str(note.id) + "...to timeslot_id " + str(note.timeslot_id))
except ObjectDoesNotExist:
pass
# Put list indices of yet to be created timeslots and note_ids in corresponding lists to relink them during creation
note_indices = []
note_ids = []
for i in range(num_ntind):
note_indices.append( int(request.POST.get('ntind[' + str(i) + '][id]')) )
note_ids.append( int(request.POST.get('ntind[' + str(i) + '][note_id]')) )
'''Database changes for resolved timeslots and relinked notes for newly created'''
for idx, ts in enumerate(create_timeslots):
if ts != None:
start_end = ts.split(' - ')
# Only create upcoming timeslots
if datetime.strptime(start_end[0], "%Y-%m-%d %H:%M:%S") > datetime.today():
timeslot_created = TimeSlot.objects.create(schedule=new_schedule, is_repetition=new_schedule.is_repetition, start=start_end[0], end=start_end[1])
# Link a note to the new timeslot
if idx in note_indices:
note_idx = note_indices.index( idx ) # Get the note_id's index...
note_id = note_ids[note_idx] # ...which contains the note_id to relate to
try:
note = Note.objects.get(pk=note_id)
note.timeslot_id = timeslot_created.id
note.save(update_fields=["timeslot_id"])
print("Timeslot " + str(timeslot_created.id) + " linked to note " + str(note_id))
except ObjectDoesNotExist:
pass
# Finally delete discarded timeslots
for timeslot_id in delete_timeslots:
TimeSlot.objects.filter(pk=timeslot_id).delete()
if step > max_steps:
self.end_reached = True
'''
Everything below here is called when a new collision is loaded before being handed over to the client
'''
# Generate timeslots from current schedule
k = 1
for instance in schedule_instances:
if isinstance(instance, Schedule):
if k == step:
timeslots = Schedule.generate_timeslots(instance)
schedule = instance
break
k += 1
# Get collisions for timeslots
collisions = Schedule.get_collisions(timeslots)
# Get notes of colliding timeslots
notes = []
for id in collisions:
try:
notes.append( Note.objects.get(timeslot_id=id) )
except ObjectDoesNotExist:
pass
self.schedule = schedule
self.timeslots = timeslots
self.collisions = collisions
self.num_collisions = len([ s for s in self.collisions if s != 'None']) # Number of real collisions displayed to the user
self.notes = notes
self.showform = form
self.schedulesform = formset
self.step = step + 1 # Becomes upcoming step
self.max_steps = max_steps
# Pass it on to response_add() or response_change()
return self
def response_add(self, request, obj):
return ShowAdmin.respond(self, request, obj)
def response_change(self, request, obj):
return ShowAdmin.respond(self, request, obj)
def respond(self, request, obj):
"""
        Redirects to the show-change-form if no schedules changed or resolving has been finished (or any other form validation error occurred)
Displays the collision form for the current schedule otherwise
"""
# Never check for collisions if not superuser
# Common users can't edit the formset, so save_formset() will never be called thus end_reached wasn't set yet
if not request.user.is_superuser:
self.end_reached = True
if self.end_reached:
return super(ShowAdmin, self).response_change(request, obj)
timeslots_to_collisions = list(zip(self.timeslots, self.collisions))
return render(request, 'collisions.html', {'self' : self, 'obj': obj, 'request': request,
'timeslots': self.timeslots,
'collisions': self.collisions,
'schedule': self.schedule,
'timeslots_to_collisions': timeslots_to_collisions,
'schedulesform': self.schedulesform,
'showform': self.showform,
'num_inputs': len(self.timeslots),
'step': self.step,
'max_steps': self.max_steps,
'now': datetime.now(),
'num_collisions': self.num_collisions})
admin.site.register(Language, LanguageAdmin)
admin.site.register(Type, TypeAdmin)
admin.site.register(MusicFocus, MusicFocusAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Topic, TopicAdmin)
admin.site.register(RTRCategory, RTRCategoryAdmin)
admin.site.register(Host, HostAdmin)
admin.site.register(Note, NoteAdmin)
#admin.site.register(Schedule, ScheduleAdmin)
admin.site.register(TimeSlot, TimeSlotAdmin)
admin.site.register(Show, ShowAdmin) | en | 0.86693 | # active/inactive Schedules # active/inactive Shows # active/inactive Hosts # Common users only see hosts of shows they own # Commons users only see notes of shows they own # Adding/Editing a note: load timeslots of the user's shows into the dropdown # TODO: Don't show any timeslot in the select by default. # User should first choose show, then timeslots are loaded into the select via ajax. # # How to do this while not constraining the queryset? # Saving won't be possible otherwise, if queryset doesn't contain the selectable elements beforehand #kwargs['queryset'] = TimeSlot.objects.filter(show=-1) # Superusers see every timeslot for every show # note__isnull=True # Users see timeslots of shows they own # note__isnull=True # Adding/Editing a note: load user's shows into the dropdown # Common users only see shows they own # Common users only see hosts of shows they own # Save the creator when adding a note # Try to get direct audio URL from CBA # Superusers see all shows # Users only see shows they own Limit field access for common users # TODO: how to set field 'name' readonly although it's required? Is called after the "save show"-form or collision-form were submitted Saves the show after first submit If any changes in schedules happened * added/changed schedules are used to generate new timeslots and matched against existing ones, which will be displayed in the collision form If a collision form was submitted * save the current schedule * delete/create timeslots and relink notes after confirmation Each step passes on to response_add or response_change which will * either display the collision form for the next step * or redirect to the original show-form if the resolving process has been finished (= if either max_steps was surpassed or end_reached was True) # If there are no schedules to save, do nothing # First save-show submit # Generate thumbnails # Save show data only # Delete schedules (as well as related timeslots and notes) if flagged as such # If nothing else changed, do nothing and redirect to show-form # If a collision form was submitted Declare and retrieve variables # Either datetimes as string (e.g. '2017-01-01 00:00:00 - 2017-01-01 01:00:00') to create # or ints of colliding timeslots to keep otherwise # IDs of colliding timeslots found in the db. If there's no corresponding collision to the # same index in create_timeslot, value will be None # Datetimes as string (e.g. 
'2017-01-01 00:00:00 - 2017-01-01 01:00:00') for timeslots to create # IDs of timeslots to delete # Number of timeslots to be generated # Numbers of notes to relink for existing timeslots and newly created ones # each of them relating to one of these POST vars: # POST.ntids[idx][id] and POST.ntids[idx][note_id] contain ids of existing timeslots and note_ids to link, while # POST.ntind[idx][id] and POST.ntind[idx][note_id] contain indices of corresponding elements in create_timeslots # and note_ids which will be linked after they're created and thus split into two lists beforehand # Retrieve POST vars of current schedule # Create or delete upcoming timeslots only # Put timeslot POST vars into lists with same indices # May contain None # May contain None Prepare resolved timeslots # Separate timeslots to delete from those to create # If it's a digit, keep the existing timeslot by preventing the new one from being created # Otherwise collect the timeslot ids to be deleted later # Collect IDs of upcoming timeslots of the same schedule to delete except those in keep_collision Save schedule # Only save schedule if any timeslots changed Relink notes to existing timeslots and prepare those to be linked # Relink notes with existing timeslot ids # Put list indices of yet to be created timeslots and note_ids in corresponding lists to relink them during creation Database changes for resolved timeslots and relinked notes for newly created # Only create upcoming timeslots # Link a note to the new timeslot # Get the note_id's index... # ...which contains the note_id to relate to # Finally delete discarded timeslots Everything below here is called when a new collision is loaded before being handed over to the client # Generate timeslots from current schedule # Get collisions for timeslots # Get notes of colliding timeslots # Number of real collisions displayed to the user # Becomes upcoming step # Pass it on to response_add() or response_change() Redirects to the show-change-form if no schedules changed or resolving has been finished (or any other form validation error occured) Displays the collision form for the current schedule otherwise # Never check for collisions if not superuser # Common users can't edit the formset, so save_formset() will never be called thus end_reached wasn't set yet #admin.site.register(Schedule, ScheduleAdmin) | 2.039994 | 2 |
test/logic/test_block_features.py | Sam-prog-sudo/Sam.github.io | 3 | 10774 | <gh_stars>1-10
import hashlib
import json
from time import time
import pytest
from app.chaine.blockchain import Blockchain
@pytest.fixture
def first_block():
return {
'index': 1,
'timestamp': time(),
'transactions': [],
'proof': 1989,
'previous_hash': 1,
}
def test_initialization_blockchain(first_block):
bc = Blockchain()
assert bc.chain[0]['index'] == first_block['index']
assert isinstance(
bc.chain[0]['timestamp'],
type(first_block['timestamp'])
)
assert bc.chain[0]['transactions'] == first_block['transactions']
assert bc.chain[0]['proof'] == first_block['proof']
assert bc.chain[0]['previous_hash'] == first_block['previous_hash']
def test_last_block():
bc = Blockchain()
assert bc.last_block == bc.chain[-1]
@pytest.fixture
def a_valid_block():
block_1 = {
'index': 2,
'timestamp': time(),
'transactions': [],
'proof': 123,
'previous_hash': 'abc',
}
return block_1
@pytest.fixture
def an_invalid_block():
block_2 = {
'index': 'salut',
'timestamp': list('cava',),
'transactions': 22,
'proof': None,
'previous_hash': 46,
}
return block_2
@pytest.mark.parametrize('some_blocks', [
    'a_valid_block',
    'an_invalid_block'
    ]
)
def test_hachage(some_blocks, request):
    # Resolve the fixture named by the parameter so the test hashes the
    # block dict itself instead of the fixture's name string.
    block = request.getfixturevalue(some_blocks)
    bc = Blockchain()
    block_json = json.dumps(
        block,
        sort_keys=True
    ).encode()
    hash_test = hashlib.sha256(block_json).hexdigest()
    assert len(hash_test) == 64
    assert isinstance(
        hash_test,
        type(bc.hachage(block))
    )
    assert hash_test == bc.hachage(block)
def test_block_creation(a_valid_block, proof=123, previous_hash='abc'):
bc = Blockchain()
block_a_tester = bc.new_block(proof, previous_hash)
assert block_a_tester['index'] == a_valid_block['index']
assert isinstance(
block_a_tester['timestamp'],
type(a_valid_block['timestamp'])
)
assert block_a_tester['proof'] == a_valid_block['proof']
assert block_a_tester['previous_hash'] == a_valid_block['previous_hash']
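def test_hachage_is_deterministic(a_valid_block):
    # Sketch of an extra check, not part of the original suite: hashing the
    # same block twice must yield the same digest. It relies only on the
    # Blockchain class and the a_valid_block fixture defined above.
    bc = Blockchain()
    assert bc.hachage(a_valid_block) == bc.hachage(a_valid_block)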
| import hashlib
import json
from time import time
import pytest
from app.chaine.blockchain import Blockchain
@pytest.fixture
def first_block():
return {
'index': 1,
'timestamp': time(),
'transactions': [],
'proof': 1989,
'previous_hash': 1,
}
def test_initialization_blockchain(first_block):
bc = Blockchain()
assert bc.chain[0]['index'] == first_block['index']
assert isinstance(
bc.chain[0]['timestamp'],
type(first_block['timestamp'])
)
assert bc.chain[0]['transactions'] == first_block['transactions']
assert bc.chain[0]['proof'] == first_block['proof']
assert bc.chain[0]['previous_hash'] == first_block['previous_hash']
def test_last_block():
bc = Blockchain()
assert bc.last_block == bc.chain[-1]
@pytest.fixture
def a_valid_block():
block_1 = {
'index': 2,
'timestamp': time(),
'transactions': [],
'proof': 123,
'previous_hash': 'abc',
}
return block_1
@pytest.fixture
def an_invalid_block():
block_2 = {
'index': 'salut',
'timestamp': list('cava',),
'transactions': 22,
'proof': None,
'previous_hash': 46,
}
return block_2
@pytest.mark.parametrize('some_blocks', [
    'a_valid_block',
    'an_invalid_block'
    ]
)
def test_hachage(some_blocks, request):
    # Resolve the fixture named by the parameter so the test hashes the
    # block dict itself instead of the fixture's name string.
    block = request.getfixturevalue(some_blocks)
    bc = Blockchain()
    block_json = json.dumps(
        block,
        sort_keys=True
    ).encode()
    hash_test = hashlib.sha256(block_json).hexdigest()
    assert len(hash_test) == 64
    assert isinstance(
        hash_test,
        type(bc.hachage(block))
    )
    assert hash_test == bc.hachage(block)
def test_block_creation(a_valid_block, proof=123, previous_hash='abc'):
bc = Blockchain()
block_a_tester = bc.new_block(proof, previous_hash)
assert block_a_tester['index'] == a_valid_block['index']
assert isinstance(
block_a_tester['timestamp'],
type(a_valid_block['timestamp'])
)
assert block_a_tester['proof'] == a_valid_block['proof']
assert block_a_tester['previous_hash'] == a_valid_block['previous_hash'] | none | 1 | 2.210034 | 2 |
|
urls.py | cartologic/cartoview_graduated_styler | 0 | 10775 | <gh_stars>0
# from django.conf.urls import patterns, url, include
# from django.views.generic import TemplateView
# from . import views, APP_NAME
#
# urlpatterns = patterns('',
# url(r'^$', views.index, name='%s.index' % APP_NAME),
# )
from django.urls import path, re_path, include
from . import views, APP_NAME
from .api import LayerResource
from tastypie.api import Api
Resources_api = Api(api_name="api")
Resources_api.register(LayerResource())
urlpatterns = [
re_path(r'^$', views.index, name='%s.index' % APP_NAME),
path('styles/<str:layername>/', views.layer_styles, name='%s.layer_styles' % APP_NAME),
path('styles/save/<str:layer_name>/<str:style_name>', views.save_style, name='%s.save_style' % APP_NAME),
re_path(r'^proxy/geoserver/rest/(?P<suburl>.*)$', views.geoserver_rest_proxy, name='%s.proxy' % APP_NAME),
re_path(r'^', include(Resources_api.urls)),
]
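# Illustrative sketch (the layer name 'roads' is an assumption, not from this
# repo): these named routes can be resolved with Django's reverse(), e.g.
#   from django.urls import reverse
#   reverse('%s.layer_styles' % APP_NAME, kwargs={'layername': 'roads'})
# which yields a path such as '/styles/roads/' relative to wherever this
# urls.py module is included.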
| # from django.conf.urls import patterns, url, include
# from django.views.generic import TemplateView
# from . import views, APP_NAME
#
# urlpatterns = patterns('',
# url(r'^$', views.index, name='%s.index' % APP_NAME),
# )
from django.urls import path, re_path, include
from . import views, APP_NAME
from .api import LayerResource
from tastypie.api import Api
Resources_api = Api(api_name="api")
Resources_api.register(LayerResource())
urlpatterns = [
re_path(r'^$', views.index, name='%s.index' % APP_NAME),
path('styles/<str:layername>/', views.layer_styles, name='%s.layer_styles' % APP_NAME),
path('styles/save/<str:layer_name>/<str:style_name>', views.save_style, name='%s.save_style' % APP_NAME),
re_path(r'^proxy/geoserver/rest/(?P<suburl>.*)$', views.geoserver_rest_proxy, name='%s.proxy' % APP_NAME),
re_path(r'^', include(Resources_api.urls)),
] | en | 0.578693 | # from django.conf.urls import patterns, url, include # from django.views.generic import TemplateView # from . import views, APP_NAME # # urlpatterns = patterns('', # url(r'^$', views.index, name='%s.index' % APP_NAME), # ) | 1.956869 | 2 |
core/rest/wscdn.py | cybert79/Osmedeus | 1 | 10776 | import os
import glob
import json
from pathlib import Path
from flask_restful import Api, Resource, reqparse
from flask_jwt_extended import jwt_required
from flask import Flask, request, escape, make_response, send_from_directory
import utils
# in case you can't install ansi2html, its absence won't break the api
try:
from ansi2html import Ansi2HTMLConverter
except ImportError:
pass
current_path = os.path.dirname(os.path.realpath(__file__))
'''
render stdout content
'''
class Wscdn(Resource):
def verify_file(self, filename):
option_files = glob.glob(
current_path + '/storages/**/options.json', recursive=True)
        # loop through all available options
for option in option_files:
json_option = utils.reading_json(option)
stdout_path = json_option.get('WORKSPACES') + "/" + filename
if utils.not_empty_file(stdout_path):
return json_option.get('WORKSPACES'), os.path.normpath(filename)
# get real path
p = Path(filename)
ws = p.parts[0]
if ws != utils.url_encode(ws):
# just replace the first one
filename_encode = filename.replace(ws, utils.url_encode(ws), 1)
stdout_path_encode = json_option.get('WORKSPACES') + filename_encode
if utils.not_empty_file(stdout_path_encode):
return json_option.get('WORKSPACES'), os.path.normpath(filename_encode)
return False, False
def get(self, filename):
ws_path, stdout_path = self.verify_file(filename)
if not stdout_path:
return 'Custom 404 here', 404
return send_from_directory(ws_path, stdout_path)
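# Minimal wiring sketch (app/route names are assumptions, not from this repo):
# a resource like the one above is typically registered on a flask_restful Api
# with a Flask 'path' converter so nested workspace filenames arrive as one string:
#   app = Flask(__name__)
#   api = Api(app)
#   api.add_resource(Wscdn, '/wscdn/<path:filename>')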
| import os
import glob
import json
from pathlib import Path
from flask_restful import Api, Resource, reqparse
from flask_jwt_extended import jwt_required
from flask import Flask, request, escape, make_response, send_from_directory
import utils
# in case you can't install ansi2html, its absence won't break the api
try:
from ansi2html import Ansi2HTMLConverter
except ImportError:
pass
current_path = os.path.dirname(os.path.realpath(__file__))
'''
render stdout content
'''
class Wscdn(Resource):
def verify_file(self, filename):
option_files = glob.glob(
current_path + '/storages/**/options.json', recursive=True)
        # loop through all available options
for option in option_files:
json_option = utils.reading_json(option)
stdout_path = json_option.get('WORKSPACES') + "/" + filename
if utils.not_empty_file(stdout_path):
return json_option.get('WORKSPACES'), os.path.normpath(filename)
# get real path
p = Path(filename)
ws = p.parts[0]
if ws != utils.url_encode(ws):
# just replace the first one
filename_encode = filename.replace(ws, utils.url_encode(ws), 1)
stdout_path_encode = json_option.get('WORKSPACES') + filename_encode
if utils.not_empty_file(stdout_path_encode):
return json_option.get('WORKSPACES'), os.path.normpath(filename_encode)
return False, False
def get(self, filename):
ws_path, stdout_path = self.verify_file(filename)
if not stdout_path:
return 'Custom 404 here', 404
return send_from_directory(ws_path, stdout_path)
| en | 0.804155 | # incase you can't install ansi2html it's won't break the api render stdout content # loop though all options avalible # get real path # just replace the first one | 2.484005 | 2 |
custom_components/hahm/services.py | noxhirsch/custom_homematic | 0 | 10777 | """Module with hahomematic services."""
from __future__ import annotations
from datetime import datetime
import logging
from hahomematic.const import (
ATTR_ADDRESS,
ATTR_INTERFACE_ID,
ATTR_NAME,
ATTR_PARAMETER,
ATTR_VALUE,
HmPlatform,
)
from hahomematic.device import HmDevice
from hahomematic.entity import BaseEntity, GenericEntity
import voluptuous as vol
from homeassistant.const import ATTR_ENTITY_ID, ATTR_MODE, ATTR_TIME
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import device_registry as dr
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import comp_entity_ids
from homeassistant.helpers.device_registry import DeviceEntry
from homeassistant.helpers.service import (
async_register_admin_service,
verify_domain_control,
)
from .const import (
ATTR_PARAMSET,
ATTR_PARAMSET_KEY,
ATTR_RX_MODE,
ATTR_VALUE_TYPE,
DOMAIN,
)
from .control_unit import ControlUnit, HaHub
from .helpers import get_device_address_at_interface_from_identifiers
_LOGGER = logging.getLogger(__name__)
ATTR_CHANNEL = "channel"
ATTR_DEVICE_ID = "device_id"
DEFAULT_CHANNEL = 1
SERVICE_EXPORT_DEVICE_DEFINITION = "export_device_definition"
SERVICE_PUT_PARAMSET = "put_paramset"
SERVICE_SET_DEVICE_VALUE = "set_device_value"
SERVICE_SET_INSTALL_MODE = "set_install_mode"
SERVICE_SET_VARIABLE_VALUE = "set_variable_value"
HAHM_SERVICES = [
SERVICE_EXPORT_DEVICE_DEFINITION,
SERVICE_PUT_PARAMSET,
SERVICE_SET_DEVICE_VALUE,
SERVICE_SET_INSTALL_MODE,
SERVICE_SET_VARIABLE_VALUE,
]
SCHEMA_SERVICE_EXPORT_DEVICE_DEFINITION = vol.Schema(
{
vol.Required(ATTR_DEVICE_ID): cv.string,
}
)
SCHEMA_SERVICE_SET_VARIABLE_VALUE = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): comp_entity_ids,
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_VALUE): cv.match_all,
}
)
SCHEMA_SERVICE_SET_INSTALL_MODE = vol.Schema(
{
vol.Required(ATTR_INTERFACE_ID): cv.string,
vol.Optional(ATTR_TIME, default=60): cv.positive_int,
vol.Optional(ATTR_MODE, default=1): vol.All(vol.Coerce(int), vol.In([1, 2])),
vol.Optional(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
}
)
SCHEMA_SERVICE_SET_DEVICE_VALUE = vol.Schema(
{
vol.Required(ATTR_DEVICE_ID): cv.string,
vol.Required(ATTR_CHANNEL, default=DEFAULT_CHANNEL): vol.Coerce(int),
vol.Required(ATTR_PARAMETER): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_VALUE): cv.match_all,
vol.Optional(ATTR_VALUE_TYPE): vol.In(
["boolean", "dateTime.iso8601", "double", "int", "string"]
),
vol.Optional(ATTR_RX_MODE): vol.All(cv.string, vol.Upper),
}
)
SCHEMA_SERVICE_PUT_PARAMSET = vol.Schema(
{
vol.Required(ATTR_DEVICE_ID): cv.string,
vol.Required(ATTR_CHANNEL, default=DEFAULT_CHANNEL): vol.Coerce(int),
vol.Required(ATTR_PARAMSET_KEY): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_PARAMSET): dict,
vol.Optional(ATTR_RX_MODE): vol.All(cv.string, vol.Upper),
}
)
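# Usage sketch (illustrative values only; the device registry id is an
# assumption): once registered below, these services are invoked like any
# other Home Assistant service, e.g. from a script or test:
#   await hass.services.async_call(
#       DOMAIN,
#       SERVICE_SET_DEVICE_VALUE,
#       {
#           ATTR_DEVICE_ID: "abc123device",
#           ATTR_CHANNEL: 1,
#           ATTR_PARAMETER: "LEVEL",
#           ATTR_VALUE: 0.5,
#       },
#       blocking=True,
#   )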
async def async_setup_services(hass: HomeAssistant) -> None:
"""Create the hahomematic services."""
@verify_domain_control(hass, DOMAIN)
async def async_call_hahm_service(service: ServiceCall) -> None:
"""Call correct HomematicIP Cloud service."""
service_name = service.service
if service_name == SERVICE_EXPORT_DEVICE_DEFINITION:
await _async_service_export_device_definition(hass=hass, service=service)
elif service_name == SERVICE_PUT_PARAMSET:
await _async_service_put_paramset(hass=hass, service=service)
elif service_name == SERVICE_SET_INSTALL_MODE:
await _async_service_set_install_mode(hass=hass, service=service)
elif service_name == SERVICE_SET_DEVICE_VALUE:
await _async_service_set_device_value(hass=hass, service=service)
elif service_name == SERVICE_SET_VARIABLE_VALUE:
await _async_service_set_variable_value(hass=hass, service=service)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_EXPORT_DEVICE_DEFINITION,
service_func=async_call_hahm_service,
schema=SCHEMA_SERVICE_EXPORT_DEVICE_DEFINITION,
)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_SET_VARIABLE_VALUE,
service_func=async_call_hahm_service,
schema=SCHEMA_SERVICE_SET_VARIABLE_VALUE,
)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_SET_DEVICE_VALUE,
service_func=async_call_hahm_service,
schema=SCHEMA_SERVICE_SET_DEVICE_VALUE,
)
async_register_admin_service(
hass=hass,
domain=DOMAIN,
service=SERVICE_SET_INSTALL_MODE,
service_func=async_call_hahm_service,
schema=SCHEMA_SERVICE_SET_INSTALL_MODE,
)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_PUT_PARAMSET,
service_func=async_call_hahm_service,
schema=SCHEMA_SERVICE_PUT_PARAMSET,
)
async def async_unload_services(hass: HomeAssistant) -> None:
"""Unload HAHM services."""
if hass.data[DOMAIN]:
return
for hahm_service in HAHM_SERVICES:
hass.services.async_remove(domain=DOMAIN, service=hahm_service)
async def _async_service_export_device_definition(
hass: HomeAssistant, service: ServiceCall
) -> None:
"""Service to call setValue method for HomeMatic devices."""
device_id = service.data[ATTR_DEVICE_ID]
if hm_device := _get_device(hass=hass, device_id=device_id):
await hm_device.export_device_definition()
_LOGGER.debug(
"Calling export_device_definition: %s, %s",
hm_device.name,
hm_device.device_address,
)
async def _async_service_set_variable_value(
hass: HomeAssistant, service: ServiceCall
) -> None:
"""Service to call setValue method for HomeMatic system variable."""
entity_id = service.data[ATTR_ENTITY_ID]
name = service.data[ATTR_NAME]
value = service.data[ATTR_VALUE]
if hub := _get_hub_by_entity_id(hass=hass, entity_id=entity_id):
await hub.async_set_variable(name=name, value=value)
async def _async_service_set_device_value(
hass: HomeAssistant, service: ServiceCall
) -> None:
"""Service to call setValue method for HomeMatic devices."""
device_id = service.data[ATTR_DEVICE_ID]
channel = service.data[ATTR_CHANNEL]
parameter = service.data[ATTR_PARAMETER]
value = service.data[ATTR_VALUE]
rx_mode = service.data.get(ATTR_RX_MODE)
# Convert value into correct XML-RPC Type.
# https://docs.python.org/3/library/xmlrpc.client.html#xmlrpc.client.ServerProxy
if value_type := service.data.get(ATTR_VALUE_TYPE):
if value_type == "int":
value = int(value)
elif value_type == "double":
value = float(value)
elif value_type == "boolean":
value = bool(value)
elif value_type == "dateTime.iso8601":
value = datetime.strptime(value, "%Y%m%dT%H:%M:%S")
else:
# Default is 'string'
value = str(value)
if (
address_data := _get_interface_channel_address(
hass=hass, device_id=device_id, channel=channel
)
) is None:
return None
interface_id: str = address_data[0]
channel_address: str = address_data[1]
_LOGGER.debug(
"Calling setValue: %s, %s, %s, %s, %s, %s",
interface_id,
channel_address,
parameter,
value,
value_type,
rx_mode,
)
if interface_id and channel_address:
if control_unit := _get_cu_by_interface_id(
hass=hass, interface_id=interface_id
):
await control_unit.central.set_value(
interface_id=interface_id,
channel_address=channel_address,
parameter=parameter,
value=value,
rx_mode=rx_mode,
)
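# Coercion sketch for the value_type handling above (illustrative inputs):
#   "int"              "300"                -> 300
#   "double"           "0.25"               -> 0.25
#   "dateTime.iso8601" "20240101T12:00:00"  -> datetime(2024, 1, 1, 12, 0)
#   "boolean"          uses bool() truthiness, so any non-empty string
#                      (even "false") becomes True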
async def _async_service_set_install_mode(
hass: HomeAssistant, service: ServiceCall
) -> None:
"""Service to set interface_id into install mode."""
interface_id = service.data[ATTR_INTERFACE_ID]
mode: int = service.data.get(ATTR_MODE, 1)
time: int = service.data.get(ATTR_TIME, 60)
device_address = service.data.get(ATTR_ADDRESS)
if control_unit := _get_cu_by_interface_id(hass=hass, interface_id=interface_id):
await control_unit.central.set_install_mode(
interface_id, t=time, mode=mode, device_address=device_address
)
async def _async_service_put_paramset(
hass: HomeAssistant, service: ServiceCall
) -> None:
"""Service to call the putParamset method on a HomeMatic connection."""
device_id = service.data[ATTR_DEVICE_ID]
channel = service.data[ATTR_CHANNEL]
paramset_key = service.data[ATTR_PARAMSET_KEY]
# When passing in the paramset from a YAML file we get an OrderedDict
# here instead of a dict, so add this explicit cast.
# The service schema makes sure that this cast works.
paramset = dict(service.data[ATTR_PARAMSET])
rx_mode = service.data.get(ATTR_RX_MODE)
if (
address_data := _get_interface_channel_address(
hass=hass, device_id=device_id, channel=channel
)
) is None:
return None
interface_id: str = address_data[0]
channel_address: str = address_data[1]
_LOGGER.debug(
"Calling putParamset: %s, %s, %s, %s, %s",
interface_id,
channel_address,
paramset_key,
paramset,
rx_mode,
)
if interface_id and channel_address:
if control_unit := _get_cu_by_interface_id(
hass=hass, interface_id=interface_id
):
await control_unit.central.put_paramset(
interface_id=interface_id,
channel_address=channel_address,
paramset=paramset_key,
value=paramset,
rx_mode=rx_mode,
)
def _get_device(hass: HomeAssistant, device_id: str) -> HmDevice | None:
"""Return the homematic device."""
device_registry = dr.async_get(hass)
device_entry: DeviceEntry | None = device_registry.async_get(device_id)
if not device_entry:
return None
if (
data := get_device_address_at_interface_from_identifiers(
identifiers=device_entry.identifiers
)
) is None:
return None
device_address = data[0]
interface_id = data[1]
if control_unit := _get_cu_by_interface_id(hass=hass, interface_id=interface_id):
return control_unit.central.hm_devices.get(device_address)
return None
def _get_interface_channel_address(
hass: HomeAssistant, device_id: str, channel: int
) -> tuple[str, str] | None:
"""Return interface and channel_address with given device_id and channel."""
device_registry = dr.async_get(hass)
device_entry: DeviceEntry | None = device_registry.async_get(device_id)
if not device_entry:
return None
if (
data := get_device_address_at_interface_from_identifiers(
identifiers=device_entry.identifiers
)
) is None:
return None
device_address = data[0]
interface_id = data[1]
channel_address = f"{device_address}:{channel}"
return interface_id, channel_address
def _get_entity(hass: HomeAssistant, entity_id: str) -> BaseEntity | None:
"""Return entity by given entity_id."""
control_unit: ControlUnit
for control_unit in hass.data[DOMAIN].values():
if hm_entity := control_unit.async_get_hm_entity(entity_id=entity_id):
if isinstance(hm_entity, BaseEntity):
return hm_entity
return None
def _get_entities_by_platform(
hass: HomeAssistant, platform: HmPlatform
) -> list[BaseEntity]:
"""Return entities by given platform."""
control_unit: ControlUnit
hm_entities: list[BaseEntity] = []
for control_unit in hass.data[DOMAIN].values():
hm_entities.extend(
control_unit.async_get_hm_entities_by_platform(platform=platform)
)
return hm_entities
def _get_hm_entity(
hass: HomeAssistant, interface_id: str, channel_address: str, parameter: str
) -> GenericEntity | None:
"""Get homematic entity."""
if control_unit := _get_cu_by_interface_id(hass=hass, interface_id=interface_id):
return control_unit.central.get_hm_entity_by_parameter(
channel_address=channel_address, parameter=parameter
)
return None
def _get_cu_by_interface_id(
hass: HomeAssistant, interface_id: str
) -> ControlUnit | None:
"""Get ControlUnit by interface_id."""
for entry_id in hass.data[DOMAIN].keys():
control_unit: ControlUnit = hass.data[DOMAIN][entry_id]
if control_unit and control_unit.central.clients.get(interface_id):
return control_unit
return None
def _get_hub_by_entity_id(hass: HomeAssistant, entity_id: str) -> HaHub | None:
"""Get ControlUnit by device address."""
for entry_id in hass.data[DOMAIN].keys():
control_unit: ControlUnit = hass.data[DOMAIN][entry_id]
if (
control_unit
and control_unit.hub
and control_unit.hub.entity_id == entity_id
):
return control_unit.hub
return None
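# Wiring sketch (assumed, following the usual config-entry flow of an
# integration; not taken from this module):
#   await async_setup_services(hass)   # while setting up the first config entry
#   await async_unload_services(hass)  # after the last config entry is unloaded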
| """Module with hahomematic services."""
from __future__ import annotations
from datetime import datetime
import logging
from hahomematic.const import (
ATTR_ADDRESS,
ATTR_INTERFACE_ID,
ATTR_NAME,
ATTR_PARAMETER,
ATTR_VALUE,
HmPlatform,
)
from hahomematic.device import HmDevice
from hahomematic.entity import BaseEntity, GenericEntity
import voluptuous as vol
from homeassistant.const import ATTR_ENTITY_ID, ATTR_MODE, ATTR_TIME
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import device_registry as dr
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import comp_entity_ids
from homeassistant.helpers.device_registry import DeviceEntry
from homeassistant.helpers.service import (
async_register_admin_service,
verify_domain_control,
)
from .const import (
ATTR_PARAMSET,
ATTR_PARAMSET_KEY,
ATTR_RX_MODE,
ATTR_VALUE_TYPE,
DOMAIN,
)
from .control_unit import ControlUnit, HaHub
from .helpers import get_device_address_at_interface_from_identifiers
_LOGGER = logging.getLogger(__name__)
ATTR_CHANNEL = "channel"
ATTR_DEVICE_ID = "device_id"
DEFAULT_CHANNEL = 1
SERVICE_EXPORT_DEVICE_DEFINITION = "export_device_definition"
SERVICE_PUT_PARAMSET = "put_paramset"
SERVICE_SET_DEVICE_VALUE = "set_device_value"
SERVICE_SET_INSTALL_MODE = "set_install_mode"
SERVICE_SET_VARIABLE_VALUE = "set_variable_value"
HAHM_SERVICES = [
SERVICE_EXPORT_DEVICE_DEFINITION,
SERVICE_PUT_PARAMSET,
SERVICE_SET_DEVICE_VALUE,
SERVICE_SET_INSTALL_MODE,
SERVICE_SET_VARIABLE_VALUE,
]
SCHEMA_SERVICE_EXPORT_DEVICE_DEFINITION = vol.Schema(
{
vol.Required(ATTR_DEVICE_ID): cv.string,
}
)
SCHEMA_SERVICE_SET_VARIABLE_VALUE = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): comp_entity_ids,
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_VALUE): cv.match_all,
}
)
SCHEMA_SERVICE_SET_INSTALL_MODE = vol.Schema(
{
vol.Required(ATTR_INTERFACE_ID): cv.string,
vol.Optional(ATTR_TIME, default=60): cv.positive_int,
vol.Optional(ATTR_MODE, default=1): vol.All(vol.Coerce(int), vol.In([1, 2])),
vol.Optional(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
}
)
SCHEMA_SERVICE_SET_DEVICE_VALUE = vol.Schema(
{
vol.Required(ATTR_DEVICE_ID): cv.string,
vol.Required(ATTR_CHANNEL, default=DEFAULT_CHANNEL): vol.Coerce(int),
vol.Required(ATTR_PARAMETER): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_VALUE): cv.match_all,
vol.Optional(ATTR_VALUE_TYPE): vol.In(
["boolean", "dateTime.iso8601", "double", "int", "string"]
),
vol.Optional(ATTR_RX_MODE): vol.All(cv.string, vol.Upper),
}
)
SCHEMA_SERVICE_PUT_PARAMSET = vol.Schema(
{
vol.Required(ATTR_DEVICE_ID): cv.string,
vol.Required(ATTR_CHANNEL, default=DEFAULT_CHANNEL): vol.Coerce(int),
vol.Required(ATTR_PARAMSET_KEY): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_PARAMSET): dict,
vol.Optional(ATTR_RX_MODE): vol.All(cv.string, vol.Upper),
}
)
async def async_setup_services(hass: HomeAssistant) -> None:
"""Create the hahomematic services."""
@verify_domain_control(hass, DOMAIN)
async def async_call_hahm_service(service: ServiceCall) -> None:
"""Call correct HomematicIP Cloud service."""
service_name = service.service
if service_name == SERVICE_EXPORT_DEVICE_DEFINITION:
await _async_service_export_device_definition(hass=hass, service=service)
elif service_name == SERVICE_PUT_PARAMSET:
await _async_service_put_paramset(hass=hass, service=service)
elif service_name == SERVICE_SET_INSTALL_MODE:
await _async_service_set_install_mode(hass=hass, service=service)
elif service_name == SERVICE_SET_DEVICE_VALUE:
await _async_service_set_device_value(hass=hass, service=service)
elif service_name == SERVICE_SET_VARIABLE_VALUE:
await _async_service_set_variable_value(hass=hass, service=service)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_EXPORT_DEVICE_DEFINITION,
service_func=async_call_hahm_service,
schema=SCHEMA_SERVICE_EXPORT_DEVICE_DEFINITION,
)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_SET_VARIABLE_VALUE,
service_func=async_call_hahm_service,
schema=SCHEMA_SERVICE_SET_VARIABLE_VALUE,
)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_SET_DEVICE_VALUE,
service_func=async_call_hahm_service,
schema=SCHEMA_SERVICE_SET_DEVICE_VALUE,
)
async_register_admin_service(
hass=hass,
domain=DOMAIN,
service=SERVICE_SET_INSTALL_MODE,
service_func=async_call_hahm_service,
schema=SCHEMA_SERVICE_SET_INSTALL_MODE,
)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_PUT_PARAMSET,
service_func=async_call_hahm_service,
schema=SCHEMA_SERVICE_PUT_PARAMSET,
)
async def async_unload_services(hass: HomeAssistant) -> None:
"""Unload HAHM services."""
if hass.data[DOMAIN]:
return
for hahm_service in HAHM_SERVICES:
hass.services.async_remove(domain=DOMAIN, service=hahm_service)
async def _async_service_export_device_definition(
hass: HomeAssistant, service: ServiceCall
) -> None:
"""Service to call setValue method for HomeMatic devices."""
device_id = service.data[ATTR_DEVICE_ID]
if hm_device := _get_device(hass=hass, device_id=device_id):
await hm_device.export_device_definition()
_LOGGER.debug(
"Calling export_device_definition: %s, %s",
hm_device.name,
hm_device.device_address,
)
async def _async_service_set_variable_value(
hass: HomeAssistant, service: ServiceCall
) -> None:
"""Service to call setValue method for HomeMatic system variable."""
entity_id = service.data[ATTR_ENTITY_ID]
name = service.data[ATTR_NAME]
value = service.data[ATTR_VALUE]
if hub := _get_hub_by_entity_id(hass=hass, entity_id=entity_id):
await hub.async_set_variable(name=name, value=value)
async def _async_service_set_device_value(
hass: HomeAssistant, service: ServiceCall
) -> None:
"""Service to call setValue method for HomeMatic devices."""
device_id = service.data[ATTR_DEVICE_ID]
channel = service.data[ATTR_CHANNEL]
parameter = service.data[ATTR_PARAMETER]
value = service.data[ATTR_VALUE]
rx_mode = service.data.get(ATTR_RX_MODE)
# Convert value into correct XML-RPC Type.
# https://docs.python.org/3/library/xmlrpc.client.html#xmlrpc.client.ServerProxy
if value_type := service.data.get(ATTR_VALUE_TYPE):
if value_type == "int":
value = int(value)
elif value_type == "double":
value = float(value)
elif value_type == "boolean":
value = bool(value)
elif value_type == "dateTime.iso8601":
value = datetime.strptime(value, "%Y%m%dT%H:%M:%S")
else:
# Default is 'string'
value = str(value)
if (
address_data := _get_interface_channel_address(
hass=hass, device_id=device_id, channel=channel
)
) is None:
return None
interface_id: str = address_data[0]
channel_address: str = address_data[1]
_LOGGER.debug(
"Calling setValue: %s, %s, %s, %s, %s, %s",
interface_id,
channel_address,
parameter,
value,
value_type,
rx_mode,
)
if interface_id and channel_address:
if control_unit := _get_cu_by_interface_id(
hass=hass, interface_id=interface_id
):
await control_unit.central.set_value(
interface_id=interface_id,
channel_address=channel_address,
parameter=parameter,
value=value,
rx_mode=rx_mode,
)
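# Sketch only: the ATTR_VALUE_TYPE coercion above, pulled out as a standalone helper for
# illustration. A "dateTime.iso8601" value must look like "20230101T12:00:00".
def _example_coerce_value(value, value_type):
    """Example only: mirror the XML-RPC type coercion used by set_device_value."""
    if value_type == "int":
        return int(value)
    if value_type == "double":
        return float(value)
    if value_type == "boolean":
        return bool(value)
    if value_type == "dateTime.iso8601":
        return datetime.strptime(value, "%Y%m%dT%H:%M:%S")
    return str(value)  # default is 'string'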
async def _async_service_set_install_mode(
hass: HomeAssistant, service: ServiceCall
) -> None:
"""Service to set interface_id into install mode."""
interface_id = service.data[ATTR_INTERFACE_ID]
mode: int = service.data.get(ATTR_MODE, 1)
time: int = service.data.get(ATTR_TIME, 60)
device_address = service.data.get(ATTR_ADDRESS)
if control_unit := _get_cu_by_interface_id(hass=hass, interface_id=interface_id):
await control_unit.central.set_install_mode(
interface_id, t=time, mode=mode, device_address=device_address
)
async def _async_service_put_paramset(
hass: HomeAssistant, service: ServiceCall
) -> None:
"""Service to call the putParamset method on a HomeMatic connection."""
device_id = service.data[ATTR_DEVICE_ID]
channel = service.data[ATTR_CHANNEL]
paramset_key = service.data[ATTR_PARAMSET_KEY]
# When passing in the paramset from a YAML file we get an OrderedDict
# here instead of a dict, so add this explicit cast.
# The service schema makes sure that this cast works.
paramset = dict(service.data[ATTR_PARAMSET])
rx_mode = service.data.get(ATTR_RX_MODE)
if (
address_data := _get_interface_channel_address(
hass=hass, device_id=device_id, channel=channel
)
) is None:
return None
interface_id: str = address_data[0]
channel_address: str = address_data[1]
_LOGGER.debug(
"Calling putParamset: %s, %s, %s, %s, %s",
interface_id,
channel_address,
paramset_key,
paramset,
rx_mode,
)
if interface_id and channel_address:
if control_unit := _get_cu_by_interface_id(
hass=hass, interface_id=interface_id
):
await control_unit.central.put_paramset(
interface_id=interface_id,
channel_address=channel_address,
paramset=paramset_key,
value=paramset,
rx_mode=rx_mode,
)
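# Sketch only: an example service payload for hahm.put_paramset. The device id and the
# MASTER parameter shown are placeholders, not values taken from a real device.
EXAMPLE_PUT_PARAMSET_CALL = {
    ATTR_DEVICE_ID: "abcdef0123456789",  # placeholder
    ATTR_CHANNEL: 1,
    ATTR_PARAMSET_KEY: "MASTER",
    ATTR_PARAMSET: {"TEMPERATURE_MAXIMUM": 24.0},  # placeholder parameter/value
}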
def _get_device(hass: HomeAssistant, device_id: str) -> HmDevice | None:
"""Return the homematic device."""
device_registry = dr.async_get(hass)
device_entry: DeviceEntry | None = device_registry.async_get(device_id)
if not device_entry:
return None
if (
data := get_device_address_at_interface_from_identifiers(
identifiers=device_entry.identifiers
)
) is None:
return None
device_address = data[0]
interface_id = data[1]
if control_unit := _get_cu_by_interface_id(hass=hass, interface_id=interface_id):
return control_unit.central.hm_devices.get(device_address)
return None
def _get_interface_channel_address(
hass: HomeAssistant, device_id: str, channel: int
) -> tuple[str, str] | None:
"""Return interface and channel_address with given device_id and channel."""
device_registry = dr.async_get(hass)
device_entry: DeviceEntry | None = device_registry.async_get(device_id)
if not device_entry:
return None
if (
data := get_device_address_at_interface_from_identifiers(
identifiers=device_entry.identifiers
)
) is None:
return None
device_address = data[0]
interface_id = data[1]
channel_address = f"{device_address}:{channel}"
return interface_id, channel_address
def _get_entity(hass: HomeAssistant, entity_id: str) -> BaseEntity | None:
"""Return entity by given entity_id."""
control_unit: ControlUnit
for control_unit in hass.data[DOMAIN].values():
if hm_entity := control_unit.async_get_hm_entity(entity_id=entity_id):
if isinstance(hm_entity, BaseEntity):
return hm_entity
return None
def _get_entities_by_platform(
hass: HomeAssistant, platform: HmPlatform
) -> list[BaseEntity]:
"""Return entities by given platform."""
control_unit: ControlUnit
hm_entities: list[BaseEntity] = []
for control_unit in hass.data[DOMAIN].values():
hm_entities.extend(
control_unit.async_get_hm_entities_by_platform(platform=platform)
)
return hm_entities
def _get_hm_entity(
hass: HomeAssistant, interface_id: str, channel_address: str, parameter: str
) -> GenericEntity | None:
"""Get homematic entity."""
if control_unit := _get_cu_by_interface_id(hass=hass, interface_id=interface_id):
return control_unit.central.get_hm_entity_by_parameter(
channel_address=channel_address, parameter=parameter
)
return None
def _get_cu_by_interface_id(
hass: HomeAssistant, interface_id: str
) -> ControlUnit | None:
"""Get ControlUnit by interface_id."""
    control_unit: ControlUnit
    for control_unit in hass.data[DOMAIN].values():
if control_unit and control_unit.central.clients.get(interface_id):
return control_unit
return None
def _get_hub_by_entity_id(hass: HomeAssistant, entity_id: str) -> HaHub | None:
"""Get ControlUnit by device address."""
    control_unit: ControlUnit
    for control_unit in hass.data[DOMAIN].values():
if (
control_unit
and control_unit.hub
and control_unit.hub.entity_id == entity_id
):
return control_unit.hub
return None
| en | 0.789944 | Module with hahomematic services. Create the hahomematic services. Call correct HomematicIP Cloud service. Unload HAHM services. Service to call setValue method for HomeMatic devices. Service to call setValue method for HomeMatic system variable. Service to call setValue method for HomeMatic devices. # Convert value into correct XML-RPC Type. # https://docs.python.org/3/library/xmlrpc.client.html#xmlrpc.client.ServerProxy # Default is 'string' Service to set interface_id into install mode. Service to call the putParamset method on a HomeMatic connection. # When passing in the paramset from a YAML file we get an OrderedDict # here instead of a dict, so add this explicit cast. # The service schema makes sure that this cast works. Return the homematic device. Return interface and channel_address with given device_id and channel. Return entity by given entity_id. Return entities by given platform. Get homematic entity. Get ControlUnit by interface_id. Get ControlUnit by device address. | 1.98916 | 2 |
app/migrations/0001_initial.py | MariaAlice00/ifpi-tds-projeto-integrador | 0 | 10778 | # Generated by Django 3.2.3 on 2021-06-03 00:35
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Livro',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('imagem', models.ImageField(upload_to='imagens')),
('titulo', models.CharField(max_length=150)),
('autor', models.CharField(max_length=50)),
('genero', models.CharField(max_length=50)),
('serieunico', models.CharField(max_length=50)),
('nota', models.CharField(max_length=2)),
('opiniao', models.CharField(max_length=300)),
],
),
]
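# Sketch only: the model class behind this initial migration would look roughly like the
# commented code below (field names are taken from the migration; everything else is an
# assumption about the app's models.py, which is not shown here).
#
# from django.db import models
#
# class Livro(models.Model):
#     imagem = models.ImageField(upload_to='imagens')
#     titulo = models.CharField(max_length=150)
#     autor = models.CharField(max_length=50)
#     genero = models.CharField(max_length=50)
#     serieunico = models.CharField(max_length=50)
#     nota = models.CharField(max_length=2)
#     opiniao = models.CharField(max_length=300)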
| # Generated by Django 3.2.3 on 2021-06-03 00:35
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Livro',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('imagem', models.ImageField(upload_to='imagens')),
('titulo', models.CharField(max_length=150)),
('autor', models.CharField(max_length=50)),
('genero', models.CharField(max_length=50)),
('serieunico', models.CharField(max_length=50)),
('nota', models.CharField(max_length=2)),
('opiniao', models.CharField(max_length=300)),
],
),
]
| en | 0.859339 | # Generated by Django 3.2.3 on 2021-06-03 00:35 | 1.761269 | 2 |
graalpython/com.oracle.graal.python.parser.antlr/postprocess.py | transposit/graalpython | 1 | 10779 | # Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import re
COPYRIGHT_HEADER = """\
/*
* Copyright (c) 2017-2019, Oracle and/or its affiliates.
* Copyright (c) 2014 by <NAME>
*
* The MIT License (MIT)
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
// Checkstyle: stop
// JaCoCo Exclude
//@formatter:off
{0}
"""
PTRN_SUPPRESS_WARNINGS = re.compile(r"@SuppressWarnings.*")
def replace_suppress_warnings(line):
return PTRN_SUPPRESS_WARNINGS.sub('@SuppressWarnings("all")', line)
def replace_rulectx(line):
return line.replace("(RuleContext)_localctx", "_localctx")
def replace_localctx(line):
return re.sub(r'\(\((([a-zA-Z]*?_?)*[a-zA-Z]*)\)_localctx\)', '_localctx', line)
TRANSFORMS = [
replace_suppress_warnings,
replace_rulectx,
replace_localctx,
]
def postprocess(file):
lines = []
for line in file:
for transform in TRANSFORMS:
line = transform(line)
lines.append(line)
return ''.join(lines)
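# Sketch only: applying the transforms to an in-memory sample instead of a file; the Java
# lines below are illustrative ANTLR output, not taken from the real generated parser.
def _example_postprocess():
    """Example only: run postprocess() over a StringIO with two generated lines."""
    import io
    sample = io.StringIO('@SuppressWarnings("CheckReturnValue")\nreturn (RuleContext)_localctx;\n')
    return postprocess(sample)  # -> '@SuppressWarnings("all")\nreturn _localctx;\n'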
if __name__ == '__main__':
fpath = sys.argv[1]
with open(fpath, 'r') as FILE:
content = COPYRIGHT_HEADER.format(postprocess(FILE))
with open(fpath, 'w+') as FILE:
FILE.write(content)
| # Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import re
COPYRIGHT_HEADER = """\
/*
* Copyright (c) 2017-2019, Oracle and/or its affiliates.
* Copyright (c) 2014 by <NAME>
*
* The MIT License (MIT)
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
// Checkstyle: stop
// JaCoCo Exclude
//@formatter:off
{0}
"""
PTRN_SUPPRESS_WARNINGS = re.compile(r"@SuppressWarnings.*")
def replace_suppress_warnings(line):
return PTRN_SUPPRESS_WARNINGS.sub('@SuppressWarnings("all")', line)
def replace_rulectx(line):
return line.replace("(RuleContext)_localctx", "_localctx")
def replace_localctx(line):
return re.sub(r'\(\((([a-zA-Z]*?_?)*[a-zA-Z]*)\)_localctx\)', '_localctx', line)
TRANSFORMS = [
replace_suppress_warnings,
replace_rulectx,
replace_localctx,
]
def postprocess(file):
lines = []
for line in file:
for transform in TRANSFORMS:
line = transform(line)
lines.append(line)
return ''.join(lines)
if __name__ == '__main__':
fpath = sys.argv[1]
with open(fpath, 'r') as FILE:
content = COPYRIGHT_HEADER.format(postprocess(FILE))
with open(fpath, 'w+') as FILE:
FILE.write(content)
| en | 0.812622 | # Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # The Universal Permissive License (UPL), Version 1.0 # # Subject to the condition set forth below, permission is hereby granted to any # person obtaining a copy of this software, associated documentation and/or # data (collectively the "Software"), free of charge and under any and all # copyright rights in the Software, and any and all patent rights owned or # freely licensable by each licensor hereunder covering either (i) the # unmodified Software as contributed to or provided by such licensor, or (ii) # the Larger Works (as defined below), to deal in both # # (a) the Software, and # # (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if # one is included with the Software each a "Larger Work" to which the Software # is contributed by such licensors), # # without restriction, including without limitation the rights to copy, create # derivative works of, display, perform, and distribute the Software and make, # use, sell, offer for sale, import, export, have made, and have sold the # Software and the Larger Work(s), and to sublicense the foregoing rights on # either these or other terms. # # This license is subject to the following condition: # # The above copyright notice and either this complete permission notice or at a # minimum a reference to the UPL must be included in all copies or substantial # portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. \ /* * Copyright (c) 2017-2019, Oracle and/or its affiliates. * Copyright (c) 2014 by <NAME> * * The MIT License (MIT) * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following * conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ // Checkstyle: stop // JaCoCo Exclude //@formatter:off {0} | 1.165558 | 1 |
scripts/ape_protocol_deploy.py | coordinape/coordinape-protocol | 22 | 10780 | <reponame>coordinape/coordinape-protocol
from brownie import accounts, Wei, chain, ApeToken, ApeVaultFactory, ApeDistributor, ApeRegistry, ApeRouter, FeeRegistry, MockRegistry, MockVaultFactory, MockToken, MockVault
def deploy_token():
funds = accounts.load('moist', '\0')
user = accounts.load('ape_deployer', '\0')
multi_sig = '0x15B513F658f7390D8720dCE321f50974B28672EF'
# funds.transfer(to=user, amount='1 ether')
# ape = ApeToken.deploy({'from':user}, publish_source=True)
# ape.transferOwnership(multi_sig, {'from':user})
gas_used = Wei('150 gwei') * 21000
remaining = user.balance() - gas_used
ask_back = Wei('1 ether') - remaining
print(f'to ask back: {Wei(ask_back).to("ether")}')
user.transfer(to=funds, amount=remaining, gas_price='150 gwei')
def deploy_protocol():
user = accounts.load('ape_deployer', '\0')
multi_sig = ''
lock_length = 60 * 60 * 24 * 14 # 14 days
yearn_reg = '0x50c1a2ea0a861a967d9d0ffe2ae4012c2e053804'
ape_reg = ApeRegistry.deploy(0, {'from':user}, publish_source=True)
ape_factory = ApeVaultFactory.deploy(yearn_reg, ape_reg, {'from':user}, publish_source=True)
ape_router = ApeRouter.deploy(yearn_reg, ape_factory, 0, {'from':user}, publish_source=True)
ape_distro = ApeDistributor.deploy({'from':user}, publish_source=True)
ape_fee = FeeRegistry.deploy({'from':user}, publish_source=True)
setup_protocol(ape_reg, ape_fee, ape_distro, ape_router, ape_factory, user)
min_delay_call = ape_reg.changeMinDelay.encode_input(lock_length)
ape_reg.schedule(ape_reg, min_delay_call, '', '', 0, {'from':user})
ape_fee.schedule(ape_fee, min_delay_call, '', '', 0, {'from':user})
ape_router.schedule(ape_router, min_delay_call, '', '', 0, {'from':user})
ape_reg.execute(ape_reg, min_delay_call, '', '', 0, {'from':user})
ape_fee.execute(ape_fee, min_delay_call, '', '', 0, {'from':user})
ape_router.execute(ape_router, min_delay_call, '', '', 0, {'from':user})
ape_reg.transferOwnership(multi_sig, {'from':user})
ape_fee.transferOwnership(multi_sig, {'from':user})
ape_router.transferOwnership(multi_sig, {'from':user})
def deploy_protocol_testnet():
user = accounts.load('moist', '\0')
multi_sig = user
lock_length = 60 * 60 * 24 * 14 # 14 days
mock_yearn_reg = MockRegistry.deploy({'from':user}, publish_source=True)
mock_yearn_vault_factories = MockVaultFactory.deploy(mock_yearn_reg, {'from':user}, publish_source=True)
mock_ape_reg = ApeRegistry.deploy(0, {'from':user}, publish_source=True)
mock_ape_factory = ApeVaultFactory.deploy(mock_yearn_reg, mock_ape_reg, {'from':user}, publish_source=True)
mock_ape_router = ApeRouter.deploy(mock_yearn_reg, mock_ape_factory, 0, {'from':user}, publish_source=True)
mock_ape_distro = ApeDistributor.deploy({'from':user}, publish_source=True)
mock_ape_fee = FeeRegistry.deploy({'from':user}, publish_source=True)
setup_protocol(mock_ape_reg, mock_ape_fee, mock_ape_distro, mock_ape_router, mock_ape_factory, user)
# setup_mockvaults(mock_yearn_vault_factories, user)
# min_delay_call = mock_ape_reg.changeMinDelay.encode_input(lock_length)
# mock_ape_reg.schedule(mock_ape_reg, min_delay_call, '', '', 0, {'from':user})
# mock_ape_fee.schedule(mock_ape_fee, min_delay_call, '', '', 0, {'from':user})
# mock_ape_router.schedule(mock_ape_router, min_delay_call, '', '', 0, {'from':user})
# mock_ape_reg.execute(mock_ape_reg, min_delay_call, '', '', 0, {'from':user})
# mock_ape_fee.execute(mock_ape_fee, min_delay_call, '', '', 0, {'from':user})
# mock_ape_router.execute(mock_ape_router, min_delay_call, '', '', 0, {'from':user})
# mock_ape_reg.transferOwnership(multi_sig, {'from':user})
# mock_ape_fee.transferOwnership(multi_sig, {'from':user})
# mock_ape_router.transferOwnership(multi_sig, {'from':user})
base_uri = 'https://rinkeby.etherscan.io/address/'
print(f'Mock yearn reg: {base_uri + mock_yearn_reg.address}')
print(f'Mock yearn Vault factory: {base_uri + mock_yearn_vault_factories.address}')
print(f'Mock ape reg: {base_uri + mock_ape_reg.address}')
print(f'Mock ape factory: {base_uri + mock_ape_factory.address}')
print(f'Mock ape router: {base_uri + mock_ape_router.address}')
print(f'Mock ape distro: {base_uri + mock_ape_distro.address}')
print(f'Mock ape fee: {base_uri + mock_ape_fee.address}')
def setup_protocol(ape_reg, ape_fee, ape_distro, ape_router, ape_factory, minter):
set_fee_call = ape_reg.setFeeRegistry.encode_input(ape_fee)
set_router_call = ape_reg.setRouter.encode_input(ape_router)
set_distro_call = ape_reg.setDistributor.encode_input(ape_distro)
set_factory_call = ape_reg.setFactory.encode_input(ape_factory)
set_treasury_call = ape_reg.setTreasury.encode_input(minter)
ape_reg.schedule(ape_reg, set_fee_call, '', '', 0, {'from':minter})
ape_reg.schedule(ape_reg, set_router_call, '', '', 0, {'from':minter})
ape_reg.schedule(ape_reg, set_distro_call, '', '', 0, {'from':minter})
ape_reg.schedule(ape_reg, set_factory_call, '', '', 0, {'from':minter})
ape_reg.schedule(ape_reg, set_treasury_call, '', '', 0, {'from':minter})
ape_reg.execute(ape_reg, set_fee_call, '', '', 0, {'from':minter})
ape_reg.execute(ape_reg, set_router_call, '', '', 0, {'from':minter})
ape_reg.execute(ape_reg, set_distro_call, '', '', 0, {'from':minter})
ape_reg.execute(ape_reg, set_factory_call, '', '', 0, {'from':minter})
ape_reg.execute(ape_reg, set_treasury_call, '', '', 0, {'from':minter})
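# Sketch only: every schedule/execute pair above follows the same timelock pattern -- a
# governance call is queued first and then executed once the min delay (0 at deploy time)
# has elapsed. Generic helper for illustration only; the deploy functions do not use it.
def schedule_and_execute(timelock, target, call_data, sender):
    timelock.schedule(target, call_data, '', '', 0, {'from': sender})
    timelock.execute(target, call_data, '', '', 0, {'from': sender})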
def setup_mockvaults(mock_yearn_vault_factories, user):
usdc = MockToken.deploy('USD Coin', 'USDC', {'from':user}, publish_source=True)
dai = MockToken.deploy('Dai', 'DAI', {'from':user})
ape = MockToken.deploy('Ape', 'OOH', {'from':user})
tx1 = mock_yearn_vault_factories.createVault(usdc, 'yearnVault USDC', 'yvUSDC', {'from':user})
MockVault.publish_source(tx1.new_contracts[0])
tx2 = mock_yearn_vault_factories.createVault(dai, 'yearnVault DAI', 'yvDAI', {'from':user})
tx3 = mock_yearn_vault_factories.createVault(ape, 'yearnVault Ape', 'yvOOH', {'from':user})
base_uri = 'https://rinkeby.etherscan.io/address/'
print(f'Mock usdc: {base_uri + usdc.address}')
print(f'Mock dai: {base_uri + dai.address}')
print(f'Mock ape token: {base_uri + ape.address}')
print(f'Mock usdc vault: {base_uri + tx1}')
print(f'Mock dai vault: {base_uri + tx2}')
print(f'Mock ape vault: {base_uri + tx3}')
| from brownie import accounts, Wei, chain, ApeToken, ApeVaultFactory, ApeDistributor, ApeRegistry, ApeRouter, FeeRegistry, MockRegistry, MockVaultFactory, MockToken, MockVault
def deploy_token():
funds = accounts.load('moist', '\0')
user = accounts.load('ape_deployer', '\0')
multi_sig = '0x15B513F658f7390D8720dCE321f50974B28672EF'
# funds.transfer(to=user, amount='1 ether')
# ape = ApeToken.deploy({'from':user}, publish_source=True)
# ape.transferOwnership(multi_sig, {'from':user})
gas_used = Wei('150 gwei') * 21000
remaining = user.balance() - gas_used
ask_back = Wei('1 ether') - remaining
print(f'to ask back: {Wei(ask_back).to("ether")}')
user.transfer(to=funds, amount=remaining, gas_price='150 gwei')
def deploy_protocol():
user = accounts.load('ape_deployer', '\0')
multi_sig = ''
lock_length = 60 * 60 * 24 * 14 # 14 days
yearn_reg = '0x50c1a2ea0a861a967d9d0ffe2ae4012c2e053804'
ape_reg = ApeRegistry.deploy(0, {'from':user}, publish_source=True)
ape_factory = ApeVaultFactory.deploy(yearn_reg, ape_reg, {'from':user}, publish_source=True)
ape_router = ApeRouter.deploy(yearn_reg, ape_factory, 0, {'from':user}, publish_source=True)
ape_distro = ApeDistributor.deploy({'from':user}, publish_source=True)
ape_fee = FeeRegistry.deploy({'from':user}, publish_source=True)
setup_protocol(ape_reg, ape_fee, ape_distro, ape_router, ape_factory, user)
min_delay_call = ape_reg.changeMinDelay.encode_input(lock_length)
ape_reg.schedule(ape_reg, min_delay_call, '', '', 0, {'from':user})
ape_fee.schedule(ape_fee, min_delay_call, '', '', 0, {'from':user})
ape_router.schedule(ape_router, min_delay_call, '', '', 0, {'from':user})
ape_reg.execute(ape_reg, min_delay_call, '', '', 0, {'from':user})
ape_fee.execute(ape_fee, min_delay_call, '', '', 0, {'from':user})
ape_router.execute(ape_router, min_delay_call, '', '', 0, {'from':user})
ape_reg.transferOwnership(multi_sig, {'from':user})
ape_fee.transferOwnership(multi_sig, {'from':user})
ape_router.transferOwnership(multi_sig, {'from':user})
def deploy_protocol_testnet():
user = accounts.load('moist', '\0')
multi_sig = user
lock_length = 60 * 60 * 24 * 14 # 14 days
mock_yearn_reg = MockRegistry.deploy({'from':user}, publish_source=True)
mock_yearn_vault_factories = MockVaultFactory.deploy(mock_yearn_reg, {'from':user}, publish_source=True)
mock_ape_reg = ApeRegistry.deploy(0, {'from':user}, publish_source=True)
mock_ape_factory = ApeVaultFactory.deploy(mock_yearn_reg, mock_ape_reg, {'from':user}, publish_source=True)
mock_ape_router = ApeRouter.deploy(mock_yearn_reg, mock_ape_factory, 0, {'from':user}, publish_source=True)
mock_ape_distro = ApeDistributor.deploy({'from':user}, publish_source=True)
mock_ape_fee = FeeRegistry.deploy({'from':user}, publish_source=True)
setup_protocol(mock_ape_reg, mock_ape_fee, mock_ape_distro, mock_ape_router, mock_ape_factory, user)
# setup_mockvaults(mock_yearn_vault_factories, user)
# min_delay_call = mock_ape_reg.changeMinDelay.encode_input(lock_length)
# mock_ape_reg.schedule(mock_ape_reg, min_delay_call, '', '', 0, {'from':user})
# mock_ape_fee.schedule(mock_ape_fee, min_delay_call, '', '', 0, {'from':user})
# mock_ape_router.schedule(mock_ape_router, min_delay_call, '', '', 0, {'from':user})
# mock_ape_reg.execute(mock_ape_reg, min_delay_call, '', '', 0, {'from':user})
# mock_ape_fee.execute(mock_ape_fee, min_delay_call, '', '', 0, {'from':user})
# mock_ape_router.execute(mock_ape_router, min_delay_call, '', '', 0, {'from':user})
# mock_ape_reg.transferOwnership(multi_sig, {'from':user})
# mock_ape_fee.transferOwnership(multi_sig, {'from':user})
# mock_ape_router.transferOwnership(multi_sig, {'from':user})
base_uri = 'https://rinkeby.etherscan.io/address/'
print(f'Mock yearn reg: {base_uri + mock_yearn_reg.address}')
print(f'Mock yearn Vault factory: {base_uri + mock_yearn_vault_factories.address}')
print(f'Mock ape reg: {base_uri + mock_ape_reg.address}')
print(f'Mock ape factory: {base_uri + mock_ape_factory.address}')
print(f'Mock ape router: {base_uri + mock_ape_router.address}')
print(f'Mock ape distro: {base_uri + mock_ape_distro.address}')
print(f'Mock ape fee: {base_uri + mock_ape_fee.address}')
def setup_protocol(ape_reg, ape_fee, ape_distro, ape_router, ape_factory, minter):
set_fee_call = ape_reg.setFeeRegistry.encode_input(ape_fee)
set_router_call = ape_reg.setRouter.encode_input(ape_router)
set_distro_call = ape_reg.setDistributor.encode_input(ape_distro)
set_factory_call = ape_reg.setFactory.encode_input(ape_factory)
set_treasury_call = ape_reg.setTreasury.encode_input(minter)
ape_reg.schedule(ape_reg, set_fee_call, '', '', 0, {'from':minter})
ape_reg.schedule(ape_reg, set_router_call, '', '', 0, {'from':minter})
ape_reg.schedule(ape_reg, set_distro_call, '', '', 0, {'from':minter})
ape_reg.schedule(ape_reg, set_factory_call, '', '', 0, {'from':minter})
ape_reg.schedule(ape_reg, set_treasury_call, '', '', 0, {'from':minter})
ape_reg.execute(ape_reg, set_fee_call, '', '', 0, {'from':minter})
ape_reg.execute(ape_reg, set_router_call, '', '', 0, {'from':minter})
ape_reg.execute(ape_reg, set_distro_call, '', '', 0, {'from':minter})
ape_reg.execute(ape_reg, set_factory_call, '', '', 0, {'from':minter})
ape_reg.execute(ape_reg, set_treasury_call, '', '', 0, {'from':minter})
def setup_mockvaults(mock_yearn_vault_factories, user):
usdc = MockToken.deploy('USD Coin', 'USDC', {'from':user}, publish_source=True)
dai = MockToken.deploy('Dai', 'DAI', {'from':user})
ape = MockToken.deploy('Ape', 'OOH', {'from':user})
tx1 = mock_yearn_vault_factories.createVault(usdc, 'yearnVault USDC', 'yvUSDC', {'from':user})
MockVault.publish_source(tx1.new_contracts[0])
tx2 = mock_yearn_vault_factories.createVault(dai, 'yearnVault DAI', 'yvDAI', {'from':user})
tx3 = mock_yearn_vault_factories.createVault(ape, 'yearnVault Ape', 'yvOOH', {'from':user})
base_uri = 'https://rinkeby.etherscan.io/address/'
print(f'Mock usdc: {base_uri + usdc.address}')
print(f'Mock dai: {base_uri + dai.address}')
print(f'Mock ape token: {base_uri + ape.address}')
print(f'Mock usdc vault: {base_uri + tx1}')
print(f'Mock dai vault: {base_uri + tx2}')
print(f'Mock ape vault: {base_uri + tx3}') | en | 0.531464 | # funds.transfer(to=user, amount='1 ether') # ape = ApeToken.deploy({'from':user}, publish_source=True) # ape.transferOwnership(multi_sig, {'from':user}) # 14 days # 14 days # setup_mockvaults(mock_yearn_vault_factories, user) # min_delay_call = mock_ape_reg.changeMinDelay.encode_input(lock_length) # mock_ape_reg.schedule(mock_ape_reg, min_delay_call, '', '', 0, {'from':user}) # mock_ape_fee.schedule(mock_ape_fee, min_delay_call, '', '', 0, {'from':user}) # mock_ape_router.schedule(mock_ape_router, min_delay_call, '', '', 0, {'from':user}) # mock_ape_reg.execute(mock_ape_reg, min_delay_call, '', '', 0, {'from':user}) # mock_ape_fee.execute(mock_ape_fee, min_delay_call, '', '', 0, {'from':user}) # mock_ape_router.execute(mock_ape_router, min_delay_call, '', '', 0, {'from':user}) # mock_ape_reg.transferOwnership(multi_sig, {'from':user}) # mock_ape_fee.transferOwnership(multi_sig, {'from':user}) # mock_ape_router.transferOwnership(multi_sig, {'from':user}) | 1.975774 | 2 |
groups/views.py | MAKENTNU/web | 10 | 10781 | <filename>groups/views.py
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.urls import reverse_lazy
from django.views.generic import DetailView, ListView, UpdateView
from .models import Committee
class CommitteeList(ListView):
model = Committee
template_name = 'groups/committee_list.html'
context_object_name = 'committees'
class CommitteeDetailView(DetailView):
model = Committee
template_name = 'groups/committee_detail.html'
context_object_name = 'committee'
class EditCommitteeView(PermissionRequiredMixin, UpdateView):
permission_required = ('groups.change_committee',)
model = Committee
fields = ('clickbait', 'description', 'email', 'image')
success_url = reverse_lazy('committee_list')
class CommitteeAdminView(PermissionRequiredMixin, ListView):
permission_required = ('groups.change_committee',)
model = Committee
template_name = 'groups/committee_admin.html'
context_object_name = 'committees'
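# Sketch only: these class-based views would typically be routed in a urls.py along these
# lines. The paths and names below are assumptions, except 'committee_list', which the
# success_url above relies on.
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('committees/', views.CommitteeList.as_view(), name='committee_list'),
#     path('committees/<int:pk>/', views.CommitteeDetailView.as_view(), name='committee_detail'),
#     path('committees/<int:pk>/edit/', views.EditCommitteeView.as_view(), name='committee_edit'),
#     path('committees/admin/', views.CommitteeAdminView.as_view(), name='committee_admin'),
# ]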
| <filename>groups/views.py
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.urls import reverse_lazy
from django.views.generic import DetailView, ListView, UpdateView
from .models import Committee
class CommitteeList(ListView):
model = Committee
template_name = 'groups/committee_list.html'
context_object_name = 'committees'
class CommitteeDetailView(DetailView):
model = Committee
template_name = 'groups/committee_detail.html'
context_object_name = 'committee'
class EditCommitteeView(PermissionRequiredMixin, UpdateView):
permission_required = ('groups.change_committee',)
model = Committee
fields = ('clickbait', 'description', 'email', 'image')
success_url = reverse_lazy('committee_list')
class CommitteeAdminView(PermissionRequiredMixin, ListView):
permission_required = ('groups.change_committee',)
model = Committee
template_name = 'groups/committee_admin.html'
context_object_name = 'committees'
| none | 1 | 2.001575 | 2 |
|
src/programy/braintree.py | motazsaad/fit-bot-fb-clt | 0 | 10782 | <reponame>motazsaad/fit-bot-fb-clt<gh_stars>0
"""
Copyright (c) 2016-2019 <NAME> http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.storage.factory import StorageFactory
from programy.config.brain.braintree import BrainBraintreeConfiguration
class BraintreeManager(object):
def __init__(self, braintree_configuration, admin_user="system"):
assert (braintree_configuration is not None)
assert (isinstance(braintree_configuration, BrainBraintreeConfiguration))
self._configuration = braintree_configuration
self._save_as_user = self._configuration.save_as_user
def dump_brain_tree(self, client_context):
if self._configuration.create is True:
YLogger.debug(self, "Dumping AIML Graph as tree to [%s]", self._configuration.file)
if client_context.client.storage_factory.entity_storage_engine_available(StorageFactory.BRAINTREE) is True:
storage_engine = client_context.client.storage_factory.entity_storage_engine(StorageFactory.BRAINTREE)
braintree_storage = storage_engine.braintree_storage()
braintree_storage.save_braintree(client_context)
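# Sketch only: typical call site for BraintreeManager. The configuration object is a
# BrainBraintreeConfiguration and the client_context comes from the running bot; both are
# assumed to be provided by the surrounding program-y setup, which is not shown here.
#
# manager = BraintreeManager(braintree_config)
# manager.dump_brain_tree(client_context)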
| """
Copyright (c) 2016-2019 <NAME> http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.utils.logging.ylogger import YLogger
from programy.storage.factory import StorageFactory
from programy.config.brain.braintree import BrainBraintreeConfiguration
class BraintreeManager(object):
def __init__(self, braintree_configuration, admin_user="system"):
assert (braintree_configuration is not None)
assert (isinstance(braintree_configuration, BrainBraintreeConfiguration))
self._configuration = braintree_configuration
self._save_as_user = self._configuration.save_as_user
def dump_brain_tree(self, client_context):
if self._configuration.create is True:
YLogger.debug(self, "Dumping AIML Graph as tree to [%s]", self._configuration.file)
if client_context.client.storage_factory.entity_storage_engine_available(StorageFactory.BRAINTREE) is True:
storage_engine = client_context.client.storage_factory.entity_storage_engine(StorageFactory.BRAINTREE)
braintree_storage = storage_engine.braintree_storage()
braintree_storage.save_braintree(client_context) | en | 0.754276 | Copyright (c) 2016-2019 <NAME> http://www.keithsterling.com Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 1.567724 | 2 |
reana_commons/publisher.py | marcdiazsan/reana-commons | 0 | 10783 | # -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2018 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA-Commons module to manage AMQP connections on REANA."""
import json
import logging
from kombu import Connection, Exchange, Queue
from .config import (
MQ_CONNECTION_STRING,
MQ_DEFAULT_EXCHANGE,
MQ_DEFAULT_FORMAT,
MQ_DEFAULT_QUEUES,
MQ_PRODUCER_MAX_RETRIES,
)
class BasePublisher(object):
"""Base publisher to MQ."""
def __init__(
self,
queue,
routing_key,
connection=None,
exchange=None,
durable=False,
max_priority=None,
):
"""Initialise the BasePublisher class.
:param connection: A :class:`kombu.Connection`, if not provided a
:class:`kombu.Connection` with the default configuration will
be instantiated.
:param queue: String which represents the queue the messages will
be sent to.
:param routing_key: String which represents the routing key which
will be used to send the messages, if not provided default
routing key will be used.
:param exchange: A :class:`kombu.Exchange` where the messages will
be delivered to, if not provided, it will be instantiated with
the default configuration.
"""
self._routing_key = routing_key
self._exchange = (
exchange
if isinstance(exchange, Exchange)
else Exchange(name=exchange or MQ_DEFAULT_EXCHANGE, type="direct")
)
self._queue = (
queue
if isinstance(queue, Queue)
else Queue(
queue,
durable=durable,
exchange=self._exchange,
routing_key=self._routing_key,
max_priority=max_priority,
)
)
self._connection = connection or Connection(MQ_CONNECTION_STRING)
self.producer = self._build_producer()
def _build_producer(self):
"""Instantiate a :class:`kombu.Producer`."""
return self._connection.Producer(serializer=MQ_DEFAULT_FORMAT)
def __error_callback(self, exception, interval):
"""Execute when there is an error while sending a message.
:param exception: Exception which has been thrown while trying to send
the message.
:param interval: Interval in which the message delivery will be
retried.
"""
logging.error("Error while publishing {}".format(exception))
logging.info("Retry in %s seconds.", interval)
def _publish(self, msg, priority=None):
"""Publish, handling retries, a message in the queue.
:param msg: Object which represents the message to be sent in
the queue. Note that this object should be serializable in the
configured format (by default JSON).
:param priority: Message priority.
"""
connection = self._connection.clone()
publish = connection.ensure(
self.producer,
self.producer.publish,
errback=self.__error_callback,
max_retries=MQ_PRODUCER_MAX_RETRIES,
)
publish(
json.dumps(msg),
exchange=self._exchange,
routing_key=self._routing_key,
declare=[self._queue],
priority=priority,
)
logging.debug("Publisher: message sent: %s", msg)
def close(self):
"""Close connection."""
logging.debug("Publisher: closing queue connection")
self._connection.release()
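# Sketch only: BasePublisher can be used directly for an ad-hoc queue. The queue name and
# routing key are illustrative, and a broker reachable via MQ_CONNECTION_STRING is assumed.
def _example_base_publisher():
    """Example only: publish one JSON-serialisable message and release the connection."""
    publisher = BasePublisher("example-queue", "example-key")
    publisher._publish({"hello": "world"})
    publisher.close()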
class WorkflowStatusPublisher(BasePublisher):
"""Progress publisher to MQ."""
def __init__(self, **kwargs):
"""Initialise the WorkflowStatusPublisher class."""
queue = "jobs-status"
if "queue" not in kwargs:
kwargs["queue"] = "jobs-status"
if "routing_key" not in kwargs:
kwargs["routing_key"] = MQ_DEFAULT_QUEUES[queue]["routing_key"]
if "durable" not in kwargs:
kwargs["durable"] = MQ_DEFAULT_QUEUES[queue]["durable"]
super(WorkflowStatusPublisher, self).__init__(**kwargs)
def publish_workflow_status(self, workflow_uuid, status, logs="", message=None):
"""Publish workflow status using the configured.
:param workflow_uudid: String which represents the workflow UUID.
:param status: Integer which represents the status of the workflow,
this is defined in the `reana-db` `Workflow` models.
:param logs: String which represents the logs which the workflow
has produced as output.
:param message: Dictionary which includes additional information
can be attached such as the overall progress of the workflow.
"""
msg = {
"workflow_uuid": workflow_uuid,
"logs": logs,
"status": status,
"message": message,
}
self._publish(msg)
class WorkflowSubmissionPublisher(BasePublisher):
"""Workflow submission publisher."""
def __init__(self, **kwargs):
"""Initialise the WorkflowSubmissionPublisher class."""
queue = "workflow-submission"
super(WorkflowSubmissionPublisher, self).__init__(
queue,
MQ_DEFAULT_QUEUES[queue]["routing_key"],
durable=MQ_DEFAULT_QUEUES[queue]["durable"],
max_priority=MQ_DEFAULT_QUEUES[queue]["max_priority"],
**kwargs
)
def publish_workflow_submission(
self, user_id, workflow_id_or_name, parameters, priority=0, min_job_memory=0,
):
"""Publish workflow submission parameters."""
msg = {
"user": user_id,
"workflow_id_or_name": workflow_id_or_name,
"parameters": parameters,
"priority": priority,
"min_job_memory": min_job_memory,
}
self._publish(msg, priority)
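# Sketch only: producer-side usage of the publishers above. The UUID and status code are
# placeholders (REANA's real status codes live in reana-db), and a broker reachable
# through MQ_CONNECTION_STRING is assumed.
def _example_publish_status():
    """Example only: publish a status update, then close the connection."""
    publisher = WorkflowStatusPublisher()
    publisher.publish_workflow_status(
        "00000000-0000-0000-0000-000000000000",  # placeholder workflow UUID
        1,  # placeholder status code
        logs="step finished",
    )
    publisher.close()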
| # -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2018 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""REANA-Commons module to manage AMQP connections on REANA."""
import json
import logging
from kombu import Connection, Exchange, Queue
from .config import (
MQ_CONNECTION_STRING,
MQ_DEFAULT_EXCHANGE,
MQ_DEFAULT_FORMAT,
MQ_DEFAULT_QUEUES,
MQ_PRODUCER_MAX_RETRIES,
)
class BasePublisher(object):
"""Base publisher to MQ."""
def __init__(
self,
queue,
routing_key,
connection=None,
exchange=None,
durable=False,
max_priority=None,
):
"""Initialise the BasePublisher class.
:param connection: A :class:`kombu.Connection`, if not provided a
:class:`kombu.Connection` with the default configuration will
be instantiated.
:param queue: String which represents the queue the messages will
be sent to.
:param routing_key: String which represents the routing key which
will be used to send the messages, if not provided default
routing key will be used.
:param exchange: A :class:`kombu.Exchange` where the messages will
be delivered to, if not provided, it will be instantiated with
the default configuration.
"""
self._routing_key = routing_key
self._exchange = (
exchange
if isinstance(exchange, Exchange)
else Exchange(name=exchange or MQ_DEFAULT_EXCHANGE, type="direct")
)
self._queue = (
queue
if isinstance(queue, Queue)
else Queue(
queue,
durable=durable,
exchange=self._exchange,
routing_key=self._routing_key,
max_priority=max_priority,
)
)
self._connection = connection or Connection(MQ_CONNECTION_STRING)
self.producer = self._build_producer()
def _build_producer(self):
"""Instantiate a :class:`kombu.Producer`."""
return self._connection.Producer(serializer=MQ_DEFAULT_FORMAT)
def __error_callback(self, exception, interval):
"""Execute when there is an error while sending a message.
:param exception: Exception which has been thrown while trying to send
the message.
:param interval: Interval in which the message delivery will be
retried.
"""
logging.error("Error while publishing {}".format(exception))
logging.info("Retry in %s seconds.", interval)
def _publish(self, msg, priority=None):
"""Publish, handling retries, a message in the queue.
:param msg: Object which represents the message to be sent in
the queue. Note that this object should be serializable in the
configured format (by default JSON).
:param priority: Message priority.
"""
connection = self._connection.clone()
publish = connection.ensure(
self.producer,
self.producer.publish,
errback=self.__error_callback,
max_retries=MQ_PRODUCER_MAX_RETRIES,
)
publish(
json.dumps(msg),
exchange=self._exchange,
routing_key=self._routing_key,
declare=[self._queue],
priority=priority,
)
logging.debug("Publisher: message sent: %s", msg)
def close(self):
"""Close connection."""
logging.debug("Publisher: closing queue connection")
self._connection.release()
class WorkflowStatusPublisher(BasePublisher):
"""Progress publisher to MQ."""
def __init__(self, **kwargs):
"""Initialise the WorkflowStatusPublisher class."""
queue = "jobs-status"
if "queue" not in kwargs:
kwargs["queue"] = "jobs-status"
if "routing_key" not in kwargs:
kwargs["routing_key"] = MQ_DEFAULT_QUEUES[queue]["routing_key"]
if "durable" not in kwargs:
kwargs["durable"] = MQ_DEFAULT_QUEUES[queue]["durable"]
super(WorkflowStatusPublisher, self).__init__(**kwargs)
def publish_workflow_status(self, workflow_uuid, status, logs="", message=None):
"""Publish workflow status using the configured.
:param workflow_uudid: String which represents the workflow UUID.
:param status: Integer which represents the status of the workflow,
this is defined in the `reana-db` `Workflow` models.
:param logs: String which represents the logs which the workflow
has produced as output.
:param message: Dictionary which includes additional information
can be attached such as the overall progress of the workflow.
"""
msg = {
"workflow_uuid": workflow_uuid,
"logs": logs,
"status": status,
"message": message,
}
self._publish(msg)
class WorkflowSubmissionPublisher(BasePublisher):
"""Workflow submission publisher."""
def __init__(self, **kwargs):
"""Initialise the WorkflowSubmissionPublisher class."""
queue = "workflow-submission"
super(WorkflowSubmissionPublisher, self).__init__(
queue,
MQ_DEFAULT_QUEUES[queue]["routing_key"],
durable=MQ_DEFAULT_QUEUES[queue]["durable"],
max_priority=MQ_DEFAULT_QUEUES[queue]["max_priority"],
**kwargs
)
def publish_workflow_submission(
self, user_id, workflow_id_or_name, parameters, priority=0, min_job_memory=0,
):
"""Publish workflow submission parameters."""
msg = {
"user": user_id,
"workflow_id_or_name": workflow_id_or_name,
"parameters": parameters,
"priority": priority,
"min_job_memory": min_job_memory,
}
self._publish(msg, priority)
| en | 0.823638 | # -*- coding: utf-8 -*- # # This file is part of REANA. # Copyright (C) 2018 CERN. # # REANA is free software; you can redistribute it and/or modify it # under the terms of the MIT License; see LICENSE file for more details. REANA-Commons module to manage AMQP connections on REANA. Base publisher to MQ. Initialise the BasePublisher class. :param connection: A :class:`kombu.Connection`, if not provided a :class:`kombu.Connection` with the default configuration will be instantiated. :param queue: String which represents the queue the messages will be sent to. :param routing_key: String which represents the routing key which will be used to send the messages, if not provided default routing key will be used. :param exchange: A :class:`kombu.Exchange` where the messages will be delivered to, if not provided, it will be instantiated with the default configuration. Instantiate a :class:`kombu.Producer`. Execute when there is an error while sending a message. :param exception: Exception which has been thrown while trying to send the message. :param interval: Interval in which the message delivery will be retried. Publish, handling retries, a message in the queue. :param msg: Object which represents the message to be sent in the queue. Note that this object should be serializable in the configured format (by default JSON). :param priority: Message priority. Close connection. Progress publisher to MQ. Initialise the WorkflowStatusPublisher class. Publish workflow status using the configured. :param workflow_uudid: String which represents the workflow UUID. :param status: Integer which represents the status of the workflow, this is defined in the `reana-db` `Workflow` models. :param logs: String which represents the logs which the workflow has produced as output. :param message: Dictionary which includes additional information can be attached such as the overall progress of the workflow. Workflow submission publisher. Initialise the WorkflowSubmissionPublisher class. Publish workflow submission parameters. | 2.417658 | 2 |
model/commit.py | uniaim-event-team/pullre-kun | 3 | 10784 | from sqlalchemy import (
BigInteger,
Column,
DateTime,
Text,
String,
Integer,
)
from sqlalchemy.sql.functions import current_timestamp
from model.base import BaseObject
class Commit(BaseObject):
__tablename__ = 'commits'
id = Column(BigInteger, primary_key=True, autoincrement=True)
created_at = Column(DateTime, default=current_timestamp(), nullable=False)
updated_at = Column(DateTime, default=current_timestamp(), onupdate=current_timestamp(), nullable=False)
sha = Column(String(40), unique=True, nullable=False)
message = Column(Text)
parent_a = Column(String(40))
parent_b = Column(String(40))
production_reported = Column(Integer)
class Issue(BaseObject):
__tablename__ = 'issues'
id = Column(BigInteger, primary_key=True, autoincrement=True)
created_at = Column(DateTime, default=current_timestamp(), nullable=False)
updated_at = Column(DateTime, default=current_timestamp(), onupdate=current_timestamp(), nullable=False)
number = Column(Integer, unique=True, nullable=False)
state = Column(String(10))
title = Column(Text)
body = Column(Text)
labels = Column(String(128))
assignee = Column(String(128))
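# Sketch only: how these models might be used with a SQLAlchemy session. Session creation
# and the BaseObject/metadata wiring live in model.base, which is not shown here.
#
# commit = Commit(sha='0' * 40, message='initial commit', production_reported=0)
# session.add(commit)
# session.commit()
# open_issues = session.query(Issue).filter(Issue.state == 'open').all()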
| from sqlalchemy import (
BigInteger,
Column,
DateTime,
Text,
String,
Integer,
)
from sqlalchemy.sql.functions import current_timestamp
from model.base import BaseObject
class Commit(BaseObject):
__tablename__ = 'commits'
id = Column(BigInteger, primary_key=True, autoincrement=True)
created_at = Column(DateTime, default=current_timestamp(), nullable=False)
updated_at = Column(DateTime, default=current_timestamp(), onupdate=current_timestamp(), nullable=False)
sha = Column(String(40), unique=True, nullable=False)
message = Column(Text)
parent_a = Column(String(40))
parent_b = Column(String(40))
production_reported = Column(Integer)
class Issue(BaseObject):
__tablename__ = 'issues'
id = Column(BigInteger, primary_key=True, autoincrement=True)
created_at = Column(DateTime, default=current_timestamp(), nullable=False)
updated_at = Column(DateTime, default=current_timestamp(), onupdate=current_timestamp(), nullable=False)
number = Column(Integer, unique=True, nullable=False)
state = Column(String(10))
title = Column(Text)
body = Column(Text)
labels = Column(String(128))
assignee = Column(String(128))
| none | 1 | 2.465212 | 2 |
|
drae/__init__.py | hso/drae.py | 0 | 10785 | <reponame>hso/drae.py
from drae import search
| from drae import search | none | 1 | 0.95013 | 1 |
|
tests/components/template/test_select.py | JeffersonBledsoe/core | 5 | 10786 | """The tests for the Template select platform."""
import pytest
from homeassistant import setup
from homeassistant.components.input_select import (
ATTR_OPTION as INPUT_SELECT_ATTR_OPTION,
ATTR_OPTIONS as INPUT_SELECT_ATTR_OPTIONS,
DOMAIN as INPUT_SELECT_DOMAIN,
SERVICE_SELECT_OPTION as INPUT_SELECT_SERVICE_SELECT_OPTION,
SERVICE_SET_OPTIONS,
)
from homeassistant.components.select.const import (
ATTR_OPTION as SELECT_ATTR_OPTION,
ATTR_OPTIONS as SELECT_ATTR_OPTIONS,
DOMAIN as SELECT_DOMAIN,
SERVICE_SELECT_OPTION as SELECT_SERVICE_SELECT_OPTION,
)
from homeassistant.const import ATTR_ICON, CONF_ENTITY_ID, STATE_UNKNOWN
from homeassistant.core import Context
from homeassistant.helpers.entity_registry import async_get
from tests.common import (
assert_setup_component,
async_capture_events,
async_mock_service,
)
_TEST_SELECT = "select.template_select"
# Input select entity that backs the template select's current_option
_OPTION_INPUT_SELECT = "input_select.option"
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_missing_optional_config(hass, calls):
"""Test: missing optional template is ok."""
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"select": {
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
}
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
_verify(hass, "a", ["a", "b"])
async def test_multiple_configs(hass, calls):
"""Test: multiple select entities get created."""
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"select": [
{
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
},
{
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
},
]
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
_verify(hass, "a", ["a", "b"])
_verify(hass, "a", ["a", "b"], f"{_TEST_SELECT}_2")
async def test_missing_required_keys(hass, calls):
"""Test: missing required fields will fail."""
with assert_setup_component(0, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"select": {
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
}
}
},
)
with assert_setup_component(0, "select"):
assert await setup.async_setup_component(
hass,
"select",
{
"template": {
"select": {
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
}
}
},
)
with assert_setup_component(0, "select"):
assert await setup.async_setup_component(
hass,
"select",
{
"template": {
"select": {
"state": "{{ 'a' }}",
"options": "{{ ['a', 'b'] }}",
}
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.async_all("select") == []
async def test_templates_with_entities(hass, calls):
"""Test templates with values from other entities."""
with assert_setup_component(1, "input_select"):
assert await setup.async_setup_component(
hass,
"input_select",
{
"input_select": {
"option": {
"options": ["a", "b"],
"initial": "a",
"name": "Option",
},
}
},
)
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"unique_id": "b",
"select": {
"state": f"{{{{ states('{_OPTION_INPUT_SELECT}') }}}}",
"options": f"{{{{ state_attr('{_OPTION_INPUT_SELECT}', '{INPUT_SELECT_ATTR_OPTIONS}') }}}}",
"select_option": {
"service": "input_select.select_option",
"data_template": {
"entity_id": _OPTION_INPUT_SELECT,
"option": "{{ option }}",
},
},
"optimistic": True,
"unique_id": "a",
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
ent_reg = async_get(hass)
entry = ent_reg.async_get(_TEST_SELECT)
assert entry
assert entry.unique_id == "b-a"
_verify(hass, "a", ["a", "b"])
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "b"},
blocking=True,
)
await hass.async_block_till_done()
_verify(hass, "b", ["a", "b"])
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
SERVICE_SET_OPTIONS,
{
CONF_ENTITY_ID: _OPTION_INPUT_SELECT,
INPUT_SELECT_ATTR_OPTIONS: ["a", "b", "c"],
},
blocking=True,
)
await hass.async_block_till_done()
_verify(hass, "a", ["a", "b", "c"])
await hass.services.async_call(
SELECT_DOMAIN,
SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _TEST_SELECT, SELECT_ATTR_OPTION: "c"},
blocking=True,
)
_verify(hass, "c", ["a", "b", "c"])
async def test_trigger_select(hass):
"""Test trigger based template select."""
events = async_capture_events(hass, "test_number_event")
assert await setup.async_setup_component(
hass,
"template",
{
"template": [
{"invalid": "config"},
# Config after invalid should still be set up
{
"unique_id": "listening-test-event",
"trigger": {"platform": "event", "event_type": "test_event"},
"select": [
{
"name": "<NAME>",
"unique_id": "hello_name-id",
"state": "{{ trigger.event.data.beer }}",
"options": "{{ trigger.event.data.beers }}",
"select_option": {"event": "test_number_event"},
"optimistic": True,
},
],
},
],
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("select.hello_name")
assert state is not None
assert state.state == STATE_UNKNOWN
context = Context()
hass.bus.async_fire(
"test_event", {"beer": "duff", "beers": ["duff", "alamo"]}, context=context
)
await hass.async_block_till_done()
state = hass.states.get("select.hello_name")
assert state is not None
assert state.state == "duff"
assert state.attributes["options"] == ["duff", "alamo"]
await hass.services.async_call(
SELECT_DOMAIN,
SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: "select.hello_name", SELECT_ATTR_OPTION: "alamo"},
blocking=True,
)
assert len(events) == 1
assert events[0].event_type == "test_number_event"
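# Trigger-based variant exercised above, written as YAML (sketch; mirrors the
# valid entry of the config dict in test_trigger_select, nothing more):
#
#   template:
#     - trigger:
#         platform: event
#         event_type: test_event
#       select:
#         - state: "{{ trigger.event.data.beer }}"
#           options: "{{ trigger.event.data.beers }}"
#           select_option:
#             event: test_number_event
#           optimistic: true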
def _verify(hass, expected_current_option, expected_options, entity_name=_TEST_SELECT):
"""Verify select's state."""
state = hass.states.get(entity_name)
attributes = state.attributes
assert state.state == str(expected_current_option)
assert attributes.get(SELECT_ATTR_OPTIONS) == expected_options
async def test_template_icon_with_entities(hass, calls):
"""Test templates with values from other entities."""
with assert_setup_component(1, "input_select"):
assert await setup.async_setup_component(
hass,
"input_select",
{
"input_select": {
"option": {
"options": ["a", "b"],
"initial": "a",
"name": "Option",
},
}
},
)
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"unique_id": "b",
"select": {
"state": f"{{{{ states('{_OPTION_INPUT_SELECT}') }}}}",
"options": f"{{{{ state_attr('{_OPTION_INPUT_SELECT}', '{INPUT_SELECT_ATTR_OPTIONS}') }}}}",
"select_option": {
"service": "input_select.select_option",
"data": {
"entity_id": _OPTION_INPUT_SELECT,
"option": "{{ option }}",
},
},
"optimistic": True,
"unique_id": "a",
"icon": f"{{% if (states('{_OPTION_INPUT_SELECT}') == 'a') %}}mdi:greater{{% else %}}mdi:less{{% endif %}}",
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state.state == "a"
assert state.attributes[ATTR_ICON] == "mdi:greater"
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "b"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state.state == "b"
assert state.attributes[ATTR_ICON] == "mdi:less"
async def test_template_icon_with_trigger(hass):
"""Test trigger based template select."""
with assert_setup_component(1, "input_select"):
assert await setup.async_setup_component(
hass,
"input_select",
{
"input_select": {
"option": {
"options": ["a", "b"],
"initial": "a",
"name": "Option",
},
}
},
)
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"trigger": {"platform": "state", "entity_id": _OPTION_INPUT_SELECT},
"select": {
"unique_id": "b",
"state": "{{ trigger.to_state.state }}",
"options": f"{{{{ state_attr('{_OPTION_INPUT_SELECT}', '{INPUT_SELECT_ATTR_OPTIONS}') }}}}",
"select_option": {
"service": "input_select.select_option",
"data": {
"entity_id": _OPTION_INPUT_SELECT,
"option": "{{ option }}",
},
},
"optimistic": True,
"icon": "{% if (trigger.to_state.state or '') == 'a' %}mdi:greater{% else %}mdi:less{% endif %}",
},
},
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "b"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state is not None
assert state.state == "b"
assert state.attributes[ATTR_ICON] == "mdi:less"
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "a"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state.state == "a"
assert state.attributes[ATTR_ICON] == "mdi:greater"
| """The tests for the Template select platform."""
import pytest
from homeassistant import setup
from homeassistant.components.input_select import (
ATTR_OPTION as INPUT_SELECT_ATTR_OPTION,
ATTR_OPTIONS as INPUT_SELECT_ATTR_OPTIONS,
DOMAIN as INPUT_SELECT_DOMAIN,
SERVICE_SELECT_OPTION as INPUT_SELECT_SERVICE_SELECT_OPTION,
SERVICE_SET_OPTIONS,
)
from homeassistant.components.select.const import (
ATTR_OPTION as SELECT_ATTR_OPTION,
ATTR_OPTIONS as SELECT_ATTR_OPTIONS,
DOMAIN as SELECT_DOMAIN,
SERVICE_SELECT_OPTION as SELECT_SERVICE_SELECT_OPTION,
)
from homeassistant.const import ATTR_ICON, CONF_ENTITY_ID, STATE_UNKNOWN
from homeassistant.core import Context
from homeassistant.helpers.entity_registry import async_get
from tests.common import (
assert_setup_component,
async_capture_events,
async_mock_service,
)
_TEST_SELECT = "select.template_select"
# Represent for select's current_option
_OPTION_INPUT_SELECT = "input_select.option"
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_missing_optional_config(hass, calls):
"""Test: missing optional template is ok."""
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"select": {
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
}
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
_verify(hass, "a", ["a", "b"])
async def test_multiple_configs(hass, calls):
"""Test: multiple select entities get created."""
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"select": [
{
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
},
{
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
},
]
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
_verify(hass, "a", ["a", "b"])
_verify(hass, "a", ["a", "b"], f"{_TEST_SELECT}_2")
async def test_missing_required_keys(hass, calls):
"""Test: missing required fields will fail."""
with assert_setup_component(0, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"select": {
"select_option": {"service": "script.select_option"},
"options": "{{ ['a', 'b'] }}",
}
}
},
)
with assert_setup_component(0, "select"):
assert await setup.async_setup_component(
hass,
"select",
{
"template": {
"select": {
"state": "{{ 'a' }}",
"select_option": {"service": "script.select_option"},
}
}
},
)
with assert_setup_component(0, "select"):
assert await setup.async_setup_component(
hass,
"select",
{
"template": {
"select": {
"state": "{{ 'a' }}",
"options": "{{ ['a', 'b'] }}",
}
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
assert hass.states.async_all("select") == []
async def test_templates_with_entities(hass, calls):
"""Test templates with values from other entities."""
with assert_setup_component(1, "input_select"):
assert await setup.async_setup_component(
hass,
"input_select",
{
"input_select": {
"option": {
"options": ["a", "b"],
"initial": "a",
"name": "Option",
},
}
},
)
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"unique_id": "b",
"select": {
"state": f"{{{{ states('{_OPTION_INPUT_SELECT}') }}}}",
"options": f"{{{{ state_attr('{_OPTION_INPUT_SELECT}', '{INPUT_SELECT_ATTR_OPTIONS}') }}}}",
"select_option": {
"service": "input_select.select_option",
"data_template": {
"entity_id": _OPTION_INPUT_SELECT,
"option": "{{ option }}",
},
},
"optimistic": True,
"unique_id": "a",
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
ent_reg = async_get(hass)
entry = ent_reg.async_get(_TEST_SELECT)
assert entry
assert entry.unique_id == "b-a"
_verify(hass, "a", ["a", "b"])
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "b"},
blocking=True,
)
await hass.async_block_till_done()
_verify(hass, "b", ["a", "b"])
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
SERVICE_SET_OPTIONS,
{
CONF_ENTITY_ID: _OPTION_INPUT_SELECT,
INPUT_SELECT_ATTR_OPTIONS: ["a", "b", "c"],
},
blocking=True,
)
await hass.async_block_till_done()
_verify(hass, "a", ["a", "b", "c"])
await hass.services.async_call(
SELECT_DOMAIN,
SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _TEST_SELECT, SELECT_ATTR_OPTION: "c"},
blocking=True,
)
_verify(hass, "c", ["a", "b", "c"])
async def test_trigger_select(hass):
"""Test trigger based template select."""
events = async_capture_events(hass, "test_number_event")
assert await setup.async_setup_component(
hass,
"template",
{
"template": [
{"invalid": "config"},
# Config after invalid should still be set up
{
"unique_id": "listening-test-event",
"trigger": {"platform": "event", "event_type": "test_event"},
"select": [
{
"name": "<NAME>",
"unique_id": "hello_name-id",
"state": "{{ trigger.event.data.beer }}",
"options": "{{ trigger.event.data.beers }}",
"select_option": {"event": "test_number_event"},
"optimistic": True,
},
],
},
],
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get("select.hello_name")
assert state is not None
assert state.state == STATE_UNKNOWN
context = Context()
hass.bus.async_fire(
"test_event", {"beer": "duff", "beers": ["duff", "alamo"]}, context=context
)
await hass.async_block_till_done()
state = hass.states.get("select.hello_name")
assert state is not None
assert state.state == "duff"
assert state.attributes["options"] == ["duff", "alamo"]
await hass.services.async_call(
SELECT_DOMAIN,
SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: "select.hello_name", SELECT_ATTR_OPTION: "alamo"},
blocking=True,
)
assert len(events) == 1
assert events[0].event_type == "test_number_event"
def _verify(hass, expected_current_option, expected_options, entity_name=_TEST_SELECT):
"""Verify select's state."""
state = hass.states.get(entity_name)
attributes = state.attributes
assert state.state == str(expected_current_option)
assert attributes.get(SELECT_ATTR_OPTIONS) == expected_options
async def test_template_icon_with_entities(hass, calls):
"""Test templates with values from other entities."""
with assert_setup_component(1, "input_select"):
assert await setup.async_setup_component(
hass,
"input_select",
{
"input_select": {
"option": {
"options": ["a", "b"],
"initial": "a",
"name": "Option",
},
}
},
)
with assert_setup_component(1, "template"):
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"unique_id": "b",
"select": {
"state": f"{{{{ states('{_OPTION_INPUT_SELECT}') }}}}",
"options": f"{{{{ state_attr('{_OPTION_INPUT_SELECT}', '{INPUT_SELECT_ATTR_OPTIONS}') }}}}",
"select_option": {
"service": "input_select.select_option",
"data": {
"entity_id": _OPTION_INPUT_SELECT,
"option": "{{ option }}",
},
},
"optimistic": True,
"unique_id": "a",
"icon": f"{{% if (states('{_OPTION_INPUT_SELECT}') == 'a') %}}mdi:greater{{% else %}}mdi:less{{% endif %}}",
},
}
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state.state == "a"
assert state.attributes[ATTR_ICON] == "mdi:greater"
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "b"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state.state == "b"
assert state.attributes[ATTR_ICON] == "mdi:less"
async def test_template_icon_with_trigger(hass):
"""Test trigger based template select."""
with assert_setup_component(1, "input_select"):
assert await setup.async_setup_component(
hass,
"input_select",
{
"input_select": {
"option": {
"options": ["a", "b"],
"initial": "a",
"name": "Option",
},
}
},
)
assert await setup.async_setup_component(
hass,
"template",
{
"template": {
"trigger": {"platform": "state", "entity_id": _OPTION_INPUT_SELECT},
"select": {
"unique_id": "b",
"state": "{{ trigger.to_state.state }}",
"options": f"{{{{ state_attr('{_OPTION_INPUT_SELECT}', '{INPUT_SELECT_ATTR_OPTIONS}') }}}}",
"select_option": {
"service": "input_select.select_option",
"data": {
"entity_id": _OPTION_INPUT_SELECT,
"option": "{{ option }}",
},
},
"optimistic": True,
"icon": "{% if (trigger.to_state.state or '') == 'a' %}mdi:greater{% else %}mdi:less{% endif %}",
},
},
},
)
await hass.async_block_till_done()
await hass.async_start()
await hass.async_block_till_done()
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "b"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state is not None
assert state.state == "b"
assert state.attributes[ATTR_ICON] == "mdi:less"
await hass.services.async_call(
INPUT_SELECT_DOMAIN,
INPUT_SELECT_SERVICE_SELECT_OPTION,
{CONF_ENTITY_ID: _OPTION_INPUT_SELECT, INPUT_SELECT_ATTR_OPTION: "a"},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(_TEST_SELECT)
assert state.state == "a"
assert state.attributes[ATTR_ICON] == "mdi:greater"
| en | 0.814746 | The tests for the Template select platform. # Represent for select's current_option Track calls to a mock service. Test: missing optional template is ok. Test: multiple select entities get created. Test: missing required fields will fail. Test templates with values from other entities. Test trigger based template select. # Config after invalid should still be set up Verify select's state. Test templates with values from other entities. Test trigger based template select. | 2.20898 | 2 |
corefacility/core/test/models/test_application_access.py | serik1987/corefacility | 0 | 10787 | import os
import random
import string
import base64
from django.utils import timezone
from django.contrib.auth.hashers import make_password, check_password
from django.test import TestCase
from parameterized import parameterized
from core.models import Module, EntryPoint, ExternalAuthorizationSession, User
AUTHORIZATION_MODULE_LIST = ["ihna", "google", "mailru"]
class TestApplicationProcess(TestCase):
PASSWORD_LENGTH = 25
auth_sessions = None
uuid_list = None
@classmethod
def setUpTestData(cls):
cls.auth_sessions = {}
cls.session_keys = {}
user = User(login="sergei.kozhukhov")
user.save()
for module in AUTHORIZATION_MODULE_LIST:
            password = cls.generate_random_password()
password_hash = make_password(password)
module_app = Module.objects.get(parent_entry_point__alias="authorizations", alias=module)
session = ExternalAuthorizationSession(
authorization_module=module_app,
session_key=password_hash,
session_key_expiry_date=timezone.now()
)
session.save()
session_key = base64.encodebytes((str(session.id) + ":" + password).encode("utf-8")).decode("utf-8")
cls.auth_sessions[module] = session_key
Account = cls.get_account_class(module)
Account(user=user, email="<EMAIL>").save()
cls.uuid_list = {}
for apps_used in ['imaging', 'roi']:
cls.uuid_list[apps_used] = Module.objects.get(alias=apps_used).uuid
@parameterized.expand([
(["core", "authorizations"], [
("standard", None),
("ihna", "<div class='auth ihna'></div>"),
("google", "<div class='auth google'></div>"),
("mailru", "<div class='auth mailru'></div>"),
("unix", None),
("cookie", None),
("password_recovery", None),
("auto", None),
]),
(["core", "synchronizations"], [
("ihna_employees", None),
]),
(["core", "projects"], [
("imaging", None),
]),
(["core", "projects", "imaging", "processors"], [
("roi", None),
]),
])
def test_widgets_show(self, route, expected_widget_list):
app = None
entry_point = None
current_route = list(route)
current_look = "app"
while len(current_route) > 0:
route_element = current_route.pop(0)
if current_look == "app":
app = Module.objects.get(alias=route_element, parent_entry_point=entry_point)
current_look = "ep"
elif current_look == "ep":
entry_point = EntryPoint.objects.get(alias=route_element, belonging_module=app)
current_look = "app"
self.assertEquals(current_look, "app")
values = Module.objects.filter(parent_entry_point=entry_point).values("alias", "html_code")
self.assertEquals(len(values), len(expected_widget_list),
"Number of modules attached to this entry point is not the same as expected")
for value in values:
alias = value['alias']
html_code = value['html_code']
expected_widget_found = False
for expected_alias, expected_widget in expected_widget_list:
if expected_alias == alias:
expected_widget_found = True
if html_code is not None and expected_widget is None:
self.fail("HTML code for module '%s' does not exist but expected" % alias)
if html_code is None and expected_widget is not None:
self.fail("HTML code for module '%s' exists but not expected" % alias)
if html_code is not None and expected_widget is not None:
self.assertHTMLEqual(html_code, expected_widget,
"HTML code for module '%s' is not the same as expected" % html_code)
break
self.assertTrue(expected_widget_found, "the module '%s' is not within the list of expected modules" %
alias)
@parameterized.expand([
("standard", "core.authorizations.StandardAuthorization"),
("ihna", "authorizations.ihna.App"),
("google", "authorizations.google.App"),
("mailru", "authorizations.mailru.App"),
("unix", "core.authorizations.UnixAuthorization"),
("cookie", "authorizations.cookie.App"),
("password_recovery", "core.authorizations.PasswordRecoveryAuthorization"),
("auto", "core.authorizations.AutomaticAuthorization"),
])
def test_authorization_modules(self, alias, expected_authorization_module):
authorization_app = Module.objects.get(parent_entry_point__alias="authorizations", alias=alias)
authorization_module = authorization_app.app_class
self.assertEquals(authorization_module, expected_authorization_module)
def test_authorization_sessions(self):
for module, session_key in self.auth_sessions.items():
session_info = base64.decodebytes(session_key.encode("utf-8")).decode("utf-8")
session_id, session_password = session_info.split(":", 1)
session = ExternalAuthorizationSession.objects.get(authorization_module__alias=module, id=session_id)
stored_password_hash = session.session_key
self.assertTrue(check_password(session_password, stored_password_hash))
module_class = session.authorization_module.app_class
session.delete()
            self.assertEquals(module_class.split('.')[1], module)
def test_find_user(self):
for module in AUTHORIZATION_MODULE_LIST:
account_class = self.get_account_class(module)
account = account_class.objects.get(email="<EMAIL>")
self.assertEquals(account.user.login, "sergei.kozhukhov")
def test_account_contigency(self):
for module in AUTHORIZATION_MODULE_LIST:
self.assertEquals(self.get_account_class(module).objects.count(), 1)
User.objects.get(login="sergei.kozhukhov").delete()
for module in AUTHORIZATION_MODULE_LIST:
self.assertEquals(self.get_account_class(module).objects.count(), 0)
def test_access_by_uuid(self):
for module_name, uuid in self.uuid_list.items():
module_class = Module.objects.get(uuid=uuid).app_class
actual_module_name, module_class = module_class.split('.')
self.assertEquals(actual_module_name, module_name)
self.assertEquals(module_class, "App")
@classmethod
def generate_random_password(cls):
chars = string.ascii_letters + string.digits + '!@#$%^&*()'
        random.seed(os.urandom(1024))
return ''.join(random.choice(chars) for i in range(cls.PASSWORD_LENGTH))
@classmethod
def get_account_class(cls, module):
import authorizations
auth_module = getattr(authorizations, module)
return auth_module.models.Account
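# Session-key scheme used by setUpTestData/test_authorization_sessions, sketched
# for clarity (values are illustrative, the helpers are the ones imported above):
#   raw = str(session.id) + ":" + plain_password          # e.g. "7:s3cr3t"
#   client_token = base64.encodebytes(raw.encode("utf-8")).decode("utf-8")
# and on the way back:
#   session_id, plain = base64.decodebytes(client_token.encode("utf-8")).decode("utf-8").split(":", 1)
#   check_password(plain, session.session_key)            # True when the hash matches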
| import os
import random
import string
import base64
from django.utils import timezone
from django.contrib.auth.hashers import make_password, check_password
from django.test import TestCase
from parameterized import parameterized
from core.models import Module, EntryPoint, ExternalAuthorizationSession, User
AUTHORIZATION_MODULE_LIST = ["ihna", "google", "mailru"]
class TestApplicationProcess(TestCase):
PASSWORD_LENGTH = 25
auth_sessions = None
uuid_list = None
@classmethod
def setUpTestData(cls):
cls.auth_sessions = {}
cls.session_keys = {}
user = User(login="sergei.kozhukhov")
user.save()
for module in AUTHORIZATION_MODULE_LIST:
            password = cls.generate_random_password()
password_hash = make_password(password)
module_app = Module.objects.get(parent_entry_point__alias="authorizations", alias=module)
session = ExternalAuthorizationSession(
authorization_module=module_app,
session_key=password_hash,
session_key_expiry_date=timezone.now()
)
session.save()
session_key = base64.encodebytes((str(session.id) + ":" + password).encode("utf-8")).decode("utf-8")
cls.auth_sessions[module] = session_key
Account = cls.get_account_class(module)
Account(user=user, email="<EMAIL>").save()
cls.uuid_list = {}
for apps_used in ['imaging', 'roi']:
cls.uuid_list[apps_used] = Module.objects.get(alias=apps_used).uuid
@parameterized.expand([
(["core", "authorizations"], [
("standard", None),
("ihna", "<div class='auth ihna'></div>"),
("google", "<div class='auth google'></div>"),
("mailru", "<div class='auth mailru'></div>"),
("unix", None),
("cookie", None),
("password_recovery", None),
("auto", None),
]),
(["core", "synchronizations"], [
("ihna_employees", None),
]),
(["core", "projects"], [
("imaging", None),
]),
(["core", "projects", "imaging", "processors"], [
("roi", None),
]),
])
def test_widgets_show(self, route, expected_widget_list):
app = None
entry_point = None
current_route = list(route)
current_look = "app"
while len(current_route) > 0:
route_element = current_route.pop(0)
if current_look == "app":
app = Module.objects.get(alias=route_element, parent_entry_point=entry_point)
current_look = "ep"
elif current_look == "ep":
entry_point = EntryPoint.objects.get(alias=route_element, belonging_module=app)
current_look = "app"
self.assertEquals(current_look, "app")
values = Module.objects.filter(parent_entry_point=entry_point).values("alias", "html_code")
self.assertEquals(len(values), len(expected_widget_list),
"Number of modules attached to this entry point is not the same as expected")
for value in values:
alias = value['alias']
html_code = value['html_code']
expected_widget_found = False
for expected_alias, expected_widget in expected_widget_list:
if expected_alias == alias:
expected_widget_found = True
if html_code is not None and expected_widget is None:
self.fail("HTML code for module '%s' does not exist but expected" % alias)
if html_code is None and expected_widget is not None:
self.fail("HTML code for module '%s' exists but not expected" % alias)
if html_code is not None and expected_widget is not None:
self.assertHTMLEqual(html_code, expected_widget,
"HTML code for module '%s' is not the same as expected" % html_code)
break
self.assertTrue(expected_widget_found, "the module '%s' is not within the list of expected modules" %
alias)
@parameterized.expand([
("standard", "core.authorizations.StandardAuthorization"),
("ihna", "authorizations.ihna.App"),
("google", "authorizations.google.App"),
("mailru", "authorizations.mailru.App"),
("unix", "core.authorizations.UnixAuthorization"),
("cookie", "authorizations.cookie.App"),
("password_recovery", "core.authorizations.PasswordRecoveryAuthorization"),
("auto", "core.authorizations.AutomaticAuthorization"),
])
def test_authorization_modules(self, alias, expected_authorization_module):
authorization_app = Module.objects.get(parent_entry_point__alias="authorizations", alias=alias)
authorization_module = authorization_app.app_class
self.assertEquals(authorization_module, expected_authorization_module)
def test_authorization_sessions(self):
for module, session_key in self.auth_sessions.items():
session_info = base64.decodebytes(session_key.encode("utf-8")).decode("utf-8")
session_id, session_password = session_info.split(":", 1)
session = ExternalAuthorizationSession.objects.get(authorization_module__alias=module, id=session_id)
stored_password_hash = session.session_key
self.assertTrue(check_password(session_password, stored_password_hash))
module_class = session.authorization_module.app_class
session.delete()
            self.assertEquals(module_class.split('.')[1], module)
def test_find_user(self):
for module in AUTHORIZATION_MODULE_LIST:
account_class = self.get_account_class(module)
account = account_class.objects.get(email="<EMAIL>")
self.assertEquals(account.user.login, "sergei.kozhukhov")
def test_account_contigency(self):
for module in AUTHORIZATION_MODULE_LIST:
self.assertEquals(self.get_account_class(module).objects.count(), 1)
User.objects.get(login="sergei.kozhukhov").delete()
for module in AUTHORIZATION_MODULE_LIST:
self.assertEquals(self.get_account_class(module).objects.count(), 0)
def test_access_by_uuid(self):
for module_name, uuid in self.uuid_list.items():
module_class = Module.objects.get(uuid=uuid).app_class
actual_module_name, module_class = module_class.split('.')
self.assertEquals(actual_module_name, module_name)
self.assertEquals(module_class, "App")
@classmethod
def generate_random_password(cls):
chars = string.ascii_letters + string.digits + '!@#$%^&*()'
        random.seed(os.urandom(1024))
return ''.join(random.choice(chars) for i in range(cls.PASSWORD_LENGTH))
@classmethod
def get_account_class(cls, module):
import authorizations
auth_module = getattr(authorizations, module)
return auth_module.models.Account
| none | 1 | 2.169687 | 2 |
|
cmsfix/lib/macro.py | trmznt/cmsfix | 0 | 10788 | <filename>cmsfix/lib/macro.py
from rhombus.lib.utils import get_dbhandler
from rhombus.lib.tags import *
from cmsfix.models.node import Node
import re
# the pattern below is either
# ///123
# <<MacroName>>
# [[MacroName]]
pattern = re.compile('///(\d+)|///\{([\w-]+)\}|\<\<(.+)\>\>|\[\[(.+)\]\]')
# syntax for Macro is:
# [[MacroName|option1|option2|option3]]
class MacroError(RuntimeError):
pass
def postrender(buffer, node, request):
""" return a new buffer """
dbh = get_dbhandler()
nb = ''
start_pos = 0
for m in pattern.finditer(buffer):
nb += buffer[start_pos:m.start()]
group = m.group()
print(group)
if group.startswith('///'):
nb += node_link(group, dbh)
elif group.startswith('[['):
nb += run_macro(group, node, dbh, request)
else:
nb += '{{ ERR: macro pattern unprocessed }}'
start_pos = m.end()
nb += buffer[start_pos:]
return nb
def postedit(content, node):
""" post edit the content, return a new modified content """
dbh = get_dbhandler()
nc = ''
start_pos = 0
for m in pattern.finditer(content):
nc += content[start_pos:m.start()]
group = m.group()
if group.startswith('///'):
if group[3] != '{':
# convert to UUID
node = dbh.get_node_by_id(int(group[3:]))
nc += ('///{' + str(node.uuid) + '}' if node else group)
else:
nc += group
else:
nc += group
start_pos = m.end()
nc += content[start_pos:]
return nc
def node_link(text, dbh):
try:
if text[3] == '{':
node = dbh.get_nodes_by_uuids(text[4:-1])
else:
node = dbh.get_node_by_id(int(text[3:]))
except:
node = None
if node is None:
return literal('<b>%s</b>' % text)
return literal('<a href="/%s">%s</a>' % (node.url, node.title))
def run_macro(text, node, dbh, request):
global _MACROS_
text = text[2:-2]
components = text.split('|')
macro_name = components[0]
if macro_name not in _MACROS_:
return '[[ERR - macro %s not found]]' % macro_name
try:
return _MACROS_[macro_name](node, components[1:], request)
except MacroError as m_err:
return '[[%s ERR: %s]]' % (macro_name, m_err)
_MACROS_ = {}
def macro(func):
global _MACROS_
macro_name = func.__name__
if not macro_name.startswith('M_'):
raise RuntimeError('function name does not start with M_')
_MACROS_[macro_name[2:]] = func
return func
def macro_dict():
return _MACROS_
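# Registering a new macro follows the decorator convention below (sketch with a
# hypothetical macro name; the function name must start with M_ and the macro
# should return an html or literal object):
#   @macro
#   def M_HelloWorld(node, components, request):
#       return literal('<p>Hello from %s</p>' % (node.title or node.slug))
# which then becomes available in content as [[HelloWorld]].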
## -- MACRO --
##
## all macro functions should return either html or literal objects
##
@macro
def M_ListChildNodes(node, components, request):
""" Create list of child nodes.
[[ListChildNodes|option|option|..]]
Options:
type=Nodetype(PageNode,JournalNode, etc)
order=[+-]slug/id/mtime/title
Example:
[[ListChildNodes|type=PageNode|order=+title]]
"""
nodetype=[]
children = node.children
for c in components:
if c.startswith('type='):
nodetype.append( c[5:] )
elif c.startswith('order='):
order = c[6:].strip().lower()
desc = False
if order[0] == '-':
desc = True
order = order[1:]
elif order[0] == '+':
order = order[1:]
# we cancel the default ordering first
children = node.children.order_by(None)
if order == 'slug':
if desc: children = children.order_by(Node.slug.desc())
else: children = children.order_by(Node.slug)
elif order == 'id':
if desc: children = children.order_by(Node.id.desc())
else: children = children.order_by(Node.id)
elif order == 'mtime':
if desc: children = children.order_by(Node.stamp.desc())
else: children = children.order_by(Node.stamp)
elif order == 'title':
children_list = sorted( [(n.title or n.slug, n) for n in children.all()],
reverse = desc)
children = [n for (k, n) in children_list]
else:
raise MacroError("unknown order option: %s" % order )
html = div()
toc = ul()
if not nodetype:
nodetype.append( 'PageNode' )
for c in children:
if c.__class__.__name__ in nodetype:
toc.add(
li(a(c.title, href=c.path))
)
html.add(toc)
return html
@macro
def M_Img(node, components, request):
""" Show embedded images in the text.
[[Img|source|option|option|...]]
source: link to source (//ID, /images/a.jpg, http://domain/image.jpg, path/to/image.jpg)
Options:
currently none
"""
path = components[0]
if path.startswith('http') or path.startswith('ftp'):
url = path
elif path.startswith('//'):
image_node_id = int(path[2:])
image_node = get_dbhandler().get_node_by_id(image_node_id)
if not image_node:
return '[[ Invalid image macro: non existent node %d]]' % image_node_id
url = image_node.path
elif path.startswith('/'):
# check node with this path
path_node = get_dbhandler().get_node_by_path(path)
if not path_node:
return '[[ Invalid image macro: not existent path %s ]]' % path
url = path
else:
url = '/%s/%s' % (node.url, path)
#return '[[ Invalid image macro (%s) ]]' % path
return literal('<img src="%s" />' % url)
@macro
def M_ListNode(node, components, request):
""" Create list of nodes that are accessible by the current user.
[[ListNode|option|...]]
Options:
level = node level
tags = only nodes which have these tags
Example:
[[ListNode|level=2|tags=keyword1;keyword2]]
"""
kwargs = {}
for c in components:
if c.startswith('level='):
kwargs['level'] = int(c[6:])
elif c.startswith('tags='):
kwargs['tags'] = c[5:].split(';')
elif c.startswith('limit='):
pass
nodes = get_dbhandler().get_nodes(**kwargs)
# check accessibility
nodes = [ n for n in nodes if get_workflow(n).is_accessible(n, request) ]
html = div()
toc = ul()
for n in nodes:
# check user accessibility
toc.add(
li(a(n.title or n.slug, href=n.path))
)
html.add(toc)
return html
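# Rendering sketch (illustrative only, the uuid below is a placeholder):
# given a node whose stored body contains wiki-style markup, postrender()
# expands it before display:
#   body = "See ///{0f3c...} and the index below:\n[[ListChildNodes|type=PageNode|order=+title]]"
#   html = postrender(body, node, request)   # node links become <a>, macros become HTML
# postedit() runs in the opposite direction: it rewrites numeric ///<id> links
# into the stable ///{uuid} form before the edited content is stored.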
| <filename>cmsfix/lib/macro.py
from rhombus.lib.utils import get_dbhandler
from rhombus.lib.tags import *
from cmsfix.models.node import Node
import re
# the pattern below is either
# ///123
# <<MacroName>>
# [[MacroName]]
pattern = re.compile('///(\d+)|///\{([\w-]+)\}|\<\<(.+)\>\>|\[\[(.+)\]\]')
# syntax for Macro is:
# [[MacroName|option1|option2|option3]]
class MacroError(RuntimeError):
pass
def postrender(buffer, node, request):
""" return a new buffer """
dbh = get_dbhandler()
nb = ''
start_pos = 0
for m in pattern.finditer(buffer):
nb += buffer[start_pos:m.start()]
group = m.group()
print(group)
if group.startswith('///'):
nb += node_link(group, dbh)
elif group.startswith('[['):
nb += run_macro(group, node, dbh, request)
else:
nb += '{{ ERR: macro pattern unprocessed }}'
start_pos = m.end()
nb += buffer[start_pos:]
return nb
def postedit(content, node):
""" post edit the content, return a new modified content """
dbh = get_dbhandler()
nc = ''
start_pos = 0
for m in pattern.finditer(content):
nc += content[start_pos:m.start()]
group = m.group()
if group.startswith('///'):
if group[3] != '{':
# convert to UUID
node = dbh.get_node_by_id(int(group[3:]))
nc += ('///{' + str(node.uuid) + '}' if node else group)
else:
nc += group
else:
nc += group
start_pos = m.end()
nc += content[start_pos:]
return nc
def node_link(text, dbh):
try:
if text[3] == '{':
node = dbh.get_nodes_by_uuids(text[4:-1])
else:
node = dbh.get_node_by_id(int(text[3:]))
except:
node = None
if node is None:
return literal('<b>%s</b>' % text)
return literal('<a href="/%s">%s</a>' % (node.url, node.title))
def run_macro(text, node, dbh, request):
global _MACROS_
text = text[2:-2]
components = text.split('|')
macro_name = components[0]
if macro_name not in _MACROS_:
return '[[ERR - macro %s not found]]' % macro_name
try:
return _MACROS_[macro_name](node, components[1:], request)
except MacroError as m_err:
return '[[%s ERR: %s]]' % (macro_name, m_err)
_MACROS_ = {}
def macro(func):
global _MACROS_
macro_name = func.__name__
if not macro_name.startswith('M_'):
raise RuntimeError('function name does not start with M_')
_MACROS_[macro_name[2:]] = func
return func
def macro_dict():
return _MACROS_
## -- MACRO --
##
## all macro functions should return either html or literal objects
##
@macro
def M_ListChildNodes(node, components, request):
""" Create list of child nodes.
[[ListChildNodes|option|option|..]]
Options:
type=Nodetype(PageNode,JournalNode, etc)
order=[+-]slug/id/mtime/title
Example:
[[ListChildNodes|type=PageNode|order=+title]]
"""
nodetype=[]
children = node.children
for c in components:
if c.startswith('type='):
nodetype.append( c[5:] )
elif c.startswith('order='):
order = c[6:].strip().lower()
desc = False
if order[0] == '-':
desc = True
order = order[1:]
elif order[0] == '+':
order = order[1:]
# we cancel the default ordering first
children = node.children.order_by(None)
if order == 'slug':
if desc: children = children.order_by(Node.slug.desc())
else: children = children.order_by(Node.slug)
elif order == 'id':
if desc: children = children.order_by(Node.id.desc())
else: children = children.order_by(Node.id)
elif order == 'mtime':
if desc: children = children.order_by(Node.stamp.desc())
else: children = children.order_by(Node.stamp)
elif order == 'title':
children_list = sorted( [(n.title or n.slug, n) for n in children.all()],
reverse = desc)
children = [n for (k, n) in children_list]
else:
raise MacroError("unknown order option: %s" % order )
html = div()
toc = ul()
if not nodetype:
nodetype.append( 'PageNode' )
for c in children:
if c.__class__.__name__ in nodetype:
toc.add(
li(a(c.title, href=c.path))
)
html.add(toc)
return html
@macro
def M_Img(node, components, request):
""" Show embedded images in the text.
[[Img|source|option|option|...]]
source: link to source (//ID, /images/a.jpg, http://domain/image.jpg, path/to/image.jpg)
Options:
currently none
"""
path = components[0]
if path.startswith('http') or path.startswith('ftp'):
url = path
elif path.startswith('//'):
image_node_id = int(path[2:])
image_node = get_dbhandler().get_node_by_id(image_node_id)
if not image_node:
return '[[ Invalid image macro: non existent node %d]]' % image_node_id
url = image_node.path
elif path.startswith('/'):
# check node with this path
path_node = get_dbhandler().get_node_by_path(path)
if not path_node:
return '[[ Invalid image macro: not existent path %s ]]' % path
url = path
else:
url = '/%s/%s' % (node.url, path)
#return '[[ Invalid image macro (%s) ]]' % path
return literal('<img src="%s" />' % url)
@macro
def M_ListNode(node, components, request):
""" Create list of nodes that are accessible by the current user.
[[ListNode|option|...]]
Options:
level = node level
tags = only nodes which have these tags
Example:
[[ListNode|level=2|tags=keyword1;keyword2]]
"""
kwargs = {}
for c in components:
if c.startswith('level='):
kwargs['level'] = int(c[6:])
elif c.startswith('tags='):
kwargs['tags'] = c[5:].split(';')
elif c.startswith('limit='):
pass
nodes = get_dbhandler().get_nodes(**kwargs)
# check accessibility
nodes = [ n for n in nodes if get_workflow(n).is_accessible(n, request) ]
html = div()
toc = ul()
for n in nodes:
# check user accessibility
toc.add(
li(a(n.title or n.slug, href=n.path))
)
html.add(toc)
return html
| en | 0.4698 | # the pattern below is either # ///123 # <<MacroName>> # [[MacroName]] # syntax for Macro is: # [[MacroName|option1|option2|option3]] return a new buffer post edit the content, return a new modified content # convert to UUID ## -- MACRO -- ## ## all macro functions should return either html or literal objects ## Create list of child nodes. [[ListChildNodes|option|option|..]] Options: type=Nodetype(PageNode,JournalNode, etc) order=[+-]slug/id/mtime/title Example: [[ListChildNodes|type=PageNode|order=+title]] # we cancel the default ordering first Show embedded images in the text. [[Img|source|option|option|...]] source: link to source (//ID, /images/a.jpg, http://domain/image.jpg, path/to/image.jpg) Options: currently none # check node with this path #return '[[ Invalid image macro (%s) ]]' % path Create list of nodes that are accessible by the current user. [[ListNode|option|...]] Options: level = node level tags = only nodes which have these tags Example: [[ListNode|level=2|tags=keyword1;keyword2]] # check accessibility # check user accessibility | 2.373102 | 2 |
xastropy/sdss/qso.py | bpholden/xastropy | 3 | 10789 | '''
#;+
#; NAME:
#; sdss.qso
#; Version 1.1
#;
#; PURPOSE:
#; Class for SDSS QSO
#; 2015 Written by JXP
#;-
#;------------------------------------------------------------------------------
'''
# Import libraries
import numpy as np
import os
from astropy.table import QTable, Column
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.units import Quantity
from xastropy.obs import radec as xor
from xastropy.xutils import xdebug as xdb
class SdssQso(object):
'''Class to handle a single SDSS Quasar
Parameters:
----------
coord: SkyCoord, optional
RA/Dec of the sightline
z: float, optional
Emission redshift
database: SdssQuasars class, optional
Required for grabbing data, etc.
'''
# Init
def __init__(self, coord=None, z=0., database=None, verbose=True):
# Init
if coord is None:
radec = (0.*u.deg, 0.*u.deg)
            self.coord = SkyCoord(ra=radec[0], dec=radec[1])
else:
self.coord = coord
self.z = z
self.verbose = verbose
self.database = database
# None init
self._specfil = None
def get_specfil(self):
'''Parse the SDSS spectrum file
Requires a link to the database Class
'''
if self.database is None:
raise IOError('SdssQso: Need to be linked to an SDSS Database')
# Generate file name (DR4 is different)
pnm = '{0:04d}'.format(
self.database._data[self.database.index]['PLATE'])
#fnm = '{0:04d}'.format(
# self.database._data[self.database.index]['FIBERID'])
fnm = '{0:03d}'.format(
self.database._data[self.database.index]['FIBERID'])
mjd = str(self.database._data[self.database.index]['MJD'])
sfil = self.database._datdir+pnm+'/1d/'+'spSpec-'
# Finish
self._specfil = sfil+mjd+'-'+pnm+'-'+fnm+'.fit' # Is usually gzipped
def load_spec(self):
'''Input the Spectrum
'''
from linetools.spectra.xspectrum1d import XSpectrum1D
if self._specfil is None:
self.get_specfil()
#
if self.verbose:
print('SdssQso: Loading spectrum from {:s}'.format(self._specfil))
self.spec = XSpectrum1D.from_file(self._specfil)
def __repr__(self):
''' For printing
'''
return '[{:s}: {:s} {:s}, z={:g}]'.format(self.__class__.__name__,
self.coord.ra.to_string(unit=u.hour,sep=':',pad=True),
self.coord.dec.to_string(sep=':',pad=True,alwayssign=True), self.z)
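if __name__ == '__main__':
    # Usage sketch with illustrative coordinates and redshift. Loading a
    # spectrum additionally requires database= pointing at an SdssQuasars
    # instance, which is not constructed here.
    qso = SdssQso(coord=SkyCoord(ra=150.1*u.deg, dec=2.2*u.deg), z=2.5)
    print(qso)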
| '''
#;+
#; NAME:
#; sdss.qso
#; Version 1.1
#;
#; PURPOSE:
#; Class for SDSS QSO
#; 2015 Written by JXP
#;-
#;------------------------------------------------------------------------------
'''
# Import libraries
import numpy as np
import os
from astropy.table import QTable, Column
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.units import Quantity
from xastropy.obs import radec as xor
from xastropy.xutils import xdebug as xdb
class SdssQso(object):
'''Class to handle a single SDSS Quasar
Parameters:
----------
coord: SkyCoord, optional
RA/Dec of the sightline
z: float, optional
Emission redshift
database: SdssQuasars class, optional
Required for grabbing data, etc.
'''
# Init
def __init__(self, coord=None, z=0., database=None, verbose=True):
# Init
if coord is None:
radec = (0.*u.deg, 0.*u.deg)
            self.coord = SkyCoord(ra=radec[0], dec=radec[1])
else:
self.coord = coord
self.z = z
self.verbose = verbose
self.database = database
# None init
self._specfil = None
def get_specfil(self):
'''Parse the SDSS spectrum file
Requires a link to the database Class
'''
if self.database is None:
raise IOError('SdssQso: Need to be linked to an SDSS Database')
# Generate file name (DR4 is different)
pnm = '{0:04d}'.format(
self.database._data[self.database.index]['PLATE'])
#fnm = '{0:04d}'.format(
# self.database._data[self.database.index]['FIBERID'])
fnm = '{0:03d}'.format(
self.database._data[self.database.index]['FIBERID'])
mjd = str(self.database._data[self.database.index]['MJD'])
sfil = self.database._datdir+pnm+'/1d/'+'spSpec-'
# Finish
self._specfil = sfil+mjd+'-'+pnm+'-'+fnm+'.fit' # Is usually gzipped
def load_spec(self):
'''Input the Spectrum
'''
from linetools.spectra.xspectrum1d import XSpectrum1D
if self._specfil is None:
self.get_specfil()
#
if self.verbose:
print('SdssQso: Loading spectrum from {:s}'.format(self._specfil))
self.spec = XSpectrum1D.from_file(self._specfil)
def __repr__(self):
''' For printing
'''
return '[{:s}: {:s} {:s}, z={:g}]'.format(self.__class__.__name__,
self.coord.ra.to_string(unit=u.hour,sep=':',pad=True),
self.coord.dec.to_string(sep=':',pad=True,alwayssign=True), self.z)
| en | 0.479078 | #;+ #; NAME: #; sdss.qso #; Version 1.1 #; #; PURPOSE: #; Class for SDSS QSO #; 2015 Written by JXP #;- #;------------------------------------------------------------------------------ # Import libraries Class to handle a single SDSS Quasar Parameters: ---------- coord: SkyCoord, optional RA/Dec of the sightline z: float, optional Emission redshift database: SdssQuasars class, optional Required for grabbing data, etc. # Init # Init # None init Parse the SDSS spectrum file Requires a link to the database Class # Generate file name (DR4 is different) #fnm = '{0:04d}'.format( # self.database._data[self.database.index]['FIBERID']) # Finish # Is usually gzipped Input the Spectrum # For printing | 2.206141 | 2 |
ez_sten/__init__.py | deadlift1226/ez-sten | 0 | 10790 | <reponame>deadlift1226/ez-sten
name = "module"
from .module import func
| name = "module"
from .module import func | none | 1 | 1.280127 | 1 |
|
wisdem/test/test_optimization_drivers/test_dakota_driver.py | johnjasa/WISDEM | 81 | 10791 | import unittest
import numpy as np
from openmdao.utils.assert_utils import assert_near_equal
from wisdem.optimization_drivers.dakota_driver import DakotaOptimizer
try:
import dakota
except ImportError:
dakota = None
@unittest.skipIf(dakota is None, "only run if Dakota is installed.")
class TestDakotaOptimization(unittest.TestCase):
def test_2D_opt_max_iterations(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_high_model as model"
output_scalers = [1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), -9.5)
def test_2D_opt_EGO(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_high_model as model"
output_scalers = [1.0]
options = {"initial_samples": 5, "method": "efficient_global", "seed": 123456}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), -9.999996864)
def test_two_variables(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]]), "z": [1.0, 2.0]}
desvars = {"x": np.array([0.0, 0.25]), "z": 1.5}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_two_variable as model"
output_scalers = [1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), 1.0)
def test_constraint(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y", "con"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_low_model as model"
output_scalers = [1.0, 1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), 0.5)
assert_near_equal(np.min(np.array(results["con"])), 0.0)
if __name__ == "__main__":
unittest.main()
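# Call pattern exercised by the tests above (sketch; argument names follow the
# calls in this file rather than an independently verified API reference):
#   opt = DakotaOptimizer(template_dir)
#   results = opt.optimize(desvars, outputs, bounds, model_string,
#                          output_scalers, options)
# `results` is indexable by output name, e.g. np.min(np.array(results["y"])).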
| import unittest
import numpy as np
from openmdao.utils.assert_utils import assert_near_equal
from wisdem.optimization_drivers.dakota_driver import DakotaOptimizer
try:
import dakota
except ImportError:
dakota = None
@unittest.skipIf(dakota is None, "only run if Dakota is installed.")
class TestDakotaOptimization(unittest.TestCase):
def test_2D_opt_max_iterations(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_high_model as model"
output_scalers = [1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), -9.5)
def test_2D_opt_EGO(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_high_model as model"
output_scalers = [1.0]
options = {"initial_samples": 5, "method": "efficient_global", "seed": 123456}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), -9.999996864)
def test_two_variables(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]]), "z": [1.0, 2.0]}
desvars = {"x": np.array([0.0, 0.25]), "z": 1.5}
outputs = ["y"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_two_variable as model"
output_scalers = [1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), 1.0)
def test_constraint(self):
bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
desvars = {"x": np.array([0.0, 0.25])}
outputs = ["y", "con"]
template_dir = "template_dir/"
model_string = "from weis.multifidelity.models.testbed_components import simple_2D_low_model as model"
output_scalers = [1.0, 1.0]
options = {"method": "coliny_cobyla", "max_function_evaluations": 3}
opt = DakotaOptimizer(template_dir)
results = opt.optimize(desvars, outputs, bounds, model_string, output_scalers, options)
assert_near_equal(np.min(np.array(results["y"])), 0.5)
assert_near_equal(np.min(np.array(results["con"])), 0.0)
if __name__ == "__main__":
unittest.main()
| none | 1 | 2.315692 | 2 |
|
lab4_runTFCurveFitting.py | pskdev/EveryBodyTensorFlow | 1 | 10792 | #-*- coding: utf-8 -*-
#! /usr/bin/env python
'''
#------------------------------------------------------------
filename: lab4_runTFCurveFitting.py
This is an example for linear regression in tensorflow
Which is a curve fitting example
written by <NAME> @ Aug 2017
#------------------------------------------------------------
'''
from os import getcwd
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_io
# from __future__ import print_function
# Preparing data set ================================================
from tensorflow.examples.tutorials.mnist import input_data
# generation of sinusoid data set
total_size = 5000
training_size = 4000
validation_size = total_size - training_size
xsize = 50 # the size of single x_data
x_data = np.zeros([xsize, total_size])
cos_x = np.zeros([xsize, total_size])
mag = 1.0
phase_rad = np.pi/4
rad_freq = np.pi / 2.0
for i in range(total_size):
x_data[:,i] = np.linspace(-4,4,xsize)
cos_x = np.cos(rad_freq*x_data + phase_rad)
noise_var = 0.01
noise = np.sqrt(noise_var) * np.random.randn(xsize,total_size)
y_clean = cos_x
y_data = y_clean + noise
x_training_data = x_data[:,0:training_size]
y_training_data = y_data[:,0:training_size]
x_validation_data = x_data[:,training_size:-1]
y_validation_data = y_data[:,training_size:-1]
# signal plot
# hfig1= plt.figure(1,figsize=[10,10])
# plt.plot(cos_x[:,1],color='b',label='clean')
# plt.plot(y_data[:,1],color='r',label='noisy')
# plt.legend()
# configure training parameters =====================================
learning_rate = 0.01
training_epochs = 20
batch_size = 100
display_step = 1
# computational TF graph construction ================================
##---------------- Define graph nodes -------------------
# tf Graph data input holder
# (x,y) : input / output of prediction model
# which will be fed by training data in the TF graph computation
# (a,b,c,d) : model parameters
# which will be learned from training data in the TF graph computation
x = tf.placeholder(tf.float32, [xsize,None])
y = tf.placeholder(tf.float32, [xsize,None])
# Set model weights which is calculated in the TF graph
a = tf.Variable(1.) # initialization by 1
b = tf.Variable(1.)
c = tf.Variable(1.)
d = tf.Variable(1.)
print ('TF graph nodes are defined')
##--------------------- Define function -----------------
# define relationship between instance data x and label data y
# define optimizer used in the learning phase
# define cost function for optimization
# Construct model
pred_y = c*tf.cos(a*x+b)+d
# Minimize error using MSE function
cost = tf.reduce_mean(tf.reduce_sum( tf.square(y - pred_y) , reduction_indices=1), name="mse")
# Gradient Descent
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
print ('Functions in TF graph are ready')
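# Quick NumPy cross-check of the same model and cost outside the TF graph
# (illustrative only; it evaluates the initial guess a=b=c=d=1 on one batch
# of the training data and mirrors reduce_sum over axis 1 / reduce_mean).
np_pred_y = 1.0*np.cos(1.0*x_training_data[:, :batch_size] + 1.0) + 1.0
np_mse = np.mean(np.sum((y_training_data[:, :batch_size] - np_pred_y)**2, axis=1))
print('MSE of the initial-guess model on one batch: %f' % np_mse)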
## Performance evaluation model =====================================
# y : data output
# pred_y: prediction output by model, c * cos(a*x + b) + d
correct_prediction = cost
# Calculate error rate using data --------------
# where
# tf_reduce_mean(input_tensor, axis) : reduce dimension of tensor by computing the mean of elements
# # 'x' is [[1., 1.]
# [2., 2.]]
# tf.reduce_mean(x) ==> 1.5
# tf.reduce_mean(x, 0) ==> [1.5, 1.5]
# tf.reduce_mean(x, 1) ==> [1., 2.]
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
error_rate_training = np.zeros(training_epochs)
error_rate_validation = np.zeros(training_epochs)
# Launch the graph (execution) ========================================
# Initializing the variables
init = tf.global_variables_initializer()
## -------------------- Learning iteration start --------------------
with tf.Session() as sess:
sess.run(init) # this for variable use
# Training cycle
for epoch in range(training_epochs): # iteration loop
avg_cost = 0.
total_batch = int(training_size/batch_size) #
# Loop over all batches
for i in range(total_batch): # batch loop
data_start_index = i * batch_size
data_end_index = (i + 1) * batch_size
            # feed training data --------------------------
batch_xs = x_training_data[:,data_start_index:data_end_index]
batch_ys = y_training_data[:,data_start_index:data_end_index]
#----------------------------------------------
# Run optimization op (backprop) and cost op (to get loss value)
# feeding training data
_, local_batch_cost = sess.run([optimizer, cost], feed_dict={x: batch_xs,
y: batch_ys})
# Compute average loss
avg_cost += local_batch_cost / total_batch
# print ("At %d-th batch in %d-epoch, avg_cost = %f" % (i,epoch,avg_cost) )
# Display logs per epoch step
if (epoch+1) % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost/batch_size))
batch_xs = x_training_data
batch_ys = y_training_data
error_rate_training[epoch] = accuracy.eval({x: batch_xs, y: batch_ys},session=sess)/training_size
error_rate_validation[epoch] = accuracy.eval({x: x_validation_data, y: y_validation_data},session=sess)/validation_size
print("Training set MSE:", error_rate_training[epoch])
print("Validation set MSE:", error_rate_validation[epoch])
print("--------------------------------------------")
print("Optimization Finished!")
pred_a = sess.run(a)
pred_b = sess.run(b)
pred_c = sess.run(c)
pred_d = sess.run(d)
hfig1 = plt.figure(1,figsize=(10,10))
epoch_index = np.array([elem for elem in range(training_epochs)])
plt.plot(epoch_index,error_rate_training,label='Training data',color='r',marker='o')
plt.plot(epoch_index,error_rate_validation,label='Validation data',color='b',marker='x')
plt.legend()
plt.title('MSE of prediction:')
plt.xlabel('Iteration epoch')
plt.ylabel('MSE')
hfig2 = plt.figure(2,figsize=(10,10))
pred_y = pred_c * np.cos(pred_a * x_data[:,0] + pred_b) +pred_d
plt.plot(x_validation_data[:,0],y_validation_data[:,0],label='noisy data',color='b',marker='*')
plt.plot(x_validation_data[:,0], pred_y,label='prediction',color='r')
plt.legend()
plt.title('A line fitting example:')
plt.xlabel('X data')
plt.ylabel('Y data')
# FIG_SAVE_DIR = getcwd() + '/figs/'
# hfig1.savefig(FIG_SAVE_DIR + 'runExample_TFLogisticReg_aymeric_ErrRate.png')
# hfig1.clear()
| #-*- coding: utf-8 -*-
#! /usr/bin/env python
'''
#------------------------------------------------------------
filename: lab4_runTFCurveFitting.py
This is an example of regression in TensorFlow,
which is a curve fitting example
written by <NAME> @ Aug 2017
#------------------------------------------------------------
'''
from os import getcwd
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import learn_io
# from __future__ import print_function
# Preparing data set ================================================
from tensorflow.examples.tutorials.mnist import input_data
# generation of sinusoid data set
total_size = 5000
training_size = 4000
validation_size = total_size - training_size
xsize = 50 # the size of single x_data
x_data = np.zeros([xsize, total_size])
cos_x = np.zeros([xsize, total_size])
mag = 1.0
phase_rad = np.pi/4
rad_freq = np.pi / 2.0
for i in range(total_size):
x_data[:,i] = np.linspace(-4,4,xsize)
cos_x = np.cos(rad_freq*x_data + phase_rad)
noise_var = 0.01
noise = np.sqrt(noise_var) * np.random.randn(xsize,total_size)
y_clean = cos_x
y_data = y_clean + noise
x_training_data = x_data[:,0:training_size]
y_training_data = y_data[:,0:training_size]
x_validation_data = x_data[:,training_size:-1]
y_validation_data = y_data[:,training_size:-1]
# signal plot
# hfig1= plt.figure(1,figsize=[10,10])
# plt.plot(cos_x[:,1],color='b',label='clean')
# plt.plot(y_data[:,1],color='r',label='noisy')
# plt.legend()
# configure training parameters =====================================
learning_rate = 0.01
training_epochs = 20
batch_size = 100
display_step = 1
# computational TF graph construction ================================
##---------------- Define graph nodes -------------------
# tf Graph data input holder
# (x,y) : input / output of prediction model
# which will be fed by training data in the TF graph computation
# (a,b,c,d) : model parameters
# which will be learned from training data in the TF graph computation
x = tf.placeholder(tf.float32, [xsize,None])
y = tf.placeholder(tf.float32, [xsize,None])
# Set model weights which is calculated in the TF graph
a = tf.Variable(1.) # initialization by 1
b = tf.Variable(1.)
c = tf.Variable(1.)
d = tf.Variable(1.)
print ('TF graph nodes are defined')
##--------------------- Define function -----------------
# define relationship btw instance data x and label data y
# define optimizer used in the learning phase
# define cost function for optimization
# Construct model
pred_y = c*tf.cos(a*x+b)+d
# Minimize error using MSE function
cost = tf.reduce_mean(tf.reduce_sum( tf.square(y - pred_y) , reduction_indices=1), name="mse")
# Gradient Descent
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
print ('Functions in TF graph are ready')
## Performance evaluation model =====================================
# y : data output
# pred_y: prediction output by model, c*cos(a*x + b) + d
correct_prediction = cost
# Calculate error rate using data --------------
# where
# tf.reduce_mean(input_tensor, axis) : reduces the dimension of a tensor by computing the mean of its elements
# # 'x' is [[1., 1.]
# [2., 2.]]
# tf.reduce_mean(x) ==> 1.5
# tf.reduce_mean(x, 0) ==> [1.5, 1.5]
# tf.reduce_mean(x, 1) ==> [1., 2.]
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
error_rate_training = np.zeros(training_epochs)
error_rate_validation = np.zeros(training_epochs)
# Launch the graph (execution) ========================================
# Initializing the variables
init = tf.global_variables_initializer()
## -------------------- Learning iteration start --------------------
with tf.Session() as sess:
sess.run(init) # this for variable use
# Training cycle
for epoch in range(training_epochs): # iteration loop
avg_cost = 0.
total_batch = int(training_size/batch_size) #
# Loop over all batches
for i in range(total_batch): # batch loop
data_start_index = i * batch_size
data_end_index = (i + 1) * batch_size
# feed training data --------------------------
batch_xs = x_training_data[:,data_start_index:data_end_index]
batch_ys = y_training_data[:,data_start_index:data_end_index]
#----------------------------------------------
# Run optimization op (backprop) and cost op (to get loss value)
# feeding training data
_, local_batch_cost = sess.run([optimizer, cost], feed_dict={x: batch_xs,
y: batch_ys})
# Compute average loss
avg_cost += local_batch_cost / total_batch
# print ("At %d-th batch in %d-epoch, avg_cost = %f" % (i,epoch,avg_cost) )
# Display logs per epoch step
if (epoch+1) % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost/batch_size))
batch_xs = x_training_data
batch_ys = y_training_data
error_rate_training[epoch] = accuracy.eval({x: batch_xs, y: batch_ys},session=sess)/training_size
error_rate_validation[epoch] = accuracy.eval({x: x_validation_data, y: y_validation_data},session=sess)/validation_size
print("Training set MSE:", error_rate_training[epoch])
print("Validation set MSE:", error_rate_validation[epoch])
print("--------------------------------------------")
print("Optimization Finished!")
pred_a = sess.run(a)
pred_b = sess.run(b)
pred_c = sess.run(c)
pred_d = sess.run(d)
hfig1 = plt.figure(1,figsize=(10,10))
epoch_index = np.array([elem for elem in range(training_epochs)])
plt.plot(epoch_index,error_rate_training,label='Training data',color='r',marker='o')
plt.plot(epoch_index,error_rate_validation,label='Validation data',color='b',marker='x')
plt.legend()
plt.title('MSE of prediction:')
plt.xlabel('Iteration epoch')
plt.ylabel('MSE')
hfig2 = plt.figure(2,figsize=(10,10))
pred_y = pred_c * np.cos(pred_a * x_data[:,0] + pred_b) +pred_d
plt.plot(x_validation_data[:,0],y_validation_data[:,0],label='noisy data',color='b',marker='*')
plt.plot(x_validation_data[:,0], pred_y,label='prediction',color='r')
plt.legend()
plt.title('A line fitting example:')
plt.xlabel('X data')
plt.ylabel('Y data')
# FIG_SAVE_DIR = getcwd() + '/figs/'
# hfig1.savefig(FIG_SAVE_DIR + 'runExample_TFLogisticReg_aymeric_ErrRate.png')
# hfig1.clear()
| en | 0.514409 | #-*- coding: utf-8 -*- #! /usr/bin/env python #------------------------------------------------------------ filename: lab4_runTFCurveFitting.py This is an example for linear regression in tensorflow Which is a curve fitting example written by <NAME> @ Aug 2017 #------------------------------------------------------------ # from __future__ import print_function # Preparing data set ================================================ # generation of sinusoid data set # the size of single x_data # signal plot # hfig1= plt.figure(1,figsize=[10,10]) # plt.plot(cos_x[:,1],color='b',label='clean') # plt.plot(y_data[:,1],color='r',label='noisy') # plt.legend() # configure training parameters ===================================== # computational TF graph construction ================================ ##---------------- Define graph nodes ------------------- # tf Graph data input holder # (x,y) : input / output of prediction model # which will be feeded by training data in the TF graph computation # (a,b,c,d) : model parameters # which will be learned from training data in the TF graph computation # Set model weights which is calculated in the TF graph # initialization by 1 ##--------------------- Define function ----------------- # define relationshitp btw instance data x and label data y # define optimizer used in the learning phase # define cost function for optimization # Construct model # Minimize error using MSE function # Gradient Descent # optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) ## Performance evaluation model ========================_y=========== # y : data output # pred_y: prediction output by model, a x^3 + b x^2 + c x + d # Calculate error rate using data -------------- # where # tf_reduce_mean(input_tensor, axis) : reduce dimension of tensor by computing the mean of elements # # 'x' is [[1., 1.] # [2., 2.]] # tf.reduce_mean(x) ==> 1.5 # tf.reduce_mean(x, 0) ==> [1.5, 1.5] # tf.reduce_mean(x, 1) ==> [1., 2.] # Launch the graph (execution) ======================================== # Initializing the variables ## -------------------- Learning iteration start -------------------- # this for variable use # Training cycle # iteration loop # # Loop over all batches # batch loop # feed traing data -------------------------- #---------------------------------------------- # Run optimization op (backprop) and cost op (to get loss value) # feedign training data # Compute average loss # print ("At %d-th batch in %d-epoch, avg_cost = %f" % (i,epoch,avg_cost) ) # Display logs per epoch step # FIG_SAVE_DIR = getcwd() + '/figs/' # hfig1.savefig(FIG_SAVE_DIR + 'runExample_TFLogisticReg_aymeric_ErrRate.png') # hfig1.clear() | 3.116788 | 3 |
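The script above fits y = c*cos(a*x + b) + d by hand-building a TF1 graph and minimizing an MSE cost with Adam. As a point of comparison only — not part of the original file — the same fit can be sketched in a few lines with SciPy, assuming scipy is installed; the data generation below simply mirrors the constants used in the script.

# Sketch only: same cosine model, fitted with scipy.optimize.curve_fit.
import numpy as np
from scipy.optimize import curve_fit

def model(x, a, b, c, d):
    # identical parametrization to the TF script: y = c*cos(a*x + b) + d
    return c * np.cos(a * x + b) + d

x = np.linspace(-4, 4, 50)                                   # xsize = 50 in the script
y = np.cos(np.pi / 2.0 * x + np.pi / 4) + np.sqrt(0.01) * np.random.randn(50)

popt, _ = curve_fit(model, x, y, p0=[1.0, 1.0, 1.0, 1.0])    # p0 mirrors the tf.Variable(1.) inits
print(popt)  # should land near (pi/2, pi/4, 1.0, 0.0), up to sign/phase ambiguity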
app/route/stats/route.py | LifeLaboratory/finopolis_backend | 0 | 10793 | <reponame>LifeLaboratory/finopolis_backend
# coding=utf-8
from app.route.stats.processor import *
from app.api.base.base_router import BaseRouter
from app.api.base import base_name as names
class Stats(BaseRouter):
def __init__(self):
super().__init__()
self.args = [names.face, names.post, names.socnet, names.likes, names.views, names.comments]
def get(self):
self._read_args()
print(self.data)
answer = get_stat(self.data)
return answer or {}
def put(self):
self._read_args()
answer = update_stat(self.data)
return answer or {}
| # coding=utf-8
from app.route.stats.processor import *
from app.api.base.base_router import BaseRouter
from app.api.base import base_name as names
class Stats(BaseRouter):
def __init__(self):
super().__init__()
self.args = [names.face, names.post, names.socnet, names.likes, names.views, names.comments]
def get(self):
self._read_args()
print(self.data)
answer = get_stat(self.data)
return answer or {}
def put(self):
self._read_args()
answer = update_stat(self.data)
return answer or {} | en | 0.644078 | # coding=utf-8 | 2.294161 | 2 |
script/calculate_correct_percentage_kingdom.py | xie186/dragmap-meth | 4 | 10794 | <gh_stars>1-10
from Bio import TogoWS
import argparse
import sys
import os
def summary(options):
num_reads = 0
num_correct = 0
with open(options.input) as file_input:
for line in file_input:
line = line.rstrip()
ele = line.split("\t")
if "FAILED" in line:
continue
if "BAD" in ele[0]:
num_reads += 1
if "Bacteria" in line:
num_correct += 1
#raise("Makeblastdb failed!")
else:
num_reads += 1
if options.species in line:
num_correct += 1
#raise("Makeblastdb failed!")
percentage = 100* num_correct/num_reads
print("Percente: {perc}\n".format(perc = percentage))
if __name__ == '__main__':
## description - Text to display before the argument help (default: none)
parser=argparse.ArgumentParser(description='mbmeth')
parser.add_argument("-i", '--input', help="Input list")
parser.add_argument("-s", '--species', help="species")
options = parser.parse_args(args=None if sys.argv[1:] else ['--help'])
summary(options)
| from Bio import TogoWS
import argparse
import sys
import os
def summary(options):
num_reads = 0
num_correct = 0
with open(options.input) as file_input:
for line in file_input:
line = line.rstrip()
ele = line.split("\t")
if "FAILED" in line:
continue
if "BAD" in ele[0]:
num_reads += 1
if "Bacteria" in line:
num_correct += 1
#raise("Makeblastdb failed!")
else:
num_reads += 1
if options.species in line:
num_correct += 1
#raise("Makeblastdb failed!")
percentage = 100* num_correct/num_reads
print("Percente: {perc}\n".format(perc = percentage))
if __name__ == '__main__':
## description - Text to display before the argument help (default: none)
parser=argparse.ArgumentParser(description='mbmeth')
parser.add_argument("-i", '--input', help="Input list")
parser.add_argument("-s", '--species', help="species")
options = parser.parse_args(args=None if sys.argv[1:] else ['--help'])
summary(options) | en | 0.220615 | #raise("Makeblastdb failed!") #raise("Makeblastdb failed!") ## description - Text to display before the argument help (default: none) | 3.273652 | 3 |
borax/patterns/singleton.py | kinegratii/borax | 51 | 10795 | <reponame>kinegratii/borax<filename>borax/patterns/singleton.py<gh_stars>10-100
# coding=utf8
class MetaSingleton(type):
def __init__(cls, *args):
type.__init__(cls, *args)
cls.instance = None
def __call__(cls, *args, **kwargs):
if not cls.instance:
cls.instance = type.__call__(cls, *args, **kwargs)
return cls.instance
| # coding=utf8
class MetaSingleton(type):
def __init__(cls, *args):
type.__init__(cls, *args)
cls.instance = None
def __call__(cls, *args, **kwargs):
if not cls.instance:
cls.instance = type.__call__(cls, *args, **kwargs)
return cls.instance | ca | 0.404804 | # coding=utf8 | 2.486921 | 2 |
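A short usage sketch (not from the source) of what the metaclass guarantees: the second and later instantiations return the cached instance, so __init__ runs only once.

class AppConfig(metaclass=MetaSingleton):   # hypothetical consumer class
    def __init__(self, name="default"):
        self.name = name

first = AppConfig("prod")
second = AppConfig("dev")    # __call__ returns the cached instance; __init__ is not re-run
assert first is second
assert second.name == "prod"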
aiida/backends/general/migrations/utils.py | pranavmodx/aiida-core | 0 | 10796 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name
"""Various utils that should be used during migrations and migrations tests because the AiiDA ORM cannot be used."""
import datetime
import errno
import os
import re
import numpy
from aiida.common import json
ISOFORMAT_DATETIME_REGEX = re.compile(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+(\+\d{2}:\d{2})?$')
def ensure_repository_folder_created(uuid):
"""Make sure that the repository sub folder for the node with the given UUID exists or create it.
:param uuid: UUID of the node
"""
dirpath = get_node_repository_sub_folder(uuid)
try:
os.makedirs(dirpath)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def put_object_from_string(uuid, name, content):
"""Write a file with the given content in the repository sub folder of the given node.
:param uuid: UUID of the node
:param name: name to use for the file
:param content: the content to write to the file
"""
ensure_repository_folder_created(uuid)
filepath = os.path.join(get_node_repository_sub_folder(uuid), name)
with open(filepath, 'w', encoding='utf-8') as handle:
handle.write(content)
def get_object_from_repository(uuid, name):
"""Return the content of a file with the given name in the repository sub folder of the given node.
:param uuid: UUID of the node
:param name: name to use for the file
"""
filepath = os.path.join(get_node_repository_sub_folder(uuid), name)
with open(filepath) as handle:
return handle.read()
def get_node_repository_sub_folder(uuid):
"""Return the absolute path to the sub folder `path` within the repository of the node with the given UUID.
:param uuid: UUID of the node
:return: absolute path to node repository folder, i.e `/some/path/repository/node/12/ab/c123134-a123/path`
"""
from aiida.manage.configuration import get_profile
uuid = str(uuid)
repo_dirpath = os.path.join(get_profile().repository_path, 'repository')
node_dirpath = os.path.join(repo_dirpath, 'node', uuid[:2], uuid[2:4], uuid[4:], 'path')
return node_dirpath
def get_numpy_array_absolute_path(uuid, name):
"""Return the absolute path of a numpy array with the given name in the repository of the node with the given uuid.
:param uuid: the UUID of the node
:param name: the name of the numpy array
:return: the absolute path of the numpy array file
"""
return os.path.join(get_node_repository_sub_folder(uuid), name + '.npy')
def store_numpy_array_in_repository(uuid, name, array):
"""Store a numpy array in the repository folder of a node.
:param uuid: the node UUID
:param name: the name under which to store the array
:param array: the numpy array to store
"""
ensure_repository_folder_created(uuid)
filepath = get_numpy_array_absolute_path(uuid, name)
with open(filepath, 'wb') as handle:
numpy.save(handle, array)
def delete_numpy_array_from_repository(uuid, name):
"""Delete the numpy array with a given name from the repository corresponding to a node with a given uuid.
:param uuid: the UUID of the node
:param name: the name of the numpy array
"""
filepath = get_numpy_array_absolute_path(uuid, name)
try:
os.remove(filepath)
except (IOError, OSError):
pass
def load_numpy_array_from_repository(uuid, name):
"""Load and return a numpy array from the repository folder of a node.
:param uuid: the node UUID
:param name: the name under which to store the array
:return: the numpy array
"""
filepath = get_numpy_array_absolute_path(uuid, name)
return numpy.load(filepath)
def recursive_datetime_to_isoformat(value):
"""Convert all datetime objects in the given value to string representations in ISO format.
:param value: a mapping, sequence or single value optionally containing datetime objects
"""
if isinstance(value, list):
return [recursive_datetime_to_isoformat(_) for _ in value]
if isinstance(value, dict):
return dict((key, recursive_datetime_to_isoformat(val)) for key, val in value.items())
if isinstance(value, datetime.datetime):
return value.isoformat()
return value
def dumps_json(dictionary):
"""Transforms all datetime object into isoformat and then returns the JSON."""
return json.dumps(recursive_datetime_to_isoformat(dictionary))
| # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name
"""Various utils that should be used during migrations and migrations tests because the AiiDA ORM cannot be used."""
import datetime
import errno
import os
import re
import numpy
from aiida.common import json
ISOFORMAT_DATETIME_REGEX = re.compile(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+(\+\d{2}:\d{2})?$')
def ensure_repository_folder_created(uuid):
"""Make sure that the repository sub folder for the node with the given UUID exists or create it.
:param uuid: UUID of the node
"""
dirpath = get_node_repository_sub_folder(uuid)
try:
os.makedirs(dirpath)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def put_object_from_string(uuid, name, content):
"""Write a file with the given content in the repository sub folder of the given node.
:param uuid: UUID of the node
:param name: name to use for the file
:param content: the content to write to the file
"""
ensure_repository_folder_created(uuid)
filepath = os.path.join(get_node_repository_sub_folder(uuid), name)
with open(filepath, 'w', encoding='utf-8') as handle:
handle.write(content)
def get_object_from_repository(uuid, name):
"""Return the content of a file with the given name in the repository sub folder of the given node.
:param uuid: UUID of the node
:param name: name to use for the file
"""
filepath = os.path.join(get_node_repository_sub_folder(uuid), name)
with open(filepath) as handle:
return handle.read()
def get_node_repository_sub_folder(uuid):
"""Return the absolute path to the sub folder `path` within the repository of the node with the given UUID.
:param uuid: UUID of the node
:return: absolute path to node repository folder, i.e `/some/path/repository/node/12/ab/c123134-a123/path`
"""
from aiida.manage.configuration import get_profile
uuid = str(uuid)
repo_dirpath = os.path.join(get_profile().repository_path, 'repository')
node_dirpath = os.path.join(repo_dirpath, 'node', uuid[:2], uuid[2:4], uuid[4:], 'path')
return node_dirpath
def get_numpy_array_absolute_path(uuid, name):
"""Return the absolute path of a numpy array with the given name in the repository of the node with the given uuid.
:param uuid: the UUID of the node
:param name: the name of the numpy array
:return: the absolute path of the numpy array file
"""
return os.path.join(get_node_repository_sub_folder(uuid), name + '.npy')
def store_numpy_array_in_repository(uuid, name, array):
"""Store a numpy array in the repository folder of a node.
:param uuid: the node UUID
:param name: the name under which to store the array
:param array: the numpy array to store
"""
ensure_repository_folder_created(uuid)
filepath = get_numpy_array_absolute_path(uuid, name)
with open(filepath, 'wb') as handle:
numpy.save(handle, array)
def delete_numpy_array_from_repository(uuid, name):
"""Delete the numpy array with a given name from the repository corresponding to a node with a given uuid.
:param uuid: the UUID of the node
:param name: the name of the numpy array
"""
filepath = get_numpy_array_absolute_path(uuid, name)
try:
os.remove(filepath)
except (IOError, OSError):
pass
def load_numpy_array_from_repository(uuid, name):
"""Load and return a numpy array from the repository folder of a node.
:param uuid: the node UUID
:param name: the name under which to store the array
:return: the numpy array
"""
filepath = get_numpy_array_absolute_path(uuid, name)
return numpy.load(filepath)
def recursive_datetime_to_isoformat(value):
"""Convert all datetime objects in the given value to string representations in ISO format.
:param value: a mapping, sequence or single value optionally containing datetime objects
"""
if isinstance(value, list):
return [recursive_datetime_to_isoformat(_) for _ in value]
if isinstance(value, dict):
return dict((key, recursive_datetime_to_isoformat(val)) for key, val in value.items())
if isinstance(value, datetime.datetime):
return value.isoformat()
return value
def dumps_json(dictionary):
"""Transforms all datetime object into isoformat and then returns the JSON."""
return json.dumps(recursive_datetime_to_isoformat(dictionary))
| en | 0.702268 | # -*- coding: utf-8 -*- ########################################################################### # Copyright (c), The AiiDA team. All rights reserved. # # This file is part of the AiiDA code. # # # # The code is hosted on GitHub at https://github.com/aiidateam/aiida-core # # For further information on the license, see the LICENSE.txt file # # For further information please visit http://www.aiida.net # ########################################################################### # pylint: disable=invalid-name Various utils that should be used during migrations and migrations tests because the AiiDA ORM cannot be used. Make sure that the repository sub folder for the node with the given UUID exists or create it. :param uuid: UUID of the node Write a file with the given content in the repository sub folder of the given node. :param uuid: UUID of the node :param name: name to use for the file :param content: the content to write to the file Return the content of a file with the given name in the repository sub folder of the given node. :param uuid: UUID of the node :param name: name to use for the file Return the absolute path to the sub folder `path` within the repository of the node with the given UUID. :param uuid: UUID of the node :return: absolute path to node repository folder, i.e `/some/path/repository/node/12/ab/c123134-a123/path` Return the absolute path of a numpy array with the given name in the repository of the node with the given uuid. :param uuid: the UUID of the node :param name: the name of the numpy array :return: the absolute path of the numpy array file Store a numpy array in the repository folder of a node. :param uuid: the node UUID :param name: the name under which to store the array :param array: the numpy array to store Delete the numpy array with a given name from the repository corresponding to a node with a given uuid. :param uuid: the UUID of the node :param name: the name of the numpy array Load and return a numpy array from the repository folder of a node. :param uuid: the node UUID :param name: the name under which to store the array :return: the numpy array Convert all datetime objects in the given value to string representations in ISO format. :param value: a mapping, sequence or single value optionally containing datetime objects Transforms all datetime object into isoformat and then returns the JSON. | 1.988377 | 2 |
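A small illustration (not from the source) of how the serialization helpers above behave; the payload values are invented.

import datetime

payload = {
    "created": datetime.datetime(2020, 1, 1, 12, 30),
    "runs": [datetime.datetime(2020, 1, 2), 42],
}
print(dumps_json(payload))
# expected output, give or take whitespace:
# {"created": "2020-01-01T12:30:00", "runs": ["2020-01-02T00:00:00", 42]}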
src/models/__init__.py | DwaraknathT/sparsity | 0 | 10797 | <filename>src/models/__init__.py
__all__ = ["transformers", "vision"]
from .transformers import *
from .vision import *
| <filename>src/models/__init__.py
__all__ = ["transformers", "vision"]
from .transformers import *
from .vision import *
| none | 1 | 1.217687 | 1 |
|
packages/pyright-internal/src/tests/samples/genericTypes12.py | sasano8/pyright | 4,391 | 10798 | # This sample tests the checker's ability to enforce
# type invariance for type arguments.
# pyright: strict
from typing import Dict, Union
foo: Dict[Union[int, str], str] = {}
bar: Dict[str, str] = {}
# This should generate an error because
# both type parameters for Dict are invariant,
# and str isn't assignable to Union[int, str].
foo = bar
| # This sample tests the checker's ability to enforce
# type invariance for type arguments.
# pyright: strict
from typing import Dict, Union
foo: Dict[Union[int, str], str] = {}
bar: Dict[str, str] = {}
# This should generate an error because
# both type parameters for Dict are invariant,
# and str isn't assignable to Union[int, str].
foo = bar
| en | 0.761926 | # This sample tests the checker's ability to enforce # type invariance for type arguments. # pyright: strict # This should generate an error because # both type parameters for Dict are invariant, # and str isn't assignable to Union[int, str]. | 2.642449 | 3 |
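Why the checker must reject foo = bar: if a Dict[str, str] could be used where a Dict[Union[int, str], str] is expected, the wider view could insert keys the narrower type forbids. A brief sketch building on bar from the sample above (not part of the pyright test file):

from typing import Dict, Union

def add_default(d: Dict[Union[int, str], str]) -> None:
    d[0] = "zero"        # legal for the declared parameter type

# add_default(bar)       # rejected for the same invariance reason: accepting it
#                        # would let an int key leak into a Dict[str, str]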
test.py | Naveenkhasyap/udacity-ml | 0 | 10799 | <reponame>Naveenkhasyap/udacity-ml
how_many_snakes = 1
snake_string = """
Welcome to Python3!
____
/ . .\\
\\ ---<
\\ /
__________/ /
-=:___________/
<3, Juno
"""
print(snake_string * how_many_snakes) | how_many_snakes = 1
snake_string = """
Welcome to Python3!
____
/ . .\\
\\ ---<
\\ /
__________/ /
-=:___________/
<3, Juno
"""
print(snake_string * how_many_snakes) | en | 0.18899 | Welcome to Python3! ____ / . .\\ \\ ---< \\ / __________/ / -=:___________/ <3, Juno | 3.307518 | 3 |