| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 value (Python) |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |

Each record below carries these fields in order: repository metadata for the stars, issues, and forks snapshots, the raw file `content`, and the derived statistics columns.
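For orientation, here is a minimal sketch of how a dataset with this schema could be loaded and filtered with the Hugging Face `datasets` library. The hub ID `user/python-code-dataset` is a hypothetical placeholder, and the score thresholds are arbitrary.

```python
# Hedged sketch: consuming a dataset with the schema above.
# "user/python-code-dataset" is a placeholder, not a real hub ID.
from datasets import load_dataset

ds = load_dataset("user/python-code-dataset", split="train")

# Keep well-documented files that define at least one class.
subset = ds.filter(
    lambda row: row["score_documentation"] > 0.5 and row["count_classes"] > 0
)

for row in subset.select(range(3)):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```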
d2d415b3f1a1db25737dd9e6b40de2eb5823d384 | 325 | py | Python | DjangoTry/venv/Lib/site-packages/django_select2/__init__.py | PavelKoksharov/QR-BOOK | 8b05cecd7a3cffcec281f2e17da398ad9e4c5de5 | ["MIT"] | null | null | null | DjangoTry/venv/Lib/site-packages/django_select2/__init__.py | PavelKoksharov/QR-BOOK | 8b05cecd7a3cffcec281f2e17da398ad9e4c5de5 | ["MIT"] | null | null | null | DjangoTry/venv/Lib/site-packages/django_select2/__init__.py | PavelKoksharov/QR-BOOK | 8b05cecd7a3cffcec281f2e17da398ad9e4c5de5 | ["MIT"] | null | null | null |
"""
This is a Django_ integration of Select2_.
The application includes Select2 driven Django Widgets and Form Fields.
.. _Django: https://www.djangoproject.com/
.. _Select2: https://select2.org/
"""
from django import get_version
if get_version() < '3.2':
default_app_config = "django_select2.apps.Select2AppConfig"
| 23.214286 | 71 | 0.750769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 245 | 0.753846 |
d2d4cdab7ece6cb0f6e54ac92797ae4e32cdf266 | 673 | py | Python | Sorting/bubble.py | Krylovsentry/Algorithms | 0cd236f04dc065d5247a6f274bb3db503db591b0 | ["MIT"] | 1 | 2016-08-21T13:01:42.000Z | 2016-08-21T13:01:42.000Z | Sorting/bubble.py | Krylovsentry/Algorithms | 0cd236f04dc065d5247a6f274bb3db503db591b0 | ["MIT"] | null | null | null | Sorting/bubble.py | Krylovsentry/Algorithms | 0cd236f04dc065d5247a6f274bb3db503db591b0 | ["MIT"] | null | null | null |
# O(n ** 2)
def bubble_sort(slist, asc=True):
    for iteration in range(len(slist))[::-1]:
        need_exchanges = False   # reset each pass so the early exit below stays valid
        for j in range(iteration):
            if asc:
                if slist[j] > slist[j + 1]:
                    need_exchanges = True
                    slist[j], slist[j + 1] = slist[j + 1], slist[j]
            else:
                if slist[j] < slist[j + 1]:
                    need_exchanges = True
                    slist[j], slist[j + 1] = slist[j + 1], slist[j]
        if not need_exchanges:   # no swaps in a full pass: the list is sorted
            return slist
    return slist
print(bubble_sort([8, 1, 13, 34, 5, 2, 21, 3, 1], False))
print(bubble_sort([1, 2, 3, 4, 5, 6]))
| 32.047619 | 67 | 0.473997 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.016345 |
d2d55fb8abaabd881c853630310ad6dc464e75ff | 8,247 | py | Python | chapter_13/pymail.py | bimri/programming_python | ba52ccd18b9b4e6c5387bf4032f381ae816b5e77 | ["MIT"] | null | null | null | chapter_13/pymail.py | bimri/programming_python | ba52ccd18b9b4e6c5387bf4032f381ae816b5e77 | ["MIT"] | null | null | null | chapter_13/pymail.py | bimri/programming_python | ba52ccd18b9b4e6c5387bf4032f381ae816b5e77 | ["MIT"] | null | null | null |
"A Console-Based Email Client"
#!/usr/local/bin/python
"""
##########################################################################
pymail - a simple console email interface client in Python; uses Python
poplib module to view POP email messages, smtplib to send new mails, and
the email package to extract mail headers and payload and compose mails;
##########################################################################
"""
import poplib, smtplib, email.utils, mailconfig
from email.parser import Parser
from email.message import Message
fetchEncoding = mailconfig.fetchEncoding
def decodeToUnicode(messageBytes, fetchEncoding=fetchEncoding):
"""
4E, Py3.1: decode fetched bytes to str Unicode string for display or parsing;
use global setting (or by platform default, hdrs inspection, intelligent guess);
in Python 3.2/3.3, this step may not be required: if so, return message intact;
"""
return [line.decode(fetchEncoding) for line in messageBytes]
def splitaddrs(field):
"""
4E: split address list on commas, allowing for commas in name parts
"""
pairs = email.utils.getaddresses([field]) # [(name,addr)]
return [email.utils.formataddr(pair) for pair in pairs] # [name <addr>]
def inputmessage():
import sys
From = input('From? ').strip()
To = input('To? ').strip() # datetime hdr may be set auto
To = splitaddrs(To) # possible many, name+<addr> okay
Subj = input('Subj? ').strip() # don't split blindly on ',' or ';'
print('Type message text, end with line="."')
text = ''
while True:
line = sys.stdin.readline()
if line == '.\n': break
text += line
return From, To, Subj, text
def sendmessage():
From, To, Subj, text = inputmessage()
msg = Message()
msg['From'] = From
msg['To'] = ', '.join(To) # join for hdr, not send
msg['Subject'] = Subj
msg['Date'] = email.utils.formatdate() # curr datetime, rfc2822
msg.set_payload(text)
server = smtplib.SMTP(mailconfig.smtpservername)
try:
failed = server.sendmail(From, To, str(msg)) # may also raise exc
except:
print('Error - send failed')
else:
if failed: print('Failed:', failed)
def connect(servername, user, passwd):
print('Connecting...')
server = poplib.POP3(servername)
server.user(user) # connect, log in to mail server
server.pass_(passwd) # pass is a reserved word
print(server.getwelcome()) # print returned greeting message
return server
def loadmessages(servername, user, passwd, loadfrom=1):
server = connect(servername, user, passwd)
try:
print(server.list())
(msgCount, msgBytes) = server.stat()
print('There are', msgCount, 'mail messages in', msgBytes, 'bytes')
print('Retrieving...')
msgList = [] # fetch mail now
for i in range(loadfrom, msgCount+1): # empty if low >= high
(hdr, message, octets) = server.retr(i) # save text on list
message = decodeToUnicode(message) # 4E, Py3.1: bytes to str
msgList.append('\n'.join(message)) # leave mail on server
finally:
server.quit() # unlock the mail box
assert len(msgList) == (msgCount - loadfrom) + 1 # msg nums start at 1
return msgList
def deletemessages(servername, user, passwd, toDelete, verify=True):
print('To be deleted:', toDelete)
if verify and input('Delete?')[:1] not in ['y', 'Y']:
print('Delete cancelled.')
else:
server = connect(servername, user, passwd)
try:
print('Deleting messages from server...')
for msgnum in toDelete: # reconnect to delete mail
server.dele(msgnum) # mbox locked until quit()
finally:
server.quit()
def showindex(msgList):
count = 0 # show some mail headers
for msgtext in msgList:
msghdrs = Parser().parsestr(msgtext, headersonly=True) # expects str in 3.1
count += 1
print('%d:\t%d bytes' % (count, len(msgtext)))
for hdr in ('From', 'To', 'Date', 'Subject'):
try:
print('\t%-8s=>%s' % (hdr, msghdrs[hdr]))
except KeyError:
print('\t%-8s=>(unknown)' % hdr)
if count % 5 == 0:
input('[Press Enter key]') # pause after each 5
def showmessage(i, msgList):
if 1 <= i <= len(msgList):
#print(msgList[i-1]) # old: prints entire mail--hdrs+text
print('-' * 79)
msg = Parser().parsestr(msgList[i-1]) # expects str in 3.1
content = msg.get_payload() # prints payload: string, or [Messages]
if isinstance(content, str): # keep just one end-line at end
content = content.rstrip() + '\n'
print(content)
print('-' * 79) # to get text only, see email.parsers
else:
print('Bad message number')
def savemessage(i, mailfile, msgList):
    if 1 <= i <= len(msgList):
        savefile = open(mailfile, 'a', encoding=mailconfig.fetchEncoding)  # 4E
        savefile.write('\n' + msgList[i-1] + '-'*80 + '\n')
        savefile.close()   # close to flush the append; the handle was left open before
    else:
        print('Bad message number')
def msgnum(command):
try:
return int(command.split()[1])
except:
return -1 # assume this is bad
helptext = """
Available commands:
i - index display
l n? - list all messages (or just message n)
d n? - mark all messages for deletion (or just message n)
s n? - save all messages to a file (or just message n)
m - compose and send a new mail message
q - quit pymail
? - display this help text
"""
def interact(msgList, mailfile):
showindex(msgList)
toDelete = []
while True:
try:
command = input('[Pymail] Action? (i, l, d, s, m, q, ?) ')
except EOFError:
command = 'q'
if not command: command = '*'
# quit
if command == 'q':
break
# index
elif command[0] == 'i':
showindex(msgList)
# list
elif command[0] == 'l':
if len(command) == 1:
for i in range(1, len(msgList)+1):
showmessage(i, msgList)
else:
showmessage(msgnum(command), msgList)
# save
elif command[0] == 's':
if len(command) == 1:
for i in range(1, len(msgList)+1):
savemessage(i, mailfile, msgList)
else:
savemessage(msgnum(command), mailfile, msgList)
# delete
elif command[0] == 'd':
if len(command) == 1: # delete all later
toDelete = list(range(1, len(msgList)+1)) # 3.x requires list
else:
delnum = msgnum(command)
if (1 <= delnum <= len(msgList)) and (delnum not in toDelete):
toDelete.append(delnum)
else:
print('Bad message number')
# mail
elif command[0] == 'm': # send a new mail via SMTP
sendmessage()
#execfile('smtpmail.py', {}) # alt: run file in own namespace
elif command[0] == '?':
print(helptext)
else:
print('What? -- type "?" for commands help')
return toDelete
if __name__ == '__main__':
import getpass, mailconfig
mailserver = mailconfig.popservername # ex: 'pop.rmi.net'
mailuser = mailconfig.popusername # ex: 'lutz'
mailfile = mailconfig.savemailfile # ex: r'c:\stuff\savemail'
mailpswd = getpass.getpass('Password for %s?' % mailserver)
print('[Pymail email client]')
msgList = loadmessages(mailserver, mailuser, mailpswd) # load all
toDelete = interact(msgList, mailfile)
if toDelete: deletemessages(mailserver, mailuser, mailpswd, toDelete)
print('Bye.')
| 37.830275 | 84 | 0.547229 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,703 | 0.327756 |
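pymail.py above imports a user-edited `mailconfig` module and reads five names from it (`fetchEncoding`, `smtpservername`, `popservername`, `popusername`, `savemailfile`). A minimal sketch of such a module follows; every value is a placeholder to be replaced with your own account and server details.

```python
# mailconfig.py -- minimal sketch; all values are placeholders.
popservername = 'pop.example.com'      # POP3 server pymail fetches from
popusername = 'your-username'          # POP3 account name
smtpservername = 'smtp.example.com'    # SMTP server pymail sends through
savemailfile = '/tmp/savemail.txt'     # file the 's' command appends mail to
fetchEncoding = 'latin1'               # how fetched bytes are decoded to str
```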
d2d6774deb12048e5d8199a5f876c5130870f008 | 1,027 | py | Python | dependencyinjection/internal/param_type_resolver.py | Cologler/dependencyinjection-python | dc05c61571f10652d82929ebec4b255f109b840b | ["MIT"] | null | null | null | dependencyinjection/internal/param_type_resolver.py | Cologler/dependencyinjection-python | dc05c61571f10652d82929ebec4b255f109b840b | ["MIT"] | null | null | null | dependencyinjection/internal/param_type_resolver.py | Cologler/dependencyinjection-python | dc05c61571f10652d82929ebec4b255f109b840b | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017~2999 - cologler <[email protected]>
# ----------
#
# ----------
import typing
import inspect
from .errors import ParameterTypeResolveError
class ParameterTypeResolver:
    ''' designed to resolve a type from a parameter. '''
def __init__(self, name_map: typing.Dict[str, type]):
self._name_map = name_map.copy()
def resolve(self, parameter: inspect.Parameter, allow_none):
if parameter.annotation is inspect.Parameter.empty:
typ = self._name_map.get(parameter.name)
if typ is None:
msg = "cannot resolve parameter type from name: '{}'".format(parameter.name)
raise ParameterTypeResolveError(msg)
return typ
elif isinstance(parameter.annotation, type):
return parameter.annotation
elif not allow_none:
msg = 'cannot parse type from annotation: {}'.format(parameter.annotation)
raise ParameterTypeResolveError(msg)
| 31.121212 | 92 | 0.641675 | 816 | 0.794547 | 0 | 0 | 0 | 0 | 0 | 0 | 259 | 0.252191 |
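A short usage sketch for the resolver above, assuming the class is importable from this module; the `handler` function and its name map are made up for illustration. Unannotated parameters fall back to the name map, annotated ones use the annotation directly.

```python
# Hypothetical usage of ParameterTypeResolver (class defined above).
import inspect

def handler(db, count: int):
    pass

resolver = ParameterTypeResolver({'db': dict})   # fallback map: name -> type
sig = inspect.signature(handler)
print(resolver.resolve(sig.parameters['db'], allow_none=False))     # <class 'dict'>, via the name map
print(resolver.resolve(sig.parameters['count'], allow_none=False))  # <class 'int'>, via the annotation
```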
d2d69439ae028b8caac841d651293bd86aa4f321 | 639 | py | Python | rest-api/server.py | phenomax/resnet50-miml-rest | 4f78dd2c9454c54d013085eb4d50080d38a833ac | ["Unlicense"] | 1 | 2020-08-29T16:51:47.000Z | 2020-08-29T16:51:47.000Z | rest-api/server.py | phenomax/resnet50-miml-rest | 4f78dd2c9454c54d013085eb4d50080d38a833ac | ["Unlicense"] | null | null | null | rest-api/server.py | phenomax/resnet50-miml-rest | 4f78dd2c9454c54d013085eb4d50080d38a833ac | ["Unlicense"] | null | null | null |
import io
import os
from flask import Flask, request, jsonify
from PIL import Image
from resnet_model import MyResnetModel
app = Flask(__name__)
# max filesize 2mb
app.config['MAX_CONTENT_LENGTH'] = 2 * 1024 * 1024
# setup resnet model
model = MyResnetModel(os.path.dirname(os.path.abspath(__file__)))
@app.route("/")
def hello():
return jsonify({"message": "Hello from the API"})
@app.route('/predict', methods=['POST'])
def predict():
if 'image' not in request.files:
return jsonify({"error": "Missing file in request"})
img = request.files['image']
return jsonify({"result": model.predict(img.read())})
| 22.034483 | 65 | 0.694836 | 0 | 0 | 0 | 0 | 328 | 0.513302 | 0 | 0 | 160 | 0.250391 |
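A hedged client-side sketch for exercising the `/predict` endpoint above with the `requests` library; the host, port, and image path are assumptions (port 5000 is only the Flask development-server default), while the multipart field name `image` matches `request.files['image']` in the route.

```python
# Hypothetical client for the Flask service above (host/port/path assumed).
import requests

with open('sample.jpg', 'rb') as f:
    resp = requests.post(
        'http://localhost:5000/predict',   # default Flask dev-server port assumed
        files={'image': f},                # field name matches request.files['image']
    )
print(resp.json())   # {"result": ...} on success, {"error": ...} otherwise
```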
d2d95eb0f80255c257603ed734e875c5ce26b88b | 2,945 | py | Python | authors/apps/profiles/tests/test_follow.py | KabohaJeanMark/ah-backend-invictus | a9cf930934e8cbcb4ee370a088df57abe50ee6d6 | ["BSD-3-Clause"] | 7 | 2021-03-04T09:29:13.000Z | 2021-03-17T17:35:42.000Z | authors/apps/profiles/tests/test_follow.py | KabohaJeanMark/ah-backend-invictus | a9cf930934e8cbcb4ee370a088df57abe50ee6d6 | ["BSD-3-Clause"] | 25 | 2019-04-23T18:51:02.000Z | 2021-06-10T21:22:47.000Z | authors/apps/profiles/tests/test_follow.py | KabohaJeanMark/ah-backend-invictus | a9cf930934e8cbcb4ee370a088df57abe50ee6d6 | ["BSD-3-Clause"] | 7 | 2019-06-29T10:40:38.000Z | 2019-09-23T09:05:45.000Z |
from django.urls import reverse
from rest_framework import status
from .base import BaseTestCase
class FollowTestCase(BaseTestCase):
"""Testcases for following a user."""
def test_follow_user_post(self):
"""Test start following a user."""
url = reverse('follow', kwargs={'username': 'test2'})
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_post_follow_already_followed_user(self):
"""Test start following a user you already follow."""
url = reverse('follow', kwargs={'username': 'test2'})
self.client.post(url, HTTP_AUTHORIZATION=self.auth_header)
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_follow_missing_user_post(self):
"""Test trying to start following a missing user."""
url = reverse('follow', kwargs={'username': 'joel'})
response = self.client.post(url, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_delete_follow(self):
"""Test unfollowing a user"""
url = reverse('follow', kwargs={'username': 'test2'})
self.client.post(url, HTTP_AUTHORIZATION=self.auth_header)
response = self.client.delete(url, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_delete_follow_of_not_followed_user(self):
"""Test unfollowing a user you are not following"""
url = reverse('follow', kwargs={'username': 'test2'})
response = self.client.delete(url, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_list_followers_of_user(self):
"""Test list followers of a user"""
url_followers = reverse('getfollowers', kwargs={'username': 'test2'})
self.client.get(url_followers, HTTP_AUTHORIZATION=self.auth_header)
url_follow = reverse('follow', kwargs={'username': 'test2'})
self.client.post(url_follow, HTTP_AUTHORIZATION=self.auth_header)
response = self.client.get(url_followers, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_list_user_is_following(self):
"""Test list users the user is following"""
url_following = reverse('getfollowing', kwargs={'username': 'test1'})
self.client.get(url_following, HTTP_AUTHORIZATION=self.auth_header)
url_follow = reverse('follow', kwargs={'username': 'test2'})
self.client.post(url_follow, HTTP_AUTHORIZATION=self.auth_header)
response = self.client.get(url_following, HTTP_AUTHORIZATION=self.auth_header)
self.assertEqual(response.status_code, status.HTTP_200_OK)
| 50.775862 | 86 | 0.711036 | 2,845 | 0.966044 | 0 | 0 | 0 | 0 | 0 | 0 | 570 | 0.193548 |
d2dbd1807d449ae04403cf686fe2378b35d5fa68 | 6,585 | py | Python | OpenPNM/Phases/__GenericPhase__.py | thirtywang/OpenPNM | e55ee7ae69a8be3e2b0e6bf24c9ff92b6d24e16a | ["MIT"] | null | null | null | OpenPNM/Phases/__GenericPhase__.py | thirtywang/OpenPNM | e55ee7ae69a8be3e2b0e6bf24c9ff92b6d24e16a | ["MIT"] | null | null | null | OpenPNM/Phases/__GenericPhase__.py | thirtywang/OpenPNM | e55ee7ae69a8be3e2b0e6bf24c9ff92b6d24e16a | ["MIT"] | 1 | 2020-07-02T02:21:10.000Z | 2020-07-02T02:21:10.000Z |
# -*- coding: utf-8 -*-
"""
===============================================================================
module __GenericPhase__: Base class for building Phase objects
===============================================================================
"""
from OpenPNM.Network import GenericNetwork
import OpenPNM.Phases.models
from OpenPNM.Base import Core, Tools, logging
import scipy as sp
logger = logging.getLogger(__name__)
class GenericPhase(Core):
r"""
Base class to generate a generic phase object. The user must specify models
and parameters for all the properties they require. Classes for several
common phases are included with OpenPNM and can be found under OpenPNM.Phases.
Parameters
----------
network : OpenPNM Network object
The network to which this Phase should be attached
components : list of OpenPNM Phase objects
        These Phase objects are fictitious or virtual phases that are the pure
        components from which the mixture is made. They are used to calculate
        and store any pure-component data. If none are supplied, this object
        will act as either a pure component or a mixture whose properties are
        well known (like air) and need not be derived from the pure-component
        properties.
name : str, optional
A unique string name to identify the Phase object, typically same as
instance name but can be anything.
"""
def __init__(self, network=None, components=[], **kwargs):
super().__init__(**kwargs)
logger.name = self.name
if network is None:
network = GenericNetwork()
self.network.update({network.name: network})
# Initialize label 'all' in the object's own info dictionaries
self['pore.all'] = self._net['pore.all']
self['throat.all'] = self._net['throat.all']
# Set standard conditions on the fluid to get started
self['pore.temperature'] = 298.0
self['pore.pressure'] = 101325.0
        # Register Phase object in Network dictionary
self._net['pore.'+self.name] = True
self._net['throat.'+self.name] = True
if components != []:
for comp in components:
self.set_component(phase=comp)
self._net.phases.update({self.name: self}) # Connect Phase to Network
def __setitem__(self, prop, value):
for phys in self._physics:
if (prop in phys.keys()) and ('all' not in prop.split('.')):
logger.error(prop + ' is already defined in at least one \
associated Physics object')
return
super().__setitem__(prop, value)
def __getitem__(self, key):
if key.split('.')[-1] == self.name:
element = key.split('.')[0]
return self[element+'.all']
if key not in self.keys():
logger.debug(key+' not on Phase, constructing data from Physics')
return self._interleave_data(key, sources=self._physics)
else:
return super().__getitem__(key)
def props(self, element=None, mode='all', deep=False):
# TODO: The mode 'deep' is deprecated in favor of the deep argument
# and should be removed in a future version
modes = ['all', 'deep', 'models', 'constants']
mode = self._parse_mode(mode=mode, allowed=modes, single=False)
prop_list = []
if ('deep' in mode) or (deep is True):
if mode.count('deep') > 0:
mode.remove('deep')
for phys in self._physics:
prop_list.extend(phys.props(element=element, mode=mode))
# Get unique values
prop_list = Tools.PrintableList(set(prop_list))
prop_list.extend(super().props(element=element, mode=mode))
return prop_list
props.__doc__ = Core.props.__doc__
def set_component(self, phase, mode='add'):
r"""
        This method is used to add or remove a fictitious phase object to/from
        this object.
Parameters
----------
phase : OpenPNM Phase object
            This is the fictitious phase object defining a pure component.
mode : string
Indicates whether to 'add' or 'remove' the supplied Phase object
"""
if mode == 'add':
if phase.name in self.phases():
raise Exception('Phase already present')
else:
# Associate components with self
self.phases.update({phase.name: phase})
# Associate self with components
phase.phases.update({self.name: self})
# Add models for components to inherit mixture T and P
phase.models.add(propname='pore.temperature',
model=OpenPNM.Phases.models.misc.mixture_value)
phase.models.add(propname='pore.pressure',
model=OpenPNM.Phases.models.misc.mixture_value)
# Move T and P models to beginning of regeneration order
phase.models.reorder({'pore.temperature': 0, 'pore.pressure': 1})
elif mode == 'remove':
if phase.name in self.phases():
self.phases.pop(phase.name)
else:
raise Exception('Phase not found')
def check_mixture_health(self):
r"""
Query the properties of the 'virtual phases' that make up a mixture
to ensure they all add up
"""
mole_sum = sp.zeros((self.Np,))
for comp in self._phases:
try:
mole_sum = mole_sum + comp['pore.mole_fraction']
except:
pass
return mole_sum
def check_physics_health(self):
r"""
Perform a check to find pores which have overlapping or undefined Physics
"""
phys = self.physics()
Ptemp = sp.zeros((self.Np,))
Ttemp = sp.zeros((self.Nt,))
for item in phys:
Pind = self['pore.'+item]
Tind = self['throat.'+item]
Ptemp[Pind] = Ptemp[Pind] + 1
Ttemp[Tind] = Ttemp[Tind] + 1
health = Tools.HealthDict()
health['overlapping_pores'] = sp.where(Ptemp > 1)[0].tolist()
health['undefined_pores'] = sp.where(Ptemp == 0)[0].tolist()
health['overlapping_throats'] = sp.where(Ttemp > 1)[0].tolist()
health['undefined_throats'] = sp.where(Ttemp == 0)[0].tolist()
return health
| 39.909091 | 82 | 0.577525 | 6,152 | 0.934244 | 0 | 0 | 0 | 0 | 0 | 0 | 2,884 | 0.437965 |
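A minimal sketch of constructing a phase as the docstring above describes. It assumes the OpenPNM 1.x-era namespaces implied by this file's path, and only reads back the standard conditions that `__init__` explicitly sets; treat the constructor calls as assumptions to adapt to your install.

```python
# Hedged sketch: GenericPhase standard conditions (OpenPNM 1.x-era API assumed).
import OpenPNM

net = OpenPNM.Network.GenericNetwork()            # same no-arg construction used in __init__ above
phase = OpenPNM.Phases.GenericPhase(network=net)

print(phase['pore.temperature'])   # 298.0 K, set by __init__
print(phase['pore.pressure'])      # 101325.0 Pa, set by __init__
```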
d2dbe93b08cbd7c9fba4a7da5b0696432c491446 | 2,860 | py | Python | rqt_mypkg/src/rqt_mypkg/statistics.py | mounteverset/moveit_path_visualizer | 15e55c631cb4c4d052763ebd695ce5fcb6de5a4c | ["BSD-3-Clause"] | null | null | null | rqt_mypkg/src/rqt_mypkg/statistics.py | mounteverset/moveit_path_visualizer | 15e55c631cb4c4d052763ebd695ce5fcb6de5a4c | ["BSD-3-Clause"] | null | null | null | rqt_mypkg/src/rqt_mypkg/statistics.py | mounteverset/moveit_path_visualizer | 15e55c631cb4c4d052763ebd695ce5fcb6de5a4c | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python3
import sys
import copy
from moveit_commander import move_group
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
from math import pi, sqrt, pow
from std_msgs.msg import String
import io
import shutil
import json
#used to convert the points from the gui in a valid message for ros
from geometry_msgs.msg import Pose, PoseStamped
#used to read out the start points
import os
from nav_msgs.msg import Path
#used for publishing the planned path from start to goal
from visualization_msgs.msg import Marker, MarkerArray
#used to make a service request
from moveit_msgs.srv import GetPositionIKRequest, GetPositionIK
from rqt_mypkg import path_planning_interface
from trajectory_msgs.msg import JointTrajectoryPoint
## StatsitcisDefinedPath is used to get the path length of given points/positions generated by the Motion Plan
class StatisticsDefinedPath(object):
## Returns the path length
# @param eef_poses A list of end effector poses derived from the motion between start and goal pose
def get_path_length(self, eef_poses):
path_length = 0
for i in range(len(eef_poses) - 1):
## @var posex
# position x of the given position/point
posex = eef_poses[i].position.x
## @var posey
# position y of the given position/point
posey = eef_poses[i].position.y
## @var posez
# position z of the given position/point
posez = eef_poses[i].position.z
## @var posex1
# position x of the next given position/point
posex1 = eef_poses[i+1].position.x
## @var posey1
# position y of the next given position/point
posey1 = eef_poses[i+1].position.y
## @var posez1
# position z of the next given position/point
posez1 = eef_poses[i+1].position.z
            ## @var path_length
            # Euclidean distance between two consecutive points; the z term must
            # sit inside the sqrt (the original left it outside, inflating the total)
            path_length += sqrt(pow(posex1 - posex, 2) + pow(posey1 - posey, 2) + pow(posez1 - posez, 2))
return path_length
## Returns the maximum joint acceleration of every acceleration measured
# @param motion_plan The motion plan retrieved by the planner
def get_max_joint_acceleration(self, motion_plan):
## @var maxlist
# This list contains all accelerations given by the motion plan
maxlist = []
for i in range(len(motion_plan[1].joint_trajectory.points)):
for j in range(len(motion_plan[1].joint_trajectory.points[i].accelerations)):
for k in range(len(motion_plan[1].joint_trajectory.points[i].accelerations)):
maxlist.append(motion_plan[1].joint_trajectory.points[i].accelerations[j])
                    maxlist.append(motion_plan[1].joint_trajectory.points[i].accelerations[j])
        return max(maxlist)
| 39.178082 | 110 | 0.681469 | 1,977 | 0.691259 | 0 | 0 | 0 | 0 | 0 | 0 | 1,066 | 0.372727 |
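A quick sanity check of the corrected path-length formula, using plain `(x, y, z)` tuples in place of `geometry_msgs` poses; the points are made up.

```python
# Standalone check of the Euclidean path-length formula (hypothetical points).
from math import sqrt

points = [(0, 0, 0), (1, 0, 0), (1, 2, 0), (1, 2, 2)]
length = sum(
    sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2 + (z1 - z0) ** 2)
    for (x0, y0, z0), (x1, y1, z1) in zip(points, points[1:])
)
print(length)   # 1.0 + 2.0 + 2.0 = 5.0
```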
d2dbfa2d8a9c4169b00a898c87b761496a338473 | 596 | py | Python | apps/sendmail/admin.py | CasualGaming/studlan | 63daed67c1d309e4d5bd755eb68163e2174d0e00 | ["MIT"] | 9 | 2016-03-15T21:03:49.000Z | 2020-12-02T19:45:44.000Z | apps/sendmail/admin.py | piyushd26/studlan | 6eb96ebda182f44759b430cd497a727e0ee5bb63 | ["MIT"] | 161 | 2016-02-05T14:11:50.000Z | 2020-10-14T10:13:21.000Z | apps/sendmail/admin.py | piyushd26/studlan | 6eb96ebda182f44759b430cd497a727e0ee5bb63 | ["MIT"] | 11 | 2016-07-27T12:20:05.000Z | 2021-04-18T05:49:17.000Z |
# -*- coding: utf-8 -*-
from django.contrib import admin
from .models import Mail
class MailAdmin(admin.ModelAdmin):
list_display = ['subject', 'sent_time', 'recipients_total', 'successful_mails', 'failed_mails', 'done_sending']
ordering = ['-sent_time']
# Prevent creation
def has_add_permission(self, request, obj=None):
return False
# Prevent changes
def save_model(self, request, obj, form, change):
pass
# Prevent M2M changes
def save_related(self, request, form, formsets, change):
pass
admin.site.register(Mail, MailAdmin)
| 22.074074 | 115 | 0.676174 | 470 | 0.788591 | 0 | 0 | 0 | 0 | 0 | 0 | 175 | 0.293624 |
d2dc2ba48e9f74dafb44ffcc8ba8cd1cd50c6109 | 2,922 | py | Python | event/test_event.py | Web-Team-IITI-Gymkhana/gymkhana_server | 67f4eba9dc0a55de04b3006ffeb5f608086b89ce | ["MIT"] | null | null | null | event/test_event.py | Web-Team-IITI-Gymkhana/gymkhana_server | 67f4eba9dc0a55de04b3006ffeb5f608086b89ce | ["MIT"] | 4 | 2022-01-14T12:31:33.000Z | 2022-01-28T10:25:44.000Z | event/test_event.py | Web-Team-IITI-Gymkhana/gymkhana_server | 67f4eba9dc0a55de04b3006ffeb5f608086b89ce | ["MIT"] | null | null | null |
from uuid import uuid4
from fastapi.testclient import TestClient
from ..main import app
client = TestClient(app)
class Test_Event:
record = {
"name": "Winter of CP",
"description": "It is a coding event held in the month of Decemeber by Programming Club",
"created_on": "2022-01-28T21:33:50.795775",
"last_update": "2021-01-28T12:33:52.795775",
"start_time": "2022-02-19T19:33:10.895775",
"end_time": "2022-02-19T21:00:10.895775",
"image": "https://www.google.com/search?q=P",
"website": "",
"notify": True,
"is_online": False,
"meet_link": "",
"venue": "Carbon Building",
}
updated_record = {
"name": "Winter of CP",
"description": "It is a coding event held in the month of Decemeber by Programming Club",
"created_on": "2022-01-28T21:33:50.795775",
"last_update": "2021-01-28T12:33:52.795775",
"start_time": "2022-02-19T19:33:10.895775",
"end_time": "2022-02-19T21:00:10.895775",
"image": "https://www.google.com/search?",
"website": "",
"notify": False,
"is_online": True,
"meet_link": "https://meet.google.com/abc-defg-hij",
"venue": "",
}
def test_create(self):
response = client.post("/event/", json=self.record)
assert response.status_code == 201, f"Received {response.status_code}"
response_record = response.json()
self.record["id"] = response_record["id"]
print(self.record)
for key in response_record.keys():
assert self.record[key] == response_record[key]
def test_get_one(self):
response = client.get(f"/event/{self.record['id']}")
assert response.status_code == 200, f"Received {response.status_code}"
assert response.json() == self.record
def test_get_non_existing(self):
response = client.get(f"/event/{uuid4()}")
assert response.status_code == 404, f"Received {response.status_code}"
assert response.json() == {"detail": "Event not found"}
def test_patch(self):
response = client.patch(
f"/event/{self.record['id']}", json=self.updated_record
)
assert response.status_code == 202, f"Received {response.status_code}"
assert response.json() == self.updated_record
def test_get_all(self):
response = client.get("/event/")
assert response.status_code == 200, f"Received {response.status_code}"
def test_delete(self):
response = client.delete(f"/event/{self.record['id']}")
assert response.status_code == 204, f"Received {response.status_code}"
def test_delete_non_existing(self):
response = client.get(f"/event/{uuid4()}")
assert response.status_code == 404, f"Received {response.status_code}"
assert response.json() == {"detail": "Event not found"}
| 36.525 | 97 | 0.612936 | 2,802 | 0.958932 | 0 | 0 | 0 | 0 | 0 | 0 | 1,205 | 0.412389 |
d2dc62d8070e943c3939b3b81fa0c4b500c8b2a5 | 629 | py | Python | zigzag_conversion.py | cheng10/leetcode | 8ecab26e354501e7819afe29aa79df2eb8caa8ca | ["MIT"] | null | null | null | zigzag_conversion.py | cheng10/leetcode | 8ecab26e354501e7819afe29aa79df2eb8caa8ca | ["MIT"] | null | null | null | zigzag_conversion.py | cheng10/leetcode | 8ecab26e354501e7819afe29aa79df2eb8caa8ca | ["MIT"] | null | null | null |
class Solution(object):
def convert(self, s, numRows):
"""
:type s: str
:type numRows: int
:rtype: str
"""
        cycle = 2 * (numRows - 1)
        if numRows == 1: cycle = 1
        rows = ['' for _ in range(numRows)]   # one bucket per zigzag row; 'rows' avoids shadowing the built-in map
        for j in range(len(s)):
            mod = j % cycle
            if mod < numRows:
                rows[mod] += s[j]                         # walking down a column
            else:
                rows[2 * (numRows - 1) - mod] += s[j]     # walking back up the diagonal
        return ''.join(rows)
| 24.192308 | 46 | 0.384738 | 628 | 0.99841 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.138315 |
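For reference, the canonical examples from LeetCode problem 6; the expected outputs are facts of the problem statement, not of this particular implementation.

```python
# Known examples from LeetCode problem 6 ("ZigZag Conversion").
print(Solution().convert("PAYPALISHIRING", 3))   # -> "PAHNAPLSIIGYIR"
print(Solution().convert("PAYPALISHIRING", 4))   # -> "PINALSIGYAHRPI"
```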
d2dc870265729c9617c1afe744f12af18a12c128 | 24,837 | py | Python | src/tests/ftest/soak/soak.py | cdurf1/daos | f57f682ba07560fd35c0991798c5496c20f10769 | ["Apache-2.0"] | null | null | null | src/tests/ftest/soak/soak.py | cdurf1/daos | f57f682ba07560fd35c0991798c5496c20f10769 | ["Apache-2.0"] | null | null | null | src/tests/ftest/soak/soak.py | cdurf1/daos | f57f682ba07560fd35c0991798c5496c20f10769 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
"""
(C) Copyright 2019 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. 8F-30005.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
"""
from __future__ import print_function
import os
import time
from apricot import TestWithServers
from ior_utils import IorCommand
import slurm_utils
from test_utils_pool import TestPool
from test_utils_container import TestContainer
from ClusterShell.NodeSet import NodeSet
from general_utils import pcmd
import socket
class SoakTestError(Exception):
"""Soak exception class."""
class Soak(TestWithServers):
"""Execute DAOS Soak test cases.
:avocado: recursive
Args:
TestWithServers (AvocadoTest): Unit Test test cases
There are currently two types of soak tests.
1) smoke - runs each specified cmdline (job spec) for a single
iteration. The smoke test is to verify the environment is
configured properly before running the longer soaks
2) 1 hour - this will run a defined set of jobs and continue to submit
the jobs until the time has expired.
The tests also use an IOR that is compiled with MPICH and is built with
both the DAOS and MPI-IO drivers.
"""
def job_done(self, args):
"""Call this function when a job is done.
Args:
args (list):handle --which job, i.e. the job ID,
state --string indicating job completion status
"""
self.soak_results[args["handle"]] = args["state"]
def create_pool(self, pools):
"""Create a pool that the various tests use for storage.
Args:
pools: list of pool name from yaml file
/run/<test_params>/poollist/*
Returns:
list: list of TestPool object
"""
pool_obj_list = []
for pool_name in pools:
# Create a pool
pool_obj_list.append(TestPool(self.context, self.log))
pool_obj_list[-1].namespace = "/".join(["/run", pool_name, "*"])
pool_obj_list[-1].get_params(self)
pool_obj_list[-1].create()
self.log.info("Valid Pool UUID is %s", pool_obj_list[-1].uuid)
# Commented out due to DAOS-3836.
## Check that the pool was created
#self.assertTrue(
# pool_obj_list[-1].check_files(self.hostlist_servers),
# "Pool data not detected on servers")
return pool_obj_list
def destroy_pool(self, pool):
"""Destroy the specified pool - TO DO."""
pass
def remote_copy(self, hostlist, remote_dir, local_dir):
"""Copy files from remote dir to local dir.
Args:
hostlist (list): list of remote nodes
remote_dir (str): remote directory of files
local_dir (str): local directory
Raises:
SoakTestError: if there is an error with the remote copy
"""
this_host = socket.gethostname()
result = pcmd(
NodeSet.fromlist(hostlist),
"if [ ! -z '$(ls -A {0})' ]; then "
"scp -p -r {0}/ \"{1}:'{2}/'\" && rm -rf {0}/*; fi".format(
remote_dir, this_host, local_dir),
verbose=False)
if len(result) > 1 or 0 not in result:
raise SoakTestError(
"Error executing remote copy: {}".format(
", ".join(
[str(result[key]) for key in result if key != 0])))
def create_ior_cmdline(self, job_params, job_spec, pool):
"""Create an IOR cmdline to run in slurm batch.
Args:
job_params (str): job params from yaml file
job_spec (str): specific ior job to run
pool (obj): TestPool obj
Returns:
cmd: cmdline string
"""
command = []
iteration = self.test_iteration
ior_params = "/".join(["run", job_spec, "*"])
ior_cmd = IorCommand()
ior_cmd.namespace = ior_params
ior_cmd.get_params(self)
if iteration is not None and iteration < 0:
ior_cmd.repetitions.update(1000000)
ior_cmd.max_duration.update(self.params.get(
"time", "/".join([job_params, "*"])))
# IOR job specs with a list of parameters; update each value
# transfer_size
# block_size
# daos object class
tsize_list = ior_cmd.transfer_size.value
bsize_list = ior_cmd.block_size.value
oclass_list = ior_cmd.daos_oclass.value
for b_size in bsize_list:
ior_cmd.block_size.update(b_size)
for o_type in oclass_list:
ior_cmd.daos_oclass.update(o_type)
for t_size in tsize_list:
ior_cmd.transfer_size.update(t_size)
ior_cmd.set_daos_params(self.server_group, pool)
# export the user environment to test node
exports = ["ALL"]
if ior_cmd.api.value == "MPIIO":
env = {
"CRT_ATTACH_INFO_PATH": os.path.join(
self.basepath, "install/tmp"),
"DAOS_POOL": str(ior_cmd.daos_pool.value),
"MPI_LIB": "\"\"",
"DAOS_SVCL": str(ior_cmd.daos_svcl.value),
"DAOS_SINGLETON_CLI": 1,
"FI_PSM2_DISCONNECT": 1
}
exports.extend(
["{}={}".format(
key, val) for key, val in env.items()])
cmd = "srun -l --mpi=pmi2 --export={} {}".format(
",".join(exports), ior_cmd)
command.append(cmd)
self.log.debug("<<IOR cmdline >>: %s \n", cmd)
return command
def create_dmg_cmdline(self, job_params, job_spec, pool):
"""Create a dmg cmdline to run in slurm batch.
Args:
job_params (str): job params from yaml file
job_spec (str): specific dmg job to run
Returns:
cmd: [description]
"""
cmd = ""
return cmd
def build_job_script(self, nodesperjob, job, pool):
"""Create a slurm batch script that will execute a list of jobs.
Args:
nodesperjob(int): number of nodes executing each job
job(str): the job that will be defined in the slurm script with
/run/"job"/. It is currently defined in the yaml as:
Example job:
job1:
name: job1 - unique name
time: 10 - cmdline time in seconds; used in IOR -T param
tasks: 1 - number of processes per node --ntaskspernode
jobspec:
- ior_daos
- ior_mpiio
pool (obj): TestPool obj
Returns:
script_list: list of slurm batch scripts
"""
self.log.info("<<Build Script for job %s >> at %s", job, time.ctime())
script_list = []
# create one batch script per cmdline
# get job params
job_params = "/run/" + job
job_name = self.params.get("name", "/".join([job_params, "*"]))
job_specs = self.params.get("jobspec", "/".join([job_params, "*"]))
task_list = self.params.get("tasks", "/".join([job_params, "*"]))
job_time = self.params.get("time", "/".join([job_params, "*"]))
# job_time in minutes:seconds format
job_time = str(job_time) + ":00"
for job_spec in job_specs:
if "ior" in job_spec:
# Create IOR cmdline
cmd_list = self.create_ior_cmdline(job_params, job_spec, pool)
elif "dmg" in job_spec:
# create dmg cmdline
cmd_list = self.create_dmg_cmdline(job_params, job_spec, pool)
else:
raise SoakTestError(
"<<FAILED: Soak job: {} Job spec {} is invalid>>".format(
job, job_spec))
# a single cmdline per batch job; so that a failure is per cmdline
# change to multiple cmdlines per batch job later.
for cmd in cmd_list:
# additional sbatch params
for tasks in task_list:
output = os.path.join(
self.rem_pass_dir, "%N_" + self.test_name +
"_" + job_name + "_" + job_spec +
"_results.out_%j_%t_" + str(tasks) + "_")
num_tasks = nodesperjob * tasks
sbatch = {
"ntasks-per-node": tasks,
"ntasks": num_tasks,
"time": job_time,
"partition": self.partition_clients,
"exclude": NodeSet.fromlist(self.hostlist_servers)}
script = slurm_utils.write_slurm_script(
self.rem_pass_dir, job_name, output, nodesperjob,
[cmd], sbatch)
script_list.append(script)
return script_list
def job_setup(self, test_param, pool):
"""Create the slurm job batch script .
Args:
test_param (str): test_param from yaml file
pool (obj): TestPool obj
Returns:
scripts: list of slurm batch scripts
"""
# Get jobmanager
self.job_manager = self.params.get("jobmanager", "/run/*")
# Get test params
test_params = "".join([test_param, "*"])
self.test_name = self.params.get("name", test_params)
self.test_iteration = self.params.get("test_iteration", test_params)
self.job_list = self.params.get("joblist", test_params)
self.nodesperjob = self.params.get("nodesperjob", test_params)
self.soak_results = {}
script_list = []
self.log.info(
"<<Job_Setup %s >> at %s", self.test_name, time.ctime())
# Create the remote log directories from new loop/pass
self.rem_pass_dir = self.log_dir + "/pass" + str(self.loop)
self.local_pass_dir = self.outputsoakdir + "/pass" + str(self.loop)
result = pcmd(
NodeSet.fromlist(self.hostlist_clients),
"mkdir -p {}".format(self.rem_pass_dir),
verbose=False)
if len(result) > 1 or 0 not in result:
raise SoakTestError(
"<<FAILED: logfile directory not"
"created on clients>>: {}".format(", ".join(
[str(result[key]) for key in result if key != 0])))
# Create local log directory
os.makedirs(self.local_pass_dir)
# nodesperjob = -1 indicates to use all nodes in client hostlist
if self.nodesperjob < 0:
self.nodesperjob = len(self.hostlist_clients)
if len(self.hostlist_clients)/self.nodesperjob < 1:
raise SoakTestError(
"<<FAILED: There are only {} client nodes for this job. "
"Job requires {}".format(
len(self.hostlist_clients), self.nodesperjob))
if self.job_manager == "slurm":
# queue up slurm script and register a callback to retrieve
# results. The slurm batch script are single cmdline for now.
# scripts is a list of slurm batch scripts with a single cmdline
for job in self.job_list:
scripts = self.build_job_script(self.nodesperjob, job, pool)
script_list.extend(scripts)
return script_list
else:
raise SoakTestError(
"<<FAILED: Job manager {} is not yet enabled. "
"Job requires slurm".format(self.job_manager))
def job_startup(self, scripts):
"""Submit job batch script.
Args:
scripts (list): list of slurm batch scripts to submit to queue
Returns:
job_id_list: IDs of each job submitted to slurm.
"""
self.log.info(
"<<Job Startup - %s >> at %s", self.test_name, time.ctime())
job_id_list = []
# scripts is a list of batch script files
for script in scripts:
try:
job_id = slurm_utils.run_slurm_script(str(script))
except slurm_utils.SlurmFailed as error:
self.log.error(error)
# Force the test to exit with failure
job_id = None
if job_id:
print(
"<<Job {} started with {} >> at {}".format(
job_id, script, time.ctime()))
slurm_utils.register_for_job_results(
job_id, self, maxwait=self.test_timeout)
# keep a list of the job_id's
job_id_list.append(int(job_id))
else:
# one of the jobs failed to queue; exit on first fail for now.
err_msg = "Slurm failed to submit job for {}".format(script)
job_id_list = []
raise SoakTestError(
"<<FAILED: Soak {}: {}>>".format(self.test_name, err_msg))
return job_id_list
def job_completion(self, job_id_list):
"""Wait for job completion and cleanup.
Args:
job_id_list: IDs of each job submitted to slurm
"""
self.log.info(
"<<Job Completion - %s >> at %s", self.test_name, time.ctime())
# If there is nothing to do; exit
if len(job_id_list) > 0:
# wait for all the jobs to finish
while len(self.soak_results) < len(job_id_list):
# print("<<Waiting for results {} >>".format(
# self.soak_results))
time.sleep(2)
# check for job COMPLETED and remove it from the job queue
for job, result in self.soak_results.items():
# The queue include status of "COMPLETING"
# sleep to allow job to move to final state
if result == "COMPLETED":
job_id_list.remove(int(job))
else:
self.log.info(
"<< Job %s failed with status %s>>", job, result)
if len(job_id_list) > 0:
self.log.info(
"<<Cancel jobs in queue with id's %s >>", job_id_list)
for job in job_id_list:
status = slurm_utils.cancel_jobs(int(job))
if status == 0:
self.log.info("<<Job %s successfully cancelled>>", job)
# job_id_list.remove(int(job))
else:
self.log.info("<<Job %s could not be killed>>", job)
# gather all the logfiles for this pass and cleanup test nodes
# If there is a failure the files can be gathered again in Teardown
try:
self.remote_copy(
self.node_list, self.rem_pass_dir, self.outputsoakdir)
except SoakTestError as error:
self.log.info("Remote copy failed with %s", error)
self.soak_results = {}
return job_id_list
def execute_jobs(self, test_param, pools):
"""Execute the overall soak test.
Args:
test_param (str): test_params from yaml file
pools (list): list of TestPool obj
Raise:
SoakTestError
"""
cmdlist = []
# Setup cmdlines for jobs
for pool in pools:
cmdlist.extend(self.job_setup(test_param, pool))
# Gather the job_ids
self.job_id_list = self.job_startup(cmdlist)
# Initialize the failed_job_list to job_list so that any
# unexpected failures will clear the squeue in tearDown
self.failed_job_id_list = self.job_id_list
# Wait for jobs to finish and cancel/kill jobs if necessary
self.failed_job_id_list = self.job_completion(self.job_id_list)
# Test fails on first error but could use continue on error here
if len(self.failed_job_id_list) > 0:
raise SoakTestError(
"<<FAILED: The following jobs failed {} >>".format(
" ,".join(
str(job_id) for job_id in self.failed_job_id_list)))
def run_soak(self, test_param):
"""Run the soak test specified by the test params.
Args:
test_param (str): test_params from yaml file
"""
pool_list = self.params.get("poollist", "".join([test_param, "*"]))
self.test_timeout = self.params.get("test_timeout", test_param)
self.job_id_list = []
start_time = time.time()
rank = self.params.get("rank", "/run/container_reserved/*")
obj_class = self.params.get(
"object_class", "/run/container_reserved/*")
# Create the reserved pool with data
self.pool = self.create_pool(["pool_reserved"])
self.pool[0].connect()
self.container = TestContainer(self.pool[0])
self.container.namespace = "/run/container_reserved/*"
self.container.get_params(self)
self.container.create()
self.container.write_objects(rank, obj_class)
while time.time() < start_time + self.test_timeout:
print("<<Soak1 PASS {}: time until done {}>>".format(
self.loop, (start_time + self.test_timeout - time.time())))
# Create all specified pools
self.pool.extend(self.create_pool(pool_list))
try:
self.execute_jobs(test_param, self.pool[1:])
except SoakTestError as error:
self.fail(error)
errors = self.destroy_pools(self.pool[1:])
# delete the test pools from self.pool; preserving reserved pool
self.pool = [self.pool[0]]
self.assertEqual(len(errors), 0, "\n".join(errors))
self.loop += 1
# Break out of loop if smoke
if "smoke" in self.test_name:
break
# Commented out due to DAOS-3836.
## Check that the reserve pool is still allocated
#self.assertTrue(
# self.pool[0].check_files(self.hostlist_servers),
# "Pool data not detected on servers")
# Verify the data after soak is done
self.assertTrue(
self.container.read_objects(),
"Data verification error on reserved pool"
"after SOAK completed")
def setUp(self):
"""Define test setup to be done."""
print("<<setUp Started>> at {}".format(time.ctime()))
super(Soak, self).setUp()
# Initialize loop param for all tests
self.loop = 1
self.failed_job_id_list = []
# Fail if slurm partition daos_client is not defined
if not self.partition_clients:
raise SoakTestError(
"<<FAILED: Partition is not correctly setup for daos "
"slurm partition>>")
# Check if the server nodes are in the client list;
# this will happen when only one partition is specified
for host_server in self.hostlist_servers:
if host_server in self.hostlist_clients:
self.hostlist_clients.remove(host_server)
self.log.info(
"<<Updated hostlist_clients %s >>", self.hostlist_clients)
# include test node for log cleanup; remove from client list
self.test_node = [socket.gethostname().split('.', 1)[0]]
if self.test_node[0] in self.hostlist_clients:
self.hostlist_clients.remove(self.test_node[0])
self.log.info(
"<<Updated hostlist_clients %s >>", self.hostlist_clients)
self.node_list = self.hostlist_clients + self.test_node
# self.node_list = self.hostlist_clients
# Setup logging directories for soak logfiles
# self.output dir is an avocado directory .../data/
self.log_dir = "/tmp/soak"
self.outputsoakdir = self.outputdir + "/soak"
# Create the remote log directories on all client nodes
self.rem_pass_dir = self.log_dir + "/pass" + str(self.loop)
self.local_pass_dir = self.outputsoakdir + "/pass" + str(self.loop)
# cleanup soak log directories before test on all nodes
result = pcmd(
NodeSet.fromlist(self.node_list),
"rm -rf {}".format(self.log_dir),
verbose=False)
if len(result) > 1 or 0 not in result:
raise SoakTestError(
"<<FAILED: Soak directories not removed"
"from clients>>: {}".format(", ".join(
[str(result[key]) for key in result if key != 0])))
def tearDown(self):
"""Define tearDown and clear any left over jobs in squeue."""
print("<<tearDown Started>> at {}".format(time.ctime()))
# clear out any jobs in squeue;
errors_detected = False
if len(self.failed_job_id_list) > 0:
print("<<Cancel jobs in queue with ids {} >>".format(
self.failed_job_id_list))
for job_id in self.failed_job_id_list:
try:
slurm_utils.cancel_jobs(job_id)
except slurm_utils.SlurmFailed as error:
self.log.info(
" Failed to cancel job %s with error %s", job_id, str(
error))
errors_detected = True
# One last attempt to copy any logfiles from client nodes
try:
self.remote_copy(
self.node_list, self.rem_pass_dir, self.outputsoakdir)
except SoakTestError as error:
self.log.info("Remote copy failed with %s", error)
errors_detected = True
super(Soak, self).tearDown()
if errors_detected:
self.fail("Errors detected cancelling slurm jobs in tearDown()")
def test_soak_smoke(self):
"""Run soak smoke.
Test ID: DAOS-2192
Test Description: This will create a slurm batch job that runs IOR
with DAOS with the number of processes determined by the number of
nodes.
For this test a single pool will be created. It will run for ~10 min
:avocado: tags=soak,soak_smoke
"""
test_param = "/run/smoke/"
self.run_soak(test_param)
def test_soak_ior_daos(self):
"""Run soak test with IOR -a daos.
Test ID: DAOS-2256
Test Description: This will create a slurm batch job that runs
various jobs defined in the soak yaml
This test will run for the time specififed in
/run/test_param_test_timeout.
:avocado: tags=soak,soak_ior,soak_ior_daos
"""
test_param = "/run/soak_ior_daos/"
self.run_soak(test_param)
def test_soak_ior_mpiio(self):
"""Run soak test with IOR -a mpiio.
Test ID: DAOS-2401,
Test Description: This will create a slurm batch job that runs
various jobs defined in the soak yaml
This test will run for the time specififed in
/run/test_param_test_timeout.
:avocado: tags=soak,soak_ior,soak_ior_mpiio
"""
test_param = "/run/soak_ior_mpiio/"
self.run_soak(test_param)
def test_soak_stress(self):
"""Run soak stress.
Test ID: DAOS-2256
Test Description: This will create a slurm batch job that runs
various jobs defined in the soak yaml
This test will run for the time specififed in
/run/test_param_test_timeout.
:avocado: tags=soak,soak_stress
"""
test_param = "/run/soak_stress/"
self.run_soak(test_param)
| 39.930868 | 79 | 0.566856 | 23,540 | 0.94778 | 0 | 0 | 0 | 0 | 0 | 0 | 11,012 | 0.443371 |
d2dcba40eaf1e9db722986c2a78f80438fb6fdb3 | 1,066 | py | Python | aoc/year_2020/day_06/solver.py | logan-connolly/AoC | 23f47e72abaf438cc97897616be4d6b057a01bf3 | ["MIT"] | 2 | 2020-12-06T10:59:52.000Z | 2021-09-29T22:14:03.000Z | aoc/year_2020/day_06/solver.py | logan-connolly/AoC | 23f47e72abaf438cc97897616be4d6b057a01bf3 | ["MIT"] | null | null | null | aoc/year_2020/day_06/solver.py | logan-connolly/AoC | 23f47e72abaf438cc97897616be4d6b057a01bf3 | ["MIT"] | 2 | 2021-09-29T22:14:18.000Z | 2022-01-18T02:20:26.000Z |
"""This is the Solution for Year 2020 Day 06"""
import re
from aoc.abstracts.solver import Answers, StrLines
class Solver:
def __init__(self, data: str) -> None:
self.data = data
def _preprocess(self) -> StrLines:
delim = "\n\n"
return self.data.split(delim)
def _solve_part_one(self, lines: StrLines) -> int:
cleaned = [re.sub(r"\n", "", answer).strip() for answer in lines]
return sum(len(set(answer)) for answer in cleaned)
def _solve_part_two(self, lines: StrLines) -> int:
cleaned = [answer.rstrip("\n").split("\n") for answer in lines]
shared_answer_count = 0
for group in cleaned:
shared_answers = set.intersection(*[set(member) for member in group])
shared_answer_count += len(shared_answers)
return shared_answer_count
def solve(self) -> Answers:
lines = self._preprocess()
ans_one = self._solve_part_one(lines)
ans_two = self._solve_part_two(lines)
return Answers(part_one=ans_one, part_two=ans_two)
| 32.30303 | 81 | 0.641651 | 952 | 0.893058 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.06379 |
d2defb686bfc61f23201cb71e5a9d368779c4dfa | 98 | py | Python | setup.py | kuzxnia/typer | 39007237d552e4f4920b2c6e13e5f0ce482d4427 | ["MIT"] | null | null | null | setup.py | kuzxnia/typer | 39007237d552e4f4920b2c6e13e5f0ce482d4427 | ["MIT"] | 3 | 2020-04-07T12:39:51.000Z | 2020-04-09T22:49:16.000Z | setup.py | kuzxnia/typer | 39007237d552e4f4920b2c6e13e5f0ce482d4427 | ["MIT"] | null | null | null |
from setuptools import find_packages, setup
setup(
name="typer", packages=find_packages(),
)
| 16.333333 | 43 | 0.744898 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.071429 |
d2dfa41f3a05071765ff4e4b5a6aecdae50d42b0 | 7,105 | py | Python | speedup.py | hjdeheer/malpaca | a0e5471a06175ef34aa95b3a1caea407e4e624a8 | ["MIT"] | null | null | null | speedup.py | hjdeheer/malpaca | a0e5471a06175ef34aa95b3a1caea407e4e624a8 | ["MIT"] | null | null | null | speedup.py | hjdeheer/malpaca | a0e5471a06175ef34aa95b3a1caea407e4e624a8 | ["MIT"] | null | null | null |
import numpy as np
from numba import jit, prange
from scipy.stats import mode
from sklearn.metrics import accuracy_score
__all__ = ['dtw_distance', 'KnnDTW']
@jit(nopython=True, fastmath=True)
def cosine_distance(u:np.ndarray, v:np.ndarray):
assert(u.shape[0] == v.shape[0])
uv = 0
uu = 0
vv = 0
for i in range(u.shape[0]):
uv += u[i]*v[i]
uu += u[i]*u[i]
vv += v[i]*v[i]
cos_theta = 1
if uu!=0 and vv!=0:
cos_theta = uv/np.sqrt(uu*vv)
return 1 - cos_theta
@jit(nopython=True, parallel=True, nogil=True)
def dtw_distance(dataset1, dataset2):
"""
Computes the dataset DTW distance matrix using multiprocessing.
Args:
dataset1: timeseries dataset of shape [N1, T1]
dataset2: timeseries dataset of shape [N2, T2]
Returns:
Distance matrix of shape [N1, N2]
"""
n1 = dataset1.shape[0]
n2 = dataset2.shape[0]
dist = np.empty((n1, n2), dtype=np.float64)
for i in prange(n1):
for j in prange(n2):
dist[i][j] = _dtw_distance(dataset1[i], dataset2[j])
return dist
@jit(nopython=True, cache=True)
def _dtw_distance(series1, series2):
"""
Returns the DTW similarity distance between two 1-D
timeseries numpy arrays.
Args:
series1, series2 : array of shape [n_timepoints]
Two arrays containing n_samples of timeseries data
whose DTW distance between each sample of A and B
will be compared.
Returns:
DTW distance between A and B
"""
l1 = series1.shape[0]
l2 = series2.shape[0]
E = np.empty((l1, l2))
# Fill First Cell
v = series1[0] - series2[0]
E[0][0] = v * v
# Fill First Column
for i in range(1, l1):
v = series1[i] - series2[0]
E[i][0] = E[i - 1][0] + v * v
# Fill First Row
for i in range(1, l2):
v = series1[0] - series2[i]
E[0][i] = E[0][i - 1] + v * v
for i in range(1, l1):
for j in range(1, l2):
v = series1[i] - series2[j]
v = v * v
v1 = E[i - 1][j]
v2 = E[i - 1][j - 1]
v3 = E[i][j - 1]
if v1 <= v2 and v1 <= v3:
E[i][j] = v1 + v
elif v2 <= v1 and v2 <= v3:
E[i][j] = v2 + v
else:
E[i][j] = v3 + v
return np.sqrt(E[-1][-1])
# Modified from https://github.com/markdregan/K-Nearest-Neighbors-with-Dynamic-Time-Warping
class KnnDTW(object):
"""K-nearest neighbor classifier using dynamic time warping
as the distance measure between pairs of time series arrays
Arguments
---------
n_neighbors : int, optional (default = 1)
Number of neighbors to use by default for KNN
"""
def __init__(self, n_neighbors=1):
self.n_neighbors = n_neighbors
def fit(self, x, y):
"""Fit the model using x as training data and y as class labels
Arguments
---------
x : array of shape [n_samples, n_timepoints]
            Training data set for input into KNN classifier
y : array of shape [n_samples]
Training labels for input into KNN classifier
"""
self.x = np.copy(x)
self.y = np.copy(y)
def _dist_matrix(self, x, y):
"""Computes the M x N distance matrix between the training
dataset and testing dataset (y) using the DTW distance measure
Arguments
---------
x : array of shape [n_samples, n_timepoints]
y : array of shape [n_samples, n_timepoints]
Returns
-------
Distance matrix between each item of x and y with
shape [training_n_samples, testing_n_samples]
"""
dm = dtw_distance(x, y)
return dm
def predict(self, x):
"""Predict the class labels or probability estimates for
the provided data
Arguments
---------
x : array of shape [n_samples, n_timepoints]
Array containing the testing data set to be classified
Returns
-------
2 arrays representing:
(1) the predicted class labels
(2) the knn label count probability
"""
np.random.seed(0)
dm = self._dist_matrix(x, self.x)
# Identify the k nearest neighbors
knn_idx = dm.argsort()[:, :self.n_neighbors]
# Identify k nearest labels
knn_labels = self.y[knn_idx]
# Model Label
mode_data = mode(knn_labels, axis=1)
mode_label = mode_data[0]
mode_proba = mode_data[1] / self.n_neighbors
return mode_label.ravel(), mode_proba.ravel()
def evaluate(self, x, y):
"""
Predict the class labels or probability estimates for
the provided data and then evaluates the accuracy score.
Arguments
---------
x : array of shape [n_samples, n_timepoints]
Array containing the testing data set to be classified
y : array of shape [n_samples]
Array containing the labels of the testing dataset to be classified
Returns
-------
1 floating point value representing the accuracy of the classifier
"""
# Predict the labels and the probabilities
pred_labels, pred_probas = self.predict(x)
# Ensure labels are integers
y = y.astype('int32')
pred_labels = pred_labels.astype('int32')
# Compute accuracy measure
accuracy = accuracy_score(y, pred_labels)
return accuracy
def predict_proba(self, x):
"""Predict the class labels probability estimates for
the provided data
Arguments
---------
x : array of shape [n_samples, n_timepoints]
Array containing the testing data set to be classified
Returns
-------
2 arrays representing:
(1) the predicted class probabilities
(2) the knn labels
"""
np.random.seed(0)
dm = self._dist_matrix(x, self.x)
# Invert the distance matrix
dm = -dm
classes = np.unique(self.y)
class_dm = []
# Partition distance matrix by class
for i, cls in enumerate(classes):
idx = np.argwhere(self.y == cls)[:, 0]
cls_dm = dm[:, idx] # [N_test, N_train_c]
            # The matrix was negated above, so the per-class max is the smallest distance
cls_dm = np.max(cls_dm, axis=-1) # [N_test,]
class_dm.append([cls_dm])
# Concatenate the classwise distance matrices and transpose
class_dm = np.concatenate(class_dm, axis=0) # [C, N_test]
class_dm = class_dm.transpose() # [N_test, C]
# Compute softmax probabilities
class_dm_exp = np.exp(class_dm - class_dm.max())
class_dm = class_dm_exp / np.sum(class_dm_exp, axis=-1, keepdims=True)
probabilities = class_dm
knn_labels = np.argmax(class_dm, axis=-1)
return probabilities, knn_labels | 28.194444 | 91 | 0.570443 | 4,611 | 0.64898 | 0 | 0 | 2,232 | 0.314145 | 0 | 0 | 3,628 | 0.510626 |
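if __name__ == '__main__':
    # Usage sketch (illustrative addition, not part of the original file).
    # It assumes numpy (np) and scipy.stats.mode are imported at the top of
    # this module, as the class code above already requires.
    t = np.linspace(0, 2 * np.pi, 50)
    x_train = np.stack([np.sin(t), np.cos(t)])
    y_train = np.array([0, 1])
    model = KnnDTW(n_neighbors=1)
    model.fit(x_train, y_train)
    labels, probas = model.predict(x_train)
    print(labels, probas)  # labels should come back as [0 1]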
d2dfc266c6056fe94eecb550bf60b54a02eaa933 | 470 | py | Python | setup.py | colineRamee/UAM_simulator_scitech2021 | 0583f5ce195cf1ec4f6919d6523fa39851c419fc | ["MIT"] | 1 | 2021-02-04T15:57:03.000Z | 2021-02-04T15:57:03.000Z | setup.py | colineRamee/UAM_simulator_scitech2021 | 0583f5ce195cf1ec4f6919d6523fa39851c419fc | ["MIT"] | null | null | null | setup.py | colineRamee/UAM_simulator_scitech2021 | 0583f5ce195cf1ec4f6919d6523fa39851c419fc | ["MIT"] | 2 | 2021-02-04T04:41:08.000Z | 2022-03-01T16:18:14.000Z |
from setuptools import setup
setup(
name='uam_simulator',
version='1.0',
description='A tool to simulate different architectures for UAM traffic management',
author='Coline Ramee',
author_email='[email protected]',
packages=['uam_simulator'],
install_requires=['numpy', 'scikit-learn', 'gurobipy']
)
# If installing from source the package name is gurobipy; if installing with conda it's gurobi; when importing it's still gurobipy.
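# Usage sketch (illustrative, not from the original repo): from the repository
# root, `pip install .` (or `pip install -e .` for development) installs the
# package together with the dependencies listed in install_requires.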
| 36.153846 | 134 | 0.734043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 310 | 0.659574 |
d2e2156c949fb7522a291e88e911e831ba30e23c | 1,115 | py | Python | DFS/Leetcode1239.py | Rylie-W/LeetRecord | 623c4efe88b3af54b8a65f6ec23db850b8c6f46f | ["Apache-2.0"] | null | null | null | DFS/Leetcode1239.py | Rylie-W/LeetRecord | 623c4efe88b3af54b8a65f6ec23db850b8c6f46f | ["Apache-2.0"] | null | null | null | DFS/Leetcode1239.py | Rylie-W/LeetRecord | 623c4efe88b3af54b8a65f6ec23db850b8c6f46f | ["Apache-2.0"] | null | null | null |
class Solution:
def maxLength(self, arr) -> int:
        def helper(word):
            # Return the character set of `word`, or None if it contains duplicates.
            res = set(word)
            return res if len(res) == len(word) else None
memo=[]
for a in arr:
temp=helper(a)
if temp is not None:
memo.append(temp)
        # Try longer character sets first so promising branches are explored early.
        memo.sort(key=lambda a: len(a), reverse=True)
        def dfs(index, path):
            # Best total length using sets from memo[index:] that are disjoint with path.
            if index == len(memo):
                return 0
            res = 0
            for i in range(index, len(memo)):
                # Disjoint iff the union's size equals the sum of the two sizes.
                if len(path | memo[i]) == len(path) + len(memo[i]):
                    res = max(res, len(memo[i]) + dfs(i + 1, path | memo[i]))
            return res
        return dfs(0, set())
if __name__ == '__main__':
sol=Solution()
arr = ["un", "iq", "ue"]
# arr = ["cha", "r", "act", "ers"]
# arr = ["abcdefghijklmnopqrstuvwxyz"]
# arr=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p"]
print(sol.maxLength(arr))
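    # Expected output for ["un", "iq", "ue"]: 4 (e.g. "uniq" or "ique").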
| 26.547619 | 90 | 0.419731 | 832 | 0.746188 | 0 | 0 | 0 | 0 | 0 | 0 | 180 | 0.161435 |
d2e2e8b5aeb34c6ee7b5e4eefd603f0d67226b67 | 419 | py | Python | apps/addresses/migrations/0002_address_picture.py | skyride/python-docker-compose | b3ac1a4da4ae2133b94504447a6cb353cc96f45b | ["MIT"] | null | null | null | apps/addresses/migrations/0002_address_picture.py | skyride/python-docker-compose | b3ac1a4da4ae2133b94504447a6cb353cc96f45b | ["MIT"] | null | null | null | apps/addresses/migrations/0002_address_picture.py | skyride/python-docker-compose | b3ac1a4da4ae2133b94504447a6cb353cc96f45b | ["MIT"] | null | null | null |
# Generated by Django 3.0.6 on 2020-05-25 22:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('addresses', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='address',
name='picture',
field=models.ImageField(default=None, null=True, upload_to='addresses/images/'),
),
]
| 22.052632 | 92 | 0.606205 | 326 | 0.778043 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.260143 |
d2e3431a8ca64567f24a9939424b3256a13d8da9 | 34,809 | py | Python | liberapay/payin/common.py | Panquesito7/liberapay.com | d6806390a533061c2b9fb147d7139d06024f9a1b | ["PostgreSQL", "CC0-1.0"] | 1 | 2021-07-26T02:07:13.000Z | 2021-07-26T02:07:13.000Z | liberapay/payin/common.py | Panquesito7/liberapay.com | d6806390a533061c2b9fb147d7139d06024f9a1b | ["PostgreSQL", "CC0-1.0"] | null | null | null | liberapay/payin/common.py | Panquesito7/liberapay.com | d6806390a533061c2b9fb147d7139d06024f9a1b | ["PostgreSQL", "CC0-1.0"] | null | null | null |
from collections import namedtuple
from datetime import timedelta
import itertools
from operator import attrgetter
from pando.utils import utcnow
from psycopg2.extras import execute_batch
from ..constants import SEPA
from ..exceptions import (
AccountSuspended, MissingPaymentAccount, RecipientAccountSuspended,
NoSelfTipping, UserDoesntAcceptTips,
)
from ..i18n.currencies import Money, MoneyBasket
from ..utils import group_by
ProtoTransfer = namedtuple(
'ProtoTransfer',
'amount recipient destination context unit_amount period team visibility',
)
def prepare_payin(db, payer, amount, route, proto_transfers, off_session=False):
"""Prepare to charge a user.
Args:
payer (Participant): the user who will be charged
amount (Money): the presentment amount of the charge
route (ExchangeRoute): the payment instrument to charge
proto_transfers ([ProtoTransfer]): the transfers to prepare
off_session (bool):
`True` means that the payment is being initiated because it was scheduled,
`False` means that the payer has initiated the operation just now
Returns:
Record: the row created in the `payins` table
Raises:
AccountSuspended: if the payer's account is suspended
"""
assert isinstance(amount, Money), type(amount)
assert route.participant == payer, (route.participant, payer)
assert route.status in ('pending', 'chargeable')
if payer.is_suspended or not payer.get_email_address():
raise AccountSuspended()
with db.get_cursor() as cursor:
payin = cursor.one("""
INSERT INTO payins
(payer, amount, route, status, off_session)
VALUES (%s, %s, %s, 'pre', %s)
RETURNING *
""", (payer.id, amount, route.id, off_session))
cursor.run("""
INSERT INTO payin_events
(payin, status, error, timestamp)
VALUES (%s, %s, NULL, current_timestamp)
""", (payin.id, payin.status))
payin_transfers = []
for t in proto_transfers:
payin_transfers.append(prepare_payin_transfer(
cursor, payin, t.recipient, t.destination, t.context, t.amount,
t.visibility, t.unit_amount, t.period, t.team,
))
return payin, payin_transfers
def update_payin(
db, payin_id, remote_id, status, error,
amount_settled=None, fee=None, intent_id=None, refunded_amount=None,
):
"""Update the status and other attributes of a charge.
Args:
payin_id (int): the ID of the charge in our database
remote_id (str): the ID of the charge in the payment processor's database
status (str): the new status of the charge
error (str): if the charge failed, an error message to show to the payer
Returns:
Record: the row updated in the `payins` table
"""
with db.get_cursor() as cursor:
payin = cursor.one("""
UPDATE payins
SET status = %(status)s
, error = %(error)s
, remote_id = coalesce(remote_id, %(remote_id)s)
, amount_settled = coalesce(amount_settled, %(amount_settled)s)
, fee = coalesce(fee, %(fee)s)
, intent_id = coalesce(intent_id, %(intent_id)s)
, refunded_amount = coalesce(%(refunded_amount)s, refunded_amount)
WHERE id = %(payin_id)s
RETURNING *
, (SELECT status FROM payins WHERE id = %(payin_id)s) AS old_status
""", locals())
if not payin:
return
if remote_id and payin.remote_id != remote_id:
raise AssertionError(f"the remote IDs don't match: {payin.remote_id!r} != {remote_id!r}")
if status != payin.old_status:
cursor.run("""
INSERT INTO payin_events
(payin, status, error, timestamp)
VALUES (%s, %s, %s, current_timestamp)
""", (payin_id, status, error))
if status in ('pending', 'succeeded'):
cursor.run("""
UPDATE exchange_routes
SET status = 'consumed'
WHERE id = %s
AND one_off IS TRUE
""", (payin.route,))
# Lock to avoid concurrent updates
cursor.run("SELECT * FROM participants WHERE id = %s FOR UPDATE",
(payin.payer,))
# Update scheduled payins, if appropriate
if status in ('pending', 'succeeded'):
sp = cursor.one("""
SELECT *
FROM scheduled_payins
WHERE payer = %s
AND payin = %s
""", (payin.payer, payin.id))
if not sp:
# Try to find a scheduled renewal that matches this payin.
# It doesn't have to be an exact match.
schedule = cursor.all("""
SELECT *
FROM scheduled_payins
WHERE payer = %s
AND payin IS NULL
AND mtime < %s
""", (payin.payer, payin.ctime))
today = utcnow().date()
schedule.sort(key=lambda sp: abs((sp.execution_date - today).days))
payin_tippees = set(cursor.all("""
SELECT coalesce(team, recipient) AS tippee
FROM payin_transfers
WHERE payer = %s
AND payin = %s
""", (payin.payer, payin.id)))
for sp in schedule:
if any((tr['tippee_id'] in payin_tippees) for tr in sp.transfers):
cursor.run("""
UPDATE scheduled_payins
SET payin = %s
, mtime = current_timestamp
WHERE id = %s
""", (payin.id, sp.id))
break
return payin
def adjust_payin_transfers(db, payin, net_amount):
"""Correct a payin's transfers once the net amount is known.
Args:
payin (Record): a row from the `payins` table
net_amount (Money): the amount of money available to transfer
"""
payer = db.Participant.from_id(payin.payer)
route = db.ExchangeRoute.from_id(payer, payin.route)
provider = route.network.split('-', 1)[0]
payer_country = route.country
# We have to update the transfer amounts in a single transaction to
# avoid ending up in an inconsistent state.
with db.get_cursor() as cursor:
payin_transfers = cursor.all("""
SELECT pt.id, pt.amount, pt.status, pt.remote_id, pt.team, pt.recipient, team_p
FROM payin_transfers pt
LEFT JOIN participants team_p ON team_p.id = pt.team
WHERE pt.payin = %s
ORDER BY pt.id
FOR UPDATE OF pt
""", (payin.id,))
assert payin_transfers
if any(pt.status == 'succeeded' for pt in payin_transfers):
# At least one of the transfers has already been executed, so it's
# too complicated to adjust the amounts now.
return
transfers_by_tippee = group_by(
payin_transfers, lambda pt: (pt.team or pt.recipient)
)
prorated_amounts = resolve_amounts(net_amount, {
tippee: MoneyBasket(pt.amount for pt in grouped).fuzzy_sum(net_amount.currency)
for tippee, grouped in transfers_by_tippee.items()
})
teams = set(pt.team for pt in payin_transfers if pt.team is not None)
updates = []
for tippee, prorated_amount in prorated_amounts.items():
transfers = transfers_by_tippee[tippee]
if tippee in teams:
team = transfers[0].team_p
tip = payer.get_tip_to(team)
try:
team_donations = resolve_team_donation(
db, team, provider, payer, payer_country,
prorated_amount, tip, sepa_only=True,
)
except (MissingPaymentAccount, NoSelfTipping):
team_amounts = resolve_amounts(prorated_amount, {
pt.id: pt.amount.convert(prorated_amount.currency)
for pt in transfers
})
for pt in transfers:
if pt.amount != team_amounts.get(pt.id):
assert pt.remote_id is None and pt.status in ('pre', 'pending')
updates.append((team_amounts[pt.id], pt.id))
else:
team_donations = {d.recipient.id: d for d in team_donations}
for pt in transfers:
if pt.status == 'failed':
continue
d = team_donations.pop(pt.recipient, None)
if d is None:
assert pt.remote_id is None and pt.status in ('pre', 'pending')
cursor.run("""
DELETE FROM payin_transfer_events
WHERE payin_transfer = %(pt_id)s
AND status = 'pending';
DELETE FROM payin_transfers WHERE id = %(pt_id)s;
""", dict(pt_id=pt.id))
elif pt.amount != d.amount:
assert pt.remote_id is None and pt.status in ('pre', 'pending')
updates.append((d.amount, pt.id))
n_periods = prorated_amount / tip.periodic_amount.convert(prorated_amount.currency)
for d in team_donations.values():
unit_amount = (d.amount / n_periods).round(allow_zero=False)
prepare_payin_transfer(
db, payin, d.recipient, d.destination, 'team-donation',
d.amount, tip.visibility, unit_amount, tip.period,
team=team.id,
)
else:
pt = transfers[0]
if pt.amount != prorated_amount:
assert pt.remote_id is None and pt.status in ('pre', 'pending')
updates.append((prorated_amount, pt.id))
if updates:
execute_batch(cursor, """
UPDATE payin_transfers
SET amount = %s
WHERE id = %s
AND status <> 'succeeded';
""", updates)
def resolve_tip(
db, tip, tippee, provider, payer, payer_country, payment_amount,
sepa_only=False, excluded_destinations=set(),
):
"""Prepare to fund a tip.
Args:
tip (Row): a row from the `tips` table
tippee (Participant): the intended beneficiary of the donation
provider (str): the payment processor ('paypal' or 'stripe')
payer (Participant): the donor
payer_country (str): the country the money is supposedly coming from
payment_amount (Money): the amount of money being sent
sepa_only (bool): only consider destination accounts within SEPA
excluded_destinations (set): any `payment_accounts.pk` values to exclude
Returns:
a list of `ProtoTransfer` objects
Raises:
MissingPaymentAccount: if no suitable destination has been found
NoSelfTipping: if the donor would end up sending money to themself
RecipientAccountSuspended: if the tippee's account is suspended
UserDoesntAcceptTips: if the tippee doesn't accept donations
"""
assert tip.tipper == payer.id
assert tip.tippee == tippee.id
if not tippee.accepts_tips:
raise UserDoesntAcceptTips(tippee.username)
if tippee.is_suspended:
raise RecipientAccountSuspended(tippee)
if tippee.kind == 'group':
return resolve_team_donation(
db, tippee, provider, payer, payer_country, payment_amount, tip,
sepa_only=sepa_only, excluded_destinations=excluded_destinations,
)
else:
destination = resolve_destination(
db, tippee, provider, payer, payer_country, payment_amount,
sepa_only=sepa_only, excluded_destinations=excluded_destinations,
)
return [ProtoTransfer(
payment_amount, tippee, destination, 'personal-donation',
tip.periodic_amount, tip.period, None, tip.visibility,
)]
def resolve_destination(
db, tippee, provider, payer, payer_country, payin_amount,
sepa_only=False, excluded_destinations=(),
):
"""Figure out where to send a payment.
Args:
tippee (Participant): the intended beneficiary of the payment
provider (str): the payment processor ('paypal' or 'stripe')
payer (Participant): the user who wants to pay
payer_country (str): the country the money is supposedly coming from
payin_amount (Money): the payment amount
sepa_only (bool): only consider destination accounts within SEPA
excluded_destinations (set): any `payment_accounts.pk` values to exclude
Returns:
Record: a row from the `payment_accounts` table
Raises:
MissingPaymentAccount: if no suitable destination has been found
NoSelfTipping: if the payer would end up sending money to themself
"""
tippee_id = tippee.id
if tippee_id == payer.id:
raise NoSelfTipping()
currency = payin_amount.currency
excluded_destinations = list(excluded_destinations)
destination = db.one("""
SELECT *
FROM payment_accounts
WHERE participant = %(tippee_id)s
AND provider = %(provider)s
AND is_current
AND verified
AND coalesce(charges_enabled, true)
AND array_position(%(excluded_destinations)s::bigint[], pk) IS NULL
AND ( country IN %(SEPA)s OR NOT %(sepa_only)s )
ORDER BY default_currency = %(currency)s DESC
, country = %(payer_country)s DESC
, connection_ts
LIMIT 1
""", dict(locals(), SEPA=SEPA))
if destination:
return destination
else:
raise MissingPaymentAccount(tippee)
def resolve_team_donation(
db, team, provider, payer, payer_country, payment_amount, tip,
sepa_only=False, excluded_destinations=(),
):
"""Figure out how to distribute a donation to a team's members.
Args:
team (Participant): the team the donation is for
provider (str): the payment processor ('paypal' or 'stripe')
payer (Participant): the donor
payer_country (str): the country code the money is supposedly coming from
payment_amount (Money): the amount of money being sent
tip (Row): the row from the `tips` table
sepa_only (bool): only consider destination accounts within SEPA
excluded_destinations (set): any `payment_accounts.pk` values to exclude
Returns:
a list of `ProtoTransfer` objects
Raises:
MissingPaymentAccount: if no suitable destination has been found
NoSelfTipping: if the payer would end up sending money to themself
RecipientAccountSuspended: if the team or all of its members are suspended
"""
if team.is_suspended:
raise RecipientAccountSuspended(team)
currency = payment_amount.currency
takes = team.get_current_takes_for_payment(currency, tip.amount)
if all(t.is_suspended for t in takes):
raise RecipientAccountSuspended(takes)
takes = [t for t in takes if not t.is_suspended]
if len(takes) == 1 and takes[0].member == payer.id:
raise NoSelfTipping()
member_ids = tuple([t.member for t in takes])
excluded_destinations = list(excluded_destinations)
payment_accounts = {row.participant: row for row in db.all("""
SELECT DISTINCT ON (participant) *
FROM payment_accounts
WHERE participant IN %(member_ids)s
AND provider = %(provider)s
AND is_current
AND verified
AND coalesce(charges_enabled, true)
AND array_position(%(excluded_destinations)s::bigint[], pk) IS NULL
ORDER BY participant
, default_currency = %(currency)s DESC
, country = %(payer_country)s DESC
, connection_ts
""", locals())}
del member_ids
if not payment_accounts:
raise MissingPaymentAccount(team)
takes = [t for t in takes if t.member in payment_accounts and t.member != payer.id]
if not takes:
raise NoSelfTipping()
    # Favor members whose take is large relative to what they have already
    # been paid in advance; use the advance and the take's age as tie-breakers.
    takes.sort(key=lambda t: (
        -(t.amount / (t.paid_in_advance + payment_amount)),
        t.paid_in_advance,
        t.ctime
    ))
# Try to distribute the donation to multiple members.
if sepa_only or provider == 'stripe':
sepa_accounts = {a.participant: a for a in db.all("""
SELECT DISTINCT ON (a.participant) a.*
FROM payment_accounts a
WHERE a.participant IN %(member_ids)s
AND a.provider = %(provider)s
AND a.is_current
AND a.verified
AND coalesce(a.charges_enabled, true)
AND array_position(%(excluded_destinations)s::bigint[], a.pk) IS NULL
AND a.country IN %(SEPA)s
ORDER BY a.participant
, a.default_currency = %(currency)s DESC
, a.connection_ts
""", dict(locals(), SEPA=SEPA, member_ids={t.member for t in takes}))}
if sepa_only or len(sepa_accounts) > 1 and takes[0].member in sepa_accounts:
selected_takes = [
t for t in takes if t.member in sepa_accounts and t.amount != 0
]
if selected_takes:
resolve_take_amounts(payment_amount, selected_takes)
selected_takes.sort(key=attrgetter('member'))
n_periods = payment_amount / tip.periodic_amount.convert(currency)
return [
ProtoTransfer(
t.resolved_amount,
db.Participant.from_id(t.member),
sepa_accounts[t.member],
'team-donation',
(t.resolved_amount / n_periods).round(allow_zero=False),
tip.period,
team.id,
tip.visibility,
)
for t in selected_takes if t.resolved_amount != 0
]
elif sepa_only:
raise MissingPaymentAccount(team)
# Fall back to sending the entire donation to the member who "needs" it most.
member = db.Participant.from_id(takes[0].member)
account = payment_accounts[member.id]
return [ProtoTransfer(
payment_amount, member, account, 'team-donation',
tip.periodic_amount, tip.period, team.id, tip.visibility,
)]
def resolve_take_amounts(payment_amount, takes):
"""Compute team transfer amounts.
Args:
payment_amount (Money): the total amount of money to transfer
takes (list): rows returned by `team.get_current_takes_for_payment(...)`
This function doesn't return anything, instead it mutates the given takes,
adding a `resolved_amount` attribute to each one.
"""
max_weeks_of_advance = 0
for t in takes:
if t.amount == 0:
t.weeks_of_advance = 0
continue
t.weeks_of_advance = t.paid_in_advance / t.amount
if t.weeks_of_advance > max_weeks_of_advance:
max_weeks_of_advance = t.weeks_of_advance
base_amounts = {t.member: t.amount for t in takes}
convergence_amounts = {
t.member: (
t.amount * (max_weeks_of_advance - t.weeks_of_advance)
).round_up()
for t in takes
}
tr_amounts = resolve_amounts(payment_amount, base_amounts, convergence_amounts)
for t in takes:
t.resolved_amount = tr_amounts.get(t.member, payment_amount.zero())
def resolve_amounts(available_amount, base_amounts, convergence_amounts=None, payday_id=1):
"""Compute transfer amounts.
Args:
available_amount (Money):
the payin amount to split into transfer amounts
base_amounts (Dict[Any, Money]):
a map of IDs to raw transfer amounts
convergence_amounts (Dict[Any, Money]):
an optional map of IDs to ideal additional amounts
payday_id (int):
the ID of the current or next payday, used to rotate who receives
the remainder when there is a tie
Returns a copy of `base_amounts` with updated values.
"""
min_transfer_amount = Money.MINIMUMS[available_amount.currency]
r = {}
amount_left = available_amount
# Attempt to converge
if convergence_amounts:
convergence_sum = Money.sum(convergence_amounts.values(), amount_left.currency)
if convergence_sum != 0:
convergence_amounts = {k: v for k, v in convergence_amounts.items() if v != 0}
if amount_left == convergence_sum:
# We have just enough money for convergence.
return convergence_amounts
elif amount_left > convergence_sum:
# We have more than enough money for full convergence, the extra
# funds will be allocated in proportion to `base_amounts`.
r.update(convergence_amounts)
amount_left -= convergence_sum
else:
# We only have enough for partial convergence, the funds will be
# allocated in proportion to `convergence_amounts`.
base_amounts = convergence_amounts
# Compute the prorated amounts
base_sum = Money.sum(base_amounts.values(), amount_left.currency)
base_ratio = 0 if base_sum == 0 else amount_left / base_sum
for key, base_amount in sorted(base_amounts.items()):
if base_amount == 0:
continue
assert amount_left >= min_transfer_amount
amount = min((base_amount * base_ratio).round_down(), amount_left)
r[key] = amount + r.get(key, 0)
amount_left -= amount
# Deal with rounding errors
if amount_left > 0:
# Try to distribute in a way that doesn't skew the percentages much.
# If there's a tie, use the payday ID to rotate the winner every week.
i = itertools.count(1)
n = len(r)
def compute_priority(item):
key, current_amount = item
base_amount = base_amounts[key] * base_ratio
return (
(current_amount - base_amount) / base_amount if base_amount else 2,
(next(i) - payday_id) % n
)
for key, amount in sorted(r.items(), key=compute_priority):
r[key] += min_transfer_amount
amount_left -= min_transfer_amount
if amount_left == 0:
break
# Final check and return
assert amount_left == 0, '%r != 0' % amount_left
return r
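# Illustrative example (comment added for clarity, not in the original module):
# splitting a EUR 10.00 payin across base amounts of EUR 2.00 and EUR 6.00
# prorates at the ratio 10/8, i.e.
#     resolve_amounts(Money('10.00', 'EUR'),
#                     {'a': Money('2.00', 'EUR'), 'b': Money('6.00', 'EUR')})
#     # -> {'a': Money('2.50', 'EUR'), 'b': Money('7.50', 'EUR')}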
def prepare_payin_transfer(
db, payin, recipient, destination, context, amount, visibility,
unit_amount=None, period=None, team=None,
):
"""Prepare the allocation of funds from a payin.
Args:
payin (Record): a row from the `payins` table
recipient (Participant): the user who will receive the money
destination (Record): a row from the `payment_accounts` table
amount (Money): the amount of money that will be received
visibility (int): a copy of `tip.visibility`
unit_amount (Money): the `periodic_amount` of a recurrent donation
period (str): the period of a recurrent payment
team (int): the ID of the project this payment is tied to
Returns:
Record: the row created in the `payin_transfers` table
"""
assert recipient.id == destination.participant, (recipient, destination)
if recipient.is_suspended:
raise RecipientAccountSuspended()
if unit_amount:
n_units = int(amount / unit_amount.convert(amount.currency))
else:
n_units = None
return db.one("""
INSERT INTO payin_transfers
(payin, payer, recipient, destination, context, amount,
unit_amount, n_units, period, team, visibility,
status, ctime)
VALUES (%s, %s, %s, %s, %s, %s,
%s, %s, %s, %s, %s,
'pre', clock_timestamp())
RETURNING *
""", (payin.id, payin.payer, recipient.id, destination.pk, context, amount,
unit_amount, n_units, period, team, visibility))
def update_payin_transfer(
db, pt_id, remote_id, status, error,
amount=None, fee=None, update_donor=True, reversed_amount=None,
):
"""Update the status and other attributes of a payment.
Args:
pt_id (int): the ID of the payment in our database
remote_id (str): the ID of the transfer in the payment processor's database
status (str): the new status of the payment
error (str): if the payment failed, an error message to show to the payer
Returns:
Record: the row updated in the `payin_transfers` table
"""
with db.get_cursor() as cursor:
pt = cursor.one("""
UPDATE payin_transfers
SET status = %(status)s
, error = %(error)s
, remote_id = coalesce(remote_id, %(remote_id)s)
, amount = COALESCE(%(amount)s, amount)
, fee = COALESCE(%(fee)s, fee)
, reversed_amount = coalesce(%(reversed_amount)s, reversed_amount)
WHERE id = %(pt_id)s
RETURNING *
, (SELECT amount FROM payin_transfers WHERE id = %(pt_id)s) AS old_amount
, (SELECT reversed_amount FROM payin_transfers WHERE id = %(pt_id)s) AS old_reversed_amount
, (SELECT status FROM payin_transfers WHERE id = %(pt_id)s) AS old_status
""", locals())
if not pt:
return
if remote_id and pt.remote_id != remote_id:
raise AssertionError(f"the remote IDs don't match: {pt.remote_id!r} != {remote_id!r}")
if status != pt.old_status:
cursor.run("""
INSERT INTO payin_transfer_events
(payin_transfer, status, error, timestamp)
VALUES (%s, %s, %s, current_timestamp)
""", (pt_id, status, error))
# If the payment has failed or hasn't been settled yet, then stop here.
if status != 'succeeded':
return pt
# Update the `paid_in_advance` value of the donation.
params = pt._asdict()
params['delta'] = pt.amount
if pt.old_status == 'succeeded':
params['delta'] -= pt.old_amount
if pt.reversed_amount:
params['delta'] += -(pt.reversed_amount - (pt.old_reversed_amount or 0))
elif pt.old_reversed_amount:
params['delta'] += pt.old_reversed_amount
if params['delta'] == 0:
return pt
updated_tips = cursor.all("""
WITH latest_tip AS (
SELECT *
FROM tips
WHERE tipper = %(payer)s
AND tippee = COALESCE(%(team)s, %(recipient)s)
ORDER BY mtime DESC
LIMIT 1
)
UPDATE tips t
SET paid_in_advance = (
coalesce_currency_amount(t.paid_in_advance, t.amount::currency) +
convert(%(delta)s, t.amount::currency)
)
, is_funded = true
FROM latest_tip lt
WHERE t.tipper = lt.tipper
AND t.tippee = lt.tippee
AND t.mtime >= lt.mtime
RETURNING t.*
""", params)
if not updated_tips:
# This transfer isn't linked to a tip.
return pt
assert len(updated_tips) < 10, updated_tips
if any(t.paid_in_advance <= 0 for t in updated_tips):
cursor.run("""
UPDATE tips
SET is_funded = false
WHERE tipper = %(payer)s
AND paid_in_advance <= 0
""", params)
# If it's a team donation, update the `paid_in_advance` value of the take.
if pt.context == 'team-donation':
updated_takes = cursor.all("""
WITH latest_take AS (
SELECT *
FROM takes
WHERE team = %(team)s
AND member = %(recipient)s
AND amount IS NOT NULL
ORDER BY mtime DESC
LIMIT 1
)
UPDATE takes t
SET paid_in_advance = (
coalesce_currency_amount(lt.paid_in_advance, lt.amount::currency) +
convert(%(delta)s, lt.amount::currency)
)
FROM latest_take lt
WHERE t.team = lt.team
AND t.member = lt.member
AND t.mtime >= lt.mtime
RETURNING t.id
""", params)
assert 0 < len(updated_takes) < 10, params
# Recompute the cached `receiving` amount of the donee.
cursor.run("""
WITH our_tips AS (
SELECT t.amount
FROM current_tips t
WHERE t.tippee = %(p_id)s
AND t.is_funded
)
UPDATE participants AS p
SET receiving = taking + coalesce_currency_amount(
(SELECT sum(t.amount, p.main_currency) FROM our_tips t),
p.main_currency
)
, npatrons = (SELECT count(*) FROM our_tips)
WHERE p.id = %(p_id)s
""", dict(p_id=(pt.team or pt.recipient)))
# Recompute the donor's cached `giving` amount and payment schedule.
if update_donor:
donor = db.Participant.from_id(pt.payer)
donor.update_giving()
donor.schedule_renewals()
return pt
def abort_payin(db, payin, error='aborted by payer'):
"""Mark a payin as cancelled.
Args:
payin (Record): a row from the `payins` table
error (str): the error message to attach to the payin
Returns:
Record: the row updated in the `payins` table
"""
payin = update_payin(db, payin.id, payin.remote_id, 'failed', error)
db.run("""
WITH updated_transfers as (
UPDATE payin_transfers
SET status = 'failed'
, error = %(error)s
WHERE payin = %(payin_id)s
AND status <> 'failed'
RETURNING *
)
INSERT INTO payin_transfer_events
(payin_transfer, status, error, timestamp)
SELECT pt.id, 'failed', pt.error, current_timestamp
FROM updated_transfers pt
""", dict(error=error, payin_id=payin.id))
return payin
def record_payin_refund(
db, payin_id, remote_id, amount, reason, description, status, error=None, ctime=None,
):
"""Record a charge refund.
Args:
payin_id (int): the ID of the refunded payin in our database
remote_id (int): the ID of the refund in the payment processor's database
amount (Money): the refund amount, must be less or equal to the payin amount
reason (str): why this refund was initiated (`refund_reason` SQL type)
description (str): details of the circumstances of this refund
status (str): the current status of the refund (`refund_status` SQL type)
error (str): error message, if the refund has failed
ctime (datetime): when the refund was initiated
Returns:
Record: the row inserted in the `payin_refunds` table
"""
refund = db.one("""
INSERT INTO payin_refunds
(payin, remote_id, amount, reason, description,
status, error, ctime)
VALUES (%(payin_id)s, %(remote_id)s, %(amount)s, %(reason)s, %(description)s,
%(status)s, %(error)s, coalesce(%(ctime)s, current_timestamp))
ON CONFLICT (payin, remote_id) DO UPDATE
SET amount = excluded.amount
, reason = excluded.reason
, description = excluded.description
, status = excluded.status
, error = excluded.error
RETURNING *
, ( SELECT old.status
FROM payin_refunds old
WHERE old.payin = %(payin_id)s
AND old.remote_id = %(remote_id)s
) AS old_status
""", locals())
notify = (
refund.status in ('pending', 'succeeded') and
refund.status != refund.old_status and
refund.ctime > (utcnow() - timedelta(hours=24))
)
if notify:
payin = db.one("SELECT * FROM payins WHERE id = %s", (refund.payin,))
payer = db.Participant.from_id(payin.payer)
payer.notify(
'payin_refund_initiated',
payin_amount=payin.amount,
payin_ctime=payin.ctime,
refund_amount=refund.amount,
refund_reason=refund.reason,
email_unverified_address=True,
)
return refund
def record_payin_transfer_reversal(
db, pt_id, remote_id, amount, payin_refund_id=None, ctime=None
):
"""Record a transfer reversal.
Args:
pt_id (int): the ID of the reversed transfer in our database
remote_id (int): the ID of the reversal in the payment processor's database
amount (Money): the reversal amount, must be less or equal to the transfer amount
payin_refund_id (int): the ID of the associated payin refund in our database
ctime (datetime): when the refund was initiated
Returns:
Record: the row inserted in the `payin_transfer_reversals` table
"""
return db.one("""
INSERT INTO payin_transfer_reversals
(payin_transfer, remote_id, amount, payin_refund,
ctime)
VALUES (%(pt_id)s, %(remote_id)s, %(amount)s, %(payin_refund_id)s,
coalesce(%(ctime)s, current_timestamp))
ON CONFLICT (payin_transfer, remote_id) DO UPDATE
SET amount = excluded.amount
, payin_refund = excluded.payin_refund
RETURNING *
""", locals())
| 39.964409 | 108 | 0.579965 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19,136 | 0.549743 |
d2e3ae6e131a5fa41bdb17b19d893736dfd4f861 | 4,967 | py | Python | vendor/func_lib/assert_handle.py | diudiu/featurefactory | ee02ad9e3ea66e2eeafe6e11859801f0420c7d9e | ["MIT"] | null | null | null | vendor/func_lib/assert_handle.py | diudiu/featurefactory | ee02ad9e3ea66e2eeafe6e11859801f0420c7d9e | ["MIT"] | null | null | null | vendor/func_lib/assert_handle.py | diudiu/featurefactory | ee02ad9e3ea66e2eeafe6e11859801f0420c7d9e | ["MIT"] | 1 | 2021-11-25T06:41:17.000Z | 2021-11-25T06:41:17.000Z |
# -*- coding:utf-8 -*-
from vendor.errors.feature import FeatureProcessError
"""
此目录下所有功能函数均为:
按一定条件检查传入参数合法性
**若不合法, 将抛出异常**
"""
def f_assert_not_null(seq):
"""检测值是否非空或值得列表是否存在非空元素"""
if seq in (None, '', [], {}, ()):
raise FeatureProcessError("value: %s f_assert_not_null Error" % seq)
if isinstance(seq, list):
for value in seq:
if value in (None, '', {}, [], ()):
raise FeatureProcessError("value: %s f_assert_not_null Error" % seq)
return seq
def f_assert_jsonpath_true(seq):
"""假设jsonpath查询到的为true seq为[]空列表时代表没查到字段"""
if seq in ([],):
raise FeatureProcessError("jsonpath not find field")
return seq
def f_assert_must_int(value_list):
"""检测列表中的元素是否为int类型"""
for value in value_list:
if not isinstance(value, int):
raise FeatureProcessError('%s f_assert_must_int Error' % value_list)
return value_list
def f_assert_must_list(value_list):
"""检测列表中的元素是否为list类型"""
for value in value_list:
if not isinstance(value, list):
raise FeatureProcessError('%s f_assert_must_list Error' % value_list)
return value_list
def f_assert_must_dict(value_list):
"""检测列表中的元素是否为dict类型"""
for value in value_list:
if not isinstance(value, dict):
raise FeatureProcessError('%s f_assert_must_dict Error' % value_list)
return value_list
def f_assert_must_digit(value_list, args=False):
"""
检测列表中的元素是否为数字
:param value_list: 待检测列表
:param args: 负数是否通过 false 不通过报异常 True 负数通过
:return: 异常或原值
example:
:value_list [-2,'-2', 3]
:args false
:return 异常
:value_list [-2,'-2', 3]
:args True
:return [-2,'-2', 3]
"""
for value in value_list:
if args:
if not str(value).lstrip('-').isdigit():
raise FeatureProcessError('%s negative number=%s f_assert_must_digit Error' % (value_list, args))
else:
if not str(value).isdigit():
raise FeatureProcessError('%s negative number=%s f_assert_must_digit Error' % (value_list, args))
return value_list
def f_assert_must_basestring(value_list):
"""检测列表中的元素是否为字符串"""
for value in value_list:
if not isinstance(value, basestring):
raise FeatureProcessError('%s f_assert_must_basestring Error' % value_list)
return value_list
def f_assert_must_digit_or_float(value_list, args=False):
"""
检测列表中的元素是否为数字或float, args=false 负数报异常 True 负数通过
:param value_list: 待检测列表
:param args: 负数是否通过 false 不通过报异常 True 负数通过
:return: 异常或原值
example:
:value_list [-2.0,'-2', 3]
:args false
:return 异常
:value_list [-2.0,'-2', 3]
:args True
:return [-2.0,'-2', 3]
"""
for value in value_list:
if args:
if not (str(value).count('.') <= 1 and str(value).replace('.', '').lstrip('-').isdigit()):
raise FeatureProcessError(
'%s negative number=%s f_assert_must_digit_or_float Error' % (value_list, args))
else:
if not (str(value).count('.') <= 1 and str(value).replace('.', '').isdigit()):
raise FeatureProcessError(
'%s negative number=%s f_assert_must_digit_or_float Error' % (value_list, args))
return value_list
def f_assert_must_percent(value_list):
"""
检测是否是百分数
"""
for value in value_list:
if not (str(value)[-1] == '%' and (str(value[:-1]).count('.') <= 1 and str(value[:-1]).replace('.', '').isdigit())):
raise FeatureProcessError(
'%s f_assert_must_percent Error' % value_list)
return value_list
def f_assert_must_between(value_list, args):
"""
检测列表中的元素是否为数字或浮点数且在args的范围内
:param value_list: 待检测列表
:param args: 范围列表
:return: 异常或原值
example:
:value_list [2, 2, 3]
:args [1,3]
:value_list ['-2', '-3', 3]
:args ['-5',3]
"""
assert len(args) == 2
for value in value_list:
if not (str(value).count('.') <= 1 and str(value).replace('.', '').lstrip('-').isdigit()
and float(args[0]) <= float(value) <= float(args[1])):
raise FeatureProcessError('%s f_assert_must_between %s Error' % (value_list, args))
return value_list
def f_assert_seq0_gte_seq1(value_list):
"""检测列表中的第一个元素是否大于等于第二个元素"""
if not value_list[0] >= value_list[1]:
raise FeatureProcessError('%s f_assert_seq0_gte_seq1 Error' % value_list)
return value_list
if __name__ == '__main__':
print f_assert_must_percent(['7.0%'])
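    # More usage sketches (illustrative additions, not in the original file):
    # print f_assert_must_between([2, 3], [1, 5])    # -> [2, 3]
    # print f_assert_must_digit(['-2'], args=True)   # negatives allowed -> ['-2']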
| 29.742515 | 124 | 0.571774 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,635 | 0.474518 |
d2e3af7e8020910904dd800db879455657d8308e | 4,993 | py | Python | main.py | Potapov-AA/CaesarCipherWithKeyword | 4bd520418254b56950be079d0fce638039d4e202 | ["MIT"] | null | null | null | main.py | Potapov-AA/CaesarCipherWithKeyword | 4bd520418254b56950be079d0fce638039d4e202 | ["MIT"] | null | null | null | main.py | Potapov-AA/CaesarCipherWithKeyword | 4bd520418254b56950be079d0fce638039d4e202 | ["MIT"] | null | null | null |
import time
from os import system, walk
from config import CONFIG
from encry import ENCRY
from decry import DECRY
# Configuration setup function
def conf_setting():
system('CLS')
print("Enter key elements: ")
    # Choose the alphabet
alphabet = input("Select the used alphabet [EN]GLISH | [RU]SSIAN: ")
    # Enter the numeric key
numberKey = input("Enter a numeric key: ")
    # Enter the keyword
stringKey = input("Enter your keyword: ")
return CONFIG(alphabet, numberKey, stringKey)
def en_message():
print("Encryption")
def de_message():
print("Decryption")
def select_file():
    # Build a list of all .txt files
filelist = []
for root, dirs, files in walk("."):
for file in files:
if file.endswith(".txt"):
                # Add it to the list
filelist.append(file)
s = ''
while True:
system('CLS')
print("List of txt files: ")
for i in filelist:
print(i)
file = input("Select a file: ")
try:
f = open(file, 'r', encoding='utf-8')
s = f.read()
f.close()
break
except Exception:
print("Error: file not found")
return s
# Print the menu
def print_menu(cryptMode, CONF, text):
file_text = text
while cryptMode != 'EXIT':
system('CLS')
        # Choose an action
cryptMode = input("[E]ncryption|[D]ecryption| [Select] file |[S]etting configure |[Show] configuration |[Show text] |[Exit]: ").upper()
        # If the command does not exist
        if cryptMode not in ['E', 'D', 'S', 'EXIT', 'SHOW', 'SELECT', 'SHOW TEXT']:
            print("Error: command not found!")
time.sleep(2)
        # If configuration setup was chosen
if cryptMode == 'S':
CONF = conf_setting()
        # If encryption or decryption was chosen
        if cryptMode in ['E', 'D']:
            # Check that a file is selected and the configuration has been set up
if CONF is not object:
try:
if cryptMode == 'E':
print("Encryption in progress please wait...")
en_text = ENCRY(CONF.alphaList, CONF.new_alphaList, file_text.upper()).new_text()
print(file_text)
print(en_text)
try:
f = open("en_text.txt", 'w', encoding='utf-8')
f.write(en_text)
f.close()
print("Successfully. Encrypted file written! (en_text.txt)")
input("Please enter something to continue ...")
except Exception:
print("Error: file don't creat!")
input("Please enter something to continue ...")
if cryptMode == 'D':
print("Decryption in progress please wait...")
de_text = DECRY(CONF.alphaList, CONF.new_alphaList, file_text.upper()).new_text()
print(file_text)
print(de_text)
try:
f = open("de_text.txt", 'w', encoding='utf-8')
f.write(de_text)
f.close()
print("Successfully. Encrypted file written! (de_text.txt)")
input("Please enter something to continue ...")
except Exception:
print("Error: file don't creat!")
input("Please enter something to continue ...")
except Exception:
print(Exception)
time.sleep(2)
else:
if CONF is object:
print("Customize the configuration!")
time.sleep(2)
if file_text == '':
print("Chose file!")
time.sleep(2)
print("Wait...")
time.sleep(2)
        # If file selection was chosen
if cryptMode == 'SELECT':
file_text = select_file()
        # If showing the configuration was chosen
if cryptMode == 'SHOW':
if CONF is not object:
CONF.print_conf()
input("Please enter something to continue ...")
else:
print("Customize the configuration!")
time.sleep(2)
        # If showing the text was chosen
if cryptMode == 'SHOW TEXT':
if file_text != '':
print(file_text)
input("Please enter something to continue ...")
else:
print("Please choose file!")
time.sleep(2)
if __name__ == '__main__':
CONF = object
text = ''
cryptMode = ''
print_menu(cryptMode, CONF, text)
| 32.848684 | 143 | 0.488684 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,896 | 0.354326 |
d2e4753ddf7c063ce13b4c81cfba0d2c46394e4c | 504 | py | Python | frappe/email/doctype/email_queue_recipient/email_queue_recipient.py | oryxsolutions/frappe | d193ea22d17ca40d57432040a8afad72287d9e23 | ["MIT"] | null | null | null | frappe/email/doctype/email_queue_recipient/email_queue_recipient.py | oryxsolutions/frappe | d193ea22d17ca40d57432040a8afad72287d9e23 | ["MIT"] | null | null | null | frappe/email/doctype/email_queue_recipient/email_queue_recipient.py | oryxsolutions/frappe | d193ea22d17ca40d57432040a8afad72287d9e23 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# License: MIT. See LICENSE
import frappe
from frappe.model.document import Document
class EmailQueueRecipient(Document):
DOCTYPE = "Email Queue Recipient"
def is_mail_to_be_sent(self):
return self.status == "Not Sent"
def is_main_sent(self):
return self.status == "Sent"
def update_db(self, commit=False, **kwargs):
frappe.db.set_value(self.DOCTYPE, self.name, kwargs)
if commit:
frappe.db.commit()
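# Usage sketch (illustrative, assumes a configured Frappe site context):
# doc = frappe.get_doc("Email Queue Recipient", name)
# if doc.is_mail_to_be_sent():
#     doc.update_db(status="Sent", commit=True)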
| 22.909091 | 58 | 0.730159 | 332 | 0.65873 | 0 | 0 | 0 | 0 | 0 | 0 | 147 | 0.291667 |
d2e52be160ba41f3c7d6be5212d1c7221d94eb66 | 3,211 | py | Python | tests/groups/family/test_pseudo_dojo.py | mbercx/aiida-pseudo | 070bdfa37d30674e1f83bf6d14987aa977426d92 | ["MIT"] | null | null | null | tests/groups/family/test_pseudo_dojo.py | mbercx/aiida-pseudo | 070bdfa37d30674e1f83bf6d14987aa977426d92 | ["MIT"] | 2 | 2021-09-21T11:28:55.000Z | 2021-09-21T12:13:48.000Z | tests/groups/family/test_pseudo_dojo.py | mbercx/aiida-pseudo | 070bdfa37d30674e1f83bf6d14987aa977426d92 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# pylint: disable=unused-argument,pointless-statement
"""Tests for the `PseudoDojoFamily` class."""
import pytest
from aiida_pseudo.data.pseudo import UpfData, Psp8Data, PsmlData, JthXmlData
from aiida_pseudo.groups.family import PseudoDojoConfiguration, PseudoDojoFamily
def test_type_string(clear_db):
"""Verify the `_type_string` class attribute is correctly set to the corresponding entry point name."""
assert PseudoDojoFamily._type_string == 'pseudo.family.pseudo_dojo' # pylint: disable=protected-access
def test_pseudo_types():
"""Test the `PseudoDojoFamily.pseudo_types` method."""
assert PseudoDojoFamily.pseudo_types == (UpfData, PsmlData, Psp8Data, JthXmlData)
def test_default_configuration():
"""Test the `PseudoDojoFamily.default_configuration` class attribute."""
assert isinstance(PseudoDojoFamily.default_configuration, PseudoDojoConfiguration)
def test_valid_configurations():
"""Test the `PseudoDojoFamily.valid_configurations` class attribute."""
valid_configurations = PseudoDojoFamily.valid_configurations
assert isinstance(valid_configurations, tuple)
for entry in valid_configurations:
assert isinstance(entry, PseudoDojoConfiguration)
def test_get_valid_labels():
"""Test the `PseudoDojoFamily.get_valid_labels` class method."""
valid_labels = PseudoDojoFamily.get_valid_labels()
assert isinstance(valid_labels, tuple)
for entry in valid_labels:
assert isinstance(entry, str)
def test_format_configuration_label():
"""Test the `PseudoDojoFamily.format_configuration_label` class method."""
configuration = PseudoDojoConfiguration('0.4', 'PBE', 'SR', 'standard', 'psp8')
assert PseudoDojoFamily.format_configuration_label(configuration) == 'PseudoDojo/0.4/PBE/SR/standard/psp8'
def test_constructor():
"""Test that the `PseudoDojoFamily` constructor validates the label."""
with pytest.raises(ValueError, match=r'the label `.*` is not a valid PseudoDojo configuration label'):
PseudoDojoFamily()
with pytest.raises(ValueError, match=r'the label `.*` is not a valid PseudoDojo configuration label'):
PseudoDojoFamily(label='nc-sr-04_pbe_standard_psp8')
label = PseudoDojoFamily.format_configuration_label(PseudoDojoFamily.default_configuration)
family = PseudoDojoFamily(label=label)
assert isinstance(family, PseudoDojoFamily)
@pytest.mark.usefixtures('clear_db')
def test_create_from_folder(filepath_pseudos):
"""Test the `PseudoDojoFamily.create_from_folder` class method."""
family = PseudoDojoFamily.create_from_folder(
filepath_pseudos('upf'), 'PseudoDojo/0.4/PBE/SR/standard/psp8', pseudo_type=UpfData
)
assert isinstance(family, PseudoDojoFamily)
@pytest.mark.usefixtures('clear_db')
def test_create_from_folder_duplicate(filepath_pseudos):
"""Test that `PseudoDojoFamily.create_from_folder` raises for duplicate label."""
label = 'PseudoDojo/0.4/PBE/SR/standard/psp8'
PseudoDojoFamily(label=label).store()
with pytest.raises(ValueError, match=r'the PseudoDojoFamily `.*` already exists'):
PseudoDojoFamily.create_from_folder(filepath_pseudos('upf'), label)
| 40.64557 | 110 | 0.766116 | 0 | 0 | 0 | 0 | 785 | 0.244472 | 0 | 0 | 1,206 | 0.375584 |
d2e5ecb02a9dd4eeeac961445b6d9553ecd7b3a1 | 1,743 | py | Python | converter.py | Poudingue/Max2Mitsuba | 857c67b91f524de3e33f66958f26b022fa0a38f0 | ["WTFPL"] | 4 | 2019-10-30T09:18:42.000Z | 2020-06-18T12:50:06.000Z | converter.py | Poudingue/Fbx2Mitsuba | 857c67b91f524de3e33f66958f26b022fa0a38f0 | ["WTFPL"] | null | null | null | converter.py | Poudingue/Fbx2Mitsuba | 857c67b91f524de3e33f66958f26b022fa0a38f0 | ["WTFPL"] | null | null | null |
import sys
import os
if sys.version_info[0] != 3:
	print("Running in python "+str(sys.version_info[0])+", should be python 3.")
print("Please install python 3.7 from the official site python.org")
print("Exiting now.")
exit()
import shutil
import argparse
import fbx2tree
import builder_fromfbx
import time
# config is useful to keep info for the different modules
import config
parser = argparse.ArgumentParser()
parser.add_argument("file", help="file")
parser.add_argument("-v", "--verbose", help="Print more stuff", action="store_true")
parser.add_argument("-d", "--debug", help="Create intermediate xml files for debug", action="store_true")
parser.add_argument("--closest", help="Try to stick as close to the original materials in 3dsmax, even if it is at the expense of realism", action="store_true")
parser.add_argument("--realist", help="Try to make materials as realist as possible, even if it is at the expense of fidelity to the original scene", action="store_true")
args = parser.parse_args()
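# Example invocation (illustrative, not from the original repo):
#     python converter.py scene.fbx --verbose --realist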
if args.closest and args.realist :
print("Incompatible options : --closest and --realist. Choose one, or neither for a balanced result")
exit(0)
fullname = args.file
if fullname.split(".")[-1].lower() != "fbx" :
print("The file is not an fbx file")
exit(0)
config.curr_place = os.path.dirname(os.path.abspath(__file__))
config.filename = ".".join(fullname.split(".")[:-1]).split("\\")[-1]#Remove extension, remove path.
config.filepath = "\\".join(fullname.split("\\")[:-1])+"\\"#Keep only path
config.verbose = args.verbose
config.debug = args.debug
config.closest = args.closest
config.realist = args.realist
fbxtree = fbx2tree.transform()
builder_fromfbx.build(fbxtree)
print("Conversion finished !")
| 34.86 | 176 | 0.724613 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 776 | 0.445209 |
d2e64e022f433cd3fd044c614f4cd92d7a6f232d | 4,256 | py | Python | run.py | snandasena/disaster-response-pipeline | 709af8c5fcb520dae82dc3b75c30ab2609402f53 | ["MIT"] | null | null | null | run.py | snandasena/disaster-response-pipeline | 709af8c5fcb520dae82dc3b75c30ab2609402f53 | ["MIT"] | null | null | null | run.py | snandasena/disaster-response-pipeline | 709af8c5fcb520dae82dc3b75c30ab2609402f53 | ["MIT"] | null | null | null |
import sys
import json
import plotly
from flask import Flask
from flask import render_template, request
from plotly.graph_objects import Heatmap, Bar
from sklearn.externals import joblib
from sqlalchemy import create_engine
sys.path.append("common")
from common.nlp_common_utils import *
if len(sys.argv) == 1:
sys.argv.append('./data/DisasterResponse.db')
sys.argv.append('./models/classifier.pkl')
# this requires for joblib and pickle
def tokenize(text):
"""
    Uses a common utility function to tokenize text into a cleaned token list.
INPUT:
text - raw message
OUTPUT:
clean_tokens -- cleaned tokenized list
"""
return tokenize_text(text)
# create a flask app
app = Flask(__name__, template_folder='app/templates')
#
database_file_location, model_location = sys.argv[1:]
# load data
engine = create_engine('sqlite:///{}'.format(database_file_location))
df = pd.read_sql_table('DisasterResponse', engine)
# category df
df_categories = df.iloc[:, 4:]
# load model
model = joblib.load(model_location)
def generate_graph_with_template(data, title, yaxis_title, xaxi_title):
"""
This common layout can be used to create Plotly graph layout.
INPUT:
        data - the data trace for the graph, e.g. a Bar or Heatmap object
        title - the title of the chart
        yaxis_title - Y axis title
        xaxi_title - X axis title
OUTPUT:
layout for particular graph.
"""
return {
'data': [data],
'layout': {
'title': title,
'yaxis': {
'title': yaxis_title
},
'xaxis': {
'title': xaxi_title
}
}
}
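# Illustrative call (added example, not in the original file):
#     generate_graph_with_template(Bar(x=['a'], y=[1]), 'Title', 'Count', 'Genre')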
def generate_message_genres_bar_chart():
"""
create a graph using extracted data for `genre`
"""
# extract data needed for visuals
genre_counts = df.groupby('genre').count()['message']
genre_names = list(genre_counts.index)
data = Bar(x=genre_names, y=genre_counts)
title = 'Distribution of Message Genres'
y_title = 'Count'
x_title = 'Genre'
return generate_graph_with_template(data, title, y_title, x_title)
def generate_message_categories_distribution_bar_chart():
"""
create a graph for distribution of the messages.
"""
data = Bar(x=df_categories.columns,
y=list(df_categories.sum().sort_values(ascending=False)))
title = 'Distribution of Message Categories'
y_title = 'Count'
x_title = 'Category'
return generate_graph_with_template(data, title, y_title, x_title)
def generate_two_cat_relation_heat_map():
"""
A correlation matrix for categories
"""
data = Heatmap(
z=df_categories.corr(),
y=df_categories.columns,
x=df_categories.columns)
title = 'Correlation Distribution of Categories'
y_title = 'Category'
x_title = 'Category'
return generate_graph_with_template(data, title, y_title, x_title)
def generate_graphs():
# create visuals
graphs = [generate_message_genres_bar_chart(),
generate_message_categories_distribution_bar_chart(),
generate_two_cat_relation_heat_map()]
return graphs
# index webpage displays cool visuals and receives user input text for model
@app.route('/')
@app.route('/index')
def index():
graphs = generate_graphs()
# encode plotly graphs in JSON
ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
graph_json = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
# render web page with plotly graphs
return render_template('master.html', ids=ids, graphJSON=graph_json)
# web page that handles user query and displays model results
@app.route('/go')
def go():
# save user input in query
query = request.args.get('query', '')
# use model to predict classification for query
classification_labels = model.predict([query])[0]
classification_results = dict(zip(df.columns[4:], classification_labels))
# This will render the go.html Please see that file.
return render_template(
'go.html',
query=query,
classification_result=classification_results
)
def main():
app.run(host='0.0.0.0', port=3001, debug=True)
if __name__ == '__main__':
main()
| 25.95122 | 79 | 0.672227 | 0 | 0 | 0 | 0 | 836 | 0.196429 | 0 | 0 | 1,532 | 0.359962 |
d2e65a9a5236dcdd44347a721b18b12179871e04 | 840 | py | Python | process.py | s-xie/processing | e0f1a851bed6159a718ae4e4afb3bfe3a30f6af5 | ["MIT"] | null | null | null | process.py | s-xie/processing | e0f1a851bed6159a718ae4e4afb3bfe3a30f6af5 | ["MIT"] | null | null | null | process.py | s-xie/processing | e0f1a851bed6159a718ae4e4afb3bfe3a30f6af5 | ["MIT"] | null | null | null |
import re
import sys
from nltk.tokenize import word_tokenize
from unidecode import unidecode
from nltk.tokenize import sent_tokenize
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('fin')
parser.add_argument('fout')
args = parser.parse_args()
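# Example invocation (illustrative): python process.py raw_lines.txt sentences.txt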
sentences=set()
with open(args.fin, 'r') as f:
count = 0
for line in f:
count+=1
sentences.add(line.strip())
if count % 100000==0:
print(count)
with open(args.fout, 'w') as f:
count = 0
group = ''
for s in sentences:
count+=1
if s !='':
group+=s+'\n'
if count % 20==0:
try:
p = sent_tokenize(unidecode(group))
f.write('\n'.join(p))
group = ''
except:
print("nltk error")
		if count % 10000==0:
			print(count)
	# Flush any sentences left over in the final, partial batch.
	if group:
		try:
			f.write('\n'.join(sent_tokenize(unidecode(group))))
		except:
			print("nltk error")
| 20 | 45 | 0.613095 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.05119 |
d2e6ff17f08688e760eb2d19c6c6dfcc805a369d | 1,071 | py | Python | Aula 14 - Estrutura de repetição while/desafio058-jogo-da-adivinhação.py | josue-rosa/Python---Curso-em-Video | 2d74c7421a49952b7c3eadb1010533525f2de338 | ["MIT"] | 3 | 2020-10-07T03:21:07.000Z | 2020-10-13T14:18:49.000Z | Aula 14 - Estrutura de repetição while/desafio058-jogo-da-adivinhação.py | josue-rosa/Python---Curso-em-Video | 2d74c7421a49952b7c3eadb1010533525f2de338 | ["MIT"] | null | null | null | Aula 14 - Estrutura de repetição while/desafio058-jogo-da-adivinhação.py | josue-rosa/Python---Curso-em-Video | 2d74c7421a49952b7c3eadb1010533525f2de338 | ["MIT"] | null | null | null |
"""
Improve the game from CHALLENGE 028 where the computer "thinks" of a number between 0 and 10.
Now the player keeps guessing until they get it right, and at the end the
program shows how many guesses were needed to win.
"""
"""
from random import randint
tentativas = 1
computador = randint(0, 10)
jogador = int(input('Enter a number so we can play '))
while jogador != computador:
    jogador = int(input('Wrong. Try again. '))
    tentativas += 1
print(f'You got it. I was thinking of {computador} too.')
print(f'Total guesses: {tentativas}.')
"""
# Teacher's corrected version
from random import randint
computador = randint(0, 10)
print('I thought of a number between 0 and 10')
acertou = False
palpites = 0
while not acertou:
    jogador = int(input('What is your guess? '))
palpites += 1
if jogador == computador:
acertou = True
else:
        if jogador < computador:
            print('Higher. Try again.')
        elif jogador > computador:
            print('Lower. Try again.')
print(f'You got it in {palpites} guesses. Congratulations!')
| 28.945946 | 87 | 0.687208 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 738 | 0.685237 |
d2e7114d8d4486671f83a30e7420ce1d69cd65c1 | 1,550 | py | Python | plugins/googlefight.py | serpis/pynik | 074e6b2d2282557976eee3681d8bfcd5659c011e | ["MIT"] | 4 | 2016-08-09T21:25:23.000Z | 2019-08-16T21:55:17.000Z | plugins/googlefight.py | serpis/pynik | 074e6b2d2282557976eee3681d8bfcd5659c011e | ["MIT"] | 10 | 2015-01-25T21:25:22.000Z | 2021-01-28T19:50:22.000Z | plugins/googlefight.py | serpis/pynik | 074e6b2d2282557976eee3681d8bfcd5659c011e | ["MIT"] | 4 | 2015-05-06T21:45:39.000Z | 2018-07-02T16:47:36.000Z |
# coding: utf-8
import re
import utility
from commands import Command
def google_pages(string):
url = 'http://www.google.se/search?q=' + utility.escape(string) + '&ie=UTF-8&oe=UTF-8'
response = utility.read_url(url)
data = response["data"]
	search = re.search(r'swrnum=(\d+)">', data)
if search:
result = search.group(1)
if result:
return int(result, 10)
else:
return None
else:
return None
def google_divisor(int1, int2):
	# Pick the scaling unit from the smaller of the two counts, so that both
	# numbers stay readable after the division.
	if int1 < int2:
		smallest = int1
	else:
		smallest = int2
	if smallest > 1000000:
		divisor = 1000000.0
		unit = 'm'
	elif smallest > 1000:
divisor = 1000.0
unit = 'k'
else:
divisor = 1
unit = ''
return (divisor, unit)
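# Illustrative behaviour (comment added for clarity): the unit is chosen from
# the smaller count, e.g. google_divisor(5000000, 2000) -> (1000.0, 'k'),
# so the two results display as 5000.0k and 2.0k.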
class Googlefight(Command):
def __init__(self):
pass
def trig_googlefight(self, bot, source, target, trigger, argument):
args = argument.split('|', 2)
if len(args) == 2 and len(args[0]) > 0 and len(args[1]) > 0:
result1 = google_pages(args[0])
result2 = google_pages(args[1])
if result1 and result2:
grej = google_divisor(result1, result2)
result1 = result1 / grej[0]
result2 = result2 / grej[0]
unit = grej[1]
				if result1 == result2:
					return "It's a tie! " + str(result1) + unit + " hits!"
elif result1 > result2:
return args[0] + ' is the winner! (' + str(result1) + unit + ' to ' + str(result2) + unit + ')'
else:
return args[1] + ' is the winner! (' + str(result2) + unit + ' to ' + str(result1) + unit + ')'
else:
return "Couldn't search."
else:
return "Usage: .googlefight arg1|arg2"
| 22.142857 | 100 | 0.623226 | 864 | 0.557419 | 0 | 0 | 0 | 0 | 0 | 0 | 228 | 0.147097 |
d2e7cc251d72d1b4b8afa5565221124b4f826ce6 | 457 | py | Python | was/lib/tuning/actions/ThreadPool.py | rocksun/ucmd | 486de31324195f48c4110e327d635aaafe3d74d6 | ["Apache-2.0"] | 2 | 2019-10-09T06:59:47.000Z | 2019-10-10T03:20:17.000Z | was/lib/tuning/actions/ThreadPool.py | rocksun/ucmd | 486de31324195f48c4110e327d635aaafe3d74d6 | ["Apache-2.0"] | null | null | null | was/lib/tuning/actions/ThreadPool.py | rocksun/ucmd | 486de31324195f48c4110e327d635aaafe3d74d6 | ["Apache-2.0"] | 1 | 2021-11-25T06:41:17.000Z | 2021-11-25T06:41:17.000Z |
import os
# wsadmin (WebSphere) tuning action: AdminConfig is injected by the wsadmin
# Jython runtime, which is why it is never imported and why the Python 2
# print statement is kept.
min_size = 512  # renamed from `min`/`max` so the builtins are not shadowed
max_size = 512

def app_server_tuning(server_config_id):
    server_name = AdminConfig.showAttribute(server_config_id, "name")
    threadpool_list = AdminConfig.list('ThreadPool', server_config_id).split("\n")
    for tp in threadpool_list:
        if tp.count('WebContainer') == 1:
            print "Modify Server '%s' WebContainer Pool Min=%d, Max=%d" % (server_name, min_size, max_size)
            AdminConfig.modify(tp, [["minimumSize", min_size], ["maximumSize", max_size]])
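# Typical invocation is from the wsadmin shell rather than plain CPython, e.g.
# (illustrative, not part of this file):
#
#     wsadmin.sh -lang jython -f ThreadPool.py
#
# and, to tune every server:
#
#     for srv in AdminConfig.list('Server').split("\n"):
#         app_server_tuning(srv)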
| 30.466667 | 96 | 0.68709 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.251641 |
d2e833d9d9dbd44a801765209ab9f359cdd98029 | 6,770 | py | Python | app/api/v2/resources/saleorders.py | calebrotich10/store-manager-api-v2 | 16dff84823e77218f1135c99f0592f113fddee84 | [
"MIT"
]
| null | null | null | app/api/v2/resources/saleorders.py | calebrotich10/store-manager-api-v2 | 16dff84823e77218f1135c99f0592f113fddee84 | [
"MIT"
]
| null | null | null | app/api/v2/resources/saleorders.py | calebrotich10/store-manager-api-v2 | 16dff84823e77218f1135c99f0592f113fddee84 | [
"MIT"
]
| 1 | 2018-11-04T18:09:38.000Z | 2018-11-04T18:09:38.000Z | """This module contains objects for saleorders endpoints"""
from flask import jsonify, request, abort, make_response
from flask_restful import Resource
from . import common_functions
from ..models import products, saleorders
from ..utils import verify
from .. import database
class SaleOrder(Resource):
"""Class contains CRUD definitions
for saleorders
"""
def post(self):
"""POST /saleorder endpoint"""
user_id = verify.verify_tokens()[1]
data = request.get_json()
common_functions.no_json_in_request(data)
try:
items = data['items']
except KeyError:
return make_response(jsonify({
"message":"list of items missing"
}), 403)
        if not isinstance(items, list):
abort(make_response(jsonify(
message="The value should be a list of dictionaries"
), 400))
totalAmount = 0
saleorder = saleorders.SaleOrder(amount=totalAmount, made_by=user_id)
saleorder.save()
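        # NOTE: the order is created with amount=0 and then looked up by that
        # sentinel value to recover its id, so this assumes no other order is
        # sitting at amount=0 concurrently; the real total is written below
        # once the items have been summed.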
query = """SELECT saleorder_id from saleorders WHERE amount = 0
"""
saleorder_id = database.select_from_db(query)[0]['saleorder_id']
items_sold = []
for item in items:
try:
product = item['product']
            except (KeyError, TypeError):
return make_response(jsonify({
"message":"Kindly specify the product you want to buy"
}), 403)
try:
quantity = item['quantity']
            except (KeyError, TypeError):
return make_response(jsonify({
"message":"Kindly specify the quantity of the product you want"
}), 403)
if not isinstance(product, int):
rollback_saleorder = saleorders.SaleOrder(saleorder_id=saleorder_id)
rollback_saleorder.rollback_saleorder()
abort(make_response(jsonify(
message="Please select the a product you want to purchase"
), 400))
if not isinstance(quantity, int):
rollback_saleorder = saleorders.SaleOrder(saleorder_id=saleorder_id)
rollback_saleorder.rollback_saleorder()
abort(make_response(jsonify(
message="Please have a number for the quantity value"
), 400))
if quantity < 1:
rollback_saleorder = saleorders.SaleOrder(saleorder_id=saleorder_id)
rollback_saleorder.rollback_saleorder()
abort(make_response(jsonify(
message="Please have a quantity value over 0"
), 400))
query = """SELECT * FROM products WHERE product_id = '{}'""".format(product)
product_exists = database.select_from_db(query)
if product_exists:
product_name = product_exists[0]['product_name']
product_price = product_exists[0]['product_price']
inventory = product_exists[0]['inventory']
if inventory == 0:
rollback_saleorder = saleorders.SaleOrder(saleorder_id=saleorder_id)
rollback_saleorder.rollback_saleorder()
return abort(make_response(jsonify(
message="Please eliminate {} from your sale. It is currently out of stock".format(product_name)
), 400))
if quantity > inventory:
rollback_saleorder = saleorders.SaleOrder(saleorder_id=saleorder_id)
rollback_saleorder.rollback_saleorder()
return abort(make_response(jsonify(
message="Our current stock cannot serve an order of {}. You can currently order a maximum of {} for the product '{}'".format(quantity, inventory, product_name)
), 400))
product_amount = product_price * quantity
current_item = {
"product": product_exists[0]['product_name'],
"quantity": quantity,
"price": product_exists[0]['product_price'],
"product_amount": product_amount
}
items_sold.append(current_item)
totalAmount += product_amount
sale_item = saleorders.SaleItems(saleorder_id=saleorder_id, product=product, quantity=quantity)
sale_item.save()
updated_inventory = inventory - quantity
product_to_update = products.Products(product_id=product ,inventory=updated_inventory)
product_to_update.deduct_inventory()
if not product_exists:
rollback_saleorder = saleorders.SaleOrder(saleorder_id=saleorder_id)
rollback_saleorder.rollback_saleorder()
return abort(make_response(jsonify({
"message": "Product with id {} is not available in the store".format(product)
}), 404))
update_amount_query = """UPDATE saleorders SET amount = {} WHERE saleorder_id = {}""".format(totalAmount, saleorder_id)
database.insert_to_db(update_amount_query)
return make_response(jsonify({
"message": "Checkout complete",
"items_sold": items_sold,
"total_amount": totalAmount
}), 201)
def get(self):
"""GET /saleorder endpoint"""
verify.verify_tokens()
saleorder = saleorders.SaleOrder()
get_saleorder = saleorder.get()
if not get_saleorder:
return make_response(jsonify({
'message': "No sale orders created yet"
}), 404)
response = jsonify({
'message': "Successfully fetched all the sale orders",
'sale_orders': get_saleorder
})
response.status_code = 200
return response
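# A minimal sketch of how these resources are typically registered with
# Flask-RESTful (the route paths are assumptions, not taken from this file):
#
#     from flask_restful import Api
#     api = Api(app)
#     api.add_resource(SaleOrder, '/saleorder')
#     api.add_resource(SpecificSaleOrder, '/saleorder/<int:saleorder_id>')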
class SpecificSaleOrder(Resource):
"""Class contains CRUD definitions
for saleorders
"""
def get(self, saleorder_id):
"""GET /saleorder/<int:saleorder_id>"""
verify.verify_tokens()
query = """SELECT * FROM saleorders WHERE saleorder_id = '{}'""".format(saleorder_id)
sale_order = database.select_from_db(query)
if not sale_order:
return make_response(jsonify({
"message": "Sale Order with id {} not found".format(saleorder_id)
}
), 404)
return make_response(jsonify({
"message": "Sale order fetched successfully",
"saleorder": sale_order
}
), 200) | 39.590643 | 180 | 0.578434 | 6,419 | 0.948154 | 0 | 0 | 0 | 0 | 0 | 0 | 1,479 | 0.218464 |
d2e9b98d6967be78af6014083084b5dab63e624c | 61 | py | Python | nautobot/circuits/__init__.py | psmware-ltd/nautobot | ac516287fb8edcc3482bd011839de837c6bbf0df | [
"Apache-2.0"
]
| 384 | 2021-02-24T01:40:40.000Z | 2022-03-30T10:30:59.000Z | nautobot/circuits/__init__.py | psmware-ltd/nautobot | ac516287fb8edcc3482bd011839de837c6bbf0df | [
"Apache-2.0"
]
| 1,067 | 2021-02-24T00:58:08.000Z | 2022-03-31T23:38:23.000Z | nautobot/circuits/__init__.py | psmware-ltd/nautobot | ac516287fb8edcc3482bd011839de837c6bbf0df | [
"Apache-2.0"
]
| 128 | 2021-02-24T02:45:16.000Z | 2022-03-20T18:48:36.000Z | default_app_config = "nautobot.circuits.apps.CircuitsConfig"
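# (default_app_config points Django at the AppConfig subclass to load for this
# app; Django 3.2+ discovers it automatically, so the hint only matters on
# older versions.)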
| 30.5 | 60 | 0.852459 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.639344 |
d2e9f3e2143b7da446094a72db5befcb7fc0a728 | 54,559 | py | Python | autogalaxy/profiles/mass_profiles/stellar_mass_profiles.py | Jammy2211/PyAutoModel | 02f54e71900de9ec12c9070dc00a4bd001b25afa | [
"MIT"
]
| 4 | 2019-10-29T13:27:23.000Z | 2020-03-24T11:13:35.000Z | autogalaxy/profiles/mass_profiles/stellar_mass_profiles.py | Jammy2211/PyAutoModel | 02f54e71900de9ec12c9070dc00a4bd001b25afa | [
"MIT"
]
| null | null | null | autogalaxy/profiles/mass_profiles/stellar_mass_profiles.py | Jammy2211/PyAutoModel | 02f54e71900de9ec12c9070dc00a4bd001b25afa | [
"MIT"
]
| 3 | 2020-02-12T10:29:59.000Z | 2020-03-24T11:13:53.000Z | import copy
import numpy as np
from scipy.special import wofz
from scipy.integrate import quad
from typing import List, Tuple
import autoarray as aa
from autogalaxy.profiles.mass_profiles import MassProfile
from autogalaxy.profiles.mass_profiles.mass_profiles import (
MassProfileMGE,
MassProfileCSE,
)
from autogalaxy.profiles.mass_profiles.mass_profiles import psi_from
class StellarProfile:
pass
class EllGaussian(MassProfile, StellarProfile):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
sigma: float = 0.01,
mass_to_light_ratio: float = 1.0,
):
"""
The elliptical Gaussian light profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
            the light profile's image is compared to, which is expected to be electrons per second).
sigma
The sigma value of the Gaussian.
"""
super(EllGaussian, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
super(MassProfile, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
self.mass_to_light_ratio = mass_to_light_ratio
self.intensity = intensity
self.sigma = sigma
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
return self.deflections_2d_via_analytic_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_analytic_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
deflections = (
self.mass_to_light_ratio
* self.intensity
* self.sigma
* np.sqrt((2 * np.pi) / (1.0 - self.axis_ratio ** 2.0))
* self.zeta_from(grid=grid)
)
return self.rotate_grid_from_reference_frame(
np.multiply(
1.0, np.vstack((-1.0 * np.imag(deflections), np.real(deflections))).T
)
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_integral_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
Note: sigma is divided by sqrt(q) here.
"""
def calculate_deflection_component(npow, index):
deflection_grid = self.axis_ratio * grid[:, index]
for i in range(grid.shape[0]):
deflection_grid[i] *= (
self.intensity
* self.mass_to_light_ratio
* quad(
self.deflection_func,
a=0.0,
b=1.0,
args=(
grid[i, 0],
grid[i, 1],
npow,
self.axis_ratio,
self.sigma / np.sqrt(self.axis_ratio),
),
)[0]
)
return deflection_grid
deflection_y = calculate_deflection_component(1.0, 0)
deflection_x = calculate_deflection_component(0.0, 1)
return self.rotate_grid_from_reference_frame(
np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T)
)
@staticmethod
def deflection_func(u, y, x, npow, axis_ratio, sigma):
eta_u = np.sqrt(axis_ratio) * np.sqrt(
(u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u))))
)
return np.exp(-0.5 * np.square(np.divide(eta_u, sigma))) / (
(1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5)
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_from(self, grid: aa.type.Grid2DLike):
"""Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self.convergence_func(self.grid_to_eccentric_radii(grid))
def convergence_func(self, grid_radius: float) -> float:
return self.mass_to_light_ratio * self.image_2d_via_radii_from(grid_radius)
@aa.grid_dec.grid_2d_to_structure
def potential_2d_from(self, grid: aa.type.Grid2DLike):
return np.zeros(shape=grid.shape[0])
def image_2d_via_radii_from(self, grid_radii: np.ndarray):
"""Calculate the intensity of the Gaussian light profile on a grid of radial coordinates.
Parameters
----------
grid_radii
The radial distance from the centre of the profile. for each coordinate on the grid.
Note: sigma is divided by sqrt(q) here.
"""
return np.multiply(
self.intensity,
np.exp(
-0.5
* np.square(
np.divide(grid_radii, self.sigma / np.sqrt(self.axis_ratio))
)
),
)
@property
def axis_ratio(self):
axis_ratio = super().axis_ratio
return axis_ratio if axis_ratio < 0.9999 else 0.9999
def zeta_from(self, grid: aa.type.Grid2DLike):
q2 = self.axis_ratio ** 2.0
ind_pos_y = grid[:, 0] >= 0
shape_grid = np.shape(grid)
output_grid = np.zeros((shape_grid[0]), dtype=np.complex128)
scale_factor = self.axis_ratio / (self.sigma * np.sqrt(2.0 * (1.0 - q2)))
xs_0 = grid[:, 1][ind_pos_y] * scale_factor
ys_0 = grid[:, 0][ind_pos_y] * scale_factor
xs_1 = grid[:, 1][~ind_pos_y] * scale_factor
ys_1 = -grid[:, 0][~ind_pos_y] * scale_factor
output_grid[ind_pos_y] = -1j * (
wofz(xs_0 + 1j * ys_0)
- np.exp(-(xs_0 ** 2.0) * (1.0 - q2) - ys_0 * ys_0 * (1.0 / q2 - 1.0))
* wofz(self.axis_ratio * xs_0 + 1j * ys_0 / self.axis_ratio)
)
output_grid[~ind_pos_y] = np.conj(
-1j
* (
wofz(xs_1 + 1j * ys_1)
- np.exp(-(xs_1 ** 2.0) * (1.0 - q2) - ys_1 * ys_1 * (1.0 / q2 - 1.0))
* wofz(self.axis_ratio * xs_1 + 1j * ys_1 / self.axis_ratio)
)
)
return output_grid
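    # zeta_from evaluates the complex deflection integral of an elliptical
    # Gaussian via the Faddeeva function wofz (cf. Shajib 2019); conjugating
    # the negative-y half exploits the profile's reflection symmetry.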
def with_new_normalization(self, normalization):
mass_profile = copy.copy(self)
mass_profile.mass_to_light_ratio = normalization
return mass_profile
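# A minimal usage sketch for the Gaussian mass profile above (grid size and
# parameter values are illustrative; requires autogalaxy/autoarray at runtime):
#
#     gaussian = EllGaussian(centre=(0.0, 0.0), intensity=1.0, sigma=0.1)
#     grid = aa.Grid2D.uniform(shape_native=(50, 50), pixel_scales=0.05)
#     deflections = gaussian.deflections_yx_2d_from(grid=grid)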
# noinspection PyAbstractClass
class AbstractEllSersic(MassProfile, MassProfileMGE, MassProfileCSE, StellarProfile):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
sersic_index: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The Sersic mass profile, the mass profiles of the light profiles that are used to fit and subtract the lens \
model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
mass_to_light_ratio
The mass-to-light ratio of the light profiles
"""
super(AbstractEllSersic, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
super(MassProfile, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
super(MassProfileMGE, self).__init__()
super(MassProfileCSE, self).__init__()
self.mass_to_light_ratio = mass_to_light_ratio
self.intensity = intensity
self.effective_radius = effective_radius
self.sersic_index = sersic_index
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
return self.deflections_2d_via_cse_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_mge_from(self, grid: aa.type.Grid2DLike):
"""
        Calculate the projected 2D deflection angles from a grid of (y,x) arc second coordinates, by computing and
        summing the deflections of each individual Gaussian used to decompose the mass profile
        (see `decompose_convergence_via_mge`).
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self._deflections_2d_via_mge_from(
grid=grid, sigmas_factor=np.sqrt(self.axis_ratio)
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_cse_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the projected 2D deflection angles from a grid of (y,x) arc second coordinates, by computing and
        summing the deflections of each individual cse used to decompose the mass profile.
        The cored steep elliptical (cse) decomposition of the mass
        profile (see `decompose_convergence_via_cse`) uses equation (12) of
        Oguri 2021 (https://arxiv.org/abs/2106.11464).
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self._deflections_2d_via_cse_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_from(self, grid: aa.type.Grid2DLike):
"""Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self.convergence_func(self.grid_to_eccentric_radii(grid))
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_via_mge_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
eccentric_radii = self.grid_to_eccentric_radii(grid=grid)
return self._convergence_2d_via_mge_from(grid_radii=eccentric_radii)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_via_cse_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the projected 2D convergence from a grid of (y,x) arc second coordinates, by computing and summing
the convergence of each individual cse used to decompose the mass profile.
        The cored steep elliptical (cse) decomposition of the mass
        profile (see `decompose_convergence_via_cse`) uses equation (12) of
Oguri 2021 (https://arxiv.org/abs/2106.11464).
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
elliptical_radii = self.grid_to_elliptical_radii(grid=grid)
return self._convergence_2d_via_cse_from(grid_radii=elliptical_radii)
def convergence_func(self, grid_radius: float) -> float:
return self.mass_to_light_ratio * self.image_2d_via_radii_from(grid_radius)
@aa.grid_dec.grid_2d_to_structure
def potential_2d_from(self, grid: aa.type.Grid2DLike):
return np.zeros(shape=grid.shape[0])
def image_2d_via_radii_from(self, radius: np.ndarray):
"""
Returns the intensity of the profile at a given radius.
Parameters
----------
radius
The distance from the centre of the profile.
"""
return self.intensity * np.exp(
-self.sersic_constant
* (((radius / self.effective_radius) ** (1.0 / self.sersic_index)) - 1)
)
def decompose_convergence_via_mge(self) -> Tuple[List, List]:
radii_min = self.effective_radius / 100.0
radii_max = self.effective_radius * 20.0
def sersic_2d(r):
return (
self.mass_to_light_ratio
* self.intensity
* np.exp(
-self.sersic_constant
* (((r / self.effective_radius) ** (1.0 / self.sersic_index)) - 1.0)
)
)
return self._decompose_convergence_via_mge(
func=sersic_2d, radii_min=radii_min, radii_max=radii_max
)
    def decompose_convergence_via_cse(self) -> Tuple[List, List]:
"""
Decompose the convergence of the Sersic profile into cored steep elliptical (cse) profiles.
This decomposition uses the standard 2d profile of a Sersic mass profile.
Parameters
----------
func
The function representing the profile that is decomposed into CSEs.
radii_min:
The minimum radius to fit
radii_max:
The maximum radius to fit
total_cses
The number of CSEs used to approximate the input func.
sample_points: int (should be larger than 'total_cses')
The number of data points to fit
Returns
-------
Tuple[List, List]
A list of amplitudes and core radii of every cored steep elliptical (cse) the mass profile is decomposed
into.
"""
upper_dex, lower_dex, total_cses, sample_points = cse_settings_from(
effective_radius=self.effective_radius,
sersic_index=self.sersic_index,
sersic_constant=self.sersic_constant,
mass_to_light_gradient=0.0,
)
scaled_effective_radius = self.effective_radius / np.sqrt(self.axis_ratio)
radii_min = scaled_effective_radius / 10.0 ** lower_dex
radii_max = scaled_effective_radius * 10.0 ** upper_dex
def sersic_2d(r):
return (
self.mass_to_light_ratio
* self.intensity
* np.exp(
-self.sersic_constant
* (
((r / scaled_effective_radius) ** (1.0 / self.sersic_index))
- 1.0
)
)
)
return self._decompose_convergence_via_cse_from(
func=sersic_2d,
radii_min=radii_min,
radii_max=radii_max,
total_cses=total_cses,
sample_points=sample_points,
)
@property
def sersic_constant(self):
"""A parameter derived from Sersic index which ensures that effective radius contains 50% of the profile's
total integrated light.
"""
return (
(2 * self.sersic_index)
- (1.0 / 3.0)
+ (4.0 / (405.0 * self.sersic_index))
+ (46.0 / (25515.0 * self.sersic_index ** 2))
+ (131.0 / (1148175.0 * self.sersic_index ** 3))
- (2194697.0 / (30690717750.0 * self.sersic_index ** 4))
)
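    # The series above is the Ciotti & Bertin (1999) expansion
    # b_n = 2n - 1/3 + 4/(405 n) + ...; e.g. for sersic_index = 4 it gives
    # b ~ 7.669, the classic de Vaucouleurs value.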
@property
def ellipticity_rescale(self):
return 1.0 - ((1.0 - self.axis_ratio) / 2.0)
@property
def elliptical_effective_radius(self):
"""
The effective_radius of a Sersic light profile is defined as the circular effective radius. This is the \
        radius within which a circular aperture contains half the profile's total integrated light. For elliptical \
systems, this won't robustly capture the light profile's elliptical shape.
The elliptical effective radius instead describes the major-axis radius of the ellipse containing \
half the light, and may be more appropriate for highly flattened systems like disk galaxies.
"""
return self.effective_radius / np.sqrt(self.axis_ratio)
def with_new_normalization(self, normalization):
mass_profile = copy.copy(self)
mass_profile.mass_to_light_ratio = normalization
return mass_profile
class EllSersic(AbstractEllSersic, MassProfileMGE, MassProfileCSE):
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_integral_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
def calculate_deflection_component(npow, index):
sersic_constant = self.sersic_constant
deflection_grid = self.axis_ratio * grid[:, index]
for i in range(grid.shape[0]):
deflection_grid[i] *= (
self.intensity
* self.mass_to_light_ratio
* quad(
self.deflection_func,
a=0.0,
b=1.0,
args=(
grid[i, 0],
grid[i, 1],
npow,
self.axis_ratio,
self.sersic_index,
self.effective_radius,
sersic_constant,
),
)[0]
)
return deflection_grid
deflection_y = calculate_deflection_component(1.0, 0)
deflection_x = calculate_deflection_component(0.0, 1)
return self.rotate_grid_from_reference_frame(
np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T)
)
@staticmethod
def deflection_func(
u, y, x, npow, axis_ratio, sersic_index, effective_radius, sersic_constant
):
eta_u = np.sqrt(axis_ratio) * np.sqrt(
(u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u))))
)
return np.exp(
-sersic_constant
* (((eta_u / effective_radius) ** (1.0 / sersic_index)) - 1)
) / ((1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5))
class SphSersic(EllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
sersic_index: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The Sersic mass profile, the mass profiles of the light profiles that are used to fit and subtract the lens
model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre
intensity
Overall flux intensity normalisation in the light profiles (electrons per second)
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
mass_to_light_ratio
The mass-to-light ratio of the light profile.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
effective_radius=effective_radius,
sersic_index=sersic_index,
mass_to_light_ratio=mass_to_light_ratio,
)
class EllExponential(EllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The EllExponential mass profile, the mass profiles of the light profiles that are used to fit and
subtract the lens model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
mass_to_light_ratio
The mass-to-light ratio of the light profiles
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=intensity,
effective_radius=effective_radius,
sersic_index=1.0,
mass_to_light_ratio=mass_to_light_ratio,
)
class SphExponential(EllExponential):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The Exponential mass profile, the mass profiles of the light profiles that are used to fit and subtract the lens
model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
mass_to_light_ratio
The mass-to-light ratio of the light profiles.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
effective_radius=effective_radius,
mass_to_light_ratio=mass_to_light_ratio,
)
class EllDevVaucouleurs(EllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The EllDevVaucouleurs mass profile, the mass profiles of the light profiles that are used to fit and
subtract the lens model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The radius containing half the light of this profile.
mass_to_light_ratio
The mass-to-light ratio of the light profile.
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=intensity,
effective_radius=effective_radius,
sersic_index=4.0,
mass_to_light_ratio=mass_to_light_ratio,
)
class SphDevVaucouleurs(EllDevVaucouleurs):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The DevVaucouleurs mass profile, the mass profiles of the light profiles that are used to fit and subtract the
lens model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
mass_to_light_ratio
The mass-to-light ratio of the light profiles.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
effective_radius=effective_radius,
mass_to_light_ratio=mass_to_light_ratio,
)
class EllSersicRadialGradient(AbstractEllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
sersic_index: float = 0.6,
mass_to_light_ratio: float = 1.0,
mass_to_light_gradient: float = 0.0,
):
"""
        Set up a Sersic mass and light profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
mass_to_light_ratio
The mass-to-light ratio of the light profile.
mass_to_light_gradient
The mass-to-light radial gradient.
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=intensity,
effective_radius=effective_radius,
sersic_index=sersic_index,
mass_to_light_ratio=mass_to_light_ratio,
)
self.mass_to_light_gradient = mass_to_light_gradient
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_integral_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
def calculate_deflection_component(npow, index):
sersic_constant = self.sersic_constant
deflection_grid = self.axis_ratio * grid[:, index]
for i in range(grid.shape[0]):
deflection_grid[i] *= (
self.intensity
* self.mass_to_light_ratio
* quad(
self.deflection_func,
a=0.0,
b=1.0,
args=(
grid[i, 0],
grid[i, 1],
npow,
self.axis_ratio,
self.sersic_index,
self.effective_radius,
self.mass_to_light_gradient,
sersic_constant,
),
)[0]
)
return deflection_grid
deflection_y = calculate_deflection_component(1.0, 0)
deflection_x = calculate_deflection_component(0.0, 1)
return self.rotate_grid_from_reference_frame(
np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T)
)
@staticmethod
def deflection_func(
u,
y,
x,
npow,
axis_ratio,
sersic_index,
effective_radius,
mass_to_light_gradient,
sersic_constant,
):
eta_u = np.sqrt(axis_ratio) * np.sqrt(
(u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u))))
)
return (
(((axis_ratio * eta_u) / effective_radius) ** -mass_to_light_gradient)
* np.exp(
-sersic_constant
* (((eta_u / effective_radius) ** (1.0 / sersic_index)) - 1)
)
/ ((1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5))
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_from(self, grid: aa.type.Grid2DLike):
"""Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self.convergence_func(self.grid_to_eccentric_radii(grid))
def convergence_func(self, grid_radius: float) -> float:
return (
self.mass_to_light_ratio
* (
((self.axis_ratio * grid_radius) / self.effective_radius)
** -self.mass_to_light_gradient
)
* self.image_2d_via_radii_from(grid_radius)
)
def decompose_convergence_via_mge(self):
radii_min = self.effective_radius / 100.0
radii_max = self.effective_radius * 20.0
def sersic_radial_gradient_2D(r):
return (
self.mass_to_light_ratio
* self.intensity
* (
((self.axis_ratio * r) / self.effective_radius)
** -self.mass_to_light_gradient
)
* np.exp(
-self.sersic_constant
* (((r / self.effective_radius) ** (1.0 / self.sersic_index)) - 1.0)
)
)
return self._decompose_convergence_via_mge(
func=sersic_radial_gradient_2D, radii_min=radii_min, radii_max=radii_max
)
def decompose_convergence_via_cse(self) -> Tuple[List, List]:
"""
        Decompose the convergence of the Sersic profile into cored steep elliptical (cse) profiles.
This decomposition uses the standard 2d profile of a Sersic mass profile.
Parameters
----------
func
The function representing the profile that is decomposed into CSEs.
radii_min:
The minimum radius to fit
radii_max:
The maximum radius to fit
        total_cses
            The number of CSEs used to approximate the input func.
        sample_points: int (should be larger than 'total_cses')
The number of data points to fit
Returns
-------
Tuple[List, List]
            A list of amplitudes and core radii of every cored steep elliptical (cse) profile the mass profile is
            decomposed into.
"""
upper_dex, lower_dex, total_cses, sample_points = cse_settings_from(
effective_radius=self.effective_radius,
sersic_index=self.sersic_index,
sersic_constant=self.sersic_constant,
mass_to_light_gradient=self.mass_to_light_gradient,
)
scaled_effective_radius = self.effective_radius / np.sqrt(self.axis_ratio)
radii_min = scaled_effective_radius / 10.0 ** lower_dex
radii_max = scaled_effective_radius * 10.0 ** upper_dex
def sersic_radial_gradient_2D(r):
return (
self.mass_to_light_ratio
* self.intensity
* (
((self.axis_ratio * r) / scaled_effective_radius)
** -self.mass_to_light_gradient
)
* np.exp(
-self.sersic_constant
* (
((r / scaled_effective_radius) ** (1.0 / self.sersic_index))
- 1.0
)
)
)
return self._decompose_convergence_via_cse_from(
func=sersic_radial_gradient_2D,
radii_min=radii_min,
radii_max=radii_max,
total_cses=total_cses,
sample_points=sample_points,
)
class SphSersicRadialGradient(EllSersicRadialGradient):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
sersic_index: float = 0.6,
mass_to_light_ratio: float = 1.0,
mass_to_light_gradient: float = 0.0,
):
"""
        Set up a Sersic mass and light profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
mass_to_light_ratio
The mass-to-light ratio of the light profile.
mass_to_light_gradient
The mass-to-light radial gradient.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
effective_radius=effective_radius,
sersic_index=sersic_index,
mass_to_light_ratio=mass_to_light_ratio,
mass_to_light_gradient=mass_to_light_gradient,
)
class EllSersicCore(EllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
effective_radius: float = 0.6,
sersic_index: float = 4.0,
radius_break: float = 0.01,
intensity_break: float = 0.05,
gamma: float = 0.25,
alpha: float = 3.0,
mass_to_light_ratio: float = 1.0,
):
"""
The elliptical cored-Sersic light profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
            the light profile's image is compared to, which is expected to be electrons per second).
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
radius_break
The break radius separating the inner power-law (with logarithmic slope gamma) and outer Sersic function.
intensity_break
The intensity at the break radius.
gamma
The logarithmic power-law slope of the inner core profiles
alpha :
Controls the sharpness of the transition between the inner core / outer Sersic profiles.
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=intensity_break,
effective_radius=effective_radius,
sersic_index=sersic_index,
mass_to_light_ratio=mass_to_light_ratio,
)
self.radius_break = radius_break
self.intensity_break = intensity_break
self.alpha = alpha
self.gamma = gamma
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
return self.deflections_2d_via_mge_from(grid=grid)
def image_2d_via_radii_from(self, grid_radii: np.ndarray):
"""
Calculate the intensity of the cored-Sersic light profile on a grid of radial coordinates.
Parameters
----------
grid_radii
The radial distance from the centre of the profile. for each coordinate on the grid.
"""
return np.multiply(
np.multiply(
self.intensity_prime,
np.power(
np.add(
1,
np.power(np.divide(self.radius_break, grid_radii), self.alpha),
),
(self.gamma / self.alpha),
),
),
np.exp(
np.multiply(
-self.sersic_constant,
(
np.power(
np.divide(
np.add(
np.power(grid_radii, self.alpha),
(self.radius_break ** self.alpha),
),
(self.effective_radius ** self.alpha),
),
(1.0 / (self.alpha * self.sersic_index)),
)
),
)
),
)
def decompose_convergence_via_mge(self):
radii_min = self.effective_radius / 50.0
radii_max = self.effective_radius * 20.0
def core_sersic_2D(r):
return (
self.mass_to_light_ratio
* self.intensity_prime
* (1.0 + (self.radius_break / r) ** self.alpha)
** (self.gamma / self.alpha)
* np.exp(
-self.sersic_constant
* (
(r ** self.alpha + self.radius_break ** self.alpha)
/ self.effective_radius ** self.alpha
)
** (1.0 / (self.sersic_index * self.alpha))
)
)
return self._decompose_convergence_via_mge(
func=core_sersic_2D, radii_min=radii_min, radii_max=radii_max
)
@property
def intensity_prime(self):
"""Overall intensity normalisation in the rescaled Core-Sersic light profiles (electrons per second)"""
return (
self.intensity_break
* (2.0 ** (-self.gamma / self.alpha))
* np.exp(
self.sersic_constant
* (
((2.0 ** (1.0 / self.alpha)) * self.radius_break)
/ self.effective_radius
)
** (1.0 / self.sersic_index)
)
)
class SphSersicCore(EllSersicCore):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
effective_radius: float = 0.6,
sersic_index: float = 4.0,
radius_break: float = 0.01,
intensity_break: float = 0.05,
gamma: float = 0.25,
alpha: float = 3.0,
):
"""
        The spherical cored-Sersic light profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
            the light profile's image is compared to, which is expected to be electrons per second).
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
radius_break
The break radius separating the inner power-law (with logarithmic slope gamma) and outer Sersic function.
intensity_break
The intensity at the break radius.
gamma
The logarithmic power-law slope of the inner core profiles
alpha :
Controls the sharpness of the transition between the inner core / outer Sersic profiles.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
effective_radius=effective_radius,
sersic_index=sersic_index,
radius_break=radius_break,
intensity_break=intensity_break,
gamma=gamma,
alpha=alpha,
)
self.radius_break = radius_break
self.intensity_break = intensity_break
self.alpha = alpha
self.gamma = gamma
class EllChameleon(MassProfile, StellarProfile):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
core_radius_0: float = 0.01,
core_radius_1: float = 0.02,
mass_to_light_ratio: float = 1.0,
):
"""
        The elliptical Chameleon mass profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
            the light profile's image is compared to, which is expected to be electrons per second).
core_radius_0 : the core size of the first elliptical cored Isothermal profile.
core_radius_1 : core_radius_0 + core_radius_1 is the core size of the second elliptical cored Isothermal profile.
            We use core_radius_1 here to avoid negative values.
Profile form:
mass_to_light_ratio * intensity *\
(1.0 / Sqrt(x^2 + (y/q)^2 + core_radius_0^2) - 1.0 / Sqrt(x^2 + (y/q)^2 + (core_radius_0 + core_radius_1)**2.0))
"""
super(EllChameleon, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
super(MassProfile, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
self.mass_to_light_ratio = mass_to_light_ratio
self.intensity = intensity
self.core_radius_0 = core_radius_0
self.core_radius_1 = core_radius_1
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
return self.deflections_2d_via_analytic_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_analytic_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Following Eq. (15) and (16), but the parameters are slightly different.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
factor = (
2.0
* self.mass_to_light_ratio
* self.intensity
/ (1 + self.axis_ratio)
* self.axis_ratio
/ np.sqrt(1.0 - self.axis_ratio ** 2.0)
)
core_radius_0 = np.sqrt(
(4.0 * self.core_radius_0 ** 2.0) / (1.0 + self.axis_ratio) ** 2
)
core_radius_1 = np.sqrt(
(4.0 * self.core_radius_1 ** 2.0) / (1.0 + self.axis_ratio) ** 2
)
psi0 = psi_from(
grid=grid, axis_ratio=self.axis_ratio, core_radius=core_radius_0
)
psi1 = psi_from(
grid=grid, axis_ratio=self.axis_ratio, core_radius=core_radius_1
)
deflection_y0 = np.arctanh(
np.divide(
np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0), grid[:, 0]),
np.add(psi0, self.axis_ratio ** 2.0 * core_radius_0),
)
)
deflection_x0 = np.arctan(
np.divide(
np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0), grid[:, 1]),
np.add(psi0, core_radius_0),
)
)
deflection_y1 = np.arctanh(
np.divide(
np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0), grid[:, 0]),
np.add(psi1, self.axis_ratio ** 2.0 * core_radius_1),
)
)
deflection_x1 = np.arctan(
np.divide(
np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0), grid[:, 1]),
np.add(psi1, core_radius_1),
)
)
deflection_y = np.subtract(deflection_y0, deflection_y1)
deflection_x = np.subtract(deflection_x0, deflection_x1)
return self.rotate_grid_from_reference_frame(
np.multiply(factor, np.vstack((deflection_y, deflection_x)).T)
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_from(self, grid: aa.type.Grid2DLike):
"""Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self.convergence_func(self.grid_to_elliptical_radii(grid))
def convergence_func(self, grid_radius: float) -> float:
return self.mass_to_light_ratio * self.image_2d_via_radii_from(grid_radius)
@aa.grid_dec.grid_2d_to_structure
def potential_2d_from(self, grid: aa.type.Grid2DLike):
return np.zeros(shape=grid.shape[0])
def image_2d_via_radii_from(self, grid_radii: np.ndarray):
"""Calculate the intensity of the Chamelon light profile on a grid of radial coordinates.
Parameters
----------
grid_radii
The radial distance from the centre of the profile. for each coordinate on the grid.
"""
axis_ratio_factor = (1.0 + self.axis_ratio) ** 2.0
return np.multiply(
self.intensity / (1 + self.axis_ratio),
np.add(
np.divide(
1.0,
np.sqrt(
np.add(
np.square(grid_radii),
(4.0 * self.core_radius_0 ** 2.0) / axis_ratio_factor,
)
),
),
-np.divide(
1.0,
np.sqrt(
np.add(
np.square(grid_radii),
(4.0 * self.core_radius_1 ** 2.0) / axis_ratio_factor,
)
),
),
),
)
@property
def axis_ratio(self):
axis_ratio = super().axis_ratio
return axis_ratio if axis_ratio < 0.99999 else 0.99999
def with_new_normalization(self, normalization):
mass_profile = copy.copy(self)
mass_profile.mass_to_light_ratio = normalization
return mass_profile
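# The Chameleon profile is the difference of two cored isothermal profiles;
# with suitable core radii it closely approximates a Sersic profile while
# keeping fully analytic deflection angles, which is why it is used as a fast
# stand-in for Sersic mass in lens modelling.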
class SphChameleon(EllChameleon):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
core_radius_0: float = 0.01,
core_radius_1: float = 0.02,
mass_to_light_ratio: float = 1.0,
):
"""
        The spherical Chameleon mass profile.
Profile form:
mass_to_light_ratio * intensity *\
(1.0 / Sqrt(x^2 + (y/q)^2 + core_radius_0^2) - 1.0 / Sqrt(x^2 + (y/q)^2 + (core_radius_0 + core_radius_1)**2.0))
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
            the light profile's image is compared to, which is expected to be electrons per second).
core_radius_0 : the core size of the first elliptical cored Isothermal profile.
core_radius_1 : core_radius_0 + core_radius_1 is the core size of the second elliptical cored Isothermal profile.
            We use core_radius_1 here to avoid negative values.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
core_radius_0=core_radius_0,
core_radius_1=core_radius_1,
mass_to_light_ratio=mass_to_light_ratio,
)
def cse_settings_from(
effective_radius, sersic_index, sersic_constant, mass_to_light_gradient
):
if mass_to_light_gradient > 0.5:
if effective_radius > 0.2:
lower_dex = 6.0
upper_dex = np.min(
[np.log10((18.0 / sersic_constant) ** sersic_index), 1.1]
)
if sersic_index <= 1.2:
total_cses = 50
sample_points = 80
elif sersic_index > 3.8:
total_cses = 40
sample_points = 50
lower_dex = 6.5
else:
total_cses = 30
sample_points = 50
else:
if sersic_index <= 1.2:
upper_dex = 1.0
total_cses = 50
sample_points = 80
lower_dex = 4.5
elif sersic_index > 3.8:
total_cses = 40
sample_points = 50
lower_dex = 6.0
upper_dex = 1.5
else:
upper_dex = 1.1
lower_dex = 6.0
total_cses = 30
sample_points = 50
else:
upper_dex = np.min(
[
np.log10((23.0 / sersic_constant) ** sersic_index),
0.85 - np.log10(effective_radius),
]
)
if (sersic_index <= 0.9) and (sersic_index > 0.8):
total_cses = 50
sample_points = 80
upper_dex = np.log10((18.0 / sersic_constant) ** sersic_index)
lower_dex = 4.3 + np.log10(effective_radius)
elif sersic_index <= 0.8:
total_cses = 50
sample_points = 80
upper_dex = np.log10((16.0 / sersic_constant) ** sersic_index)
lower_dex = 4.0 + np.log10(effective_radius)
elif sersic_index > 3.8:
total_cses = 40
sample_points = 50
lower_dex = 4.5 + np.log10(effective_radius)
else:
lower_dex = 3.5 + np.log10(effective_radius)
total_cses = 30
sample_points = 50
return upper_dex, lower_dex, total_cses, sample_points
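# Worked example of the heuristics above (values traced through the code): with
# mass_to_light_gradient=0, a Sersic with sersic_index=4.0 takes the
# `sersic_index > 3.8` branch, giving 40 CSEs fitted over 50 sample points.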
| 36.372667 | 129 | 0.560604 | 51,804 | 0.949504 | 0 | 0 | 17,582 | 0.322257 | 0 | 0 | 19,667 | 0.360472 |
d2ea517f3b08f633622c54a6e6b06e1d6019f32c | 627 | py | Python | installer/core/terraform/resources/variable.py | Diffblue-benchmarks/pacbot | 4709eb11f87636bc42a52e7a76b740f9d76d156d | [
"Apache-2.0"
]
| 1,165 | 2018-10-05T19:07:34.000Z | 2022-03-28T19:34:27.000Z | installer/core/terraform/resources/variable.py | Diffblue-benchmarks/pacbot | 4709eb11f87636bc42a52e7a76b740f9d76d156d | [
"Apache-2.0"
]
| 334 | 2018-10-10T14:00:41.000Z | 2022-03-19T16:32:08.000Z | installer/core/terraform/resources/variable.py | Diffblue-benchmarks/pacbot | 4709eb11f87636bc42a52e7a76b740f9d76d156d | [
"Apache-2.0"
]
| 268 | 2018-10-05T19:53:25.000Z | 2022-03-31T07:39:47.000Z | from core.terraform.resources import BaseTerraformVariable
class TerraformVariable(BaseTerraformVariable):
"""
Base resource class for Terraform tfvar variable
Attributes:
variable_dict_input (dict/none): Var dict values
available_args (dict): Instance configurations
variable_type (str): Define the variable i.e. terraform list var or terraform dict var etc
"""
variable_dict_input = None
variable_type = None
available_args = {
'variable_name': {'required': True},
'variable_type': {'required': False},
'default_value': {'required': False}
}
| 31.35 | 98 | 0.6874 | 565 | 0.901116 | 0 | 0 | 0 | 0 | 0 | 0 | 367 | 0.585327 |
d2eb169f57649820eef340c3a134f871d837dd00 | 887 | py | Python | bfs.py | mpHarm88/Algorithms-and-Data-Structures-In-Python | a0689e57e0895c375715f39d078704e6faf72f0e | [
"MIT"
]
| null | null | null | bfs.py | mpHarm88/Algorithms-and-Data-Structures-In-Python | a0689e57e0895c375715f39d078704e6faf72f0e | [
"MIT"
]
| null | null | null | bfs.py | mpHarm88/Algorithms-and-Data-Structures-In-Python | a0689e57e0895c375715f39d078704e6faf72f0e | [
"MIT"
]
| null | null | null |
class Node(object):
def __init__(self, name):
self.name = name;
self.adjacencyList = [];
self.visited = False;
self.predecessor = None;
class BreadthFirstSearch(object):
def bfs(self, startNode):
queue = [];
queue.append(startNode);
startNode.visited = True;
# BFS -> queue DFS --> stack BUT usually we implement it with recursion !!!
while queue:
actualNode = queue.pop(0);
print("%s " % actualNode.name);
for n in actualNode.adjacencyList:
if not n.visited:
n.visited = True;
queue.append(n);
node1 = Node("A");
node2 = Node("B");
node3 = Node("C");
node4 = Node("D");
node5 = Node("E");
node1.adjacencyList.append(node2);
node1.adjacencyList.append(node3);
node2.adjacencyList.append(node4);
node4.adjacencyList.append(node5);
bfs = BreadthFirstSearch();
bfs.bfs(node1); | 21.634146 | 83 | 0.626832 | 578 | 0.651635 | 0 | 0 | 0 | 0 | 0 | 0 | 101 | 0.113867 |
d2ed017d8f6bd12bbaded9891125e05125930fde | 3,932 | py | Python | supervisor/dbus/network/connection.py | peddamat/home-assistant-supervisor-test | 5da55772bcb2db3c6d8432cbc08e2ac9fbf480c4 | [
"Apache-2.0"
]
| 1 | 2022-02-08T21:32:33.000Z | 2022-02-08T21:32:33.000Z | supervisor/dbus/network/connection.py | peddamat/home-assistant-supervisor-test | 5da55772bcb2db3c6d8432cbc08e2ac9fbf480c4 | [
"Apache-2.0"
]
| 310 | 2020-03-12T16:02:13.000Z | 2022-03-31T06:01:49.000Z | supervisor/dbus/network/connection.py | peddamat/home-assistant-supervisor-test | 5da55772bcb2db3c6d8432cbc08e2ac9fbf480c4 | [
"Apache-2.0"
]
| 2 | 2021-09-22T00:13:58.000Z | 2021-09-22T15:06:27.000Z | """Connection object for Network Manager."""
from ipaddress import ip_address, ip_interface
from typing import Optional
from ...const import ATTR_ADDRESS, ATTR_PREFIX
from ...utils.gdbus import DBus
from ..const import (
DBUS_ATTR_ADDRESS_DATA,
DBUS_ATTR_CONNECTION,
DBUS_ATTR_GATEWAY,
DBUS_ATTR_ID,
DBUS_ATTR_IP4CONFIG,
DBUS_ATTR_IP6CONFIG,
DBUS_ATTR_NAMESERVER_DATA,
DBUS_ATTR_NAMESERVERS,
DBUS_ATTR_STATE,
DBUS_ATTR_TYPE,
DBUS_ATTR_UUID,
DBUS_NAME_CONNECTION_ACTIVE,
DBUS_NAME_IP4CONFIG,
DBUS_NAME_IP6CONFIG,
DBUS_NAME_NM,
DBUS_OBJECT_BASE,
)
from ..interface import DBusInterfaceProxy
from .configuration import IpConfiguration
class NetworkConnection(DBusInterfaceProxy):
"""NetworkConnection object for Network Manager."""
def __init__(self, object_path: str) -> None:
"""Initialize NetworkConnection object."""
self.object_path = object_path
self.properties = {}
self._ipv4: Optional[IpConfiguration] = None
self._ipv6: Optional[IpConfiguration] = None
@property
def id(self) -> str:
"""Return the id of the connection."""
return self.properties[DBUS_ATTR_ID]
@property
def type(self) -> str:
"""Return the type of the connection."""
return self.properties[DBUS_ATTR_TYPE]
@property
def uuid(self) -> str:
"""Return the uuid of the connection."""
return self.properties[DBUS_ATTR_UUID]
@property
def state(self) -> int:
"""Return the state of the connection."""
return self.properties[DBUS_ATTR_STATE]
@property
def setting_object(self) -> int:
"""Return the connection object path."""
return self.properties[DBUS_ATTR_CONNECTION]
@property
def ipv4(self) -> Optional[IpConfiguration]:
"""Return a ip4 configuration object for the connection."""
return self._ipv4
@property
def ipv6(self) -> Optional[IpConfiguration]:
"""Return a ip6 configuration object for the connection."""
return self._ipv6
async def connect(self) -> None:
"""Get connection information."""
self.dbus = await DBus.connect(DBUS_NAME_NM, self.object_path)
self.properties = await self.dbus.get_properties(DBUS_NAME_CONNECTION_ACTIVE)
# IPv4
if self.properties[DBUS_ATTR_IP4CONFIG] != DBUS_OBJECT_BASE:
ip4 = await DBus.connect(DBUS_NAME_NM, self.properties[DBUS_ATTR_IP4CONFIG])
ip4_data = await ip4.get_properties(DBUS_NAME_IP4CONFIG)
self._ipv4 = IpConfiguration(
ip_address(ip4_data[DBUS_ATTR_GATEWAY])
if ip4_data.get(DBUS_ATTR_GATEWAY)
else None,
[
ip_address(nameserver[ATTR_ADDRESS])
for nameserver in ip4_data.get(DBUS_ATTR_NAMESERVER_DATA, [])
],
[
ip_interface(f"{address[ATTR_ADDRESS]}/{address[ATTR_PREFIX]}")
for address in ip4_data.get(DBUS_ATTR_ADDRESS_DATA, [])
],
)
# IPv6
if self.properties[DBUS_ATTR_IP6CONFIG] != DBUS_OBJECT_BASE:
ip6 = await DBus.connect(DBUS_NAME_NM, self.properties[DBUS_ATTR_IP6CONFIG])
ip6_data = await ip6.get_properties(DBUS_NAME_IP6CONFIG)
self._ipv6 = IpConfiguration(
ip_address(ip6_data[DBUS_ATTR_GATEWAY])
if ip6_data.get(DBUS_ATTR_GATEWAY)
else None,
[
ip_address(bytes(nameserver))
for nameserver in ip6_data.get(DBUS_ATTR_NAMESERVERS)
],
[
ip_interface(f"{address[ATTR_ADDRESS]}/{address[ATTR_PREFIX]}")
for address in ip6_data.get(DBUS_ATTR_ADDRESS_DATA, [])
],
)
| 33.606838 | 88 | 0.630214 | 3,232 | 0.821974 | 0 | 0 | 977 | 0.248474 | 1,830 | 0.465412 | 597 | 0.151831 |
d2efe900f19b7e3838e3eb40b9017e440e296e62 | 4,969 | py | Python | quark/databricks.py | mistsys/quark | 7baef5e18d5b9d12384a92487151337878958f36 | [
"Apache-2.0"
]
| 2 | 2019-02-27T20:51:30.000Z | 2021-05-26T02:35:29.000Z | quark/databricks.py | mistsys/quark | 7baef5e18d5b9d12384a92487151337878958f36 | [
"Apache-2.0"
]
| null | null | null | quark/databricks.py | mistsys/quark | 7baef5e18d5b9d12384a92487151337878958f36 | [
"Apache-2.0"
]
| 1 | 2020-05-30T22:59:16.000Z | 2020-05-30T22:59:16.000Z | from __future__ import print_function, absolute_import
from .beats import Beat
from StringIO import StringIO
import sys
import os
import json
import urllib
import webbrowser
try:
import pycurl
except:
print("Need pycurl dependency to use qubole as the deployment platform. Run pip install pycurl in your virtualenv and try this again.")
sys.exit(1)
class Databricks:
def __init__(self, config, options):
self.config = config
self.options = options
projectsDir = self.config.get(self.options.env, "projects_dir")
schemasDir = os.path.join(projectsDir, "schemas")
schemasFile = os.path.join(schemasDir, "beats.schema.json")
if os.path.exists(schemasFile):
self.beats = Beat(file(schemasFile).read())
def _q_config(self,item):
return self.config.get(self.options.env, "databricks-{}".format(item))
def _do_request(self, method, path, base_url=None, **data):
# Uh, only using pycurl because that was the example that was around, will port to requests someday
# it's supposed to be faster, so oh well
c = pycurl.Curl()
#auth_token = self._q_config("auth_token")
username = self._q_config("username")
password = self._q_config("password")
if base_url == None:
base_url = self.config.get(self.options.env, "master")
url = base_url+ "/" + path
buffer = StringIO()
c.setopt(c.WRITEDATA, buffer)
print("Using", url, file=sys.stderr)
c.setopt(pycurl.URL, url)
c.setopt(pycurl.HTTPHEADER, ['Accept:application/json'])
#c.setopt(pycurl.HTTPHEADER, ["X-AUTH-TOKEN: "+ auth_token, "Content-Type:application/json", "Accept: application/json, text/plain"])
## Note: Only POST and GET have been tested...
## It's not very obvious with pycurl to do this properly with PUT and DELETE
## Review this if ever needed to add these methods
## http://www.programcreek.com/python/example/2132/pycurl.HTTPHEADER
if method.lower() == "post":
c.setopt(pycurl.POST,1)
post_data = urllib.urlencode(data)
print(post_data)
c.setopt(pycurl.POSTFIELDS, post_data)
elif method.lower() == "get":
c.setopt(pycurl.HTTPGET, 1)
elif method.lower() == "delete":
c.setopt(pycurl.DELETE, 1)
elif method.lower() == "put":
#c.setopt(pycurl.UPLOAD, 1)
post_data = urllib.urlencode(data)
c.setopt(pycurl.CUSTOMREQUEST, "PUT")
c.setopt(pycurl.POSTFIELDS, post_data)
elif method.lower() == "head":
c.setopt(pycurl.NOBODY,1)
else:
print("Unknown method ", method)
sys.exit(1)
if username != None and password != None:
c.setopt(pycurl.USERPWD, '%s:%s' % (username, password))
c.perform()
c.close()
body = buffer.getvalue()
return body
def _get_cluster_id(self):
cluster_id = self._q_config("cluster_id")
assert cluster_id is not None
return cluster_id
def invoke_task(self,name, *args):
if args == (None,):
getattr(self,name)()
else:
getattr(self,name)(*args)
def deploy(self, asset_path, *args):
# Use multipart upload to libraries/upload
print("TBD")
def logs(self, job_id):
print("TBD")
def status(self, job_id):
print("TBD")
def notebook(self):
print("TBD")
def _get_clusters(self):
resp_body = self._do_request("GET", "clusters/list")
j = json.loads(resp_body)
return j
def describecluster(self, name):
clusters = self._get_clusters()
for cluster in clusters:
if cluster['name'] == name:
print(cluster)
def lsclusters(self):
clusters = self._get_clusters()
if len(clusters) == 0:
print("No clusters created")
for cluster in clusters:
print(cluster)
def mkcluster(self, name, memory_gb=6, use_spot=True):
resp_body = self._do_request("POST", "clusters/create", name=name,
memoryGB=memory_gb,
useSpot=use_spot
)
print(resp_body)
def lslibraries(self):
resp_body = self._do_request("GET", "libraries/list")
j = json.loads(resp_body)
print(j)
def describelibraries(self):
resp_body = self._do_request("GET", "libraries/status")
j = json.loads(resp_body)
print(j)
def rmlibrary(self, library_id):
resp_body = self._do_request("DELETE", "clusters/create", libraryId=library_id)
print(resp_body)
def attachlibrary(self, library_id, cluster_id):
print("TBD")
def schedule(self, asset_path, schedule_id, schedule_iso8601):
print("TBD")
| 33.126667 | 141 | 0.602737 | 4,606 | 0.926947 | 0 | 0 | 0 | 0 | 0 | 0 | 1,111 | 0.223586 |
d2efeac4ab430fe4ec37a8045db0d9bc80676c48 | 9,658 | py | Python | appimagebuilder/builder/deploy/apt/venv.py | mssalvatore/appimage-builder | 2ecb7973cedfff9d03a21258419e515c48cafe84 | ["MIT"] | null | null | null | appimagebuilder/builder/deploy/apt/venv.py | mssalvatore/appimage-builder | 2ecb7973cedfff9d03a21258419e515c48cafe84 | ["MIT"] | null | null | null | appimagebuilder/builder/deploy/apt/venv.py | mssalvatore/appimage-builder | 2ecb7973cedfff9d03a21258419e515c48cafe84 | ["MIT"] | null | null | null |
# Copyright 2020 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import fnmatch
import hashlib
import logging
import os
import subprocess
from pathlib import Path
from urllib import request
from appimagebuilder.common import shell
from .package import Package
DEPENDS_ON = ["dpkg-deb", "apt-get", "apt-key", "fakeroot", "apt-cache"]
class Venv:
def __init__(
self,
base_path: str,
sources: [str],
keys: [str],
architectures: [],
user_options: {} = None,
):
self.logger = logging.getLogger("apt")
self._deps = shell.resolve_commands_paths(DEPENDS_ON)
self.sources = sources
self.keys = keys
self.architectures = architectures
self.user_options = user_options
self._generate_paths(base_path)
self._write_apt_conf(user_options, architectures)
self._write_sources_list(sources)
self._write_keys(keys)
self._write_dpkg_arch(architectures)
def _generate_paths(self, base_path):
self._base_path = Path(base_path).absolute()
self._apt_conf_path = self._base_path / "apt.conf"
self._apt_conf_parts_path = self._base_path / "apt.conf.d"
self._apt_sources_list_path = self._base_path / "sources.list"
self._apt_sources_list_parts_path = self._base_path / "sources.list.d"
self._apt_preferences_parts_path = self._base_path / "preferences.d"
self._apt_key_parts_path = self._base_path / "keys"
self._dpkg_path = self._base_path / "dpkg"
self._dpkg_status_path = self._dpkg_path / "status"
self._apt_archives_path = self._base_path / "archives"
self._base_path.mkdir(parents=True, exist_ok=True)
self._apt_conf_parts_path.mkdir(parents=True, exist_ok=True)
self._apt_preferences_parts_path.mkdir(parents=True, exist_ok=True)
self._apt_key_parts_path.mkdir(parents=True, exist_ok=True)
self._dpkg_path.mkdir(parents=True, exist_ok=True)
self._dpkg_status_path.touch(exist_ok=True)
def _write_apt_conf(self, user_options, architectures: [str]):
options = {
"Dir": self._base_path,
"Dir::State": self._base_path,
"Dir::Cache": self._base_path,
"Dir::Etc::Main": self._apt_conf_path,
"Dir::Etc::Parts": self._apt_conf_parts_path,
"Dir::Etc::SourceList": self._apt_sources_list_path,
"Dir::Etc::SourceListParts": self._apt_sources_list_parts_path,
"Dir::Etc::PreferencesParts": self._apt_preferences_parts_path,
"Dir::Etc::TrustedParts": self._apt_key_parts_path,
"Dir::State::status": self._dpkg_status_path,
"Dir::Ignore-Files-Silently": False,
"APT::Install-Recommends": False,
"APT::Install-Suggests": False,
"APT::Immediate-Configure": False,
"APT::Architecture": architectures[0],
"APT::Architectures": architectures,
"Acquire::Languages": "none",
}
if user_options:
options.update(user_options)
# write apt.conf
with open(self._apt_conf_path, "w") as f:
for k, v in options.items():
if isinstance(v, str):
f.write('%s "%s";\n' % (k, v))
continue
if isinstance(v, list):
f.write("%s {" % k)
for sv in v:
f.write('"%s"; ' % sv)
f.write("}\n")
continue
f.write("%s %s;\n" % (k, v))
def _write_sources_list(self, sources):
with open(self._apt_sources_list_path, "w") as f:
for line in sources:
f.write("%s\n" % line)
def _write_keys(self, keys: [str]):
for key_url in keys:
key_url_hash = hashlib.md5(key_url.encode()).hexdigest()
key_path = os.path.join(self._apt_key_parts_path, "%s.asc" % key_url_hash)
if not os.path.exists(key_path):
self.logger.info("Download key file: %s" % key_url)
request.urlretrieve(key_url, key_path)
def _get_environment(self):
env = os.environ.copy()
env["APT_CONFIG"] = self._apt_conf_path
env["DEBIAN_FRONTEND"] = "noninteractive"
return env
def set_installed_packages(self, packages):
with open(self._dpkg_status_path, "w") as f:
for package in packages:
f.write(
"Package: %s\n"
"Status: install ok installed\n"
"Version: %s\n"
"Architecture: %s\n"
"\n" % (package.name, package.version, package.arch)
)
def _run_apt_cache_show(self, package_names: [str]):
if not package_names:
return None
command = "{apt-cache} show %s" % " ".join(package_names)
command = command.format(**self._deps)
self.logger.debug(command)
_proc = subprocess.run(
command, stdout=subprocess.PIPE, shell=True, env=self._get_environment()
)
shell.assert_successful_result(_proc)
return _proc
    def update(self) -> None:
        # Use the resolved apt-get path, consistent with the other commands.
        command = "{apt-get} update".format(**self._deps)
        self.logger.info(command)
        _proc = subprocess.run(command, shell=True, env=self._get_environment())
        shell.assert_successful_result(_proc)
def search_names(self, patterns: [str]):
output = self._run_apt_cache_pkgnames()
packages = output.stdout.decode("utf-8").splitlines()
filtered_packages = []
for pattern in patterns:
filtered_packages.extend(fnmatch.filter(packages, pattern))
return filtered_packages
def _run_apt_cache_pkgnames(self):
command = "{apt-cache} pkgnames".format(**self._deps)
self.logger.debug(command)
proc = subprocess.run(
command, stdout=subprocess.PIPE, shell=True, env=self._get_environment()
)
shell.assert_successful_result(proc)
return proc
def resolve_packages(self, packages: [Package]) -> [Package]:
packages_str = [str(package) for package in packages]
output = self._run_apt_get_install_download_only(packages_str)
        stderr_str = output.stderr.decode("utf-8")
        installed_packages = []
        for line in stderr_str.splitlines():
if line.startswith("Dequeuing") and line.endswith(".deb"):
file_path = Path(line.split(" ")[1])
installed_packages.append(Package.from_file_path(file_path))
return installed_packages
def _run_apt_get_install_download_only(self, packages: [str]):
command = (
"{apt-get} install -y --no-install-recommends --download-only -o Debug::pkgAcquire=1 "
"{packages}".format(**self._deps, packages=" ".join(packages))
)
        self.logger.debug(command)
        _proc = subprocess.run(
            command,
            stderr=subprocess.PIPE,
            shell=True,
            env=self._get_environment(),
        )
        shell.assert_successful_result(_proc)
        return _proc
def resolve_archive_paths(self, packages: [Package]):
paths = [
self._apt_archives_path / pkg.get_expected_file_name() for pkg in packages
]
return paths
def extract_package(self, package, target):
path = self._apt_archives_path / package.get_expected_file_name()
command = "{dpkg-deb} -x {archive} {directory}".format(
**self._deps, archive=path, directory=target
)
self.logger.debug(command)
output = subprocess.run(command, shell=True, env=self._get_environment())
shell.assert_successful_result(output)
def _write_dpkg_arch(self, architectures: [str]):
with open(self._dpkg_path / "arch", "w") as f:
for arch in architectures:
f.write("%s\n" % arch)
def search_packages(self, names):
packages = []
pkg_name = None
pkg_version = None
pkg_arch = None
output = self._run_apt_cache_show(names)
for line in output.stdout.decode("utf-8").splitlines():
if line.startswith("Package:"):
pkg_name = line.split(" ", maxsplit=2)[1]
if line.startswith("Architecture"):
pkg_arch = line.split(" ", maxsplit=2)[1]
if line.startswith("Version:"):
pkg_version = line.split(" ", maxsplit=2)[1]
# empty lines indicate the end of a package description block
if not line and pkg_name:
packages.append(Package(pkg_name, pkg_version, pkg_arch))
pkg_name = None
pkg_arch = None
pkg_version = None
        # flush the final block, which may not be terminated by an empty line
if pkg_name:
packages.append(Package(pkg_name, pkg_version, pkg_arch))
return packages
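# --- Hedged usage sketch (illustration only; not part of the original module) ---
# Assumes a Debian-based host where the commands in DEPENDS_ON resolve and the
# network is reachable; the base path and sources line below are placeholders.
def _example_usage():
    venv = Venv(
        base_path="/tmp/appimage-builder-apt",
        sources=["deb http://deb.debian.org/debian bullseye main"],
        keys=[],
        architectures=["amd64"],
    )
    venv.update()  # runs apt-get update against the isolated apt configuration
    print(venv.search_names(["coreutils", "libc6*"]))  # fnmatch-filtered names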
| 37.003831 | 98 | 0.610685 | 8,746 | 0.905571 | 0 | 0 | 0 | 0 | 0 | 0 | 1,782 | 0.18451 |
d2f040bef7df7c165fa2e1f80723815e7bebcf83 | 11,453 | py | Python | tests/test_assertion_method.py | katakumpo/nicepy | fa2b0bae8e4b66d92e756687ded58d355c444eca | ["MIT"] | null | null | null | tests/test_assertion_method.py | katakumpo/nicepy | fa2b0bae8e4b66d92e756687ded58d355c444eca | ["MIT"] | null | null | null | tests/test_assertion_method.py | katakumpo/nicepy | fa2b0bae8e4b66d92e756687ded58d355c444eca | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import logging
from unittest import TestCase
from nicepy import assert_equal_struct, multi_assert_equal_struct, pretty_repr, permuteflat
log = logging.getLogger(__name__)
class Foo(object):
def __init__(self, **kwargs):
        for k, v in kwargs.items():
self[k] = v
def __setitem__(self, name, value):
# helper to add attributes per self[attr] = value -> self.attr == value
setattr(self, name, value)
def __repr__(self):
return pretty_repr(self, ignore_own_repr=True)
class TestAssertEqualStruct(TestCase):
def run_assert(self, args, expected_msg=None):
log.debug('args: %s' % str(args))
msg = None
try:
assert_equal_struct(*args)
except AssertionError as e:
msg = e.message
log.debug('msg: %s' % msg)
self.assertEqual(msg, expected_msg)
def check(self, actual_classes=(list,),
expected_classes=(list,),
expected_obj=None, expected_kwargs={},
working_obj=None, working_kwargs={},
failing_obj=None, failing_kwargs={},
failure_msg=None,
namepaths=None,
expected_namepaths=None):
for actual_cls, expected_cls in permuteflat(actual_classes, expected_classes):
expected_obj = expected_obj or expected_cls(**expected_kwargs)
working_obj = working_obj or actual_cls(**working_kwargs)
self.run_assert((working_obj, expected_obj, namepaths, expected_namepaths))
failing_obj = failing_obj or actual_cls(**failing_kwargs)
self.run_assert((failing_obj, expected_obj, namepaths, expected_namepaths),
failure_msg)
def test_directly(self):
"""
*assert_equal_struct* can compare similar flat structures directly.
"""
self.check(actual_classes=(dict, Foo),
expected_classes=(dict, Foo),
expected_kwargs=dict(x=1),
working_kwargs=dict(x=1, y=2),
failing_kwargs=dict(x=3, y=2),
failure_msg='actual values != expected values:\n\tx: 3 != 1')
self.check(expected_obj=[1],
working_obj=[1, 2],
failing_obj=[3, 2],
failure_msg='actual values != expected values:\n\t0: 3 != 1')
def test_with_namepaths(self):
"""
With namepaths *assert_equal_struct* can compare similar structures and structures with
lists of values in full depth.
This ignores all additional paths at the expected object.
"""
self.check(actual_classes=(dict, Foo),
expected_classes=(dict, Foo),
expected_kwargs=dict(x=1, y=4),
namepaths=['x'],
working_kwargs=dict(x=1, y=2),
failing_kwargs=dict(x=3, y=2),
failure_msg='actual values != expected values:\n\tx: 3 != 1')
self.check(actual_classes=(dict, Foo),
expected_obj=[1, 4],
namepaths=['x'],
working_kwargs=dict(x=1, y=2),
failing_kwargs=dict(x=3, y=2),
failure_msg='actual values != expected values:\n\tx: 3 != 1')
self.check(expected_obj=[1, 4],
namepaths=['0'],
working_obj=[1, 2],
failing_obj=[3, 2],
failure_msg='actual values != expected values:\n\t0: 3 != 1')
def test_with_namepaths_and_expected_namepaths(self):
"""
        As when using namepaths alone, except that the values may live at other paths
        on the expected object; they are compared via expected_namepaths, given in the
        same order as namepaths.
"""
self.check(actual_classes=(dict, Foo),
expected_classes=(dict, Foo),
expected_kwargs=dict(a=1, b=4),
namepaths=['x'],
expected_namepaths=['a'],
working_kwargs=dict(x=1, y=2),
failing_kwargs=dict(x=3, y=2),
failure_msg='actual values != expected values:\n\tx != a: 3 != 1')
self.check(actual_classes=(dict, Foo),
expected_obj=[4, 1],
namepaths=['x'],
expected_namepaths=['1'],
working_kwargs=dict(x=1, y=2),
failing_kwargs=dict(x=3, y=2),
failure_msg='actual values != expected values:\n\tx != 1: 3 != 1')
self.check(expected_obj=[4, 1],
namepaths=['0'],
expected_namepaths=['1'],
working_obj=[1, 2],
failing_obj=[3, 2],
failure_msg='actual values != expected values:\n\t0 != 1: 3 != 1')
class TestMultiAssertEqualStruct(TestCase):
def run_assert(self, args, expected_msg=None):
log.debug('args: %s' % str(args))
msg = None
try:
multi_assert_equal_struct(*args)
except AssertionError as e:
msg = e.message
log.debug('msg: %s' % msg)
self.assertEqual(msg, expected_msg)
def check(self, actual_classes=(list,),
expected_classes=(list,),
expected_objs=None, expected_kwargs_list=[],
working_objs=None, working_kwargs_list=[],
failing_objs=None, failing_kwargs_list=[],
failure_msg=None,
namepaths=None,
expected_namepaths=None):
for actual_cls1, actual_cls2, expected_cls1, expected_cls2 in \
permuteflat(*([actual_classes] * 2 + [expected_classes] * 2)):
if not expected_objs:
expected_objs = (expected_cls1(**expected_kwargs_list[0]),
expected_cls2(**expected_kwargs_list[1]))
if not working_objs:
working_objs = (actual_cls1(**working_kwargs_list[0]),
actual_cls2(**working_kwargs_list[1]))
self.run_assert((working_objs, expected_objs, namepaths, expected_namepaths))
if not failing_objs:
failing_objs = (actual_cls1(**failing_kwargs_list[0]),
actual_cls2(**failing_kwargs_list[1]))
self.run_assert((failing_objs, expected_objs, namepaths, expected_namepaths),
failure_msg)
def test_directly(self):
"""
*multi_assert_equal_struct* can compare multiple similar flat structures directly.
"""
self.check(actual_classes=(dict, Foo),
expected_classes=(dict, Foo),
expected_kwargs_list=[dict(x=1), dict(x=2, y=3)],
working_kwargs_list=[dict(x=1, y=0), dict(x=2, y=3)],
failing_kwargs_list=[dict(x=4, y=0), dict(x=2, y=5)],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\tx: 4 != 1\n'\
'Index 1: actual values != expected values:\n\ty: 5 != 3')
self.check(expected_objs=[[1], [2, 3]],
working_objs=[[1, 0], [2, 3]],
failing_objs=[[4, 0], [2, 5]],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\t0: 4 != 1\n'\
'Index 1: actual values != expected values:\n\t1: 5 != 3')
def test_with_namepaths(self):
"""
With namepaths *multi_assert_equal_struct* can compare multiple similar structures and
structures with lists of values in full depth.
This ignores all additional paths at the expected objects.
"""
self.check(actual_classes=(dict, Foo),
expected_classes=(dict, Foo),
expected_kwargs_list=[dict(x=1), dict(x=2, y=3)],
working_kwargs_list=[dict(x=1, y=0), dict(x=2)],
failing_kwargs_list=[dict(x=4, y=0), dict(x=5)],
namepaths=['x'],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\tx: 4 != 1\n'\
'Index 1: actual values != expected values:\n\tx: 5 != 2')
self.check(actual_classes=(dict, Foo),
expected_objs=[[1], [2, 0]],
working_kwargs_list=[dict(x=1, y=5), dict(x=2)],
failing_kwargs_list=[dict(x=3, y=5), dict(x=4)],
namepaths=['x'],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\tx: 3 != 1\n'\
'Index 1: actual values != expected values:\n\tx: 4 != 2')
self.check(expected_objs=[[1], [2, 3]],
working_objs=[[1, 0], [2, 0]],
failing_objs=[[4, 0], [5, 0]],
namepaths=['0'],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\t0: 4 != 1\n'\
'Index 1: actual values != expected values:\n\t0: 5 != 2')
def test_with_namepaths_and_expected_namepaths(self):
"""
        As when using namepaths alone, except that the values may live at other paths
        on the expected objects; they are compared via expected_namepaths, given in the
        same order as namepaths.
"""
self.check(actual_classes=(dict, Foo),
expected_classes=(dict, Foo),
expected_kwargs_list=[dict(y=1), dict(y=2, x=3)],
working_kwargs_list=[dict(x=1, y=0), dict(x=2)],
failing_kwargs_list=[dict(x=4, y=0), dict(x=5)],
namepaths=['x'],
expected_namepaths=['y'],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\tx != y: 4 != 1\n'\
'Index 1: actual values != expected values:\n\tx != y: 5 != 2')
self.check(actual_classes=(dict, Foo),
expected_objs=[[0, 1], [0, 2]],
working_kwargs_list=[dict(x=1, y=5), dict(x=2)],
failing_kwargs_list=[dict(x=3, y=5), dict(x=4)],
namepaths=['x'],
expected_namepaths=['1'],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\tx != 1: 3 != 1\n'\
'Index 1: actual values != expected values:\n\tx != 1: 4 != 2')
self.check(expected_objs=[[1, 2], [3, 4]],
working_objs=[[2, 1], [4, 3]],
failing_objs=[[2, 5], [6, 3]],
namepaths=['0', '1'],
expected_namepaths=['1', '0'],
failure_msg='Multi-assert failed:\n' \
'Index 0: actual values != expected values:\n\t1 != 0: 5 != 1\n'\
'Index 1: actual values != expected values:\n\t0 != 1: 6 != 4')
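# --- Hedged sketch (illustration only): assert_equal_struct outside the TestCase
# machinery, mirroring the docstrings above. With namepaths given, extra keys on
# either side are ignored; the literal values below are made up.
if __name__ == '__main__':
    assert_equal_struct({'x': 1, 'y': 2}, {'x': 1, 'z': 9}, ['x'])  # passes
    try:
        assert_equal_struct({'x': 3}, {'x': 1}, ['x'])
    except AssertionError as e:
        print(e.message)  # actual values != expected values: x: 3 != 1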
| 42.895131 | 100 | 0.516546 | 11,248 | 0.982101 | 0 | 0 | 0 | 0 | 0 | 0 | 2,782 | 0.242906 |
d2f17cb8a3f0726fbc17e46d02f025d7c4a03f17 | 4,322 | py | Python | usaspending_api/awards/migrations/0074_auto_20170320_1607.py | toolness/usaspending-api | ed9a396e20a52749f01f43494763903cc371f9c2 | ["CC0-1.0"] | 1 | 2021-06-17T05:09:00.000Z | 2021-06-17T05:09:00.000Z | usaspending_api/awards/migrations/0074_auto_20170320_1607.py | toolness/usaspending-api | ed9a396e20a52749f01f43494763903cc371f9c2 | ["CC0-1.0"] | null | null | null | usaspending_api/awards/migrations/0074_auto_20170320_1607.py | toolness/usaspending-api | ed9a396e20a52749f01f43494763903cc371f9c2 | ["CC0-1.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-03-20 16:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('awards', '0073_auto_20170320_1455'),
]
operations = [
migrations.AlterField(
model_name='award',
name='fain',
            field=models.CharField(blank=True, db_index=True, help_text='An identification code assigned to each financial assistance award for tracking purposes. The FAIN is tied to that award (and all future modifications to that award) throughout the award’s life. Each FAIN is assigned by an agency. Within an agency, FAIN are unique: each new award must be issued a new FAIN. FAIN stands for Federal Award Identification Number, though the digits are letters, not numbers.', max_length=30, null=True),
),
migrations.AlterField(
model_name='award',
name='period_of_performance_current_end_date',
field=models.DateField(db_index=True, help_text='The current, not original, period of performance end date', null=True, verbose_name='End Date'),
),
migrations.AlterField(
model_name='award',
name='period_of_performance_start_date',
field=models.DateField(db_index=True, help_text='The start date for the period of performance', null=True, verbose_name='Start Date'),
),
migrations.AlterField(
model_name='award',
name='piid',
field=models.CharField(blank=True, db_index=True, help_text='Procurement Instrument Identifier - A unique identifier assigned to a federal contract, purchase order, basic ordering agreement, basic agreement, and blanket purchase agreement. It is used to track the contract, and any modifications or transactions related to it. After October 2017, it is between 13 and 17 digits, both letters and numbers.', max_length=50, null=True),
),
migrations.AlterField(
model_name='award',
name='potential_total_value_of_award',
field=models.DecimalField(blank=True, db_index=True, decimal_places=2, help_text='The sum of the potential_value_of_award from associated transactions', max_digits=20, null=True, verbose_name='Potential Total Value of Award'),
),
migrations.AlterField(
model_name='award',
name='total_obligation',
field=models.DecimalField(db_index=True, decimal_places=2, help_text='The amount of money the government is obligated to pay for the award', max_digits=15, null=True, verbose_name='Total Obligated'),
),
migrations.AlterField(
model_name='award',
name='total_outlay',
field=models.DecimalField(db_index=True, decimal_places=2, help_text='The total amount of money paid out for this award', max_digits=15, null=True),
),
migrations.AlterField(
model_name='award',
name='type',
            field=models.CharField(choices=[('U', 'Unknown Type'), ('02', 'Block Grant'), ('03', 'Formula Grant'), ('04', 'Project Grant'), ('05', 'Cooperative Agreement'), ('06', 'Direct Payment for Specified Use'), ('07', 'Direct Loan'), ('08', 'Guaranteed/Insured Loan'), ('09', 'Insurance'), ('10', 'Direct Payment unrestricted'), ('11', 'Other'), ('A', 'BPA Call'), ('B', 'Purchase Order'), ('C', 'Delivery Order'), ('D', 'Definitive Contract')], db_index=True, default='U', help_text='The mechanism used to distribute funding. The federal government can distribute funding in several forms. These award types include contracts, grants, loans, and direct payments.', max_length=5, null=True, verbose_name='Award Type'),
),
migrations.AlterField(
model_name='award',
name='uri',
field=models.CharField(blank=True, db_index=True, help_text='The uri of the award', max_length=70, null=True),
),
migrations.AlterField(
model_name='transaction',
name='federal_action_obligation',
field=models.DecimalField(blank=True, db_index=True, decimal_places=2, help_text='The obligation of the federal government for this transaction', max_digits=20, null=True),
),
]
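# Applying this migration follows the standard Django workflow (illustrative):
#   python manage.py migrate awards 0074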
| 65.484848 | 726 | 0.672837 | 4,166 | 0.96346 | 0 | 0 | 0 | 0 | 0 | 0 | 2,083 | 0.48173 |
d2f1e1f4951c3e0fd8684c1a41e6225fa4a4907c | 100 | py | Python | COVIDSafepassage/passsystem/apps.py | VICS-CORE/safepassage_server | 58bc04dbfa55430c0218567211e5259de77518ae | ["MIT"] | null | null | null | COVIDSafepassage/passsystem/apps.py | VICS-CORE/safepassage_server | 58bc04dbfa55430c0218567211e5259de77518ae | ["MIT"] | 8 | 2020-04-25T09:42:25.000Z | 2022-03-12T00:23:32.000Z | COVIDSafepassage/passsystem/apps.py | VICS-CORE/safepassage_server | 58bc04dbfa55430c0218567211e5259de77518ae | ["MIT"] | null | null | null |
from django.apps import AppConfig
class PasssystemConfig(AppConfig):
name = 'passsystem'
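# Minimal wiring sketch (illustrative, standard Django convention): activate the
# app by referencing this config from the project settings, e.g.
#   INSTALLED_APPS = [
#       ...,
#       'passsystem.apps.PasssystemConfig',
#   ]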
| 16.666667 | 35 | 0.73 | 59 | 0.59 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.12 |
d2f36e17b1fe05c90facefa1af9d3583979040ce | 220 | py | Python | src/intervals/once.py | Eagerod/tasker | b2bfbd6557063da389d1839f4f151bb4ad78b075 | ["MIT"] | null | null | null | src/intervals/once.py | Eagerod/tasker | b2bfbd6557063da389d1839f4f151bb4ad78b075 | ["MIT"] | null | null | null | src/intervals/once.py | Eagerod/tasker | b2bfbd6557063da389d1839f4f151bb4ad78b075 | ["MIT"] | null | null | null |
from base_interval import BaseInterval
class OnceInterval(BaseInterval):
@staticmethod
def next_interval(start_date):
return start_date
@staticmethod
def approximate_period():
return 0
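# Minimal sketch (illustrative; BaseInterval's contract is inferred from the two
# static methods above): a "once" task is due at its start date and never recurs.
#   OnceInterval.next_interval(some_date) == some_date
#   OnceInterval.approximate_period() == 0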
| 18.333333 | 38 | 0.718182 | 178 | 0.809091 | 0 | 0 | 134 | 0.609091 | 0 | 0 | 0 | 0 |
d2f4c426757a6a0f92d35c0788647479f59e49fb | 118,437 | py | Python | env/Lib/site-packages/azure/mgmt/storage/storagemanagement.py | Ammar12/simplebanking | 6080d638b2e98bfcf96d782703e1dce25aebfcbc | ["MIT"] | null | null | null | env/Lib/site-packages/azure/mgmt/storage/storagemanagement.py | Ammar12/simplebanking | 6080d638b2e98bfcf96d782703e1dce25aebfcbc | ["MIT"] | null | null | null | env/Lib/site-packages/azure/mgmt/storage/storagemanagement.py | Ammar12/simplebanking | 6080d638b2e98bfcf96d782703e1dce25aebfcbc | ["MIT"] | null | null | null |
#
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Warning: This code was generated by a tool.
#
# Changes to this file may cause incorrect behavior and will be lost if the
# code is regenerated.
from datetime import datetime
import json
from requests import Session, Request
import time
import uuid
try:
    from urllib import quote, unquote
except ImportError:
    from urllib.parse import quote, unquote
from azure.common import AzureHttpError
from azure.mgmt.common import AzureOperationResponse, OperationStatusResponse, OperationStatus, Service
from azure.mgmt.common.arm import ResourceBase, ResourceBaseExtended
class StorageAccountCreateResponse(AzureOperationResponse):
"""
The Create storage account operation response.
"""
def __init__(self, **kwargs):
super(StorageAccountCreateResponse, self).__init__(**kwargs)
self._storage_account = kwargs.get('storage_account')
self._operation_status_link = kwargs.get('operation_status_link')
self._retry_after = kwargs.get('retry_after')
self._status = kwargs.get('status')
@property
def operation_status_link(self):
"""
Gets the URL where the status of the create operation can be checked.
"""
return self._operation_status_link
@operation_status_link.setter
def operation_status_link(self, value):
self._operation_status_link = value
@property
def retry_after(self):
"""
Gets the delay that the client should use when checking for the status
of the operation. This delay is specified in seconds as an integer;
min 5 seconds, max 900 seconds (15 minutes). The storage resource
provider will return 25 seconds initially.
"""
return self._retry_after
@retry_after.setter
def retry_after(self, value):
self._retry_after = value
@property
def status(self):
"""
Gets the status of the create request.
"""
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def storage_account(self):
"""
Gets the storage account with the created properties populated.
"""
return self._storage_account
@storage_account.setter
def storage_account(self, value):
self._storage_account = value
class CheckNameAvailabilityResponse(AzureOperationResponse):
"""
The CheckNameAvailability operation response.
"""
def __init__(self, **kwargs):
super(CheckNameAvailabilityResponse, self).__init__(**kwargs)
self._name_available = kwargs.get('name_available')
self._reason = kwargs.get('reason')
self._message = kwargs.get('message')
@property
def message(self):
"""
Gets an error message explaining the Reason value in more detail.
"""
return self._message
@message.setter
def message(self, value):
self._message = value
@property
def name_available(self):
"""
Gets a boolean value that indicates whether the name is available for
you to use. If true, the name is available. If false, the name has
already been taken or invalid and cannot be used.
"""
return self._name_available
@name_available.setter
def name_available(self, value):
self._name_available = value
@property
def reason(self):
"""
Gets the reason that a storage account name could not be used. The
Reason element is only returned if NameAvailable is false.
"""
return self._reason
@reason.setter
def reason(self, value):
self._reason = value
class StorageAccountCreateParameters(object):
"""
The parameters to provide for the account.
"""
def __init__(self, **kwargs):
self._account_type = kwargs.get('account_type')
self._location = kwargs.get('location')
self._tags = kwargs.get('tags')
@property
def account_type(self):
"""
Gets or sets the account type.
"""
return self._account_type
@account_type.setter
def account_type(self, value):
self._account_type = value
@property
def location(self):
"""
Gets or sets the location of the resource. This will be one of the
supported and registered Azure Geo Regions (e.g. West US, East US,
Southeast Asia, etc.). The geo region of a resource cannot be changed
once it is created.
"""
return self._location
@location.setter
def location(self, value):
self._location = value
@property
def tags(self):
"""
Gets or sets a list of key value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across
resource groups). A maximum of 15 tags can be provided for a
resource. Each tag must have a key no greater than 128 characters and
value no greater than 256 characters.
"""
return self._tags
@tags.setter
def tags(self, value):
self._tags = value
class StorageAccountGetPropertiesResponse(AzureOperationResponse):
"""
The Get storage account operation response.
"""
def __init__(self, **kwargs):
super(StorageAccountGetPropertiesResponse, self).__init__(**kwargs)
self._storage_account = kwargs.get('storage_account')
@property
def storage_account(self):
"""
Gets the returned storage account.
"""
return self._storage_account
@storage_account.setter
def storage_account(self, value):
self._storage_account = value
class StorageAccountListKeysResponse(AzureOperationResponse):
"""
The ListKeys operation response.
"""
def __init__(self, **kwargs):
super(StorageAccountListKeysResponse, self).__init__(**kwargs)
self._storage_account_keys = kwargs.get('storage_account_keys')
@property
def storage_account_keys(self):
"""
Gets the access keys for the storage account.
"""
return self._storage_account_keys
@storage_account_keys.setter
def storage_account_keys(self, value):
self._storage_account_keys = value
class StorageAccountListResponse(AzureOperationResponse):
"""
The list storage accounts operation response.
"""
def __init__(self, **kwargs):
super(StorageAccountListResponse, self).__init__(**kwargs)
self._storage_accounts = kwargs.get('storage_accounts')
self._next_link = kwargs.get('next_link')
@property
def next_link(self):
"""
Gets the link to the next set of results. Currently this will always
be empty as the API does not support pagination.
"""
return self._next_link
@next_link.setter
def next_link(self, value):
self._next_link = value
@property
def storage_accounts(self):
"""
Gets the list of storage accounts and their properties.
"""
return self._storage_accounts
@storage_accounts.setter
def storage_accounts(self, value):
self._storage_accounts = value
class StorageAccountUpdateResponse(AzureOperationResponse):
"""
The Update storage account operation response.
"""
def __init__(self, **kwargs):
super(StorageAccountUpdateResponse, self).__init__(**kwargs)
self._storage_account = kwargs.get('storage_account')
@property
def storage_account(self):
"""
Gets the storage account with the updated properties populated.
"""
return self._storage_account
@storage_account.setter
def storage_account(self, value):
self._storage_account = value
class StorageAccountUpdateParameters(object):
"""
The parameters to update on the account.
"""
def __init__(self, **kwargs):
self._account_type = kwargs.get('account_type')
self._custom_domain = kwargs.get('custom_domain')
self._tags = kwargs.get('tags')
@property
def account_type(self):
"""
Gets or sets the account type. Note that StandardZRS and PremiumLRS
accounts cannot be changed to other account types, and other account
types cannot be changed to StandardZRS or PremiumLRS.
"""
return self._account_type
@account_type.setter
def account_type(self, value):
self._account_type = value
@property
def custom_domain(self):
"""
User domain assigned to the storage account. Name is the CNAME source.
Only one custom domain is supported per storage account at this time.
To clear the existing custom domain, use an empty string for the
custom domain name property.
"""
return self._custom_domain
@custom_domain.setter
def custom_domain(self, value):
self._custom_domain = value
@property
def tags(self):
"""
Gets or sets a list of key value pairs that describe the resource.
These tags can be used in viewing and grouping this resource (across
resource groups). A maximum of 15 tags can be provided for a
resource. Each tag must have a key no greater than 128 characters and
value no greater than 256 characters. This is a full replace so all
the existing tags will be replaced on Update.
"""
return self._tags
@tags.setter
def tags(self, value):
self._tags = value
class StorageAccountRegenerateKeyResponse(AzureOperationResponse):
"""
The RegenerateKey operation response.
"""
def __init__(self, **kwargs):
super(StorageAccountRegenerateKeyResponse, self).__init__(**kwargs)
self._storage_account_keys = kwargs.get('storage_account_keys')
@property
def storage_account_keys(self):
"""
Gets the access keys associated with the storage account, one of which
        may have been regenerated by this operation.
"""
return self._storage_account_keys
@storage_account_keys.setter
def storage_account_keys(self, value):
self._storage_account_keys = value
class KeyName(object):
"""
The key names.
"""
key1 = "key1"
key2 = "key2"
class StorageAccount(ResourceBaseExtended):
"""
The storage account.
"""
def __init__(self, **kwargs):
super(StorageAccount, self).__init__(**kwargs)
self._provisioning_state = kwargs.get('provisioning_state')
self._account_type = kwargs.get('account_type')
self._primary_endpoints = kwargs.get('primary_endpoints')
self._primary_location = kwargs.get('primary_location')
self._status_of_primary = kwargs.get('status_of_primary')
self._last_geo_failover_time = kwargs.get('last_geo_failover_time')
self._secondary_endpoints = kwargs.get('secondary_endpoints')
self._secondary_location = kwargs.get('secondary_location')
self._status_of_secondary = kwargs.get('status_of_secondary')
self._creation_time = kwargs.get('creation_time')
self._custom_domain = kwargs.get('custom_domain')
@property
def account_type(self):
"""
Gets the type of the storage account.
"""
return self._account_type
@account_type.setter
def account_type(self, value):
self._account_type = value
@property
def creation_time(self):
"""
Gets the creation date and time of the storage account in UTC.
"""
return self._creation_time
@creation_time.setter
def creation_time(self, value):
self._creation_time = value
@property
def custom_domain(self):
"""
Gets the user assigned custom domain assigned to this storage account.
"""
return self._custom_domain
@custom_domain.setter
def custom_domain(self, value):
self._custom_domain = value
@property
def last_geo_failover_time(self):
"""
Gets the timestamp of the most recent instance of a failover to the
secondary location. Only the most recent timestamp is retained. This
element is not returned if there has never been a failover instance.
Only available if the accountType is StandardGRS or StandardRAGRS.
"""
return self._last_geo_failover_time
@last_geo_failover_time.setter
def last_geo_failover_time(self, value):
self._last_geo_failover_time = value
@property
def primary_endpoints(self):
"""
Gets the URLs that are used to perform a retrieval of a public blob,
        queue or table object. Note that StandardZRS and PremiumLRS accounts
only return the blob endpoint.
"""
return self._primary_endpoints
@primary_endpoints.setter
def primary_endpoints(self, value):
self._primary_endpoints = value
@property
def primary_location(self):
"""
Gets the location of the primary for the storage account.
"""
return self._primary_location
@primary_location.setter
def primary_location(self, value):
self._primary_location = value
@property
def provisioning_state(self):
"""
Gets the status of the storage account at the time the operation was
called.
"""
return self._provisioning_state
@provisioning_state.setter
def provisioning_state(self, value):
self._provisioning_state = value
@property
def secondary_endpoints(self):
"""
Gets the URLs that are used to perform a retrieval of a public blob,
queue or table object from the secondary location of the storage
account. Only available if the accountType is StandardRAGRS.
"""
return self._secondary_endpoints
@secondary_endpoints.setter
def secondary_endpoints(self, value):
self._secondary_endpoints = value
@property
def secondary_location(self):
"""
Gets the location of the geo replicated secondary for the storage
account. Only available if the accountType is StandardGRS or
StandardRAGRS.
"""
return self._secondary_location
@secondary_location.setter
def secondary_location(self, value):
self._secondary_location = value
@property
def status_of_primary(self):
"""
Gets the status indicating whether the primary location of the storage
account is available or unavailable.
"""
return self._status_of_primary
@status_of_primary.setter
def status_of_primary(self, value):
self._status_of_primary = value
@property
def status_of_secondary(self):
"""
Gets the status indicating whether the secondary location of the
storage account is available or unavailable. Only available if the
accountType is StandardGRS or StandardRAGRS.
"""
return self._status_of_secondary
@status_of_secondary.setter
def status_of_secondary(self, value):
self._status_of_secondary = value
class ProvisioningState(object):
creating = "Creating"
resolving_dns = "ResolvingDNS"
succeeded = "Succeeded"
class AccountType(object):
standard_lrs = "Standard_LRS"
standard_zrs = "Standard_ZRS"
standard_grs = "Standard_GRS"
standard_ragrs = "Standard_RAGRS"
premium_lrs = "Premium_LRS"
class Endpoints(object):
"""
The URIs that are used to perform a retrieval of a public blob, queue or
table object.
"""
def __init__(self, **kwargs):
self._blob = kwargs.get('blob')
self._queue = kwargs.get('queue')
self._table = kwargs.get('table')
@property
def blob(self):
"""
Gets the blob endpoint.
"""
return self._blob
@blob.setter
def blob(self, value):
self._blob = value
@property
def queue(self):
"""
Gets the queue endpoint.
"""
return self._queue
@queue.setter
def queue(self, value):
self._queue = value
@property
def table(self):
"""
Gets the table endpoint.
"""
return self._table
@table.setter
def table(self, value):
self._table = value
class AccountStatus(object):
available = "Available"
unavailable = "Unavailable"
class CustomDomain(object):
"""
The custom domain assigned to this storage account. This can be set via
Update.
"""
def __init__(self, **kwargs):
self._name = kwargs.get('name')
self._use_sub_domain = kwargs.get('use_sub_domain')
@property
def name(self):
"""
Gets or sets the custom domain name. Name is the CNAME source.
"""
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def use_sub_domain(self):
"""
Indicates whether indirect CName validation is enabled. Default value
is false. This should only be set on updates
"""
return self._use_sub_domain
@use_sub_domain.setter
def use_sub_domain(self, value):
self._use_sub_domain = value
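# --- Hedged illustration (not part of the generated client) ---
# Builds update parameters that attach a custom domain, per the CustomDomain
# docstrings above; an empty string for name would clear an existing domain.
# The helper name and the domain value below are made up.
def _example_custom_domain_update_parameters():
    return StorageAccountUpdateParameters(
        custom_domain=CustomDomain(name='www.contoso.com', use_sub_domain=False),
        tags={'department': 'finance'},
    )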
class Reason(object):
account_name_invalid = "AccountNameInvalid"
already_exists = "AlreadyExists"
class StorageAccountKeys(object):
"""
The access keys for the storage account.
"""
def __init__(self, **kwargs):
self._key1 = kwargs.get('key1')
self._key2 = kwargs.get('key2')
@property
def key1(self):
"""
Gets the value of key 1.
"""
return self._key1
@key1.setter
def key1(self, value):
self._key1 = value
@property
def key2(self):
"""
Gets the value of key 2.
"""
return self._key2
@key2.setter
def key2(self, value):
self._key2 = value
class StorageManagementClient(Service):
"""
The Storage Management Client.
"""
@property
def api_version(self):
"""
Gets the API version.
"""
return self._api_version
@property
def long_running_operation_initial_timeout(self):
"""
Gets or sets the initial timeout for Long Running Operations.
"""
return self._long_running_operation_initial_timeout
@long_running_operation_initial_timeout.setter
def long_running_operation_initial_timeout(self, value):
self._long_running_operation_initial_timeout = value
@property
def long_running_operation_retry_timeout(self):
"""
Gets or sets the retry timeout for Long Running Operations.
"""
return self._long_running_operation_retry_timeout
@long_running_operation_retry_timeout.setter
def long_running_operation_retry_timeout(self, value):
self._long_running_operation_retry_timeout = value
@property
def storage_accounts(self):
"""
Operations for managing storage accounts.
"""
return self._storage_accounts
def __init__(self, credentials, **kwargs):
super(StorageManagementClient, self).__init__(credentials, **kwargs)
if getattr(self, '_base_uri', None) is None:
self._base_uri = 'https://management.azure.com/'
if getattr(self, '_api_version', None) is None:
self._api_version = '2015-05-01-preview'
if getattr(self, '_long_running_operation_initial_timeout', None) is None:
self._long_running_operation_initial_timeout = -1
if getattr(self, '_long_running_operation_retry_timeout', None) is None:
self._long_running_operation_retry_timeout = -1
self._storage_accounts = StorageAccountOperations(self)
def parse_account_type(self, value):
"""
Parse enum values for type AccountType.
Args:
value (string): The value to parse.
Returns:
AccountType: The enum value.
"""
        # Return the attributes actually defined on AccountType above.
        if 'Standard_LRS'.lower() == value.lower():
            return AccountType.standard_lrs
        if 'Standard_ZRS'.lower() == value.lower():
            return AccountType.standard_zrs
        if 'Standard_GRS'.lower() == value.lower():
            return AccountType.standard_grs
        if 'Standard_RAGRS'.lower() == value.lower():
            return AccountType.standard_ragrs
        if 'Premium_LRS'.lower() == value.lower():
            return AccountType.premium_lrs
        raise IndexError('value is outside the valid range.')
def account_type_to_string(self, value):
"""
Convert an enum of type AccountType to a string.
Args:
value (AccountType): The value to convert to a string.
Returns:
string: The enum value as a string.
"""
        if value == AccountType.standard_lrs:
            return 'Standard_LRS'
        if value == AccountType.standard_zrs:
            return 'Standard_ZRS'
        if value == AccountType.standard_grs:
            return 'Standard_GRS'
        if value == AccountType.standard_ragrs:
            return 'Standard_RAGRS'
        if value == AccountType.premium_lrs:
            return 'Premium_LRS'
        raise IndexError('value is outside the valid range.')
def parse_key_name(self, value):
"""
Parse enum values for type KeyName.
Args:
value (string): The value to parse.
Returns:
KeyName: The enum value.
"""
        if 'key1'.lower() == value.lower():
            return KeyName.key1
        if 'key2'.lower() == value.lower():
            return KeyName.key2
        raise IndexError('value is outside the valid range.')
def key_name_to_string(self, value):
"""
Convert an enum of type KeyName to a string.
Args:
value (KeyName): The value to convert to a string.
Returns:
string: The enum value as a string.
"""
        if value == KeyName.key1:
            return 'key1'
        if value == KeyName.key2:
            return 'key2'
        raise IndexError('value is outside the valid range.')
def get_create_operation_status(self, operation_status_link):
"""
The Get Create Operation Status operation returns the status of the
specified create operation. After calling the asynchronous Begin
Create operation, you can call Get Create Operation Status to
determine whether the operation has succeeded, failed, or is still in
progress.
Args:
operation_status_link (string): The URL where the status of the
long-running create operation can be checked.
Returns:
StorageAccountCreateResponse: The Create storage account operation
response.
"""
# Validate
if operation_status_link is None:
raise ValueError('operation_status_link cannot be None.')
# Tracing
# Construct URL
url = ''
url = url + operation_status_link
url = url.replace(' ', '%20')
# Create HTTP transport objects
http_request = Request()
http_request.url = url
http_request.method = 'GET'
# Set Headers
http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4())
# Send Request
response = self.send_request(http_request)
body = response.content
status_code = response.status_code
if status_code != 200 and status_code != 202 and status_code != 500:
error = AzureHttpError(body, response.status_code)
raise error
# Create Result
result = None
# Deserialize Response
if status_code == 200 or status_code == 202 or status_code == 500:
response_content = body
result = StorageAccountCreateResponse()
response_doc = None
if response_content:
response_doc = json.loads(response_content.decode())
if response_doc is not None:
storage_account_instance = StorageAccount(tags={})
result.storage_account = storage_account_instance
id_value = response_doc.get('id', None)
if id_value is not None:
id_instance = id_value
storage_account_instance.id = id_instance
name_value = response_doc.get('name', None)
if name_value is not None:
name_instance = name_value
storage_account_instance.name = name_instance
type_value = response_doc.get('type', None)
if type_value is not None:
type_instance = type_value
storage_account_instance.type = type_instance
location_value = response_doc.get('location', None)
if location_value is not None:
location_instance = location_value
storage_account_instance.location = location_instance
tags_sequence_element = response_doc.get('tags', None)
if tags_sequence_element is not None:
for property in tags_sequence_element:
tags_key = property
tags_value = tags_sequence_element[property]
storage_account_instance.tags[tags_key] = tags_value
properties_value = response_doc.get('properties', None)
if properties_value is not None:
provisioning_state_value = properties_value.get('provisioningState', None)
if provisioning_state_value is not None:
provisioning_state_instance = provisioning_state_value
storage_account_instance.provisioning_state = provisioning_state_instance
account_type_value = properties_value.get('accountType', None)
if account_type_value is not None:
account_type_instance = account_type_value
storage_account_instance.account_type = account_type_instance
primary_endpoints_value = properties_value.get('primaryEndpoints', None)
if primary_endpoints_value is not None:
primary_endpoints_instance = Endpoints()
storage_account_instance.primary_endpoints = primary_endpoints_instance
blob_value = primary_endpoints_value.get('blob', None)
if blob_value is not None:
blob_instance = blob_value
primary_endpoints_instance.blob = blob_instance
queue_value = primary_endpoints_value.get('queue', None)
if queue_value is not None:
queue_instance = queue_value
primary_endpoints_instance.queue = queue_instance
table_value = primary_endpoints_value.get('table', None)
if table_value is not None:
table_instance = table_value
primary_endpoints_instance.table = table_instance
primary_location_value = properties_value.get('primaryLocation', None)
if primary_location_value is not None:
primary_location_instance = primary_location_value
storage_account_instance.primary_location = primary_location_instance
status_of_primary_value = properties_value.get('statusOfPrimary', None)
if status_of_primary_value is not None:
status_of_primary_instance = status_of_primary_value
storage_account_instance.status_of_primary = status_of_primary_instance
last_geo_failover_time_value = properties_value.get('lastGeoFailoverTime', None)
if last_geo_failover_time_value is not None:
last_geo_failover_time_instance = last_geo_failover_time_value
storage_account_instance.last_geo_failover_time = last_geo_failover_time_instance
secondary_location_value = properties_value.get('secondaryLocation', None)
if secondary_location_value is not None:
secondary_location_instance = secondary_location_value
storage_account_instance.secondary_location = secondary_location_instance
status_of_secondary_value = properties_value.get('statusOfSecondary', None)
if status_of_secondary_value is not None:
status_of_secondary_instance = status_of_secondary_value
storage_account_instance.status_of_secondary = status_of_secondary_instance
creation_time_value = properties_value.get('creationTime', None)
if creation_time_value is not None:
creation_time_instance = creation_time_value
storage_account_instance.creation_time = creation_time_instance
custom_domain_value = properties_value.get('customDomain', None)
if custom_domain_value is not None:
custom_domain_instance = CustomDomain()
storage_account_instance.custom_domain = custom_domain_instance
name_value2 = custom_domain_value.get('name', None)
if name_value2 is not None:
name_instance2 = name_value2
custom_domain_instance.name = name_instance2
use_sub_domain_value = custom_domain_value.get('useSubDomain', None)
if use_sub_domain_value is not None:
use_sub_domain_instance = use_sub_domain_value
custom_domain_instance.use_sub_domain = use_sub_domain_instance
secondary_endpoints_value = properties_value.get('secondaryEndpoints', None)
if secondary_endpoints_value is not None:
secondary_endpoints_instance = Endpoints()
storage_account_instance.secondary_endpoints = secondary_endpoints_instance
blob_value2 = secondary_endpoints_value.get('blob', None)
if blob_value2 is not None:
blob_instance2 = blob_value2
secondary_endpoints_instance.blob = blob_instance2
queue_value2 = secondary_endpoints_value.get('queue', None)
if queue_value2 is not None:
queue_instance2 = queue_value2
secondary_endpoints_instance.queue = queue_instance2
table_value2 = secondary_endpoints_value.get('table', None)
if table_value2 is not None:
table_instance2 = table_value2
secondary_endpoints_instance.table = table_instance2
result.status_code = status_code
result.retry_after = int(response.headers.get('retryafter', '0'))
result.request_id = response.headers.get('x-ms-request-id')
if status_code == 409:
result.status = OperationStatus.Failed
if status_code == 500:
result.status = OperationStatus.InProgress
if status_code == 202:
result.status = OperationStatus.InProgress
if status_code == 200:
result.status = OperationStatus.Succeeded
return result
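# --- Hedged usage sketch (illustration only; not part of the generated client) ---
# Shows the long-running-operation pattern described in
# get_create_operation_status: begin_create hands back an operation_status_link
# that is polled until the operation finishes. The helper name, resource group,
# account name, region and credentials are assumptions supplied by the caller.
def _example_create_and_poll(client, resource_group_name, account_name):
    parameters = StorageAccountCreateParameters(
        account_type=AccountType.standard_lrs,
        location='westus',
        tags={},
    )
    response = client.storage_accounts.begin_create(
        resource_group_name, account_name, parameters)
    while response.status not in (OperationStatus.Succeeded, OperationStatus.Failed):
        time.sleep(response.retry_after or 25)
        response = client.get_create_operation_status(
            response.operation_status_link)
    return response.storage_account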
class StorageAccountOperations(object):
"""
Operations for managing storage accounts.
__NOTE__: An instance of this class is automatically created for an
instance of the [StorageManagementClient]
"""
def __init__(self, client):
self._client = client
@property
def client(self):
"""
Gets a reference to the
Microsoft.Azure.Management.Storage.StorageManagementClient.
"""
return self._client
def begin_create(self, resource_group_name, account_name, parameters):
"""
Asynchronously creates a new storage account with the specified
parameters. Existing accounts cannot be updated with this API and
should instead use the Update Storage Account API. If an account is
already created and subsequent PUT request is issued with exact same
set of properties, then HTTP 200 would be returned.
Args:
resource_group_name (string): The name of the resource group within
the user’s subscription.
account_name (string): The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
parameters (StorageAccountCreateParameters): The parameters to provide
for the created account.
Returns:
StorageAccountCreateResponse: The Create storage account operation
response.
"""
# Validate
if resource_group_name is None:
raise ValueError('resource_group_name cannot be None.')
if account_name is None:
raise ValueError('account_name cannot be None.')
if len(account_name) < 3:
raise IndexError('account_name is outside the valid range.')
if len(account_name) > 24:
raise IndexError('account_name is outside the valid range.')
        for account_name_char in account_name:
            if not account_name_char.islower() and not account_name_char.isdigit():
                raise IndexError('account_name is outside the valid range.')
if parameters is None:
raise ValueError('parameters cannot be None.')
if parameters.account_type is None:
raise ValueError('parameters.account_type cannot be None.')
if parameters.location is None:
raise ValueError('parameters.location cannot be None.')
# Tracing
# Construct URL
url = ''
url = url + '/subscriptions/'
if self.client.credentials.subscription_id is not None:
url = url + quote(self.client.credentials.subscription_id)
url = url + '/resourceGroups/'
url = url + quote(resource_group_name)
url = url + '/providers/Microsoft.Storage/storageAccounts/'
url = url + quote(account_name)
query_parameters = []
query_parameters.append('api-version=2015-05-01-preview')
if len(query_parameters) > 0:
url = url + '?' + '&'.join(query_parameters)
base_url = self.client.base_uri
# Trim '/' character from the end of baseUrl and beginning of url.
if base_url[len(base_url) - 1] == '/':
base_url = base_url[0 : len(base_url) - 1]
if url[0] == '/':
url = url[1 : ]
url = base_url + '/' + url
url = url.replace(' ', '%20')
# Create HTTP transport objects
http_request = Request()
http_request.url = url
http_request.method = 'PUT'
# Set Headers
http_request.headers['Content-Type'] = 'application/json'
http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4())
# Serialize Request
request_content = None
request_doc = None
storage_account_create_parameters_json_value = {}
request_doc = storage_account_create_parameters_json_value
storage_account_create_parameters_json_value['location'] = parameters.location
if parameters.tags is not None:
tags_dictionary = {}
for tags_key in parameters.tags:
tags_value = parameters.tags[tags_key]
tags_dictionary[tags_key] = tags_value
storage_account_create_parameters_json_value['tags'] = tags_dictionary
properties_value = {}
storage_account_create_parameters_json_value['properties'] = properties_value
properties_value['accountType'] = str(parameters.account_type) if parameters.account_type is not None else 'StandardLRS'
request_content = json.dumps(request_doc)
http_request.data = request_content
http_request.headers['Content-Length'] = len(request_content)
# Send Request
response = self.client.send_request(http_request)
body = response.content
status_code = response.status_code
if status_code != 200 and status_code != 202:
error = AzureHttpError(body, response.status_code)
raise error
# Create Result
result = None
# Deserialize Response
if status_code == 200 or status_code == 202:
response_content = body
result = StorageAccountCreateResponse()
response_doc = None
if response_content:
response_doc = json.loads(response_content.decode())
if response_doc is not None:
storage_account_instance = StorageAccount(tags={})
result.storage_account = storage_account_instance
id_value = response_doc.get('id', None)
if id_value is not None:
id_instance = id_value
storage_account_instance.id = id_instance
name_value = response_doc.get('name', None)
if name_value is not None:
name_instance = name_value
storage_account_instance.name = name_instance
type_value = response_doc.get('type', None)
if type_value is not None:
type_instance = type_value
storage_account_instance.type = type_instance
location_value = response_doc.get('location', None)
if location_value is not None:
location_instance = location_value
storage_account_instance.location = location_instance
tags_sequence_element = response_doc.get('tags', None)
if tags_sequence_element is not None:
for property in tags_sequence_element:
tags_key2 = property
tags_value2 = tags_sequence_element[property]
storage_account_instance.tags[tags_key2] = tags_value2
properties_value2 = response_doc.get('properties', None)
if properties_value2 is not None:
provisioning_state_value = properties_value2.get('provisioningState', None)
if provisioning_state_value is not None:
provisioning_state_instance = provisioning_state_value
storage_account_instance.provisioning_state = provisioning_state_instance
account_type_value = properties_value2.get('accountType', None)
if account_type_value is not None:
account_type_instance = account_type_value
storage_account_instance.account_type = account_type_instance
primary_endpoints_value = properties_value2.get('primaryEndpoints', None)
if primary_endpoints_value is not None:
primary_endpoints_instance = Endpoints()
storage_account_instance.primary_endpoints = primary_endpoints_instance
blob_value = primary_endpoints_value.get('blob', None)
if blob_value is not None:
blob_instance = blob_value
primary_endpoints_instance.blob = blob_instance
queue_value = primary_endpoints_value.get('queue', None)
if queue_value is not None:
queue_instance = queue_value
primary_endpoints_instance.queue = queue_instance
table_value = primary_endpoints_value.get('table', None)
if table_value is not None:
table_instance = table_value
primary_endpoints_instance.table = table_instance
primary_location_value = properties_value2.get('primaryLocation', None)
if primary_location_value is not None:
primary_location_instance = primary_location_value
storage_account_instance.primary_location = primary_location_instance
status_of_primary_value = properties_value2.get('statusOfPrimary', None)
if status_of_primary_value is not None:
status_of_primary_instance = status_of_primary_value
storage_account_instance.status_of_primary = status_of_primary_instance
last_geo_failover_time_value = properties_value2.get('lastGeoFailoverTime', None)
if last_geo_failover_time_value is not None:
last_geo_failover_time_instance = last_geo_failover_time_value
storage_account_instance.last_geo_failover_time = last_geo_failover_time_instance
secondary_location_value = properties_value2.get('secondaryLocation', None)
if secondary_location_value is not None:
secondary_location_instance = secondary_location_value
storage_account_instance.secondary_location = secondary_location_instance
status_of_secondary_value = properties_value2.get('statusOfSecondary', None)
if status_of_secondary_value is not None:
status_of_secondary_instance = status_of_secondary_value
storage_account_instance.status_of_secondary = status_of_secondary_instance
creation_time_value = properties_value2.get('creationTime', None)
if creation_time_value is not None:
creation_time_instance = creation_time_value
storage_account_instance.creation_time = creation_time_instance
custom_domain_value = properties_value2.get('customDomain', None)
if custom_domain_value is not None:
custom_domain_instance = CustomDomain()
storage_account_instance.custom_domain = custom_domain_instance
name_value2 = custom_domain_value.get('name', None)
if name_value2 is not None:
name_instance2 = name_value2
custom_domain_instance.name = name_instance2
use_sub_domain_value = custom_domain_value.get('useSubDomain', None)
if use_sub_domain_value is not None:
use_sub_domain_instance = use_sub_domain_value
custom_domain_instance.use_sub_domain = use_sub_domain_instance
secondary_endpoints_value = properties_value2.get('secondaryEndpoints', None)
if secondary_endpoints_value is not None:
secondary_endpoints_instance = Endpoints()
storage_account_instance.secondary_endpoints = secondary_endpoints_instance
blob_value2 = secondary_endpoints_value.get('blob', None)
if blob_value2 is not None:
blob_instance2 = blob_value2
secondary_endpoints_instance.blob = blob_instance2
queue_value2 = secondary_endpoints_value.get('queue', None)
if queue_value2 is not None:
queue_instance2 = queue_value2
secondary_endpoints_instance.queue = queue_instance2
table_value2 = secondary_endpoints_value.get('table', None)
if table_value2 is not None:
table_instance2 = table_value2
secondary_endpoints_instance.table = table_instance2
result.status_code = status_code
result.operation_status_link = response.headers.get('location')
        result.retry_after = int(response.headers.get('retry-after', '0'))
result.request_id = response.headers.get('x-ms-request-id')
if status_code == 409 or status_code == 400:
result.status = OperationStatus.Failed
if status_code == 200:
result.status = OperationStatus.Succeeded
return result
def check_name_availability(self, account_name):
"""
Checks that account name is valid and is not in use.
Args:
account_name (string): The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
Returns:
CheckNameAvailabilityResponse: The CheckNameAvailability operation
response.
"""
# Validate
if account_name is None:
raise ValueError('account_name cannot be None.')
# Tracing
# Construct URL
url = ''
url = url + '/subscriptions/'
if self.client.credentials.subscription_id is not None:
url = url + quote(self.client.credentials.subscription_id)
url = url + '/providers/Microsoft.Storage/checkNameAvailability'
query_parameters = []
query_parameters.append('api-version=2015-05-01-preview')
if len(query_parameters) > 0:
url = url + '?' + '&'.join(query_parameters)
base_url = self.client.base_uri
# Trim '/' character from the end of baseUrl and beginning of url.
if base_url[len(base_url) - 1] == '/':
base_url = base_url[0 : len(base_url) - 1]
if url[0] == '/':
url = url[1 : ]
url = base_url + '/' + url
url = url.replace(' ', '%20')
# Create HTTP transport objects
http_request = Request()
http_request.url = url
http_request.method = 'POST'
# Set Headers
http_request.headers['Content-Type'] = 'application/json'
http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4())
# Serialize Request
request_content = None
request_doc = None
storage_account_check_name_availability_parameters_value = {}
request_doc = storage_account_check_name_availability_parameters_value
storage_account_check_name_availability_parameters_value['name'] = account_name
storage_account_check_name_availability_parameters_value['type'] = 'Microsoft.Storage/storageAccounts'
request_content = json.dumps(request_doc)
http_request.data = request_content
        http_request.headers['Content-Length'] = str(len(request_content))
# Send Request
response = self.client.send_request(http_request)
body = response.content
status_code = response.status_code
if status_code != 200:
error = AzureHttpError(body, response.status_code)
raise error
# Create Result
result = None
# Deserialize Response
if status_code == 200:
response_content = body
result = CheckNameAvailabilityResponse()
response_doc = None
if response_content:
response_doc = json.loads(response_content.decode())
if response_doc is not None:
name_available_value = response_doc.get('nameAvailable', None)
if name_available_value is not None:
name_available_instance = name_available_value
result.name_available = name_available_instance
reason_value = response_doc.get('reason', None)
if reason_value is not None:
reason_instance = reason_value
result.reason = reason_instance
message_value = response_doc.get('message', None)
if message_value is not None:
message_instance = message_value
result.message = message_instance
result.status_code = status_code
result.request_id = response.headers.get('x-ms-request-id')
return result
def create(self, resource_group_name, account_name, parameters):
"""
        Asynchronously creates a new storage account with the specified
        parameters. Existing accounts cannot be updated with this API and
        should instead use the Update Storage Account API. If an account is
        already created and a subsequent create request is issued with the
        exact same set of properties, the request succeeds. The maximum
        number of storage accounts that can be created per subscription is
        limited to 20.
Args:
resource_group_name (string): The name of the resource group within
the user’s subscription.
account_name (string): The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
parameters (StorageAccountCreateParameters): The parameters to provide
for the created account.
Returns:
StorageAccountCreateResponse: The Create storage account operation
response.
"""
client2 = self.client
response = client2.storage_accounts.begin_create(resource_group_name, account_name, parameters)
        if response.status == OperationStatus.Succeeded:
return response
result = client2.get_create_operation_status(response.operation_status_link)
delay_in_seconds = response.retry_after
if delay_in_seconds == 0:
delay_in_seconds = 25
if client2.long_running_operation_initial_timeout >= 0:
delay_in_seconds = client2.long_running_operation_initial_timeout
        # Poll until the long-running create leaves the InProgress state.
        while result.status == OperationStatus.InProgress:
time.sleep(delay_in_seconds)
result = client2.get_create_operation_status(response.operation_status_link)
delay_in_seconds = result.retry_after
if delay_in_seconds == 0:
delay_in_seconds = 25
if client2.long_running_operation_retry_timeout >= 0:
delay_in_seconds = client2.long_running_operation_retry_timeout
return result
def delete(self, resource_group_name, account_name):
"""
Deletes a storage account in Microsoft Azure.
Args:
resource_group_name (string): The name of the resource group within
the user’s subscription.
account_name (string): The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
Returns:
AzureOperationResponse: A standard service response including an HTTP
status code and request ID.
"""
# Validate
if resource_group_name is None:
raise ValueError('resource_group_name cannot be None.')
if account_name is None:
raise ValueError('account_name cannot be None.')
if len(account_name) < 3:
raise IndexError('account_name is outside the valid range.')
if len(account_name) > 24:
raise IndexError('account_name is outside the valid range.')
for account_name_char in account_name:
            if not (account_name_char.islower() or account_name_char.isdigit()):
raise IndexError('account_name is outside the valid range.')
# Tracing
# Construct URL
url = ''
url = url + '/subscriptions/'
if self.client.credentials.subscription_id is not None:
url = url + quote(self.client.credentials.subscription_id)
url = url + '/resourceGroups/'
url = url + quote(resource_group_name)
url = url + '/providers/Microsoft.Storage/storageAccounts/'
url = url + quote(account_name)
query_parameters = []
query_parameters.append('api-version=2015-05-01-preview')
if len(query_parameters) > 0:
url = url + '?' + '&'.join(query_parameters)
base_url = self.client.base_uri
# Trim '/' character from the end of baseUrl and beginning of url.
if base_url[len(base_url) - 1] == '/':
base_url = base_url[0 : len(base_url) - 1]
if url[0] == '/':
url = url[1 : ]
url = base_url + '/' + url
url = url.replace(' ', '%20')
# Create HTTP transport objects
http_request = Request()
http_request.url = url
http_request.method = 'DELETE'
# Set Headers
http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4())
# Send Request
response = self.client.send_request(http_request)
body = response.content
status_code = response.status_code
if status_code != 200 and status_code != 204:
error = AzureHttpError(body, response.status_code)
raise error
# Create Result
result = None
# Deserialize Response
result = AzureOperationResponse()
result.status_code = status_code
result.request_id = response.headers.get('x-ms-request-id')
return result
def get_properties(self, resource_group_name, account_name):
"""
Returns the properties for the specified storage account including but
not limited to name, account type, location, and account status. The
ListKeys operation should be used to retrieve storage keys.
Args:
resource_group_name (string): The name of the resource group within
the user’s subscription.
account_name (string): The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
Returns:
StorageAccountGetPropertiesResponse: The Get storage account operation
response.
"""
# Validate
if resource_group_name is None:
raise ValueError('resource_group_name cannot be None.')
if account_name is None:
raise ValueError('account_name cannot be None.')
if len(account_name) < 3:
raise IndexError('account_name is outside the valid range.')
if len(account_name) > 24:
raise IndexError('account_name is outside the valid range.')
for account_name_char in account_name:
            if not (account_name_char.islower() or account_name_char.isdigit()):
raise IndexError('account_name is outside the valid range.')
# Tracing
# Construct URL
url = ''
url = url + '/subscriptions/'
if self.client.credentials.subscription_id is not None:
url = url + quote(self.client.credentials.subscription_id)
url = url + '/resourceGroups/'
url = url + quote(resource_group_name)
url = url + '/providers/Microsoft.Storage/storageAccounts/'
url = url + quote(account_name)
query_parameters = []
query_parameters.append('api-version=2015-05-01-preview')
if len(query_parameters) > 0:
url = url + '?' + '&'.join(query_parameters)
base_url = self.client.base_uri
# Trim '/' character from the end of baseUrl and beginning of url.
if base_url[len(base_url) - 1] == '/':
base_url = base_url[0 : len(base_url) - 1]
if url[0] == '/':
url = url[1 : ]
url = base_url + '/' + url
url = url.replace(' ', '%20')
# Create HTTP transport objects
http_request = Request()
http_request.url = url
http_request.method = 'GET'
# Set Headers
http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4())
# Send Request
response = self.client.send_request(http_request)
body = response.content
status_code = response.status_code
if status_code != 200:
error = AzureHttpError(body, response.status_code)
raise error
# Create Result
result = None
# Deserialize Response
if status_code == 200:
response_content = body
result = StorageAccountGetPropertiesResponse()
response_doc = None
if response_content:
response_doc = json.loads(response_content.decode())
if response_doc is not None:
storage_account_instance = StorageAccount(tags={})
result.storage_account = storage_account_instance
id_value = response_doc.get('id', None)
if id_value is not None:
id_instance = id_value
storage_account_instance.id = id_instance
name_value = response_doc.get('name', None)
if name_value is not None:
name_instance = name_value
storage_account_instance.name = name_instance
type_value = response_doc.get('type', None)
if type_value is not None:
type_instance = type_value
storage_account_instance.type = type_instance
location_value = response_doc.get('location', None)
if location_value is not None:
location_instance = location_value
storage_account_instance.location = location_instance
tags_sequence_element = response_doc.get('tags', None)
if tags_sequence_element is not None:
for property in tags_sequence_element:
tags_key = property
tags_value = tags_sequence_element[property]
storage_account_instance.tags[tags_key] = tags_value
properties_value = response_doc.get('properties', None)
if properties_value is not None:
provisioning_state_value = properties_value.get('provisioningState', None)
if provisioning_state_value is not None:
provisioning_state_instance = provisioning_state_value
storage_account_instance.provisioning_state = provisioning_state_instance
account_type_value = properties_value.get('accountType', None)
if account_type_value is not None:
account_type_instance = account_type_value
storage_account_instance.account_type = account_type_instance
primary_endpoints_value = properties_value.get('primaryEndpoints', None)
if primary_endpoints_value is not None:
primary_endpoints_instance = Endpoints()
storage_account_instance.primary_endpoints = primary_endpoints_instance
blob_value = primary_endpoints_value.get('blob', None)
if blob_value is not None:
blob_instance = blob_value
primary_endpoints_instance.blob = blob_instance
queue_value = primary_endpoints_value.get('queue', None)
if queue_value is not None:
queue_instance = queue_value
primary_endpoints_instance.queue = queue_instance
table_value = primary_endpoints_value.get('table', None)
if table_value is not None:
table_instance = table_value
primary_endpoints_instance.table = table_instance
primary_location_value = properties_value.get('primaryLocation', None)
if primary_location_value is not None:
primary_location_instance = primary_location_value
storage_account_instance.primary_location = primary_location_instance
status_of_primary_value = properties_value.get('statusOfPrimary', None)
if status_of_primary_value is not None:
status_of_primary_instance = status_of_primary_value
storage_account_instance.status_of_primary = status_of_primary_instance
last_geo_failover_time_value = properties_value.get('lastGeoFailoverTime', None)
if last_geo_failover_time_value is not None:
last_geo_failover_time_instance = last_geo_failover_time_value
storage_account_instance.last_geo_failover_time = last_geo_failover_time_instance
secondary_location_value = properties_value.get('secondaryLocation', None)
if secondary_location_value is not None:
secondary_location_instance = secondary_location_value
storage_account_instance.secondary_location = secondary_location_instance
status_of_secondary_value = properties_value.get('statusOfSecondary', None)
if status_of_secondary_value is not None:
status_of_secondary_instance = status_of_secondary_value
storage_account_instance.status_of_secondary = status_of_secondary_instance
creation_time_value = properties_value.get('creationTime', None)
if creation_time_value is not None:
creation_time_instance = creation_time_value
storage_account_instance.creation_time = creation_time_instance
custom_domain_value = properties_value.get('customDomain', None)
if custom_domain_value is not None:
custom_domain_instance = CustomDomain()
storage_account_instance.custom_domain = custom_domain_instance
name_value2 = custom_domain_value.get('name', None)
if name_value2 is not None:
name_instance2 = name_value2
custom_domain_instance.name = name_instance2
use_sub_domain_value = custom_domain_value.get('useSubDomain', None)
if use_sub_domain_value is not None:
use_sub_domain_instance = use_sub_domain_value
custom_domain_instance.use_sub_domain = use_sub_domain_instance
secondary_endpoints_value = properties_value.get('secondaryEndpoints', None)
if secondary_endpoints_value is not None:
secondary_endpoints_instance = Endpoints()
storage_account_instance.secondary_endpoints = secondary_endpoints_instance
blob_value2 = secondary_endpoints_value.get('blob', None)
if blob_value2 is not None:
blob_instance2 = blob_value2
secondary_endpoints_instance.blob = blob_instance2
queue_value2 = secondary_endpoints_value.get('queue', None)
if queue_value2 is not None:
queue_instance2 = queue_value2
secondary_endpoints_instance.queue = queue_instance2
table_value2 = secondary_endpoints_value.get('table', None)
if table_value2 is not None:
table_instance2 = table_value2
secondary_endpoints_instance.table = table_instance2
result.status_code = status_code
result.request_id = response.headers.get('x-ms-request-id')
return result
def list(self):
"""
Lists all the storage accounts available under the subscription. Note
that storage keys are not returned; use the ListKeys operation for
this.
Returns:
StorageAccountListResponse: The list storage accounts operation
response.
"""
# Validate
# Tracing
# Construct URL
url = ''
url = url + '/subscriptions/'
if self.client.credentials.subscription_id is not None:
url = url + quote(self.client.credentials.subscription_id)
url = url + '/providers/Microsoft.Storage/storageAccounts'
query_parameters = []
query_parameters.append('api-version=2015-05-01-preview')
if len(query_parameters) > 0:
url = url + '?' + '&'.join(query_parameters)
base_url = self.client.base_uri
# Trim '/' character from the end of baseUrl and beginning of url.
if base_url[len(base_url) - 1] == '/':
base_url = base_url[0 : len(base_url) - 1]
if url[0] == '/':
url = url[1 : ]
url = base_url + '/' + url
url = url.replace(' ', '%20')
# Create HTTP transport objects
http_request = Request()
http_request.url = url
http_request.method = 'GET'
# Set Headers
http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4())
# Send Request
response = self.client.send_request(http_request)
body = response.content
status_code = response.status_code
if status_code != 200:
error = AzureHttpError(body, response.status_code)
raise error
# Create Result
result = None
# Deserialize Response
if status_code == 200:
response_content = body
result = StorageAccountListResponse(storage_accounts=[])
response_doc = None
if response_content:
response_doc = json.loads(response_content.decode())
if response_doc is not None:
value_array = response_doc.get('value', None)
if value_array is not None:
for value_value in value_array:
storage_account_json_instance = StorageAccount(tags={})
result.storage_accounts.append(storage_account_json_instance)
id_value = value_value.get('id', None)
if id_value is not None:
id_instance = id_value
storage_account_json_instance.id = id_instance
name_value = value_value.get('name', None)
if name_value is not None:
name_instance = name_value
storage_account_json_instance.name = name_instance
type_value = value_value.get('type', None)
if type_value is not None:
type_instance = type_value
storage_account_json_instance.type = type_instance
location_value = value_value.get('location', None)
if location_value is not None:
location_instance = location_value
storage_account_json_instance.location = location_instance
tags_sequence_element = value_value.get('tags', None)
if tags_sequence_element is not None:
for property in tags_sequence_element:
tags_key = property
tags_value = tags_sequence_element[property]
storage_account_json_instance.tags[tags_key] = tags_value
properties_value = value_value.get('properties', None)
if properties_value is not None:
provisioning_state_value = properties_value.get('provisioningState', None)
if provisioning_state_value is not None:
provisioning_state_instance = provisioning_state_value
storage_account_json_instance.provisioning_state = provisioning_state_instance
account_type_value = properties_value.get('accountType', None)
if account_type_value is not None:
account_type_instance = account_type_value
storage_account_json_instance.account_type = account_type_instance
primary_endpoints_value = properties_value.get('primaryEndpoints', None)
if primary_endpoints_value is not None:
primary_endpoints_instance = Endpoints()
storage_account_json_instance.primary_endpoints = primary_endpoints_instance
blob_value = primary_endpoints_value.get('blob', None)
if blob_value is not None:
blob_instance = blob_value
primary_endpoints_instance.blob = blob_instance
queue_value = primary_endpoints_value.get('queue', None)
if queue_value is not None:
queue_instance = queue_value
primary_endpoints_instance.queue = queue_instance
table_value = primary_endpoints_value.get('table', None)
if table_value is not None:
table_instance = table_value
primary_endpoints_instance.table = table_instance
primary_location_value = properties_value.get('primaryLocation', None)
if primary_location_value is not None:
primary_location_instance = primary_location_value
storage_account_json_instance.primary_location = primary_location_instance
status_of_primary_value = properties_value.get('statusOfPrimary', None)
if status_of_primary_value is not None:
status_of_primary_instance = status_of_primary_value
storage_account_json_instance.status_of_primary = status_of_primary_instance
last_geo_failover_time_value = properties_value.get('lastGeoFailoverTime', None)
if last_geo_failover_time_value is not None:
last_geo_failover_time_instance = last_geo_failover_time_value
storage_account_json_instance.last_geo_failover_time = last_geo_failover_time_instance
secondary_location_value = properties_value.get('secondaryLocation', None)
if secondary_location_value is not None:
secondary_location_instance = secondary_location_value
storage_account_json_instance.secondary_location = secondary_location_instance
status_of_secondary_value = properties_value.get('statusOfSecondary', None)
if status_of_secondary_value is not None:
status_of_secondary_instance = status_of_secondary_value
storage_account_json_instance.status_of_secondary = status_of_secondary_instance
creation_time_value = properties_value.get('creationTime', None)
if creation_time_value is not None:
creation_time_instance = creation_time_value
storage_account_json_instance.creation_time = creation_time_instance
custom_domain_value = properties_value.get('customDomain', None)
if custom_domain_value is not None:
custom_domain_instance = CustomDomain()
storage_account_json_instance.custom_domain = custom_domain_instance
name_value2 = custom_domain_value.get('name', None)
if name_value2 is not None:
name_instance2 = name_value2
custom_domain_instance.name = name_instance2
use_sub_domain_value = custom_domain_value.get('useSubDomain', None)
if use_sub_domain_value is not None:
use_sub_domain_instance = use_sub_domain_value
custom_domain_instance.use_sub_domain = use_sub_domain_instance
secondary_endpoints_value = properties_value.get('secondaryEndpoints', None)
if secondary_endpoints_value is not None:
secondary_endpoints_instance = Endpoints()
storage_account_json_instance.secondary_endpoints = secondary_endpoints_instance
blob_value2 = secondary_endpoints_value.get('blob', None)
if blob_value2 is not None:
blob_instance2 = blob_value2
secondary_endpoints_instance.blob = blob_instance2
queue_value2 = secondary_endpoints_value.get('queue', None)
if queue_value2 is not None:
queue_instance2 = queue_value2
secondary_endpoints_instance.queue = queue_instance2
table_value2 = secondary_endpoints_value.get('table', None)
if table_value2 is not None:
table_instance2 = table_value2
secondary_endpoints_instance.table = table_instance2
next_link_value = response_doc.get('nextLink', None)
if next_link_value is not None:
next_link_instance = next_link_value
result.next_link = next_link_instance
result.status_code = status_code
result.request_id = response.headers.get('x-ms-request-id')
return result
def list_by_resource_group(self, resource_group_name):
"""
Lists all the storage accounts available under the given resource
group. Note that storage keys are not returned; use the ListKeys
operation for this.
Args:
resource_group_name (string): The name of the resource group within
the user’s subscription.
Returns:
StorageAccountListResponse: The list storage accounts operation
response.
"""
# Validate
if resource_group_name is None:
raise ValueError('resource_group_name cannot be None.')
# Tracing
# Construct URL
url = ''
url = url + '/subscriptions/'
if self.client.credentials.subscription_id is not None:
url = url + quote(self.client.credentials.subscription_id)
url = url + '/resourceGroups/'
url = url + quote(resource_group_name)
url = url + '/providers/Microsoft.Storage/storageAccounts'
query_parameters = []
query_parameters.append('api-version=2015-05-01-preview')
if len(query_parameters) > 0:
url = url + '?' + '&'.join(query_parameters)
base_url = self.client.base_uri
# Trim '/' character from the end of baseUrl and beginning of url.
if base_url[len(base_url) - 1] == '/':
base_url = base_url[0 : len(base_url) - 1]
if url[0] == '/':
url = url[1 : ]
url = base_url + '/' + url
url = url.replace(' ', '%20')
# Create HTTP transport objects
http_request = Request()
http_request.url = url
http_request.method = 'GET'
# Set Headers
http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4())
# Send Request
response = self.client.send_request(http_request)
body = response.content
status_code = response.status_code
if status_code != 200:
error = AzureHttpError(body, response.status_code)
raise error
# Create Result
result = None
# Deserialize Response
if status_code == 200:
response_content = body
result = StorageAccountListResponse(storage_accounts=[])
response_doc = None
if response_content:
response_doc = json.loads(response_content.decode())
if response_doc is not None:
value_array = response_doc.get('value', None)
if value_array is not None:
for value_value in value_array:
storage_account_json_instance = StorageAccount(tags={})
result.storage_accounts.append(storage_account_json_instance)
id_value = value_value.get('id', None)
if id_value is not None:
id_instance = id_value
storage_account_json_instance.id = id_instance
name_value = value_value.get('name', None)
if name_value is not None:
name_instance = name_value
storage_account_json_instance.name = name_instance
type_value = value_value.get('type', None)
if type_value is not None:
type_instance = type_value
storage_account_json_instance.type = type_instance
location_value = value_value.get('location', None)
if location_value is not None:
location_instance = location_value
storage_account_json_instance.location = location_instance
tags_sequence_element = value_value.get('tags', None)
if tags_sequence_element is not None:
for property in tags_sequence_element:
tags_key = property
tags_value = tags_sequence_element[property]
storage_account_json_instance.tags[tags_key] = tags_value
properties_value = value_value.get('properties', None)
if properties_value is not None:
provisioning_state_value = properties_value.get('provisioningState', None)
if provisioning_state_value is not None:
provisioning_state_instance = provisioning_state_value
storage_account_json_instance.provisioning_state = provisioning_state_instance
account_type_value = properties_value.get('accountType', None)
if account_type_value is not None:
account_type_instance = account_type_value
storage_account_json_instance.account_type = account_type_instance
primary_endpoints_value = properties_value.get('primaryEndpoints', None)
if primary_endpoints_value is not None:
primary_endpoints_instance = Endpoints()
storage_account_json_instance.primary_endpoints = primary_endpoints_instance
blob_value = primary_endpoints_value.get('blob', None)
if blob_value is not None:
blob_instance = blob_value
primary_endpoints_instance.blob = blob_instance
queue_value = primary_endpoints_value.get('queue', None)
if queue_value is not None:
queue_instance = queue_value
primary_endpoints_instance.queue = queue_instance
table_value = primary_endpoints_value.get('table', None)
if table_value is not None:
table_instance = table_value
primary_endpoints_instance.table = table_instance
primary_location_value = properties_value.get('primaryLocation', None)
if primary_location_value is not None:
primary_location_instance = primary_location_value
storage_account_json_instance.primary_location = primary_location_instance
status_of_primary_value = properties_value.get('statusOfPrimary', None)
if status_of_primary_value is not None:
status_of_primary_instance = status_of_primary_value
storage_account_json_instance.status_of_primary = status_of_primary_instance
last_geo_failover_time_value = properties_value.get('lastGeoFailoverTime', None)
if last_geo_failover_time_value is not None:
last_geo_failover_time_instance = last_geo_failover_time_value
storage_account_json_instance.last_geo_failover_time = last_geo_failover_time_instance
secondary_location_value = properties_value.get('secondaryLocation', None)
if secondary_location_value is not None:
secondary_location_instance = secondary_location_value
storage_account_json_instance.secondary_location = secondary_location_instance
status_of_secondary_value = properties_value.get('statusOfSecondary', None)
if status_of_secondary_value is not None:
status_of_secondary_instance = status_of_secondary_value
storage_account_json_instance.status_of_secondary = status_of_secondary_instance
creation_time_value = properties_value.get('creationTime', None)
if creation_time_value is not None:
creation_time_instance = creation_time_value
storage_account_json_instance.creation_time = creation_time_instance
custom_domain_value = properties_value.get('customDomain', None)
if custom_domain_value is not None:
custom_domain_instance = CustomDomain()
storage_account_json_instance.custom_domain = custom_domain_instance
name_value2 = custom_domain_value.get('name', None)
if name_value2 is not None:
name_instance2 = name_value2
custom_domain_instance.name = name_instance2
use_sub_domain_value = custom_domain_value.get('useSubDomain', None)
if use_sub_domain_value is not None:
use_sub_domain_instance = use_sub_domain_value
custom_domain_instance.use_sub_domain = use_sub_domain_instance
secondary_endpoints_value = properties_value.get('secondaryEndpoints', None)
if secondary_endpoints_value is not None:
secondary_endpoints_instance = Endpoints()
storage_account_json_instance.secondary_endpoints = secondary_endpoints_instance
blob_value2 = secondary_endpoints_value.get('blob', None)
if blob_value2 is not None:
blob_instance2 = blob_value2
secondary_endpoints_instance.blob = blob_instance2
queue_value2 = secondary_endpoints_value.get('queue', None)
if queue_value2 is not None:
queue_instance2 = queue_value2
secondary_endpoints_instance.queue = queue_instance2
table_value2 = secondary_endpoints_value.get('table', None)
if table_value2 is not None:
table_instance2 = table_value2
secondary_endpoints_instance.table = table_instance2
next_link_value = response_doc.get('nextLink', None)
if next_link_value is not None:
next_link_instance = next_link_value
result.next_link = next_link_instance
result.status_code = status_code
result.request_id = response.headers.get('x-ms-request-id')
return result
def list_keys(self, resource_group_name, account_name):
"""
Lists the access keys for the specified storage account.
Args:
resource_group_name (string): The name of the resource group.
account_name (string): The name of the storage account.
Returns:
StorageAccountListKeysResponse: The ListKeys operation response.
"""
# Validate
if resource_group_name is None:
raise ValueError('resource_group_name cannot be None.')
if account_name is None:
raise ValueError('account_name cannot be None.')
if len(account_name) < 3:
raise IndexError('account_name is outside the valid range.')
if len(account_name) > 24:
raise IndexError('account_name is outside the valid range.')
for account_name_char in account_name:
            if not (account_name_char.islower() or account_name_char.isdigit()):
raise IndexError('account_name is outside the valid range.')
# Tracing
# Construct URL
url = ''
url = url + '/subscriptions/'
if self.client.credentials.subscription_id is not None:
url = url + quote(self.client.credentials.subscription_id)
url = url + '/resourceGroups/'
url = url + quote(resource_group_name)
url = url + '/providers/Microsoft.Storage/storageAccounts/'
url = url + quote(account_name)
url = url + '/listKeys'
query_parameters = []
query_parameters.append('api-version=2015-05-01-preview')
if len(query_parameters) > 0:
url = url + '?' + '&'.join(query_parameters)
base_url = self.client.base_uri
# Trim '/' character from the end of baseUrl and beginning of url.
if base_url[len(base_url) - 1] == '/':
base_url = base_url[0 : len(base_url) - 1]
if url[0] == '/':
url = url[1 : ]
url = base_url + '/' + url
url = url.replace(' ', '%20')
# Create HTTP transport objects
http_request = Request()
http_request.url = url
http_request.method = 'POST'
# Set Headers
http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4())
# Send Request
response = self.client.send_request(http_request)
body = response.content
status_code = response.status_code
if status_code != 200:
error = AzureHttpError(body, response.status_code)
raise error
# Create Result
result = None
# Deserialize Response
if status_code == 200:
response_content = body
result = StorageAccountListKeysResponse()
response_doc = None
if response_content:
response_doc = json.loads(response_content.decode())
if response_doc is not None:
storage_account_keys_instance = StorageAccountKeys()
result.storage_account_keys = storage_account_keys_instance
key1_value = response_doc.get('key1', None)
if key1_value is not None:
key1_instance = key1_value
storage_account_keys_instance.key1 = key1_instance
key2_value = response_doc.get('key2', None)
if key2_value is not None:
key2_instance = key2_value
storage_account_keys_instance.key2 = key2_instance
result.status_code = status_code
result.request_id = response.headers.get('x-ms-request-id')
return result
def regenerate_key(self, resource_group_name, account_name, regenerate_key):
"""
Regenerates the access keys for the specified storage account.
Args:
resource_group_name (string): The name of the resource group within
the user’s subscription.
account_name (string): The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
        regenerate_key (KeyName): Specifies the name of the key that should
        be regenerated.
Returns:
StorageAccountRegenerateKeyResponse: The RegenerateKey operation
response.
"""
# Validate
if resource_group_name is None:
raise ValueError('resource_group_name cannot be None.')
if account_name is None:
raise ValueError('account_name cannot be None.')
if len(account_name) < 3:
raise IndexError('account_name is outside the valid range.')
if len(account_name) > 24:
raise IndexError('account_name is outside the valid range.')
for account_name_char in account_name:
            if not (account_name_char.islower() or account_name_char.isdigit()):
raise IndexError('account_name is outside the valid range.')
if regenerate_key is None:
raise ValueError('regenerate_key cannot be None.')
# Tracing
# Construct URL
url = ''
url = url + '/subscriptions/'
if self.client.credentials.subscription_id is not None:
url = url + quote(self.client.credentials.subscription_id)
url = url + '/resourceGroups/'
url = url + quote(resource_group_name)
url = url + '/providers/Microsoft.Storage/storageAccounts/'
url = url + quote(account_name)
url = url + '/regenerateKey'
query_parameters = []
query_parameters.append('api-version=2015-05-01-preview')
if len(query_parameters) > 0:
url = url + '?' + '&'.join(query_parameters)
base_url = self.client.base_uri
# Trim '/' character from the end of baseUrl and beginning of url.
if base_url[len(base_url) - 1] == '/':
base_url = base_url[0 : len(base_url) - 1]
if url[0] == '/':
url = url[1 : ]
url = base_url + '/' + url
url = url.replace(' ', '%20')
# Create HTTP transport objects
http_request = Request()
http_request.url = url
http_request.method = 'POST'
# Set Headers
http_request.headers['Content-Type'] = 'application/json'
http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4())
# Serialize Request
request_content = None
request_doc = None
storage_account_regenerate_key_parameters_value = {}
request_doc = storage_account_regenerate_key_parameters_value
storage_account_regenerate_key_parameters_value['keyName'] = str(regenerate_key) if regenerate_key is not None else 'Key1'
request_content = json.dumps(request_doc)
http_request.data = request_content
        http_request.headers['Content-Length'] = str(len(request_content))
# Send Request
response = self.client.send_request(http_request)
body = response.content
status_code = response.status_code
if status_code != 200:
error = AzureHttpError(body, response.status_code)
raise error
# Create Result
result = None
# Deserialize Response
if status_code == 200:
response_content = body
result = StorageAccountRegenerateKeyResponse()
response_doc = None
if response_content:
response_doc = json.loads(response_content.decode())
if response_doc is not None:
storage_account_keys_instance = StorageAccountKeys()
result.storage_account_keys = storage_account_keys_instance
key1_value = response_doc.get('key1', None)
if key1_value is not None:
key1_instance = key1_value
storage_account_keys_instance.key1 = key1_instance
key2_value = response_doc.get('key2', None)
if key2_value is not None:
key2_instance = key2_value
storage_account_keys_instance.key2 = key2_instance
result.status_code = status_code
result.request_id = response.headers.get('x-ms-request-id')
return result
def update(self, resource_group_name, account_name, parameters):
"""
Updates the account type or tags for a storage account. It can also be
used to add a custom domain (note that custom domains cannot be added
via the Create operation). Only one custom domain is supported per
storage account. This API can only be used to update one of tags,
accountType, or customDomain per call. To update multiple of these
properties, call the API multiple times with one change per call.
This call does not change the storage keys for the account. If you
want to change storage account keys, use the RegenerateKey operation.
The location and name of the storage account cannot be changed after
creation.
Args:
resource_group_name (string): The name of the resource group within
the user’s subscription.
account_name (string): The name of the storage account within the
specified resource group. Storage account names must be between 3 and
24 characters in length and use numbers and lower-case letters only.
parameters (StorageAccountUpdateParameters): The parameters to update
on the account. Note that only one property can be changed at a time
using this API.
Returns:
StorageAccountUpdateResponse: The Update storage account operation
response.
"""
# Validate
if resource_group_name is None:
raise ValueError('resource_group_name cannot be None.')
if account_name is None:
raise ValueError('account_name cannot be None.')
if len(account_name) < 3:
raise IndexError('account_name is outside the valid range.')
if len(account_name) > 24:
raise IndexError('account_name is outside the valid range.')
for account_name_char in account_name:
            if not (account_name_char.islower() or account_name_char.isdigit()):
raise IndexError('account_name is outside the valid range.')
if parameters is None:
raise ValueError('parameters cannot be None.')
if parameters.custom_domain is not None:
if parameters.custom_domain.name is None:
raise ValueError('parameters.custom_domain.name cannot be None.')
# Tracing
# Construct URL
url = ''
url = url + '/subscriptions/'
if self.client.credentials.subscription_id is not None:
url = url + quote(self.client.credentials.subscription_id)
url = url + '/resourceGroups/'
url = url + quote(resource_group_name)
url = url + '/providers/Microsoft.Storage/storageAccounts/'
url = url + quote(account_name)
query_parameters = []
query_parameters.append('api-version=2015-05-01-preview')
if len(query_parameters) > 0:
url = url + '?' + '&'.join(query_parameters)
base_url = self.client.base_uri
# Trim '/' character from the end of baseUrl and beginning of url.
if base_url[len(base_url) - 1] == '/':
base_url = base_url[0 : len(base_url) - 1]
if url[0] == '/':
url = url[1 : ]
url = base_url + '/' + url
url = url.replace(' ', '%20')
# Create HTTP transport objects
http_request = Request()
http_request.url = url
http_request.method = 'PATCH'
# Set Headers
http_request.headers['Content-Type'] = 'application/json'
http_request.headers['x-ms-client-request-id'] = str(uuid.uuid4())
# Serialize Request
request_content = None
request_doc = None
storage_account_update_parameters_json_value = {}
request_doc = storage_account_update_parameters_json_value
if parameters.tags is not None:
tags_dictionary = {}
for tags_key in parameters.tags:
tags_value = parameters.tags[tags_key]
tags_dictionary[tags_key] = tags_value
storage_account_update_parameters_json_value['tags'] = tags_dictionary
properties_value = {}
storage_account_update_parameters_json_value['properties'] = properties_value
if parameters.account_type is not None:
            properties_value['accountType'] = str(parameters.account_type)
if parameters.custom_domain is not None:
custom_domain_value = {}
properties_value['customDomain'] = custom_domain_value
custom_domain_value['name'] = parameters.custom_domain.name
if parameters.custom_domain.use_sub_domain is not None:
custom_domain_value['useSubDomain'] = parameters.custom_domain.use_sub_domain
request_content = json.dumps(request_doc)
http_request.data = request_content
        http_request.headers['Content-Length'] = str(len(request_content))
# Send Request
response = self.client.send_request(http_request)
body = response.content
status_code = response.status_code
if status_code != 200:
error = AzureHttpError(body, response.status_code)
raise error
# Create Result
result = None
# Deserialize Response
if status_code == 200:
response_content = body
result = StorageAccountUpdateResponse()
response_doc = None
if response_content:
response_doc = json.loads(response_content.decode())
if response_doc is not None:
storage_account_instance = StorageAccount(tags={})
result.storage_account = storage_account_instance
id_value = response_doc.get('id', None)
if id_value is not None:
id_instance = id_value
storage_account_instance.id = id_instance
name_value = response_doc.get('name', None)
if name_value is not None:
name_instance = name_value
storage_account_instance.name = name_instance
type_value = response_doc.get('type', None)
if type_value is not None:
type_instance = type_value
storage_account_instance.type = type_instance
location_value = response_doc.get('location', None)
if location_value is not None:
location_instance = location_value
storage_account_instance.location = location_instance
tags_sequence_element = response_doc.get('tags', None)
if tags_sequence_element is not None:
for property in tags_sequence_element:
tags_key2 = property
tags_value2 = tags_sequence_element[property]
storage_account_instance.tags[tags_key2] = tags_value2
properties_value2 = response_doc.get('properties', None)
if properties_value2 is not None:
provisioning_state_value = properties_value2.get('provisioningState', None)
if provisioning_state_value is not None:
provisioning_state_instance = provisioning_state_value
storage_account_instance.provisioning_state = provisioning_state_instance
account_type_value = properties_value2.get('accountType', None)
if account_type_value is not None:
account_type_instance = account_type_value
storage_account_instance.account_type = account_type_instance
primary_endpoints_value = properties_value2.get('primaryEndpoints', None)
if primary_endpoints_value is not None:
primary_endpoints_instance = Endpoints()
storage_account_instance.primary_endpoints = primary_endpoints_instance
blob_value = primary_endpoints_value.get('blob', None)
if blob_value is not None:
blob_instance = blob_value
primary_endpoints_instance.blob = blob_instance
queue_value = primary_endpoints_value.get('queue', None)
if queue_value is not None:
queue_instance = queue_value
primary_endpoints_instance.queue = queue_instance
table_value = primary_endpoints_value.get('table', None)
if table_value is not None:
table_instance = table_value
primary_endpoints_instance.table = table_instance
primary_location_value = properties_value2.get('primaryLocation', None)
if primary_location_value is not None:
primary_location_instance = primary_location_value
storage_account_instance.primary_location = primary_location_instance
status_of_primary_value = properties_value2.get('statusOfPrimary', None)
if status_of_primary_value is not None:
status_of_primary_instance = status_of_primary_value
storage_account_instance.status_of_primary = status_of_primary_instance
last_geo_failover_time_value = properties_value2.get('lastGeoFailoverTime', None)
if last_geo_failover_time_value is not None:
last_geo_failover_time_instance = last_geo_failover_time_value
storage_account_instance.last_geo_failover_time = last_geo_failover_time_instance
secondary_location_value = properties_value2.get('secondaryLocation', None)
if secondary_location_value is not None:
secondary_location_instance = secondary_location_value
storage_account_instance.secondary_location = secondary_location_instance
status_of_secondary_value = properties_value2.get('statusOfSecondary', None)
if status_of_secondary_value is not None:
status_of_secondary_instance = status_of_secondary_value
storage_account_instance.status_of_secondary = status_of_secondary_instance
creation_time_value = properties_value2.get('creationTime', None)
if creation_time_value is not None:
creation_time_instance = creation_time_value
storage_account_instance.creation_time = creation_time_instance
custom_domain_value2 = properties_value2.get('customDomain', None)
if custom_domain_value2 is not None:
custom_domain_instance = CustomDomain()
storage_account_instance.custom_domain = custom_domain_instance
name_value2 = custom_domain_value2.get('name', None)
if name_value2 is not None:
name_instance2 = name_value2
custom_domain_instance.name = name_instance2
use_sub_domain_value = custom_domain_value2.get('useSubDomain', None)
if use_sub_domain_value is not None:
use_sub_domain_instance = use_sub_domain_value
custom_domain_instance.use_sub_domain = use_sub_domain_instance
secondary_endpoints_value = properties_value2.get('secondaryEndpoints', None)
if secondary_endpoints_value is not None:
secondary_endpoints_instance = Endpoints()
storage_account_instance.secondary_endpoints = secondary_endpoints_instance
blob_value2 = secondary_endpoints_value.get('blob', None)
if blob_value2 is not None:
blob_instance2 = blob_value2
secondary_endpoints_instance.blob = blob_instance2
queue_value2 = secondary_endpoints_value.get('queue', None)
if queue_value2 is not None:
queue_instance2 = queue_value2
secondary_endpoints_instance.queue = queue_instance2
table_value2 = secondary_endpoints_value.get('table', None)
if table_value2 is not None:
table_instance2 = table_value2
secondary_endpoints_instance.table = table_instance2
result.status_code = status_code
result.request_id = response.headers.get('x-ms-request-id')
return result
d2f56951f340d9aa264e8c54df9fedc28d30df30 | 1,832 | py | Python | src/nucleotide/component/linux/gcc/atom/rtl.py | dmilos/nucleotide | Apache-2.0
#!/usr/bin/env python2
# Copyright 2015 Dejan D. M. Milosavljevic
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
import nucleotide
import nucleotide.component
import nucleotide.component.function
def _linux_RTL_LINKFLAGS( P_data ):
I_flag = ''
#if( 'dynamic' == P_data['type'] ):
# I_flag += 'D'
if( 'static' == P_data['type'] ):
I_flag += '-static'
return [ I_flag ]
atom_linux_RTL = {
'platform' : {
'host' : 'Linux',
'guest' : 'Linux'
},
'cc' : {
'vendor': 'FSF',
'name' : 'gcc',
'version': 'X'
},
'config' : {
'LINKFLAGS' : _linux_RTL_LINKFLAGS
},
'name' :'RTL',
'class': [ 'RTL', 'linux:RTL' ]
}
class RTL:
def __init__(self):
pass
@staticmethod
def extend( P_option ):
        nucleotide.component.function.extend( P_option, 'A:linux:RTL', atom_linux_RTL )
        # Relax the host, then the guest platform to 'X' (any) and register
        # the progressively more general variants of the same atom.
        atom_linux_RTL['platform']['host'] = 'X'
        nucleotide.component.function.extend( P_option, 'x:linux:RTL', atom_linux_RTL )
        atom_linux_RTL['platform']['guest'] = 'X'
        nucleotide.component.function.extend( P_option, 'y:linux:RTL', atom_linux_RTL )
@staticmethod
def check():
pass
d2f5d91da9ad5c16c7e8d867f33c570f4ad80d87 | 1,127 | py | Python | notebooks/denerator_tests/actions/config.py | Collen-Roller/Rasa-Denerator | MIT
import os
policy_model_dir = os.environ.get("POLICY_MODEL_DIR", "models/dialogue/")
rasa_nlu_config = os.environ.get("RASA_NLU_CONFIG", "nlu_config.yml")
account_sid = os.environ.get("ACCOUNT_SID", "")
auth_token = os.environ.get("AUTH_TOKEN", "")
twilio_number = os.environ.get("TWILIO_NUMBER", "")
platform_api = os.environ.get("RASA_API_ENDPOINT_URL", "")
self_port = int(os.environ.get("SELF_PORT", "5001"))
core_model_dir = os.environ.get("CORE_MODEL_DIR", "models/dialogue/")
remote_core_endpoint = os.environ.get("RASA_REMOTE_CORE_ENDPOINT_URL", "")
rasa_core_token = os.environ.get("RASA_CORE_TOKEN", "")
mailchimp_api_key = os.environ.get("MAILCHIMP_API_KEY", "")
mailchimp_list = os.environ.get("MAILCHIMP_LIST", "")
gdrive_credentials = os.environ.get("GDRIVE_CREDENTIALS", "")
access_token = os.environ.get("TELEGRAM_TOKEN", "")
verify = os.environ.get("TELEGRAM_VERIFY", "rasas_bot")
webhook_url = os.environ.get("WEBHOOK_URL", "https://website-demo.rasa.com/webhook")
rasa_platform_token = os.environ.get("RASA_PLATFORM_TOKEN", "")
rasa_nlg_endpoint = os.environ.get("RASA_NLG_ENDPOINT_URL", "")
d2f65b3512d928c10cc32ae1efdfb3cff693d569 | 876 | py | Python | python/moderation_text_token_demo.py | huaweicloud/huaweicloud-sdk-moderation | Apache-2.0
# -*- coding:utf-8 -*-
from moderation_sdk.gettoken import get_token
from moderation_sdk.moderation_text import moderation_text
from moderation_sdk.utils import init_global_env
if __name__ == '__main__':
    # Services currently support North China-Beijing (cn-north-4), China East-Shanghai1 (cn-east-3), CN-Hong Kong (ap-southeast-1), and AP-Singapore (ap-southeast-3)
init_global_env('cn-north-4')
#
    # access the enhanced text moderation API, posting data with a token
#
user_name = '******'
password = '******'
    account_name = '******'  # commonly the same as user_name
token = get_token(user_name, password, account_name)
    # Call the text moderation interface with sample text that deliberately triggers several categories
result = moderation_text(token, '666666luo聊请+110亚砷酸钾六位qq,fuck666666666666666', 'content',
['ad', 'politics', 'porn', 'abuse', 'contraband', 'flood'])
print(result)
d2f6c77eeb49683e8ab27570e5b6c4f101091a5b | 2,195 | py | Python | tests/system/action/test_general.py | FinnStutzenstein/openslides-backend | MIT
from .base import BaseActionTestCase
class GeneralActionWSGITester(BaseActionTestCase):
"""
Tests the action WSGI application in general.
"""
def test_request_wrong_method(self) -> None:
response = self.client.get("/")
self.assert_status_code(response, 405)
def test_request_wrong_media_type(self) -> None:
response = self.client.post("/")
self.assert_status_code(response, 400)
self.assertIn("Wrong media type.", response.json["message"])
def test_request_missing_body(self) -> None:
response = self.client.post("/", content_type="application/json")
self.assert_status_code(response, 400)
self.assertIn("Failed to decode JSON object", response.json["message"])
def test_request_fuzzy_body(self) -> None:
response = self.client.post(
"/",
json={"fuzzy_key_Eeng7pha3a": "fuzzy_value_eez3Ko6quu"},
)
self.assert_status_code(response, 400)
self.assertIn("data must be array", response.json["message"])
def test_request_fuzzy_body_2(self) -> None:
response = self.client.post(
"/",
json=[{"fuzzy_key_Voh8in7aec": "fuzzy_value_phae3iew4W"}],
)
self.assert_status_code(response, 400)
self.assertIn(
"data[0] must contain ['action', 'data'] properties",
response.json["message"],
)
def test_request_no_existing_action(self) -> None:
response = self.request("fuzzy_action_hamzaeNg4a", {})
self.assert_status_code(response, 400)
self.assertIn(
"Action fuzzy_action_hamzaeNg4a does not exist.",
response.json["message"],
)
def test_health_route(self) -> None:
response = self.client.get("/health")
self.assert_status_code(response, 200)
self.assertIn("healthinfo", response.json)
actions = response.json["healthinfo"]["actions"]
some_example_actions = (
"topic.create",
"motion.delete",
"user.update_temporary",
)
for action in some_example_actions:
self.assertIn(action, actions.keys())
# ========== FILE: src/thead/cls/amsart.py | repo: jakub-oprsal/thead | license: MIT ==========
from .common import *
HEADER = r'''\usepackage{tikz}
\definecolor{purple}{cmyk}{0.55,1,0,0.15}
\definecolor{darkblue}{cmyk}{1,0.58,0,0.21}
\usepackage[colorlinks,
linkcolor=black,
urlcolor=darkblue,
citecolor=purple]{hyperref}
\urlstyle{same}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{conjecture}[theorem]{Conjecture}
\newtheorem{claim}[theorem]{Claim}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\newtheorem{remark}[theorem]{Remark}
'''
def render_pdfmeta(authors, title):
author_list = authors_list(authors, short=True)
return f'''\\hypersetup{{%
pdftitle = {{{title}}},
pdfauthor = {{{author_list}}}}}\n'''
def render_author(author):
out = render_command('author', author['name'])
if 'affiliation' in author:
out += render_command('address',
", ".join(value for _, value in author['affiliation'].items()))
if 'email' in author:
out += render_command('email', author['email'])
return out
def render_funding(funds):
funding_note = '\n'.join(grant['note']
for grant in funds
if 'note' in grant)
return render_command('thanks', funding_note)
def render_acks(acks):
return f'\\subsection*{{Acknowledgements}}\n\n{acks.strip()}\n'
def header(data, cname=None, classoptions=None, **kwargs):
    # Default to None to avoid the shared-mutable-default pitfall: the list
    # is mutated below when 'noheader' is stripped.
    classoptions = list(classoptions) if classoptions is not None else []
    if cname is None:
        cname = 'amsart'
    if 'noheader' in classoptions:
        classoptions.remove('noheader')
include_header = False
else:
include_header = True
headers = [
render_command(
'documentclass',
cname,
','.join(classoptions)),
render_encs]
if include_header:
headers.append(HEADER)
if 'include' in kwargs:
headers += [include(file) for file in kwargs['include']]
shorttitle = data['shorttitle'] if 'shorttitle' in data else ''
headers += [
render_pdfmeta(data['authors'], data['title']),
begin_document,
render_command('title', data['title'], shorttitle),
'\n'.join(map(render_author, data['authors']))]
if 'funding' in data:
headers.append(render_funding(data['funding']))
if 'abstract' in data:
headers.append(render_abstract(data['abstract']))
if 'keywords' in data:
headers.append(render_keywords(data['keywords']))
headers += [maketitle, '']
return '\n'.join(headers)
def footer(data, bib):
footers = ['']
if 'acknowledgements' in data: # and not anonymous:
footers.append(render_acks(data['acknowledgements']))
if bib:
footers.append(render_bib('alphaurl', bib))
footers.append(end_document)
return '\n'.join(footers)
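# A usage sketch (an addition; the render_* helpers and constants come from
# .common via the star import above, and the dict keys mirror those read in
# header() and footer()):
# _data = {
#     'title': 'On Widgets',
#     'authors': [{'name': 'A. Author', 'email': '[email protected]'}],
#     'abstract': 'We study widgets.',
# }
# print(header(_data, classoptions=['11pt']))
# print(footer(_data, bib='refs.bib'))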
# ========== FILE: Workflow/packages/__init__.py | repo: MATS64664-2021-Group-2/Hydride-Connect-Group-2 | license: MIT ==========
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 15 11:31:06 2021
@author: a77510jm
"""
# ========== FILE: in_progress/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Aligned_Rotator.py | repo: fedelopezar/nrpytutorial | license: BSD-2-Clause ==========
#!/usr/bin/env python
# coding: utf-8
# <a id='top'></a>
#
#
# # $\texttt{GiRaFFEfood}$: Initial data for $\texttt{GiRaFFE}$
#
# ## Aligned Rotator
#
# $$\label{top}$$
#
# This module provides another initial data option for $\texttt{GiRaFFE}$. This is a flat-spacetime test with initial data $$A_{\phi} = \frac{\mu \varpi^2}{r^3},$$ where $\mu = B_p R_{\rm NS}^3 / 2$, $R_{\rm NS}$ is the neutron star radius, and $\varpi = \sqrt{x^2+y^2}$ is the cylindrical radius (matching the expressions implemented in the code below). We let $A_r = A_\theta = 0$.
#
# Additionally, the drift velocity $v^i = \Omega \textbf{e}_z \times \textbf{r} = [ijk] \Omega \textbf{e}^j_z x^k$, where $[ijk]$ is the Levi-Civita permutation symbol and $\textbf{e}^i_z = (0,0,1)$.
# <a id='preliminaries'></a>
#
# ### Steps 0-1: Preliminaries
# $$\label{preliminaries}$$
#
# \[Back to [top](#top)\]
#
# Here, we will import the NRPy+ core modules and set the reference metric to Cartesian, set commonly used NRPy+ parameters, and set C parameters that will be set from outside the code eventually generated from these expressions. We will also set up a parameter to determine what initial data is set up, although it won't do much yet.
# Step 0: Import the NRPy+ core modules and set the reference metric to Cartesian
import NRPy_param_funcs as par
import indexedexp as ixp
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import reference_metric as rfm
par.set_parval_from_str("reference_metric::CoordSystem","Cartesian")
rfm.reference_metric()
# Step 1a: Set commonly used parameters.
thismodule = __name__
B_p_aligned_rotator,R_NS_aligned_rotator = par.Cparameters("REAL",thismodule,
# B_p_aligned_rotator = the intensity of the magnetic field and
# R_NS_aligned_rotator= "Neutron star" radius
["B_p_aligned_rotator","R_NS_aligned_rotator"],
[1e-5, 1.0])
# The angular velocity of the "neutron star"
Omega_aligned_rotator = par.Cparameters("REAL",thismodule,"Omega_aligned_rotator",1e3)
# <a id='step2'></a>
#
# ### Step 2: Set the vectors A in Spherical coordinates
# $$\label{step2}$$
#
# \[Back to [top](#top)\]
#
# We will first build the fundamental vector $A_i$ in spherical coordinates (see [Table 3](https://arxiv.org/pdf/1704.00599.pdf)). Note that we use reference_metric.py to set $r$ and $\theta$ in terms of Cartesian coordinates; this will save us a step later when we convert to Cartesian coordinates. So, we set
# \begin{align}
# A_{\phi} &= \frac{\mu \varpi^2}{r^3}, \\
# \end{align}
# with $\mu = B_p R_{\rm NS}^3 / 2$, where $R_{\rm NS}$ is the neutron star radius and $\varpi = \sqrt{x^2+y^2}$.
def GiRaFFEfood_NRPy_Aligned_Rotator():
r = rfm.xxSph[0]
varpi = sp.sqrt(rfm.xx_to_Cart[0]**2 + rfm.xx_to_Cart[1]**2)
mu = B_p_aligned_rotator * R_NS_aligned_rotator**3 / 2
ASphD = ixp.zerorank1()
ASphD[2] = mu * varpi**2 / (r**3) # The other components were already declared to be 0.
# <a id='step3'></a>
#
# ### Step 3: Use the Jacobian matrix to transform the vectors to Cartesian coordinates.
# $$\label{step3}$$
#
# \[Back to [top](#top)\]
#
# Now, we will use the coordinate transformation definitions provided by reference_metric.py to build the Jacobian
# $$
# \frac{\partial x_{\rm Sph}^j}{\partial x_{\rm Cart}^i},
# $$
# where $x_{\rm Sph}^j \in \{r,\theta,\phi\}$ and $x_{\rm Cart}^i \in \{x,y,z\}$. We would normally compute its inverse, but since none of the quantities we need to transform have upper indices, it is not necessary. Then, since $A_i$ has one lower index, it will need to be multiplied by the Jacobian:
#
# $$
# A_i^{\rm Cart} = A_j^{\rm Sph} \frac{\partial x_{\rm Sph}^j}{\partial x_{\rm Cart}^i},
# $$
# Step 3: Use the Jacobian matrix to transform the vectors to Cartesian coordinates.
drrefmetric__dx_0UDmatrix = sp.Matrix([[sp.diff(rfm.xxSph[0],rfm.xx[0]), sp.diff(rfm.xxSph[0],rfm.xx[1]), sp.diff(rfm.xxSph[0],rfm.xx[2])],
[sp.diff(rfm.xxSph[1],rfm.xx[0]), sp.diff(rfm.xxSph[1],rfm.xx[1]), sp.diff(rfm.xxSph[1],rfm.xx[2])],
[sp.diff(rfm.xxSph[2],rfm.xx[0]), sp.diff(rfm.xxSph[2],rfm.xx[1]), sp.diff(rfm.xxSph[2],rfm.xx[2])]])
#dx__drrefmetric_0UDmatrix = drrefmetric__dx_0UDmatrix.inv() # We don't actually need this in this case.
global AD
AD = ixp.zerorank1(DIM=3)
for i in range(3):
for j in range(3):
            # This is a sum over j, so += is required; only the j=2 term is
            # nonzero here because A_r = A_theta = 0.
            AD[i] += drrefmetric__dx_0UDmatrix[(j,i)]*ASphD[j]
# <a id='step4'></a>
#
# ### Step 4: Calculate $v^i$
# $$\label{step4}$$
#
# \[Back to [top](#top)\]
#
# Here, we will calculate the drift velocity $v^i = \Omega \textbf{e}_z \times \textbf{r} = [ijk] \Omega \textbf{e}^j_z x^k$, where $[ijk]$ is the Levi-Civita permutation symbol and $\textbf{e}^i_z = (0,0,1)$. Conveniently, in flat space, the drift velocity reduces to the Valencia velocity because $\alpha = 1$ and $\beta^i = 0$.
# Step 4: Calculate v^i
LeviCivitaSymbolDDD = ixp.LeviCivitaSymbol_dim3_rank3()
import Min_Max_and_Piecewise_Expressions as noif
unit_zU = ixp.zerorank1()
unit_zU[2] = sp.sympify(1)
global ValenciavU
ValenciavU = ixp.zerorank1()
for i in range(3):
for j in range(3):
for k in range(3):
ValenciavU[i] += noif.coord_leq_bound(r,R_NS_aligned_rotator)*LeviCivitaSymbolDDD[i][j][k] * Omega_aligned_rotator * unit_zU[j] * rfm.xx[k]
# ### NRPy+ Module Code Validation
#
# \[Back to [top](#top)\]
#
# Here, as a code validation check, we verify agreement in the SymPy expressions for the $\texttt{GiRaFFE}$ Aligned Rotator initial data equations we intend to use between
# 1. this tutorial and
# 2. the NRPy+ [GiRaFFEfood_NRPy_Aligned_Rotator.py](../edit/GiRaFFEfood_NRPy/GiRaFFEfood_NRPy_Aligned_Rotator.py) module.
#
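# A minimal validation sketch (the notebook's validation cell is not part of
# this module): one would import the trusted NRPy+ module under another name,
# run both, and check that the SymPy expressions agree term by term.
# import GiRaFFEfood_NRPy.GiRaFFEfood_NRPy_Aligned_Rotator as gf  # trusted module (import path assumed)
# GiRaFFEfood_NRPy_Aligned_Rotator()      # fills the globals AD, ValenciavU here
# gf.GiRaFFEfood_NRPy_Aligned_Rotator()   # fills gf.AD, gf.ValenciavU
# for i in range(3):
#     assert sp.simplify(AD[i] - gf.AD[i]) == 0
#     assert sp.simplify(ValenciavU[i] - gf.ValenciavU[i]) == 0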
# ========== FILE: deploys/call_httpx.py | repo: vic9527/ViClassifier | license: MIT ==========
"""
HTTPX: a Python HTTP library more powerful than requests, which can make your crawler noticeably faster:
https://mp.weixin.qq.com/s/jqGx-4t4ytDDnXxDkzbPqw
HTTPX basic tutorial:
https://zhuanlan.zhihu.com/p/103824900
"""
def interface(url, data):
import httpx
head = {"Content-Type": "application/json; charset=UTF-8"}
return httpx.request('POST', url, json=data, headers=head)
if __name__ == '__main__':
post_url = "http://127.0.0.1:8888"
post_data = {"image": 112, "name": 1}
response = interface(post_url, post_data)
    print('status_code: ', response.status_code)  # print the status code
    # print('url: ', response.url)  # print the request URL
    # print('headers: ', response.headers)  # print the response headers
    # print('cookies: ', response.cookies)  # print the cookies
    print('text: ', response.text)  # print the page source as text
    # print('content: ', response.content)  # print the body as raw bytes
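    # A minimal async sketch (an addition, not in the original snippet): the
    # main advantage of httpx over requests is its async client, which lets
    # many requests run concurrently. Reuses post_url/post_data from above.
    # import asyncio, httpx
    # async def interface_async(url, data):
    #     async with httpx.AsyncClient() as client:
    #         return await client.post(url, json=data)
    # response = asyncio.run(interface_async(post_url, post_data))
    # print('async status_code: ', response.status_code)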
# ========== FILE: src/rgt/THOR/THOR.py | repo: mguo123/pan_omics | license: MIT ==========
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
THOR detects differential peaks in multiple ChIP-seq profiles associated
with two distinct biological conditions.
Copyright (C) 2014-2016 Manuel Allhoff ([email protected])
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
@author: Manuel Allhoff
"""
# Python
from __future__ import print_function
import sys
# Internal
from .dpc_help import get_peaks, _fit_mean_var_distr, initialize, merge_output, handle_input
from .tracker import Tracker
from .postprocessing import _output_BED, _output_narrowPeak
from ..THOR.neg_bin_rep_hmm import NegBinRepHMM, get_init_parameters, _get_pvalue_distr
from ..THOR.RegionGiver import RegionGiver
from ..THOR.postprocessing import filter_by_pvalue_strand_lag
from .. import __version__
# External
TEST = False #enable to test THOR locally
def _write_info(tracker, report, **data):
"""Write information to tracker"""
tracker.write(text=data['func_para'][0], header="Parameters for both estimated quadr. function y=max(|a|*x^2 + x + |c|, 0) (a)")
tracker.write(text=data['func_para'][1], header="Parameters for both estimated quadr. function y=max(|a|*x^2 + x + |c|, 0) (c)")
#tracker.write(text=data['init_mu'], header="Inital parameter estimate for HMM's Neg. Bin. Emission distribution (mu)")
#tracker.write(text=data['init_alpha'], header="Inital parameter estimate for HMM's Neg. Bin. Emission distribution (alpha)")
#tracker.write(text=data['m'].mu, header="Final HMM's Neg. Bin. Emission distribution (mu)")
#tracker.write(text=data['m'].alpha, header="Final HMM's Neg. Bin. Emission distribution (alpha)")
#tracker.write(text=data['m']._get_transmat(), header="Transmission matrix")
if report:
tracker.make_html()
def train_HMM(region_giver, options, bamfiles, genome, chrom_sizes, dims, inputs, tracker):
"""Train HMM"""
while True:
train_regions = region_giver.get_training_regionset()
exp_data = initialize(name=options.name, dims=dims, genome_path=genome, regions=train_regions,
stepsize=options.stepsize, binsize=options.binsize, bamfiles=bamfiles,
exts=options.exts, inputs=inputs, exts_inputs=options.exts_inputs,
debug=options.debug, verbose=options.verbose, no_gc_content=options.no_gc_content,
factors_inputs=options.factors_inputs, chrom_sizes=chrom_sizes,
tracker=tracker, norm_regions=options.norm_regions,
scaling_factors_ip=options.scaling_factors_ip, save_wig=options.save_wig,
housekeeping_genes=options.housekeeping_genes, test=TEST, report=options.report,
chrom_sizes_dict=region_giver.get_chrom_dict(), end=True, counter=0, output_bw=False,
save_input=options.save_input, m_threshold=options.m_threshold,
a_threshold=options.a_threshold, rmdup=options.rmdup)
if exp_data.count_positive_signal() > len(train_regions.sequences[0]) * 0.00001:
tracker.write(text=" ".join(map(lambda x: str(x), exp_data.exts)), header="Extension size (rep1, rep2, input1, input2)")
            # Join into a string as on the previous line; a bare map object
            # would be written out as its repr rather than the values.
            tracker.write(text=" ".join(map(str, exp_data.scaling_factors_ip)), header="Scaling factors")
break
func, func_para = _fit_mean_var_distr(exp_data.overall_coverage, options.name, options.debug,
verbose=options.verbose, outputdir=options.outputdir,
report=options.report, poisson=options.poisson)
exp_data.compute_putative_region_index()
print('Compute HMM\'s training set', file=sys.stderr)
training_set, s0, s1, s2 = exp_data.get_training_set(TEST, exp_data, options.name, options.foldchange,
options.threshold, options.size_ts, 3)
init_alpha, init_mu = get_init_parameters(s0, s1, s2)
m = NegBinRepHMM(alpha=init_alpha, mu=init_mu, dim_cond_1=dims[0], dim_cond_2=dims[1], func=func)
training_set_obs = exp_data.get_observation(training_set)
print('Train HMM', file=sys.stderr)
m.fit([training_set_obs], options.hmm_free_para)
distr = _get_pvalue_distr(m.mu, m.alpha, tracker)
return m, exp_data, func_para, init_mu, init_alpha, distr
def run_HMM(region_giver, options, bamfiles, genome, chrom_sizes, dims, inputs, tracker, exp_data, m, distr):
"""Run trained HMM chromosome-wise on genomic signal and call differential peaks"""
output, pvalues, ratios, no_bw_files = [], [], [], []
print("Compute HMM's posterior probabilities and Viterbi path to call differential peaks", file=sys.stderr)
for i, r in enumerate(region_giver):
end = True if i == len(region_giver) - 1 else False
print("- taking into account %s" % r.sequences[0].chrom, file=sys.stderr)
exp_data = initialize(name=options.name, dims=dims, genome_path=genome, regions=r,
stepsize=options.stepsize, binsize=options.binsize,
bamfiles=bamfiles, exts=exp_data.exts, inputs=inputs,
exts_inputs=exp_data.exts_inputs, debug=options.debug,
verbose=False, no_gc_content=options.no_gc_content,
factors_inputs=exp_data.factors_inputs, chrom_sizes=chrom_sizes,
tracker=tracker, norm_regions=options.norm_regions,
scaling_factors_ip=exp_data.scaling_factors_ip, save_wig=options.save_wig,
housekeeping_genes=options.housekeeping_genes, test=TEST, report=False,
chrom_sizes_dict=region_giver.get_chrom_dict(), gc_content_cov=exp_data.gc_content_cov,
avg_gc_content=exp_data.avg_gc_content, gc_hist=exp_data.gc_hist,
end=end, counter=i, m_threshold=options.m_threshold, a_threshold=options.a_threshold,
rmdup=options.rmdup)
if exp_data.no_data:
continue
no_bw_files.append(i)
exp_data.compute_putative_region_index()
if exp_data.indices_of_interest is None:
continue
states = m.predict(exp_data.get_observation(exp_data.indices_of_interest))
inst_ratios, inst_pvalues, inst_output = get_peaks(name=options.name, states=states, DCS=exp_data,
distr=distr, merge=options.merge, exts=exp_data.exts,
pcutoff=options.pcutoff, debug=options.debug, p=options.par,
no_correction=options.no_correction,
merge_bin=options.merge_bin, deadzones=options.deadzones)
# if not inst_output:
output += inst_output
pvalues += inst_pvalues
ratios += inst_ratios
res_output, res_pvalues, res_filter_pass = filter_by_pvalue_strand_lag(ratios, options.pcutoff, pvalues, output,
options.no_correction, options.name,
options.singlestrand)
_output_BED(options.name, res_output, res_pvalues, res_filter_pass)
_output_narrowPeak(options.name, res_output, res_pvalues, res_filter_pass)
merge_output(bamfiles, dims, options, no_bw_files, chrom_sizes)
def main():
options, bamfiles, genome, chrom_sizes, dims, inputs = handle_input()
tracker = Tracker(options.name + '-setup.info', bamfiles, genome, chrom_sizes, dims, inputs, options, __version__)
region_giver = RegionGiver(chrom_sizes, options.regions)
m, exp_data, func_para, init_mu, init_alpha, distr = train_HMM(region_giver, options, bamfiles, genome,
chrom_sizes, dims, inputs, tracker)
run_HMM(region_giver, options, bamfiles, genome, chrom_sizes, dims, inputs, tracker, exp_data, m, distr)
_write_info(tracker, options.report, func_para=func_para, init_mu=init_mu, init_alpha=init_alpha, m=m)
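# Entry-point guard (a small addition; because this module uses relative
# imports, run it as `python -m rgt.THOR.THOR` rather than as a plain file):
if __name__ == '__main__':
    main()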
# ========== FILE: web/api/classroom.py | repo: bbougon/crm-pilates | license: MIT ==========
from http import HTTPStatus
from typing import Tuple
from uuid import UUID
from fastapi import status, APIRouter, Response, Depends, HTTPException
from command.command_handler import Status
from domain.classroom.classroom_creation_command_handler import ClassroomCreated
from domain.classroom.classroom_type import ClassroomSubject
from domain.commands import ClassroomCreationCommand, ClassroomPatchCommand
from domain.exceptions import DomainException, AggregateNotFoundException
from infrastructure.command_bus_provider import CommandBusProvider
from web.presentation.domain.detailed_classroom import DetailedClassroom
from web.presentation.service.classroom_service import get_detailed_classroom
from web.schema.classroom_response import ClassroomReadResponse, ClassroomCreatedResponse
from web.schema.classroom_schemas import ClassroomCreation, ClassroomPatch
router = APIRouter()
@router.post("/classrooms",
response_model=ClassroomCreatedResponse,
status_code=status.HTTP_201_CREATED,
responses={
201: {
"description": "Create a classroom",
"headers": {
"location": {
"description": "The absolute path URL location of the newly created classroom",
"schema": {"type": "URL"},
}
}
},
404: {
"description": "See body message details"
},
409: {
"description": "See body message details"
}
}
)
def create_classroom(classroom_creation: ClassroomCreation, response: Response,
command_bus_provider: CommandBusProvider = Depends(CommandBusProvider)):
try:
command = ClassroomCreationCommand(classroom_creation.name, classroom_creation.position,
classroom_creation.duration,
ClassroomSubject[classroom_creation.subject],
classroom_creation.start_date, classroom_creation.stop_date,
list(map(lambda attendee: attendee.id, classroom_creation.attendees)))
from command.response import Response
result: Tuple[Response, Status] = command_bus_provider.command_bus.send(command)
event: ClassroomCreated = result[0].event
response.headers["location"] = f"/classrooms/{event.root_id}"
return {
"name": event.name,
"id": event.root_id,
"position": event.position,
"subject": event.subject.value,
"schedule": {
"start": event.schedule.start,
"stop": event.schedule.stop
},
"duration": ClassroomReadResponse.to_duration(event.duration),
"attendees": list(map(lambda attendee: {"id": attendee["id"]}, event.attendees))
}
except AggregateNotFoundException as e:
raise HTTPException(status_code=HTTPStatus.NOT_FOUND,
detail=f"One of the attendees with id '{e.unknown_id}' has not been found")
except DomainException as e:
raise HTTPException(status_code=HTTPStatus.CONFLICT, detail=e.message)
@router.get("/classrooms/{id}",
response_model=ClassroomReadResponse,
responses={
404: {
"description": "Classroom has not been found"
}
}
)
def get_classroom(id: UUID):
try:
detailed_classroom: DetailedClassroom = get_detailed_classroom(id)
return {
"name": detailed_classroom.name,
"id": detailed_classroom.id,
"position": detailed_classroom.position,
"subject": detailed_classroom.subject.value,
"schedule": {
"start": detailed_classroom.start,
"stop": detailed_classroom.stop
},
"duration": {
"duration": detailed_classroom.duration.duration,
"time_unit": detailed_classroom.duration.time_unit
},
"attendees": detailed_classroom.attendees
}
except AggregateNotFoundException:
raise HTTPException(status_code=HTTPStatus.NOT_FOUND, detail=f"Classroom with id '{str(id)}' not found")
@router.patch("/classrooms/{id}",
status_code=status.HTTP_204_NO_CONTENT,
description="Add attendees to a classroom. This resource works as a patch, "
"you must provide all classroom attendees (i.e: you had Clara already added to the classroom,"
" if you want John to join, you must provide both Clara and John "
"otherwise Clara will be removed",
responses={
404: {
"description": "See body message details"
},
409: {
"description": "See body message details"
}
}
)
def update_classroom(id: UUID, classroom_patch: ClassroomPatch,
command_bus_provider: CommandBusProvider = Depends(CommandBusProvider)):
try:
command_bus_provider.command_bus.send(
ClassroomPatchCommand(id, list(map(lambda client: client.id, classroom_patch.attendees))))
except AggregateNotFoundException as e:
raise HTTPException(status_code=HTTPStatus.NOT_FOUND,
detail=f"One of the attendees with id '{e.unknown_id}' has not been found")
except DomainException as e:
raise HTTPException(status_code=HTTPStatus.CONFLICT, detail=e.message)
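# Example request sketch (payload shape assumed from the ClassroomPatch usage
# above, where each attendee carries an "id"):
#   PATCH /classrooms/{id}
#   {"attendees": [{"id": "<clara-uuid>"}, {"id": "<john-uuid>"}]}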
# ========== FILE: community_codebook/eda.py | repo: etstieber/ledatascifi-2022 | license: MIT ==========
###############################################################
#
# This function is... INSUFFICIENT. It was developed as an
# illustration of EDA lessons in the 2021 class. It's quick and
# works well.
#
# Want a higher grade version of me? Then try pandas-profiling:
# https://github.com/pandas-profiling/pandas-profiling
#
###############################################################
def insufficient_but_starting_eda(df,cat_vars_list=None):
'''
Parameters
----------
df : DATAFRAME
cat_vars_list : LIST, optional
A list of strings containing variable names in the dataframe
for variables where you want to see the number of unique values
and the 10 most common values. Likely used for categorical values.
Returns
-------
None. It simply prints.
Description
-------
This function will print a MINIMUM amount of info about a new dataframe.
You should ****look**** at all this output below and consider the data
exploration and cleaning questions from
https://ledatascifi.github.io/ledatascifi-2021/content/03/02e_eda_golden.html#member
Also LOOK at more of the data manually.
Then write up anything notable you observe.
TIP: put this function in your codebook to reuse easily.
PROTIP: Improve this function (better outputs, better formatting).
FEATURE REQUEST: optionally print the nunique and top 10 values under the describe matrix
FEATURE REQUEST: optionally print more stats (percentiles)
'''
print(df.head(), '\n---')
print(df.tail(), '\n---')
print(df.columns, '\n---')
print("The shape is: ",df.shape, '\n---')
print("Info:",df.info(), '\n---') # memory usage, name, dtype, and # of non-null obs (--> # of missing obs) per variable
print(df.describe(), '\n---') # summary stats, and you can customize the list!
    if cat_vars_list is not None:
for var in cat_vars_list:
print(var,"has",df[var].nunique(),"values and its top 10 most common are:")
print(df[var].value_counts().head(10), '\n---')
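# A minimal usage sketch (example data assumed, not from the original file):
# import pandas as pd
# df = pd.DataFrame({"firm": ["AAPL", "MSFT", "AAPL"], "ret": [0.01, -0.02, 0.03]})
# insufficient_but_starting_eda(df, cat_vars_list=["firm"])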
# ========== FILE: angr/codenode.py | repo: mariusmue/angr | license: BSD-2-Clause ==========
import logging
l = logging.getLogger("angr.codenode")
class CodeNode(object):
__slots__ = ['addr', 'size', '_graph', 'thumb']
def __init__(self, addr, size, graph=None, thumb=False):
self.addr = addr
self.size = size
self.thumb = thumb
self._graph = graph
def __len__(self):
return self.size
def __eq__(self, other):
if type(other) is Block: # pylint: disable=unidiomatic-typecheck
raise TypeError("You do not want to be comparing a CodeNode to a Block")
return type(self) is type(other) and \
self.addr == other.addr and \
self.size == other.size and \
self.is_hook == other.is_hook and \
self.thumb == other.thumb
def __ne__(self, other):
return not self == other
def __cmp__(self, other):
raise TypeError("Comparison with a code node")
def __hash__(self):
return hash((self.addr, self.size))
def successors(self):
if self._graph is None:
raise ValueError("Cannot calculate successors for graphless node")
return list(self._graph.successors(self))
def predecessors(self):
if self._graph is None:
raise ValueError("Cannot calculate predecessors for graphless node")
return list(self._graph.predecessors(self))
def __getstate__(self):
return (self.addr, self.size)
def __setstate__(self, dat):
self.__init__(*dat)
is_hook = None
class BlockNode(CodeNode):
__slots__ = ['bytestr']
is_hook = False
def __init__(self, addr, size, bytestr=None, **kwargs):
super(BlockNode, self).__init__(addr, size, **kwargs)
self.bytestr = bytestr
def __repr__(self):
return '<BlockNode at %#x (size %d)>' % (self.addr, self.size)
def __getstate__(self):
return (self.addr, self.size, self.bytestr, self.thumb)
def __setstate__(self, dat):
self.__init__(*dat[:-1], thumb=dat[-1])
class HookNode(CodeNode):
__slots__ = ['sim_procedure']
is_hook = True
def __init__(self, addr, size, sim_procedure, **kwargs):
super(HookNode, self).__init__(addr, size, **kwargs)
self.sim_procedure = sim_procedure
def __repr__(self):
return '<HookNode %r at %#x (size %s)>' % (self.sim_procedure, self.addr, self.size)
def __hash__(self):
return hash((self.addr, self.size, self.sim_procedure))
def __eq__(self, other):
return super(HookNode, self).__eq__(other) and \
self.sim_procedure == other.sim_procedure
def __getstate__(self):
return (self.addr, self.size, self.sim_procedure)
def __setstate__(self, dat):
self.__init__(*dat)
from .block import Block
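# A small usage sketch (an addition, not part of angr): nodes compare by value,
# so two BlockNode instances with the same address and size are interchangeable,
# while a HookNode at the same address is a distinct node.
# a = BlockNode(0x400000, 4)
# b = BlockNode(0x400000, 4)
# assert a == b and hash(a) == hash(b)
# assert a != HookNode(0x400000, 4, sim_procedure=None)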
# ========== FILE: 第12章/program/Requester/Launcher.py | repo: kingname/SourceCodeOfBook | license: MIT ==========
import os
scrapy_project_path = '/Users/kingname/book/chapter_12/DeploySpider'
os.chdir(scrapy_project_path)  # switch into the Scrapy project root so the deploy command runs there
os.system('scrapyd-deploy')
import json
import time
import requests
start_url = 'http://45.76.110.210:6800/schedule.json'
start_data = {'project': 'DeploySpider',
'spider': 'Example'}
end_url = 'http://45.76.110.210:6800/cancel.json'
end_data = {'project': 'DeploySpider'}
result = requests.post(start_url, data=start_data, auth=('kingname', 'genius')).text
result_dict = json.loads(result)
job_id = result_dict['jobid']
print(f'Started spider, jobid: {job_id}')
time.sleep(5)
end_data['job'] = job_id  # cancel.json needs the job id of the running spider
result = requests.post(end_url, data=end_data, auth=('kingname', 'genius')).text
print(result)
# ========== FILE: unittest_example/mathfunc.py | repo: RobinCPC/experiment_code | license: MIT ==========
"""
Simple math operating functions for unit test
"""
def add(a, b):
"""
    Add two parameters and return the result
:param a:
:param b:
:return:
"""
return a + b
def minus(a, b):
"""
subtraction
:param a:
:param b:
:return:
"""
return a - b
def multi(a, b):
"""
    multiplication
:param a:
:param b:
:return:
"""
return a * b
def divide(a, b):
"""
    integer (floor) division
:param a:
:param b:
:return:
"""
return a // b
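# A minimal companion test sketch (an addition, not in the original file; the
# module docstring says these helpers exist "for unit test", so this shows the
# intended use):
import unittest
class TestMathFunc(unittest.TestCase):
    def test_add(self):
        self.assertEqual(add(2, 3), 5)
    def test_minus(self):
        self.assertEqual(minus(5, 2), 3)
    def test_multi(self):
        self.assertEqual(multi(4, 3), 12)
    def test_divide_is_floor_division(self):
        self.assertEqual(divide(7, 2), 3)  # // floors the quotient
if __name__ == '__main__':
    unittest.main()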
# ========== FILE: test/test_parameters.py | repo: HubukiNinten/imgaug | license: MIT ==========
from __future__ import print_function, division, absolute_import
import itertools
import sys
# unittest.TestCase.subTest() was only added in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import skimage
import skimage.data
import skimage.morphology
import scipy
import scipy.special
import imgaug as ia
import imgaug.random as iarandom
from imgaug import parameters as iap
from imgaug.testutils import reseed
def _eps(arr):
if ia.is_np_array(arr) and arr.dtype.kind == "f":
return np.finfo(arr.dtype).eps
return 1e-4
class Test_handle_continuous_param(unittest.TestCase):
def test_value_range_is_none(self):
result = iap.handle_continuous_param(
1, "[test1]",
value_range=None, tuple_to_uniform=True, list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_range_is_tuple_of_nones(self):
result = iap.handle_continuous_param(
1, "[test1b]",
value_range=(None, None),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_param_is_stochastic_parameter(self):
result = iap.handle_continuous_param(
iap.Deterministic(1), "[test2]",
value_range=None, tuple_to_uniform=True, list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_range_is_tuple_of_integers(self):
result = iap.handle_continuous_param(
1, "[test3]",
value_range=(0, 10),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_param_is_outside_of_value_range(self):
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
1, "[test4]",
value_range=(2, 12),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test4]" in str(context.exception))
def test_param_is_inside_value_range_and_no_lower_bound(self):
# value within value range (without lower bound)
result = iap.handle_continuous_param(
1, "[test5]",
value_range=(None, 12),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_param_is_outside_of_value_range_and_no_lower_bound(self):
# value outside of value range (without lower bound)
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
1, "[test6]",
value_range=(None, 0),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test6]" in str(context.exception))
def test_param_is_inside_value_range_and_no_upper_bound(self):
# value within value range (without upper bound)
result = iap.handle_continuous_param(
1, "[test7]",
value_range=(-1, None),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_param_is_outside_of_value_range_and_no_upper_bound(self):
# value outside of value range (without upper bound)
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
1, "[test8]",
value_range=(2, None),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test8]" in str(context.exception))
def test_tuple_as_value_but_no_tuples_allowed(self):
# tuple as value, but no tuples allowed
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
(1, 2), "[test9]",
value_range=None,
tuple_to_uniform=False,
list_to_choice=True)
self.assertTrue("[test9]" in str(context.exception))
def test_tuple_as_value_and_tuples_allowed(self):
# tuple as value and tuple allowed
result = iap.handle_continuous_param(
(1, 2), "[test10]",
value_range=None,
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Uniform))
def test_tuple_as_value_and_tuples_allowed_and_inside_value_range(self):
# tuple as value and tuple allowed and tuple within value range
result = iap.handle_continuous_param(
(1, 2), "[test11]",
value_range=(0, 10),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Uniform))
def test_tuple_value_and_allowed_and_partially_outside_value_range(self):
# tuple as value and tuple allowed and tuple partially outside of
# value range
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
(1, 2), "[test12]",
value_range=(1.5, 13),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test12]" in str(context.exception))
def test_tuple_value_and_allowed_and_fully_outside_value_range(self):
# tuple as value and tuple allowed and tuple fully outside of value
# range
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
(1, 2), "[test13]",
value_range=(3, 13),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test13]" in str(context.exception))
def test_list_as_value_but_no_lists_allowed(self):
# list as value, but no list allowed
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
[1, 2, 3], "[test14]",
value_range=None,
tuple_to_uniform=True,
list_to_choice=False)
self.assertTrue("[test14]" in str(context.exception))
def test_list_as_value_and_lists_allowed(self):
# list as value and list allowed
result = iap.handle_continuous_param(
[1, 2, 3], "[test15]",
value_range=None,
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Choice))
def test_list_value_and_allowed_and_partially_outside_value_range(self):
# list as value and list allowed and list partially outside of value
# range
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
[1, 2], "[test16]",
value_range=(1.5, 13),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test16]" in str(context.exception))
def test_list_value_and_allowed_and_fully_outside_of_value_range(self):
# list as value and list allowed and list fully outside of value range
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
[1, 2], "[test17]",
value_range=(3, 13),
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue("[test17]" in str(context.exception))
def test_value_inside_value_range_and_value_range_given_as_callable(self):
# single value within value range given as callable
def _value_range(x):
return -1 < x < 1
result = iap.handle_continuous_param(
1, "[test18]",
value_range=_value_range,
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_bad_datatype_as_value_range(self):
# bad datatype for value range
with self.assertRaises(Exception) as context:
_ = iap.handle_continuous_param(
1, "[test19]",
value_range=False,
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(
"Unexpected input for value_range" in str(context.exception))
class Test_handle_discrete_param(unittest.TestCase):
def test_float_value_inside_value_range_but_no_floats_allowed(self):
# float value without value range when no float value is allowed
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
1.5, "[test0]",
value_range=None,
tuple_to_uniform=True,
list_to_choice=True, allow_floats=False)
self.assertTrue("[test0]" in str(context.exception))
def test_value_range_is_none(self):
# value without value range
result = iap.handle_discrete_param(
1, "[test1]", value_range=None, tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_range_is_tuple_of_nones(self):
# value without value range as (None, None)
result = iap.handle_discrete_param(
1, "[test1b]", value_range=(None, None), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_is_stochastic_parameter(self):
# stochastic parameter
result = iap.handle_discrete_param(
iap.Deterministic(1), "[test2]", value_range=None,
tuple_to_uniform=True, list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_inside_value_range(self):
# value within value range
result = iap.handle_discrete_param(
1, "[test3]", value_range=(0, 10), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_outside_value_range(self):
# value outside of value range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
1, "[test4]", value_range=(2, 12), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test4]" in str(context.exception))
def test_value_inside_value_range_no_lower_bound(self):
# value within value range (without lower bound)
result = iap.handle_discrete_param(
1, "[test5]", value_range=(None, 12), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_outside_value_range_no_lower_bound(self):
# value outside of value range (without lower bound)
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
1, "[test6]", value_range=(None, 0), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test6]" in str(context.exception))
def test_value_inside_value_range_no_upper_bound(self):
# value within value range (without upper bound)
result = iap.handle_discrete_param(
1, "[test7]", value_range=(-1, None), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_value_outside_value_range_no_upper_bound(self):
# value outside of value range (without upper bound)
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
1, "[test8]", value_range=(2, None), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test8]" in str(context.exception))
def test_value_is_tuple_but_no_tuples_allowed(self):
# tuple as value, but no tuples allowed
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
(1, 2), "[test9]", value_range=None, tuple_to_uniform=False,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test9]" in str(context.exception))
def test_value_is_tuple_and_tuples_allowed(self):
# tuple as value and tuple allowed
result = iap.handle_discrete_param(
(1, 2), "[test10]", value_range=None, tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.DiscreteUniform))
def test_value_tuple_and_allowed_and_inside_value_range(self):
# tuple as value and tuple allowed and tuple within value range
result = iap.handle_discrete_param(
(1, 2), "[test11]", value_range=(0, 10), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.DiscreteUniform))
def test_value_tuple_and_allowed_and_inside_vr_allow_floats_false(self):
# tuple as value and tuple allowed and tuple within value range with
# allow_floats=False
result = iap.handle_discrete_param(
(1, 2), "[test11b]", value_range=(0, 10),
tuple_to_uniform=True, list_to_choice=True, allow_floats=False)
self.assertTrue(isinstance(result, iap.DiscreteUniform))
def test_value_tuple_and_allowed_and_partially_outside_value_range(self):
# tuple as value and tuple allowed and tuple partially outside of
# value range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
(1, 3), "[test12]", value_range=(2, 13), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test12]" in str(context.exception))
def test_value_tuple_and_allowed_and_fully_outside_value_range(self):
# tuple as value and tuple allowed and tuple fully outside of value
# range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
(1, 2), "[test13]", value_range=(3, 13), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test13]" in str(context.exception))
def test_value_list_but_not_allowed(self):
# list as value, but no list allowed
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
[1, 2, 3], "[test14]", value_range=None, tuple_to_uniform=True,
list_to_choice=False, allow_floats=True)
self.assertTrue("[test14]" in str(context.exception))
def test_value_list_and_allowed(self):
# list as value and list allowed
result = iap.handle_discrete_param(
[1, 2, 3], "[test15]", value_range=None, tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue(isinstance(result, iap.Choice))
def test_value_list_and_allowed_and_partially_outside_value_range(self):
# list as value and list allowed and list partially outside of value range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
[1, 3], "[test16]", value_range=(2, 13), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test16]" in str(context.exception))
def test_value_list_and_allowed_and_fully_outside_value_range(self):
# list as value and list allowed and list fully outside of value range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
[1, 2], "[test17]", value_range=(3, 13), tuple_to_uniform=True,
list_to_choice=True, allow_floats=True)
self.assertTrue("[test17]" in str(context.exception))
def test_value_inside_value_range_given_as_callable(self):
# single value within value range given as callable
def _value_range(x):
return -1 < x < 1
result = iap.handle_discrete_param(
1, "[test18]",
value_range=_value_range,
tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(isinstance(result, iap.Deterministic))
def test_bad_datatype_as_value_range(self):
# bad datatype for value range
with self.assertRaises(Exception) as context:
_ = iap.handle_discrete_param(
1, "[test19]", value_range=False, tuple_to_uniform=True,
list_to_choice=True)
self.assertTrue(
"Unexpected input for value_range" in str(context.exception))
class Test_handle_categorical_string_param(unittest.TestCase):
def test_arg_is_all(self):
valid_values = ["class1", "class2"]
param = iap.handle_categorical_string_param(
ia.ALL, "foo", valid_values)
assert isinstance(param, iap.Choice)
assert param.a == valid_values
def test_arg_is_valid_str(self):
valid_values = ["class1", "class2"]
param = iap.handle_categorical_string_param(
"class1", "foo", valid_values)
assert isinstance(param, iap.Deterministic)
assert param.value == "class1"
def test_arg_is_invalid_str(self):
valid_values = ["class1", "class2"]
with self.assertRaises(AssertionError) as ctx:
_param = iap.handle_categorical_string_param(
"class3", "foo", valid_values)
expected = (
"Expected parameter 'foo' to be one of: class1, class2. "
"Got: class3.")
assert expected == str(ctx.exception)
def test_arg_is_valid_list(self):
valid_values = ["class1", "class2", "class3"]
param = iap.handle_categorical_string_param(
["class1", "class3"], "foo", valid_values)
assert isinstance(param, iap.Choice)
assert param.a == ["class1", "class3"]
def test_arg_is_list_with_invalid_types(self):
valid_values = ["class1", "class2", "class3"]
with self.assertRaises(AssertionError) as ctx:
_param = iap.handle_categorical_string_param(
["class1", False], "foo", valid_values)
expected = (
"Expected list provided for parameter 'foo' to only contain "
"strings, got types: str, bool."
)
assert expected in str(ctx.exception)
def test_arg_is_invalid_list(self):
valid_values = ["class1", "class2", "class3"]
with self.assertRaises(AssertionError) as ctx:
_param = iap.handle_categorical_string_param(
["class1", "class4"], "foo", valid_values)
expected = (
"Expected list provided for parameter 'foo' to only contain "
"the following allowed strings: class1, class2, class3. "
"Got strings: class1, class4."
)
assert expected in str(ctx.exception)
def test_arg_is_stochastic_param(self):
param = iap.Deterministic("class1")
param_out = iap.handle_categorical_string_param(
param, "foo", ["class1"])
assert param_out is param
def test_arg_is_invalid_datatype(self):
with self.assertRaises(Exception) as ctx:
_ = iap.handle_categorical_string_param(
False, "foo", ["class1"])
expected = "Expected parameter 'foo' to be imgaug.ALL"
assert expected in str(ctx.exception)
class Test_handle_probability_param(unittest.TestCase):
def test_bool_like_values(self):
for val in [True, False, 0, 1, 0.0, 1.0]:
with self.subTest(param=val):
p = iap.handle_probability_param(val, "[test1]")
assert isinstance(p, iap.Deterministic)
assert p.value == int(val)
def test_float_probabilities(self):
for val in [0.0001, 0.001, 0.01, 0.1, 0.9, 0.99, 0.999, 0.9999]:
with self.subTest(param=val):
p = iap.handle_probability_param(val, "[test2]")
assert isinstance(p, iap.Binomial)
assert isinstance(p.p, iap.Deterministic)
assert val-1e-8 < p.p.value < val+1e-8
def test_probability_is_stochastic_parameter(self):
det = iap.Deterministic(1)
p = iap.handle_probability_param(det, "[test3]")
assert p == det
def test_probability_has_bad_datatype(self):
with self.assertRaises(Exception) as context:
_p = iap.handle_probability_param("test", "[test4]")
self.assertTrue("Expected " in str(context.exception))
def test_probability_is_negative(self):
with self.assertRaises(AssertionError):
_p = iap.handle_probability_param(-0.01, "[test5]")
def test_probability_is_above_100_percent(self):
with self.assertRaises(AssertionError):
_p = iap.handle_probability_param(1.01, "[test6]")
class Test_force_np_float_dtype(unittest.TestCase):
def test_common_dtypes(self):
dtypes = [
("float16", "float16"),
("float32", "float32"),
("float64", "float64"),
("uint8", "float64"),
("int32", "float64")
]
for dtype_in, expected in dtypes:
with self.subTest(dtype_in=dtype_in):
arr = np.zeros((1,), dtype=dtype_in)
observed = iap.force_np_float_dtype(arr).dtype
assert observed.name == expected
class Test_both_np_float_if_one_is_float(unittest.TestCase):
def test_float16_float32(self):
a1 = np.zeros((1,), dtype=np.float16)
b1 = np.zeros((1,), dtype=np.float32)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.name == "float16"
assert b2.dtype.name == "float32"
def test_float16_int32(self):
a1 = np.zeros((1,), dtype=np.float16)
b1 = np.zeros((1,), dtype=np.int32)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.name == "float16"
assert b2.dtype.name == "float64"
def test_int32_float16(self):
a1 = np.zeros((1,), dtype=np.int32)
b1 = np.zeros((1,), dtype=np.float16)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.name == "float64"
assert b2.dtype.name == "float16"
def test_int32_uint8(self):
a1 = np.zeros((1,), dtype=np.int32)
b1 = np.zeros((1,), dtype=np.uint8)
a2, b2 = iap.both_np_float_if_one_is_float(a1, b1)
assert a2.dtype.name == "float64"
assert b2.dtype.name == "float64"
class Test_draw_distributions_grid(unittest.TestCase):
def setUp(self):
reseed()
def test_basic_functionality(self):
params = [mock.Mock(), mock.Mock()]
params[0].draw_distribution_graph.return_value = \
np.zeros((1, 1, 3), dtype=np.uint8)
params[1].draw_distribution_graph.return_value = \
np.zeros((1, 1, 3), dtype=np.uint8)
draw_grid_mock = mock.Mock()
draw_grid_mock.return_value = np.zeros((4, 3, 2), dtype=np.uint8)
with mock.patch('imgaug.imgaug.draw_grid', draw_grid_mock):
grid_observed = iap.draw_distributions_grid(
params, rows=2, cols=3, graph_sizes=(20, 21),
sample_sizes=[(1, 2), (3, 4)], titles=["A", "B"])
assert grid_observed.shape == (4, 3, 2)
assert params[0].draw_distribution_graph.call_count == 1
assert params[1].draw_distribution_graph.call_count == 1
assert params[0].draw_distribution_graph.call_args[1]["size"] == (1, 2)
assert params[0].draw_distribution_graph.call_args[1]["title"] == "A"
assert params[1].draw_distribution_graph.call_args[1]["size"] == (3, 4)
assert params[1].draw_distribution_graph.call_args[1]["title"] == "B"
assert draw_grid_mock.call_count == 1
assert draw_grid_mock.call_args[0][0][0].shape == (20, 21, 3)
assert draw_grid_mock.call_args[0][0][1].shape == (20, 21, 3)
assert draw_grid_mock.call_args[1]["rows"] == 2
assert draw_grid_mock.call_args[1]["cols"] == 3
class Test_draw_distributions_graph(unittest.TestCase):
def test_basic_functionality(self):
# this test is very rough as we get a not-very-well-defined image out
# of the function
param = iap.Uniform(0.0, 1.0)
graph_img = param.draw_distribution_graph(title=None, size=(10000,),
bins=100)
# at least 10% of the image should be white-ish (background)
nb_white = np.sum(graph_img[..., :] > [200, 200, 200])
nb_all = np.prod(graph_img.shape)
graph_img_title = param.draw_distribution_graph(title="test",
size=(10000,),
bins=100)
assert graph_img.ndim == 3
assert graph_img.shape[2] == 3
assert nb_white > 0.1 * nb_all
assert graph_img_title.ndim == 3
assert graph_img_title.shape[2] == 3
assert not np.array_equal(graph_img_title, graph_img)
class TestStochasticParameter(unittest.TestCase):
def setUp(self):
reseed()
def test_copy(self):
other_param = iap.Uniform(1.0, 10.0)
param = iap.Discretize(other_param)
other_param.a = [1.0]
param_copy = param.copy()
param.other_param.a[0] += 1
assert isinstance(param_copy, iap.Discretize)
assert isinstance(param_copy.other_param, iap.Uniform)
assert param_copy.other_param.a[0] == param.other_param.a[0]
def test_deepcopy(self):
other_param = iap.Uniform(1.0, 10.0)
param = iap.Discretize(other_param)
other_param.a = [1.0]
param_copy = param.deepcopy()
param.other_param.a[0] += 1
assert isinstance(param_copy, iap.Discretize)
assert isinstance(param_copy.other_param, iap.Uniform)
assert param_copy.other_param.a[0] != param.other_param.a[0]
class TestStochasticParameterOperators(unittest.TestCase):
def setUp(self):
reseed()
def test_multiply_stochasic_params(self):
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1 * param2
assert isinstance(param3, iap.Multiply)
assert param3.other_param == param1
assert param3.val == param2
def test_multiply_stochastic_param_with_integer(self):
param1 = iap.Normal(0, 1)
param3 = param1 * 2
assert isinstance(param3, iap.Multiply)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_multiply_integer_with_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = 2 * param1
assert isinstance(param3, iap.Multiply)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_multiply_string_with_stochastic_param_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" * param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_multiply_stochastic_param_with_string_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 * "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_divide_stochastic_params(self):
# Divide (__truediv__)
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1 / param2
assert isinstance(param3, iap.Divide)
assert param3.other_param == param1
assert param3.val == param2
def test_divide_stochastic_param_by_integer(self):
param1 = iap.Normal(0, 1)
param3 = param1 / 2
assert isinstance(param3, iap.Divide)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_divide_integer_by_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = 2 / param1
assert isinstance(param3, iap.Divide)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_divide_string_by_stochastic_param_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" / param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_divide_stochastic_param_by_string_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 / "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_div_stochastic_params(self):
# Divide (__div__)
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1.__div__(param2)
assert isinstance(param3, iap.Divide)
assert param3.other_param == param1
assert param3.val == param2
def test_div_stochastic_param_by_integer(self):
param1 = iap.Normal(0, 1)
param3 = param1.__div__(2)
assert isinstance(param3, iap.Divide)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_div_stochastic_param_by_string_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1.__div__("test")
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_rdiv_stochastic_param_by_integer(self):
# Divide (__rdiv__)
param1 = iap.Normal(0, 1)
param3 = param1.__rdiv__(2)
assert isinstance(param3, iap.Divide)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_rdiv_stochastic_param_by_string_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1.__rdiv__("test")
self.assertTrue("Invalid datatypes" in str(context.exception))
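    # Note: per the assertions below, floor division does not build a plain
    # Divide; it builds Discretize(Divide(...)), i.e. a true division whose
    # result is then discretized to integers.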
def test_floordiv_stochastic_params(self):
# Divide (__floordiv__)
param1_int = iap.DiscreteUniform(0, 10)
param2_int = iap.Choice([1, 2])
param3 = param1_int // param2_int
assert isinstance(param3, iap.Discretize)
assert isinstance(param3.other_param, iap.Divide)
assert param3.other_param.other_param == param1_int
assert param3.other_param.val == param2_int
def test_floordiv_symbol_stochastic_param_by_integer(self):
param1_int = iap.DiscreteUniform(0, 10)
param3 = param1_int // 2
assert isinstance(param3, iap.Discretize)
assert isinstance(param3.other_param, iap.Divide)
assert param3.other_param.other_param == param1_int
assert isinstance(param3.other_param.val, iap.Deterministic)
assert param3.other_param.val.value == 2
def test_floordiv_symbol_integer_by_stochastic_param(self):
param1_int = iap.DiscreteUniform(0, 10)
param3 = 2 // param1_int
assert isinstance(param3, iap.Discretize)
assert isinstance(param3.other_param, iap.Divide)
assert isinstance(param3.other_param.other_param, iap.Deterministic)
assert param3.other_param.other_param.value == 2
assert param3.other_param.val == param1_int
    def test_floordiv_symbol_string_by_stochastic_param_should_fail(self):
param1_int = iap.DiscreteUniform(0, 10)
with self.assertRaises(Exception) as context:
_ = "test" // param1_int
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_floordiv_symbol_stochastic_param_by_string_should_fail(self):
param1_int = iap.DiscreteUniform(0, 10)
with self.assertRaises(Exception) as context:
_ = param1_int // "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_add_stochastic_params(self):
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1 + param2
assert isinstance(param3, iap.Add)
assert param3.other_param == param1
assert param3.val == param2
def test_add_integer_to_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = param1 + 2
assert isinstance(param3, iap.Add)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_add_stochastic_param_to_integer(self):
param1 = iap.Normal(0, 1)
param3 = 2 + param1
assert isinstance(param3, iap.Add)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_add_stochastic_param_to_string(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" + param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_add_string_to_stochastic_param(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 + "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_subtract_stochastic_params(self):
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1 - param2
assert isinstance(param3, iap.Subtract)
assert param3.other_param == param1
assert param3.val == param2
def test_subtract_integer_from_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = param1 - 2
assert isinstance(param3, iap.Subtract)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_subtract_stochastic_param_from_integer(self):
param1 = iap.Normal(0, 1)
param3 = 2 - param1
assert isinstance(param3, iap.Subtract)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_subtract_stochastic_param_from_string_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" - param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_subtract_string_from_stochastic_param_should_fail(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 - "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_exponentiate_stochastic_params(self):
param1 = iap.Normal(0, 1)
param2 = iap.Uniform(-1.0, 1.0)
param3 = param1 ** param2
assert isinstance(param3, iap.Power)
assert param3.other_param == param1
assert param3.val == param2
def test_exponentiate_stochastic_param_by_integer(self):
param1 = iap.Normal(0, 1)
param3 = param1 ** 2
assert isinstance(param3, iap.Power)
assert param3.other_param == param1
assert isinstance(param3.val, iap.Deterministic)
assert param3.val.value == 2
def test_exponentiate_integer_by_stochastic_param(self):
param1 = iap.Normal(0, 1)
param3 = 2 ** param1
assert isinstance(param3, iap.Power)
assert isinstance(param3.other_param, iap.Deterministic)
assert param3.other_param.value == 2
assert param3.val == param1
def test_exponentiate_string_by_stochastic_param(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = "test" ** param1
self.assertTrue("Invalid datatypes" in str(context.exception))
def test_exponentiate_stochastic_param_by_string(self):
param1 = iap.Normal(0, 1)
with self.assertRaises(Exception) as context:
_ = param1 ** "test"
self.assertTrue("Invalid datatypes" in str(context.exception))
class TestBinomial(unittest.TestCase):
def setUp(self):
reseed()
def test___init___p_is_zero(self):
param = iap.Binomial(0)
assert (
param.__str__()
== param.__repr__()
== "Binomial(Deterministic(int 0))"
)
def test___init___p_is_one(self):
param = iap.Binomial(1.0)
assert (
param.__str__()
== param.__repr__()
== "Binomial(Deterministic(float 1.00000000))"
)
def test_p_is_zero(self):
param = iap.Binomial(0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 0
assert np.all(samples == 0)
def test_p_is_one(self):
param = iap.Binomial(1.0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 1
assert np.all(samples == 1)
def test_p_is_50_percent(self):
param = iap.Binomial(0.5)
sample = param.draw_sample()
samples = param.draw_samples((10000,))
unique, counts = np.unique(samples, return_counts=True)
assert sample.shape == tuple()
assert samples.shape == (10000,)
assert sample in [0, 1]
assert len(unique) == 2
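        # Tolerance check: counts of a fair coin over 10000 draws have a
        # standard deviation of sqrt(10000 * 0.5 * 0.5) = 50, so the
        # +/-500 window below corresponds to +/-10 standard deviations.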
for val, count in zip(unique, counts):
if val == 0:
assert 5000 - 500 < count < 5000 + 500
elif val == 1:
assert 5000 - 500 < count < 5000 + 500
else:
assert False
def test_p_is_list(self):
param = iap.Binomial(iap.Choice([0.25, 0.75]))
for _ in sm.xrange(10):
samples = param.draw_samples((1000,))
p = np.sum(samples) / samples.size
assert (
(0.25 - 0.05 < p < 0.25 + 0.05)
or (0.75 - 0.05 < p < 0.75 + 0.05)
)
def test_p_is_tuple(self):
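        # A tuple (a, b) for p is treated as a continuous range to sample
        # p from, so a fresh p should be picked on (almost) every
        # draw_samples() call; the loop below verifies that the observed
        # fraction of ones actually changes between calls.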
param = iap.Binomial((0.0, 1.0))
last_p = 0.5
diffs = []
for _ in sm.xrange(30):
samples = param.draw_samples((1000,))
p = np.sum(samples).astype(np.float32) / samples.size
diffs.append(abs(p - last_p))
last_p = p
nb_p_changed = sum([diff > 0.05 for diff in diffs])
assert nb_p_changed > 15
def test_samples_same_values_for_same_seeds(self):
param = iap.Binomial(0.5)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
class TestChoice(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Choice([0, 1, 2])
assert (
param.__str__()
== param.__repr__()
== "Choice(a=[0, 1, 2], replace=True, p=None)"
)
def test_value_is_list(self):
param = iap.Choice([0, 1, 2])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [0, 1, 2]
assert np.all(
np.logical_or(
np.logical_or(samples == 0, samples == 1),
samples == 2
)
)
def test_sampled_values_match_expected_counts(self):
param = iap.Choice([0, 1, 2])
samples = param.draw_samples((10000,))
expected = 10000/3
expected_tolerance = expected * 0.05
for v in [0, 1, 2]:
count = np.sum(samples == v)
assert (
expected - expected_tolerance
< count <
expected + expected_tolerance
)
def test_value_is_list_containing_negative_number(self):
param = iap.Choice([-1, 1])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 1]
assert np.all(np.logical_or(samples == -1, samples == 1))
def test_value_is_list_of_floats(self):
param = iap.Choice([-1.2, 1.7])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert (
(
-1.2 - _eps(sample)
< sample <
-1.2 + _eps(sample)
)
or
(
1.7 - _eps(sample)
< sample <
1.7 + _eps(sample)
)
)
assert np.all(
np.logical_or(
np.logical_and(
-1.2 - _eps(sample) < samples,
samples < -1.2 + _eps(sample)
),
np.logical_and(
1.7 - _eps(sample) < samples,
samples < 1.7 + _eps(sample)
)
)
)
def test_value_is_list_of_strings(self):
param = iap.Choice(["first", "second", "third"])
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in ["first", "second", "third"]
assert np.all(
np.logical_or(
np.logical_or(
samples == "first",
samples == "second"
),
samples == "third"
)
)
def test_sample_without_replacing(self):
param = iap.Choice([1+i for i in sm.xrange(100)], replace=False)
samples = param.draw_samples((50,))
seen = [0 for _ in sm.xrange(100)]
for sample in samples:
seen[sample-1] += 1
assert all([count in [0, 1] for count in seen])
def test_non_uniform_probabilities_over_elements(self):
param = iap.Choice([0, 1], p=[0.25, 0.75])
samples = param.draw_samples((10000,))
unique, counts = np.unique(samples, return_counts=True)
assert len(unique) == 2
for val, count in zip(unique, counts):
if val == 0:
assert 2500 - 500 < count < 2500 + 500
elif val == 1:
assert 7500 - 500 < count < 7500 + 500
else:
assert False
def test_list_contains_stochastic_parameter(self):
param = iap.Choice([iap.Choice([0, 1]), 2])
samples = param.draw_samples((10000,))
unique, counts = np.unique(samples, return_counts=True)
assert len(unique) == 3
for val, count in zip(unique, counts):
if val in [0, 1]:
assert 2500 - 500 < count < 2500 + 500
elif val == 2:
assert 5000 - 500 < count < 5000 + 500
else:
assert False
def test_samples_same_values_for_same_seeds(self):
param = iap.Choice([-1, 0, 1, 2, 3])
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
def test_value_is_bad_datatype(self):
with self.assertRaises(Exception) as context:
_ = iap.Choice(123)
self.assertTrue(
"Expected a to be an iterable" in str(context.exception))
def test_p_is_bad_datatype(self):
with self.assertRaises(Exception) as context:
_ = iap.Choice([1, 2], p=123)
self.assertTrue("Expected p to be" in str(context.exception))
def test_value_and_p_have_unequal_lengths(self):
with self.assertRaises(Exception) as context:
_ = iap.Choice([1, 2], p=[1])
self.assertTrue("Expected lengths of" in str(context.exception))
class TestDiscreteUniform(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.DiscreteUniform(0, 2)
assert (
param.__str__()
== param.__repr__()
== "DiscreteUniform(Deterministic(int 0), Deterministic(int 2))"
)
def test_bounds_are_ints(self):
param = iap.DiscreteUniform(0, 2)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [0, 1, 2]
assert np.all(
np.logical_or(
np.logical_or(samples == 0, samples == 1),
samples == 2
)
)
def test_samples_match_expected_counts(self):
param = iap.DiscreteUniform(0, 2)
samples = param.draw_samples((10000,))
expected = 10000/3
expected_tolerance = expected * 0.05
for v in [0, 1, 2]:
count = np.sum(samples == v)
assert (
expected - expected_tolerance
< count <
expected + expected_tolerance
)
def test_lower_bound_is_negative(self):
param = iap.DiscreteUniform(-1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 0, 1]
assert np.all(
np.logical_or(
np.logical_or(samples == -1, samples == 0),
samples == 1
)
)
def test_bounds_are_floats(self):
param = iap.DiscreteUniform(-1.2, 1.2)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 0, 1]
assert np.all(
np.logical_or(
np.logical_or(
samples == -1, samples == 0
),
samples == 1
)
)
def test_lower_and_upper_bound_have_wrong_order(self):
param = iap.DiscreteUniform(1, -1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [-1, 0, 1]
assert np.all(
np.logical_or(
np.logical_or(
samples == -1, samples == 0
),
samples == 1
)
)
def test_lower_and_upper_bound_are_the_same(self):
param = iap.DiscreteUniform(1, 1)
sample = param.draw_sample()
samples = param.draw_samples((100,))
assert sample == 1
assert np.all(samples == 1)
def test_samples_same_values_for_same_seeds(self):
        param = iap.DiscreteUniform(-1, 1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
class TestPoisson(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Poisson(1)
assert (
param.__str__()
== param.__repr__()
== "Poisson(Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.Poisson(1)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 <= sample
def test_via_comparison_to_np_poisson(self):
param = iap.Poisson(1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).poisson(
lam=1, size=(100, 1000))
assert samples.shape == (100, 1000)
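        # Compare empirical counts per value against a direct numpy draw;
        # the tolerance is 10% of the reference count, but at least 250,
        # so that rarely sampled values do not fail spuriously.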
for i in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
count_direct = int(np.sum(samples_direct == i))
count = np.sum(samples == i)
tolerance = max(count_direct * 0.1, 250)
assert count_direct - tolerance < count < count_direct + tolerance
def test_samples_same_values_for_same_seeds(self):
param = iap.Poisson(1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
class TestNormal(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Normal(0, 1)
assert (
param.__str__()
== param.__repr__()
== "Normal(loc=Deterministic(int 0), scale=Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.Normal(0, 1)
sample = param.draw_sample()
assert sample.shape == tuple()
def test_via_comparison_to_np_normal(self):
param = iap.Normal(0, 1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).normal(loc=0, scale=1,
size=(100, 1000))
samples = np.clip(samples, -1, 1)
samples_direct = np.clip(samples_direct, -1, 1)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(-1.0, 1.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(-1.0, 1.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
def test_loc_is_stochastic_parameter(self):
param = iap.Normal(iap.Choice([-100, 100]), 1)
seen = [0, 0]
for _ in sm.xrange(1000):
samples = param.draw_samples((100,))
exp = np.mean(samples)
if -100 - 10 < exp < -100 + 10:
seen[0] += 1
elif 100 - 10 < exp < 100 + 10:
seen[1] += 1
else:
assert False
assert 500 - 100 < seen[0] < 500 + 100
assert 500 - 100 < seen[1] < 500 + 100
def test_scale(self):
param1 = iap.Normal(0, 1)
param2 = iap.Normal(0, 100)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
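        # The sample std should roughly recover the scale parameter; with
        # 1000 draws the standard error of the std is about
        # 100/sqrt(2*1000) ~= 2.2, so +/-10 around 100 is generous.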
assert np.std(samples1) < np.std(samples2)
assert 100 - 10 < np.std(samples2) < 100 + 10
def test_samples_same_values_for_same_seeds(self):
param = iap.Normal(0, 1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestTruncatedNormal(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.TruncatedNormal(0, 1)
expected = (
"TruncatedNormal("
"loc=Deterministic(int 0), "
"scale=Deterministic(int 1), "
"low=Deterministic(float -inf), "
"high=Deterministic(float inf)"
")"
)
assert (
param.__str__()
== param.__repr__()
== expected
)
def test___init___custom_range(self):
param = iap.TruncatedNormal(0, 1, low=-100, high=50.0)
expected = (
"TruncatedNormal("
"loc=Deterministic(int 0), "
"scale=Deterministic(int 1), "
"low=Deterministic(int -100), "
"high=Deterministic(float 50.00000000)"
")"
)
assert (
param.__str__()
== param.__repr__()
== expected
)
def test_scale_is_zero(self):
param = iap.TruncatedNormal(0.5, 0, low=-10, high=10)
samples = param.draw_samples((100,))
assert np.allclose(samples, 0.5)
def test_scale(self):
param1 = iap.TruncatedNormal(0.0, 0.1, low=-100, high=100)
param2 = iap.TruncatedNormal(0.0, 5.0, low=-100, high=100)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
assert np.std(samples1) < np.std(samples2)
assert np.isclose(np.std(samples1), 0.1, rtol=0, atol=0.20)
assert np.isclose(np.std(samples2), 5.0, rtol=0, atol=0.40)
def test_loc_is_stochastic_parameter(self):
param = iap.TruncatedNormal(iap.Choice([-100, 100]), 0.01,
low=-1000, high=1000)
seen = [0, 0]
for _ in sm.xrange(200):
samples = param.draw_samples((5,))
observed = np.mean(samples)
dist1 = np.abs(-100 - observed)
dist2 = np.abs(100 - observed)
if dist1 < 1:
seen[0] += 1
elif dist2 < 1:
seen[1] += 1
else:
assert False
assert np.isclose(seen[0], 100, rtol=0, atol=20)
assert np.isclose(seen[1], 100, rtol=0, atol=20)
def test_samples_are_within_bounds(self):
param = iap.TruncatedNormal(0, 10.0, low=-5, high=7.5)
samples = param.draw_samples((1000,))
        # all samples are within bounds
assert np.all(samples >= -5.0 - 1e-4)
assert np.all(samples <= 7.5 + 1e-4)
# at least some samples close to bounds
assert np.any(samples <= -4.5)
assert np.any(samples >= 7.0)
# at least some samples close to loc
assert np.any(np.abs(samples) < 0.5)
def test_samples_same_values_for_same_seeds(self):
param = iap.TruncatedNormal(0, 1)
samples1 = param.draw_samples((10, 5), random_state=1234)
samples2 = param.draw_samples((10, 5), random_state=1234)
assert np.allclose(samples1, samples2)
def test_samples_different_values_for_different_seeds(self):
param = iap.TruncatedNormal(0, 1)
samples1 = param.draw_samples((10, 5), random_state=1234)
samples2 = param.draw_samples((10, 5), random_state=2345)
assert not np.allclose(samples1, samples2)
class TestLaplace(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Laplace(0, 1)
assert (
param.__str__()
== param.__repr__()
== "Laplace(loc=Deterministic(int 0), scale=Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.Laplace(0, 1)
sample = param.draw_sample()
assert sample.shape == tuple()
def test_via_comparison_to_np_laplace(self):
param = iap.Laplace(0, 1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).laplace(loc=0, scale=1,
size=(100, 1000))
assert samples.shape == (100, 1000)
samples = np.clip(samples, -1, 1)
samples_direct = np.clip(samples_direct, -1, 1)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(-1.0, 1.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(-1.0, 1.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
def test_loc_is_stochastic_parameter(self):
param = iap.Laplace(iap.Choice([-100, 100]), 1)
seen = [0, 0]
for _ in sm.xrange(1000):
samples = param.draw_samples((100,))
exp = np.mean(samples)
if -100 - 10 < exp < -100 + 10:
seen[0] += 1
elif 100 - 10 < exp < 100 + 10:
seen[1] += 1
else:
assert False
assert 500 - 100 < seen[0] < 500 + 100
assert 500 - 100 < seen[1] < 500 + 100
def test_scale(self):
param1 = iap.Laplace(0, 1)
param2 = iap.Laplace(0, 100)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
assert np.var(samples1) < np.var(samples2)
def test_scale_is_zero(self):
param1 = iap.Laplace(1, 0)
samples = param1.draw_samples((100,))
assert np.all(np.logical_and(
samples > 1 - _eps(samples),
samples < 1 + _eps(samples)
))
def test_samples_same_values_for_same_seeds(self):
param = iap.Laplace(0, 1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestChiSquare(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.ChiSquare(1)
assert (
param.__str__()
== param.__repr__()
== "ChiSquare(df=Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.ChiSquare(1)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 <= sample
def test_via_comparison_to_np_chisquare(self):
param = iap.ChiSquare(1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).chisquare(df=1,
size=(100, 1000))
assert samples.shape == (100, 1000)
assert np.all(0 <= samples)
samples = np.clip(samples, 0, 3)
samples_direct = np.clip(samples_direct, 0, 3)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 3.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(0, 3.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
def test_df_is_stochastic_parameter(self):
param = iap.ChiSquare(iap.Choice([1, 10]))
seen = [0, 0]
for _ in sm.xrange(1000):
samples = param.draw_samples((100,))
exp = np.mean(samples)
if 1 - 1.0 < exp < 1 + 1.0:
seen[0] += 1
elif 10 - 4.0 < exp < 10 + 4.0:
seen[1] += 1
else:
assert False
assert 500 - 100 < seen[0] < 500 + 100
assert 500 - 100 < seen[1] < 500 + 100
def test_larger_df_leads_to_more_variance(self):
param1 = iap.ChiSquare(1)
param2 = iap.ChiSquare(10)
samples1 = param1.draw_samples((1000,))
samples2 = param2.draw_samples((1000,))
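        # For a chi-square distribution Var[X] = 2*df, hence the expected
        # variances of 2*1 and 2*10 checked below.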
assert np.var(samples1) < np.var(samples2)
assert 2*1 - 1.0 < np.var(samples1) < 2*1 + 1.0
assert 2*10 - 5.0 < np.var(samples2) < 2*10 + 5.0
def test_samples_same_values_for_same_seeds(self):
param = iap.ChiSquare(1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestWeibull(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Weibull(1)
assert (
param.__str__()
== param.__repr__()
== "Weibull(a=Deterministic(int 1))"
)
def test_draw_sample(self):
param = iap.Weibull(1)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 <= sample
def test_via_comparison_to_np_weibull(self):
param = iap.Weibull(1)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).weibull(a=1,
size=(100, 1000))
assert samples.shape == (100, 1000)
assert np.all(0 <= samples)
samples = np.clip(samples, 0, 2)
samples_direct = np.clip(samples_direct, 0, 2)
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 2.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(0, 2.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
def test_argument_is_stochastic_parameter(self):
param = iap.Weibull(iap.Choice([1, 0.5]))
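        # The mean of Weibull(shape=a, scale=1) is gamma(1 + 1/a), which
        # the two expected values below compute for a=1 and a=0.5.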
expected_first = scipy.special.gamma(1 + 1/1)
expected_second = scipy.special.gamma(1 + 1/0.5)
seen = [0, 0]
for _ in sm.xrange(100):
samples = param.draw_samples((50000,))
observed = np.mean(samples)
matches_first = (
expected_first - 0.2 * expected_first
< observed <
expected_first + 0.2 * expected_first
)
matches_second = (
expected_second - 0.2 * expected_second
< observed <
expected_second + 0.2 * expected_second
)
if matches_first:
seen[0] += 1
elif matches_second:
seen[1] += 1
else:
assert False
assert 50 - 25 < seen[0] < 50 + 25
assert 50 - 25 < seen[1] < 50 + 25
def test_different_strengths(self):
param1 = iap.Weibull(1)
param2 = iap.Weibull(0.5)
samples1 = param1.draw_samples((10000,))
samples2 = param2.draw_samples((10000,))
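        # For Weibull(shape=a, scale=1):
        #     Var[X] = gamma(1 + 2/a) - gamma(1 + 1/a)^2,
        # which the two expected values below compute.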
expected_first = (
scipy.special.gamma(1 + 2/1)
- (scipy.special.gamma(1 + 1/1))**2
)
expected_second = (
scipy.special.gamma(1 + 2/0.5)
- (scipy.special.gamma(1 + 1/0.5))**2
)
assert np.var(samples1) < np.var(samples2)
assert (
expected_first - 0.2 * expected_first
< np.var(samples1) <
expected_first + 0.2 * expected_first
)
assert (
expected_second - 0.2 * expected_second
< np.var(samples2) <
expected_second + 0.2 * expected_second
)
def test_samples_same_values_for_same_seeds(self):
param = iap.Weibull(1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestUniform(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Uniform(0, 1.0)
assert (
param.__str__()
== param.__repr__()
== "Uniform(Deterministic(int 0), Deterministic(float 1.00000000))"
)
def test_draw_sample(self):
param = iap.Uniform(0, 1.0)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 - _eps(sample) < sample < 1.0 + _eps(sample)
def test_draw_samples(self):
param = iap.Uniform(0, 1.0)
samples = param.draw_samples((10, 5))
assert samples.shape == (10, 5)
assert np.all(
np.logical_and(
0 - _eps(samples) < samples,
samples < 1.0 + _eps(samples)
)
)
def test_via_density_histogram(self):
param = iap.Uniform(0, 1.0)
samples = param.draw_samples((10000,))
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0.0, 1.0),
density=False)
density_expected = 1.0/nb_bins
density_tolerance = 0.05
for nb_samples in hist:
density = nb_samples / samples.size
assert (
density_expected - density_tolerance
< density <
density_expected + density_tolerance
)
def test_negative_value(self):
param = iap.Uniform(-1.0, 1.0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
assert np.all(
np.logical_and(
-1.0 - _eps(samples) < samples,
samples < 1.0 + _eps(samples)
)
)
def test_wrong_argument_order(self):
param = iap.Uniform(1.0, -1.0)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
assert np.all(
np.logical_and(
-1.0 - _eps(samples) < samples,
samples < 1.0 + _eps(samples)
)
)
def test_arguments_are_integers(self):
param = iap.Uniform(-1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert -1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
assert np.all(
np.logical_and(
-1.0 - _eps(samples) < samples,
samples < 1.0 + _eps(samples)
)
)
def test_arguments_are_identical(self):
param = iap.Uniform(1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert 1.0 - _eps(sample) < sample < 1.0 + _eps(sample)
assert np.all(
np.logical_and(
1.0 - _eps(samples) < samples,
samples < 1.0 + _eps(samples)
)
)
def test_samples_same_values_for_same_seeds(self):
param = iap.Uniform(-1.0, 1.0)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestBeta(unittest.TestCase):
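    # Closed-form moments of Beta(alpha, beta), used as reference values:
    #     mean = alpha / (alpha + beta)
    #     var  = alpha * beta / ((alpha + beta)^2 * (alpha + beta + 1))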
@classmethod
def _mean(cls, alpha, beta):
return alpha / (alpha + beta)
@classmethod
def _var(cls, alpha, beta):
return (alpha * beta) / ((alpha + beta)**2 * (alpha + beta + 1))
def setUp(self):
reseed()
def test___init__(self):
param = iap.Beta(0.5, 0.5)
assert (
param.__str__()
== param.__repr__()
== "Beta("
"Deterministic(float 0.50000000), "
"Deterministic(float 0.50000000)"
")"
)
def test_draw_sample(self):
param = iap.Beta(0.5, 0.5)
sample = param.draw_sample()
assert sample.shape == tuple()
assert 0 - _eps(sample) < sample < 1.0 + _eps(sample)
def test_draw_samples(self):
param = iap.Beta(0.5, 0.5)
samples = param.draw_samples((100, 1000))
assert samples.shape == (100, 1000)
assert np.all(
np.logical_and(
0 - _eps(samples) <= samples,
samples <= 1.0 + _eps(samples)
)
)
def test_via_comparison_to_np_beta(self):
param = iap.Beta(0.5, 0.5)
samples = param.draw_samples((100, 1000))
samples_direct = iarandom.RNG(1234).beta(
a=0.5, b=0.5, size=(100, 1000))
nb_bins = 10
hist, _ = np.histogram(samples, bins=nb_bins, range=(0, 1.0),
density=False)
hist_direct, _ = np.histogram(samples_direct, bins=nb_bins,
range=(0, 1.0), density=False)
tolerance = 0.05
for nb_samples, nb_samples_direct in zip(hist, hist_direct):
density = nb_samples / samples.size
density_direct = nb_samples_direct / samples_direct.size
assert (
density_direct - tolerance
< density <
density_direct + tolerance
)
def test_argument_is_stochastic_parameter(self):
param = iap.Beta(iap.Choice([0.5, 2]), 0.5)
expected_first = self._mean(0.5, 0.5)
expected_second = self._mean(2, 0.5)
seen = [0, 0]
for _ in sm.xrange(100):
samples = param.draw_samples((10000,))
observed = np.mean(samples)
if expected_first - 0.05 < observed < expected_first + 0.05:
seen[0] += 1
elif expected_second - 0.05 < observed < expected_second + 0.05:
seen[1] += 1
else:
assert False
assert 50 - 25 < seen[0] < 50 + 25
assert 50 - 25 < seen[1] < 50 + 25
def test_compare_curves_of_different_arguments(self):
param1 = iap.Beta(2, 2)
param2 = iap.Beta(0.5, 0.5)
samples1 = param1.draw_samples((10000,))
samples2 = param2.draw_samples((10000,))
expected_first = self._var(2, 2)
expected_second = self._var(0.5, 0.5)
assert np.var(samples1) < np.var(samples2)
assert (
expected_first - 0.1 * expected_first
< np.var(samples1) <
expected_first + 0.1 * expected_first
)
assert (
expected_second - 0.1 * expected_second
< np.var(samples2) <
expected_second + 0.1 * expected_second
)
def test_samples_same_values_for_same_seeds(self):
param = iap.Beta(0.5, 0.5)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestDeterministic(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
pairs = [
(0, "Deterministic(int 0)"),
(1.0, "Deterministic(float 1.00000000)"),
("test", "Deterministic(test)")
]
for value, expected in pairs:
with self.subTest(value=value):
param = iap.Deterministic(value)
assert (
param.__str__()
== param.__repr__()
== expected
)
def test_samples_same_values_for_same_seeds(self):
values = [
-100, -54, -1, 0, 1, 54, 100,
            -100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0
]
for value in values:
with self.subTest(value=value):
param = iap.Deterministic(value)
rs1 = iarandom.RNG(123456)
rs2 = iarandom.RNG(123456)
samples1 = param.draw_samples(20, random_state=rs1)
samples2 = param.draw_samples(20, random_state=rs2)
assert np.array_equal(samples1, samples2)
def test_draw_sample_int(self):
values = [-100, -54, -1, 0, 1, 54, 100]
for value in values:
with self.subTest(value=value):
param = iap.Deterministic(value)
sample1 = param.draw_sample()
sample2 = param.draw_sample()
assert sample1.shape == tuple()
assert sample1 == sample2
def test_draw_sample_float(self):
        values = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for value in values:
with self.subTest(value=value):
param = iap.Deterministic(value)
sample1 = param.draw_sample()
sample2 = param.draw_sample()
assert sample1.shape == tuple()
assert np.isclose(
sample1, sample2, rtol=0, atol=_eps(sample1))
def test_draw_samples_int(self):
values = [-100, -54, -1, 0, 1, 54, 100]
shapes = [10, 10, (5, 3), (5, 3), (4, 5, 3), (4, 5, 3)]
for value, shape in itertools.product(values, shapes):
with self.subTest(value=value, shape=shape):
param = iap.Deterministic(value)
samples = param.draw_samples(shape)
shape_expected = (
shape
if isinstance(shape, tuple)
else tuple([shape]))
assert samples.shape == shape_expected
assert np.all(samples == value)
def test_draw_samples_float(self):
        values = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
shapes = [10, 10, (5, 3), (5, 3), (4, 5, 3), (4, 5, 3)]
for value, shape in itertools.product(values, shapes):
with self.subTest(value=value, shape=shape):
param = iap.Deterministic(value)
samples = param.draw_samples(shape)
shape_expected = (
shape
if isinstance(shape, tuple)
else tuple([shape]))
assert samples.shape == shape_expected
assert np.allclose(samples, value, rtol=0, atol=_eps(samples))
def test_argument_is_stochastic_parameter(self):
seen = [0, 0]
for _ in sm.xrange(200):
param = iap.Deterministic(iap.Choice([0, 1]))
seen[param.value] += 1
assert 100 - 50 < seen[0] < 100 + 50
assert 100 - 50 < seen[1] < 100 + 50
def test_argument_has_invalid_type(self):
with self.assertRaises(Exception) as context:
_ = iap.Deterministic([1, 2, 3])
self.assertTrue(
"Expected StochasticParameter object or number or string"
in str(context.exception))
class TestFromLowerResolution(unittest.TestCase):
def setUp(self):
reseed()
def test___init___size_percent(self):
param = iap.FromLowerResolution(other_param=iap.Deterministic(0),
size_percent=1, method="nearest")
assert (
param.__str__()
== param.__repr__()
== "FromLowerResolution("
"size_percent=Deterministic(int 1), "
"method=Deterministic(nearest), "
"other_param=Deterministic(int 0)"
")"
)
def test___init___size_px(self):
param = iap.FromLowerResolution(other_param=iap.Deterministic(0),
size_px=1, method="nearest")
assert (
param.__str__()
== param.__repr__()
== "FromLowerResolution("
"size_px=Deterministic(int 1), "
"method=Deterministic(nearest), "
"other_param=Deterministic(int 0)"
")"
)
def test_binomial_hwc(self):
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
samples = param.draw_samples((8, 8, 1))
uq = np.unique(samples)
assert samples.shape == (8, 8, 1)
assert len(uq) == 2
assert 0 in uq
assert 1 in uq
def test_binomial_nhwc(self):
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
samples_nhwc = param.draw_samples((1, 8, 8, 1))
uq = np.unique(samples_nhwc)
assert samples_nhwc.shape == (1, 8, 8, 1)
assert len(uq) == 2
assert 0 in uq
assert 1 in uq
def test_draw_samples_with_too_many_dimensions(self):
        # a 5D shape like (N, H, W, C, something) must cause an error
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
with self.assertRaises(Exception) as context:
_ = param.draw_samples((1, 8, 8, 1, 1))
self.assertTrue(
"FromLowerResolution can only generate samples of shape"
in str(context.exception)
)
def test_binomial_hw3(self):
# C=3
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=8)
samples = param.draw_samples((8, 8, 3))
uq = np.unique(samples)
assert samples.shape == (8, 8, 3)
assert len(uq) == 2
assert 0 in uq
assert 1 in uq
def test_different_size_px_arguments(self):
# different sizes in px
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=16)
seen_components = [0, 0]
seen_pixels = [0, 0]
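        # Intuition: a coarse 2x2 grid upscaled to 16x16 produces few,
        # large connected blobs, while a 16x16 grid produces many small
        # ones - so param1 should yield fewer components, with more
        # pixels per component, than param2.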
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_different_size_px_arguments_with_tuple(self):
# different sizes in px, one given as tuple (a, b)
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=(2, 16))
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(400):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_different_size_px_argument_with_stochastic_parameters(self):
# different sizes in px, given as StochasticParameter
param1 = iap.FromLowerResolution(iap.Binomial(0.5),
size_px=iap.Deterministic(1))
param2 = iap.FromLowerResolution(iap.Binomial(0.5),
size_px=iap.Choice([8, 16]))
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_size_px_has_invalid_datatype(self):
# bad datatype for size_px
with self.assertRaises(Exception) as context:
_ = iap.FromLowerResolution(iap.Binomial(0.5), size_px=False)
self.assertTrue("Expected " in str(context.exception))
def test_min_size(self):
# min_size
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_px=1,
min_size=16)
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_size_percent(self):
# different sizes in percent
param1 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=0.01)
param2 = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=0.8)
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_size_percent_as_stochastic_parameters(self):
# different sizes in percent, given as StochasticParameter
param1 = iap.FromLowerResolution(iap.Binomial(0.5),
size_percent=iap.Deterministic(0.01))
param2 = iap.FromLowerResolution(iap.Binomial(0.5),
size_percent=iap.Choice([0.4, 0.8]))
seen_components = [0, 0]
seen_pixels = [0, 0]
for _ in sm.xrange(100):
samples1 = param1.draw_samples((16, 16, 1))
samples2 = param2.draw_samples((16, 16, 1))
_, num1 = skimage.morphology.label(samples1, connectivity=1,
background=0, return_num=True)
_, num2 = skimage.morphology.label(samples2, connectivity=1,
background=0, return_num=True)
seen_components[0] += num1
seen_components[1] += num2
seen_pixels[0] += np.sum(samples1 == 1)
seen_pixels[1] += np.sum(samples2 == 1)
assert seen_components[0] < seen_components[1]
assert (
seen_pixels[0] / seen_components[0]
> seen_pixels[1] / seen_components[1]
)
def test_size_percent_has_invalid_datatype(self):
# bad datatype for size_percent
with self.assertRaises(Exception) as context:
_ = iap.FromLowerResolution(iap.Binomial(0.5), size_percent=False)
self.assertTrue("Expected " in str(context.exception))
def test_method(self):
# method given as StochasticParameter
param = iap.FromLowerResolution(
iap.Binomial(0.5), size_px=4,
method=iap.Choice(["nearest", "linear"]))
seen = [0, 0]
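        # "nearest" upsampling keeps the binary 0/1 values, while
        # "linear" interpolation creates intermediate values; counting
        # samples strictly between 0.05 and 0.95 therefore reveals which
        # method was sampled for a given call.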
for _ in sm.xrange(200):
samples = param.draw_samples((16, 16, 1))
nb_in_between = np.sum(
np.logical_and(0.05 < samples, samples < 0.95))
if nb_in_between == 0:
seen[0] += 1
else:
seen[1] += 1
assert 100 - 50 < seen[0] < 100 + 50
assert 100 - 50 < seen[1] < 100 + 50
def test_method_has_invalid_datatype(self):
# bad datatype for method
with self.assertRaises(Exception) as context:
_ = iap.FromLowerResolution(iap.Binomial(0.5), size_px=4,
method=False)
self.assertTrue("Expected " in str(context.exception))
def test_samples_same_values_for_same_seeds(self):
# multiple calls with same random_state
param = iap.FromLowerResolution(iap.Binomial(0.5), size_px=2)
samples1 = param.draw_samples((10, 5, 1),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5, 1),
random_state=iarandom.RNG(1234))
assert np.allclose(samples1, samples2)
class TestClip(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Clip(iap.Deterministic(0), -1, 1)
assert (
param.__str__()
== param.__repr__()
== "Clip(Deterministic(int 0), -1.000000, 1.000000)"
)
def test_value_within_bounds(self):
param = iap.Clip(iap.Deterministic(0), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 0
assert np.all(samples == 0)
def test_value_exactly_at_upper_bound(self):
param = iap.Clip(iap.Deterministic(1), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 1
assert np.all(samples == 1)
def test_value_exactly_at_lower_bound(self):
param = iap.Clip(iap.Deterministic(-1), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == -1
assert np.all(samples == -1)
def test_value_is_within_bounds_and_float(self):
param = iap.Clip(iap.Deterministic(0.5), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert 0.5 - _eps(sample) < sample < 0.5 + _eps(sample)
assert np.all(
np.logical_and(
0.5 - _eps(sample) <= samples,
samples <= 0.5 + _eps(sample)
)
)
def test_value_is_above_upper_bound(self):
param = iap.Clip(iap.Deterministic(2), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == 1
assert np.all(samples == 1)
def test_value_is_below_lower_bound(self):
param = iap.Clip(iap.Deterministic(-2), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == -1
assert np.all(samples == -1)
    def test_value_is_sometimes_within_bounds_sometimes_beyond(self):
param = iap.Clip(iap.Choice([0, 2]), -1, 1)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [0, 1]
assert np.all(np.logical_or(samples == 0, samples == 1))
def test_samples_same_values_for_same_seeds(self):
param = iap.Clip(iap.Choice([0, 2]), -1, 1)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
def test_lower_bound_is_none(self):
param = iap.Clip(iap.Deterministic(0), None, 1)
sample = param.draw_sample()
assert sample == 0
assert (
param.__str__()
== param.__repr__()
== "Clip(Deterministic(int 0), None, 1.000000)"
)
def test_upper_bound_is_none(self):
param = iap.Clip(iap.Deterministic(0), 0, None)
sample = param.draw_sample()
assert sample == 0
assert (
param.__str__()
== param.__repr__()
== "Clip(Deterministic(int 0), 0.000000, None)"
)
def test_both_bounds_are_none(self):
param = iap.Clip(iap.Deterministic(0), None, None)
sample = param.draw_sample()
assert sample == 0
assert (
param.__str__()
== param.__repr__()
== "Clip(Deterministic(int 0), None, None)"
)
class TestDiscretize(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Discretize(iap.Deterministic(0))
assert (
param.__str__()
== param.__repr__()
== "Discretize(Deterministic(int 0))"
)
def test_applied_to_deterministic(self):
values = [-100.2, -54.3, -1.0, -1, -0.7, -0.00043,
0,
0.00043, 0.7, 1.0, 1, 54.3, 100.2]
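        # Discretize is expected to behave like rounding to the nearest
        # integer (np.round, i.e. half-to-even) followed by a cast to an
        # integer dtype, which is exactly what value_expected computes.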
for value in values:
with self.subTest(value=value):
param = iap.Discretize(iap.Deterministic(value))
value_expected = np.round(
np.float64([value])
).astype(np.int32)[0]
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample == value_expected
assert np.all(samples == value_expected)
# TODO why are these tests applied to DiscreteUniform instead of Uniform?
def test_applied_to_discrete_uniform(self):
param_orig = iap.DiscreteUniform(0, 1)
param = iap.Discretize(param_orig)
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
assert sample in [0, 1]
assert np.all(np.logical_or(samples == 0, samples == 1))
def test_applied_to_discrete_uniform_with_wider_range(self):
param_orig = iap.DiscreteUniform(0, 2)
param = iap.Discretize(param_orig)
samples1 = param_orig.draw_samples((10000,))
samples2 = param.draw_samples((10000,))
assert np.all(np.abs(samples1 - samples2) < 0.2*(10000/3))
def test_samples_same_values_for_same_seeds(self):
param_orig = iap.DiscreteUniform(0, 2)
param = iap.Discretize(param_orig)
samples1 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((10, 5),
random_state=iarandom.RNG(1234))
assert np.array_equal(samples1, samples2)
class TestMultiply(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Multiply(iap.Deterministic(0), 1, elementwise=False)
assert (
param.__str__()
== param.__repr__()
== "Multiply(Deterministic(int 0), Deterministic(int 1), False)"
)
def test_multiply_example_integer_values(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
with self.subTest(left=v1, right=v2):
p = iap.Multiply(iap.Deterministic(v1), v2)
samples = p.draw_samples((2, 3))
assert p.draw_sample() == v1 * v2
assert samples.dtype.kind == "i"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.int64) + v1 * v2
)
def test_multiply_example_integer_values_both_deterministic(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
with self.subTest(left=v1, right=v2):
p = iap.Multiply(iap.Deterministic(v1), iap.Deterministic(v2))
samples = p.draw_samples((2, 3))
assert p.draw_sample() == v1 * v2
assert samples.dtype.name == "int32"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.int32) + v1 * v2
)
def test_multiply_example_float_values(self):
        values_float = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
with self.subTest(left=v1, right=v2):
p = iap.Multiply(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert np.isclose(sample, v1 * v2, atol=1e-3, rtol=0)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float32) + v1 * v2
)
def test_multiply_example_float_values_both_deterministic(self):
        values_float = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
with self.subTest(left=v1, right=v2):
p = iap.Multiply(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert np.isclose(sample, v1 * v2, atol=1e-3, rtol=0)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float32) + v1 * v2
)
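    # With elementwise=False the val parameter (the right-hand side) is
    # sampled once and broadcast over all cells; with elementwise=True it
    # is sampled per cell. The base parameter is always sampled per cell.
    # The sorted-sample checks below compare the smallest and largest
    # sample to detect whether all outputs are identical.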
def test_multiply_by_stochastic_parameter(self):
param = iap.Multiply(iap.Deterministic(1.0),
(1.0, 2.0),
elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 * 1.0 - _eps(samples))
assert np.all(samples < 1.0 * 2.0 + _eps(samples))
assert (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_multiply_by_stochastic_parameter_elementwise(self):
param = iap.Multiply(iap.Deterministic(1.0),
(1.0, 2.0),
elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 * 1.0 - _eps(samples))
assert np.all(samples < 1.0 * 2.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_multiply_stochastic_parameter_by_fixed_value(self):
param = iap.Multiply(iap.Uniform(1.0, 2.0),
1.0,
elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 * 1.0 - _eps(samples))
assert np.all(samples < 2.0 * 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_multiply_stochastic_parameter_by_fixed_value_elementwise(self):
param = iap.Multiply(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 * 1.0 - _eps(samples))
assert np.all(samples < 2.0 * 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
class TestDivide(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Divide(iap.Deterministic(0), 1, elementwise=False)
assert (
param.__str__()
== param.__repr__()
== "Divide(Deterministic(int 0), Deterministic(int 1), False)"
)
def test_divide_integers(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
if v2 == 0:
v2 = 1
with self.subTest(left=v1, right=v2):
p = iap.Divide(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert sample == (v1 / v2)
assert samples.dtype.kind == "f"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.float64) + (v1 / v2)
)
def test_divide_integers_both_deterministic(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
if v2 == 0:
v2 = 1
with self.subTest(left=v1, right=v2):
p = iap.Divide(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert sample == (v1 / v2)
assert samples.dtype.kind == "f"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.float64) + (v1 / v2)
)
def test_divide_floats(self):
        values_float = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
if v2 == 0:
v2 = 1
with self.subTest(left=v1, right=v2):
p = iap.Divide(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert (
(v1 / v2) - _eps(sample)
<= sample <=
(v1 / v2) + _eps(sample)
)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float64) + (v1 / v2)
)
def test_divide_floats_both_deterministic(self):
        values_float = [-100.0, -54.3, -1.0, -0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
if v2 == 0:
v2 = 1
with self.subTest(left=v1, right=v2):
p = iap.Divide(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert (
(v1 / v2) - _eps(sample)
<= sample <=
(v1 / v2) + _eps(sample)
)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float64) + (v1 / v2)
)
def test_divide_by_stochastic_parameter(self):
param = iap.Divide(iap.Deterministic(1.0),
(1.0, 2.0),
elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > (1.0 / 2.0) - _eps(samples))
assert np.all(samples < (1.0 / 1.0) + _eps(samples))
assert (
samples_sorted[0] - _eps(samples)
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples)
)
def test_divide_by_stochastic_parameter_elementwise(self):
param = iap.Divide(iap.Deterministic(1.0),
(1.0, 2.0),
elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > (1.0 / 2.0) - _eps(samples))
assert np.all(samples < (1.0 / 1.0) + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples)
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples)
)
def test_divide_stochastic_parameter_by_float(self):
param = iap.Divide(iap.Uniform(1.0, 2.0),
1.0,
elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > (1.0 / 1.0) - _eps(samples))
assert np.all(samples < (2.0 / 1.0) + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples)
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples)
)
def test_divide_stochastic_parameter_by_float_elementwise(self):
param = iap.Divide(iap.Uniform(1.0, 2.0),
1.0,
elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > (1.0 / 1.0) - _eps(samples))
assert np.all(samples < (2.0 / 1.0) + _eps(samples))
        assert not (
            samples_sorted[0] - _eps(samples_sorted)
            < samples_sorted[-1] <
            samples_sorted[0] + _eps(samples_sorted)
        )
    def test_divide_by_stochastic_parameter_that_can_be_zero(self):
        # division by zero should automatically be converted to division by 1
param = iap.Divide(2,
iap.Choice([0, 2]),
elementwise=True)
samples = param.draw_samples((10, 20))
samples_unique = np.sort(np.unique(samples.flatten()))
assert samples_unique[0] == 1 and samples_unique[1] == 2
def test_divide_by_zero(self):
param = iap.Divide(iap.Deterministic(1), 0, elementwise=False)
sample = param.draw_sample()
assert sample == 1
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
param = iap.Add(iap.Deterministic(0), 1, elementwise=False)
assert (
param.__str__()
== param.__repr__()
== "Add(Deterministic(int 0), Deterministic(int 1), False)"
)
def test_add_integers(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
with self.subTest(left=v1, right=v2):
p = iap.Add(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert sample == v1 + v2
assert samples.dtype.kind == "i"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.int32) + v1 + v2
)
def test_add_integers_both_deterministic(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
with self.subTest(left=v1, right=v2):
p = iap.Add(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert sample == v1 + v2
assert samples.dtype.kind == "i"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.int32) + v1 + v2
)
def test_add_floats(self):
values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
with self.subTest(left=v1, right=v2):
p = iap.Add(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert np.isclose(sample, v1 + v2, atol=1e-3, rtol=0)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float32) + v1 + v2
)
def test_add_floats_both_deterministic(self):
values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
with self.subTest(left=v1, right=v2):
p = iap.Add(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert np.isclose(sample, v1 + v2, atol=1e-3, rtol=0)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float32) + v1 + v2
)
def test_add_stochastic_parameter(self):
param = iap.Add(iap.Deterministic(1.0), (1.0, 2.0), elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples >= 1.0 + 1.0 - _eps(samples))
assert np.all(samples <= 1.0 + 2.0 + _eps(samples))
assert (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1]
< samples_sorted[0] + _eps(samples_sorted[0])
)
def test_add_stochastic_parameter_elementwise(self):
param = iap.Add(iap.Deterministic(1.0), (1.0, 2.0), elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples >= 1.0 + 1.0 - _eps(samples))
assert np.all(samples <= 1.0 + 2.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1]
< samples_sorted[0] + _eps(samples_sorted[0])
)
def test_add_to_stochastic_parameter(self):
param = iap.Add(iap.Uniform(1.0, 2.0), 1.0, elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples >= 1.0 + 1.0 - _eps(samples))
assert np.all(samples <= 2.0 + 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1]
< samples_sorted[0] + _eps(samples_sorted[0])
)
def test_add_to_stochastic_parameter_elementwise(self):
param = iap.Add(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples >= 1.0 + 1.0 - _eps(samples))
assert np.all(samples <= 2.0 + 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1]
< samples_sorted[0] + _eps(samples_sorted[0])
)
class TestSubtract(unittest.TestCase):
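    """Tests for iap.Subtract (subtraction of a value/parameter from samples)."""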
def setUp(self):
reseed()
def test___init__(self):
param = iap.Subtract(iap.Deterministic(0), 1, elementwise=False)
assert (
param.__str__()
== param.__repr__()
== "Subtract(Deterministic(int 0), Deterministic(int 1), False)"
)
def test_subtract_integers(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
with self.subTest(left=v1, right=v2):
p = iap.Subtract(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert sample == v1 - v2
assert samples.dtype.kind == "i"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.int64) + v1 - v2
)
def test_subtract_integers_both_deterministic(self):
values_int = [-100, -54, -1, 0, 1, 54, 100]
for v1, v2 in itertools.product(values_int, values_int):
with self.subTest(left=v1, right=v2):
p = iap.Subtract(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert sample == v1 - v2
assert samples.dtype.kind == "i"
assert np.array_equal(
samples,
np.zeros((2, 3), dtype=np.int64) + v1 - v2
)
def test_subtract_floats(self):
values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
with self.subTest(left=v1, right=v2):
p = iap.Subtract(iap.Deterministic(v1), v2)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert v1 - v2 - _eps(sample) < sample < v1 - v2 + _eps(sample)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float64) + v1 - v2
)
def test_subtract_floats_both_deterministic(self):
values_float = [-100.0, -54.3, -1.0, 0.1, 0.0, 0.1, 1.0, 54.4, 100.0]
for v1, v2 in itertools.product(values_float, values_float):
with self.subTest(left=v1, right=v2):
p = iap.Subtract(iap.Deterministic(v1), iap.Deterministic(v2))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert v1 - v2 - _eps(sample) < sample < v1 - v2 + _eps(sample)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float64) + v1 - v2
)
def test_subtract_stochastic_parameter(self):
param = iap.Subtract(iap.Deterministic(1.0),
(1.0, 2.0),
elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 - 2.0 - _eps(samples))
assert np.all(samples < 1.0 - 1.0 + _eps(samples))
assert (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_subtract_stochastic_parameter_elementwise(self):
param = iap.Subtract(iap.Deterministic(1.0),
(1.0, 2.0),
elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 - 2.0 - _eps(samples))
assert np.all(samples < 1.0 - 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_subtract_from_stochastic_parameter(self):
param = iap.Subtract(iap.Uniform(1.0, 2.0), 1.0, elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 - 1.0 - _eps(samples))
assert np.all(samples < 2.0 - 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_subtract_from_stochastic_parameter_elementwise(self):
param = iap.Subtract(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 - 1.0 - _eps(samples))
assert np.all(samples < 2.0 - 1.0 + _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
class TestPower(unittest.TestCase):
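    """Tests for iap.Power (raises samples to a given exponent)."""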
def setUp(self):
reseed()
def test___init__(self):
param = iap.Power(iap.Deterministic(0), 1, elementwise=False)
assert (
param.__str__()
== param.__repr__()
== "Power(Deterministic(int 0), Deterministic(int 1), False)"
)
def test_pairs(self):
values = [
-100, -54, -1, 0, 1, 54, 100,
-100.0, -54.0, -1.0, 0.0, 1.0, 54.0, 100.0
]
exponents = [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]
for base, exponent in itertools.product(values, exponents):
if base < 0 and ia.is_single_float(exponent):
continue
if base == 0 and exponent < 0:
continue
with self.subTest(base=base, exponent=exponent):
p = iap.Power(iap.Deterministic(base), exponent)
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert (
base ** exponent - _eps(sample)
< sample <
base ** exponent + _eps(sample)
)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float64) + base ** exponent
)
def test_pairs_both_deterministic(self):
values = [
-100, -54, -1, 0, 1, 54, 100,
-100.0, -54.0, -1.0, 0.0, 1.0, 54.0, 100.0
]
exponents = [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]
for base, exponent in itertools.product(values, exponents):
if base < 0 and ia.is_single_float(exponent):
continue
if base == 0 and exponent < 0:
continue
with self.subTest(base=base, exponent=exponent):
p = iap.Power(iap.Deterministic(base), iap.Deterministic(exponent))
sample = p.draw_sample()
samples = p.draw_samples((2, 3))
assert (
base ** exponent - _eps(sample)
< sample <
base ** exponent + _eps(sample)
)
assert samples.dtype.kind == "f"
assert np.allclose(
samples,
np.zeros((2, 3), dtype=np.float64) + base ** exponent
)
def test_exponent_is_stochastic_parameter(self):
param = iap.Power(iap.Deterministic(1.5),
(1.0, 2.0),
elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.5 ** 1.0 - 2 * _eps(samples))
assert np.all(samples < 1.5 ** 2.0 + 2 * _eps(samples))
assert (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_exponent_is_stochastic_parameter_elementwise(self):
param = iap.Power(iap.Deterministic(1.5),
(1.0, 2.0),
elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.5 ** 1.0 - 2 * _eps(samples))
assert np.all(samples < 1.5 ** 2.0 + 2 * _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_value_is_uniform(self):
param = iap.Power(iap.Uniform(1.0, 2.0), 1.0, elementwise=False)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 ** 1.0 - 2 * _eps(samples))
assert np.all(samples < 2.0 ** 1.0 + 2 * _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
def test_value_is_uniform_elementwise(self):
param = iap.Power(iap.Uniform(1.0, 2.0), 1.0, elementwise=True)
samples = param.draw_samples((10, 20))
samples_sorted = np.sort(samples.flatten())
assert samples.shape == (10, 20)
assert np.all(samples > 1.0 ** 1.0 - 2 * _eps(samples))
assert np.all(samples < 2.0 ** 1.0 + 2 * _eps(samples))
assert not (
samples_sorted[0] - _eps(samples_sorted[0])
< samples_sorted[-1] <
samples_sorted[0] + _eps(samples_sorted[0])
)
class TestAbsolute(unittest.TestCase):
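    """Tests for iap.Absolute (absolute value of samples)."""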
def setUp(self):
reseed()
def test___init__(self):
param = iap.Absolute(iap.Deterministic(0))
assert (
param.__str__()
== param.__repr__()
== "Absolute(Deterministic(int 0))"
)
def test_fixed_values(self):
simple_values = [-1.5, -1, -1.0, -0.1, 0, 0.0, 0.1, 1, 1.0, 1.5]
for value in simple_values:
with self.subTest(value=value):
param = iap.Absolute(iap.Deterministic(value))
sample = param.draw_sample()
samples = param.draw_samples((10, 5))
assert sample.shape == tuple()
assert samples.shape == (10, 5)
if ia.is_single_float(value):
assert (
abs(value) - _eps(sample)
< sample <
abs(value) + _eps(sample)
)
assert np.all(abs(value) - _eps(samples) < samples)
assert np.all(samples < abs(value) + _eps(samples))
else:
assert sample == abs(value)
assert np.all(samples == abs(value))
def test_value_is_stochastic_parameter(self):
param = iap.Absolute(iap.Choice([-3, -1, 1, 3]))
sample = param.draw_sample()
samples = param.draw_samples((10, 10))
samples_uq = np.sort(np.unique(samples))
assert sample.shape == tuple()
assert sample in [3, 1]
assert samples.shape == (10, 10)
assert len(samples_uq) == 2
assert samples_uq[0] == 1 and samples_uq[1] == 3
class TestRandomSign(unittest.TestCase):
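    """Tests for iap.RandomSign (randomly flips the sign of samples)."""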
def setUp(self):
reseed()
def test___init__(self):
param = iap.RandomSign(iap.Deterministic(0), 0.5)
assert (
param.__str__()
== param.__repr__()
== "RandomSign(Deterministic(int 0), 0.50)"
)
def test_value_is_deterministic(self):
param = iap.RandomSign(iap.Deterministic(1))
samples = param.draw_samples((1000,))
n_positive = np.sum(samples == 1)
n_negative = np.sum(samples == -1)
assert samples.shape == (1000,)
assert n_positive + n_negative == 1000
assert 350 < n_positive < 750
def test_value_is_deterministic_many_samples(self):
param = iap.RandomSign(iap.Deterministic(1))
seen = [0, 0]
for _ in sm.xrange(1000):
sample = param.draw_sample()
assert sample.shape == tuple()
if sample == 1:
seen[1] += 1
else:
seen[0] += 1
n_negative, n_positive = seen
assert n_positive + n_negative == 1000
assert 350 < n_positive < 750
def test_value_is_stochastic_parameter(self):
param = iap.RandomSign(iap.Choice([1, 2]))
samples = param.draw_samples((4000,))
seen = [0, 0, 0, 0]
seen[0] = np.sum(samples == -2)
seen[1] = np.sum(samples == -1)
seen[2] = np.sum(samples == 1)
seen[3] = np.sum(samples == 2)
assert np.sum(seen) == 4000
assert all([700 < v < 1300 for v in seen])
def test_samples_same_values_for_same_seeds(self):
param = iap.RandomSign(iap.Choice([1, 2]))
samples1 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
assert samples1.shape == (100, 10)
assert samples2.shape == (100, 10)
assert np.array_equal(samples1, samples2)
assert np.sum(samples1 == -2) > 50
assert np.sum(samples1 == -1) > 50
assert np.sum(samples1 == 1) > 50
assert np.sum(samples1 == 2) > 50
class TestForceSign(unittest.TestCase):
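    """Tests for iap.ForceSign (forces samples to a sign via invert/reroll)."""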
def setUp(self):
reseed()
def test___init__(self):
param = iap.ForceSign(iap.Deterministic(0), True, "invert", 1)
assert (
param.__str__()
== param.__repr__()
== "ForceSign(Deterministic(int 0), True, invert, 1)"
)
def test_single_sample_positive(self):
param = iap.ForceSign(iap.Deterministic(1), positive=True,
mode="invert")
sample = param.draw_sample()
assert sample.shape == tuple()
assert sample == 1
def test_single_sample_negative(self):
param = iap.ForceSign(iap.Deterministic(1), positive=False,
mode="invert")
sample = param.draw_sample()
assert sample.shape == tuple()
assert sample == -1
def test_many_samples_positive(self):
param = iap.ForceSign(iap.Deterministic(1), positive=True,
mode="invert")
samples = param.draw_samples(100)
assert samples.shape == (100,)
assert np.all(samples == 1)
def test_many_samples_negative(self):
param = iap.ForceSign(iap.Deterministic(1), positive=False,
mode="invert")
samples = param.draw_samples(100)
assert samples.shape == (100,)
assert np.all(samples == -1)
def test_many_samples_negative_value_to_positive(self):
param = iap.ForceSign(iap.Deterministic(-1), positive=True,
mode="invert")
samples = param.draw_samples(100)
assert samples.shape == (100,)
assert np.all(samples == 1)
def test_many_samples_negative_value_to_negative(self):
param = iap.ForceSign(iap.Deterministic(-1), positive=False,
mode="invert")
samples = param.draw_samples(100)
assert samples.shape == (100,)
assert np.all(samples == -1)
def test_many_samples_stochastic_value_to_positive(self):
param = iap.ForceSign(iap.Choice([-2, 1]), positive=True,
mode="invert")
samples = param.draw_samples(1000)
n_twos = np.sum(samples == 2)
n_ones = np.sum(samples == 1)
assert samples.shape == (1000,)
assert n_twos + n_ones == 1000
assert 200 < n_twos < 700
assert 200 < n_ones < 700
def test_many_samples_stochastic_value_to_positive_reroll(self):
param = iap.ForceSign(iap.Choice([-2, 1]), positive=True,
mode="reroll")
samples = param.draw_samples(1000)
n_twos = np.sum(samples == 2)
n_ones = np.sum(samples == 1)
assert samples.shape == (1000,)
assert n_twos + n_ones == 1000
assert n_twos > 0
assert n_ones > 0
def test_many_samples_stochastic_value_to_positive_reroll_max_count(self):
param = iap.ForceSign(iap.Choice([-2, 1]), positive=True,
mode="reroll", reroll_count_max=100)
samples = param.draw_samples(100)
n_twos = np.sum(samples == 2)
n_ones = np.sum(samples == 1)
assert samples.shape == (100,)
assert n_twos + n_ones == 100
assert n_twos < 5
def test_samples_same_values_for_same_seeds(self):
param = iap.ForceSign(iap.Choice([-2, 1]),
positive=True,
mode="invert")
samples1 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
assert samples1.shape == (100, 10)
assert samples2.shape == (100, 10)
assert np.array_equal(samples1, samples2)
class TestPositive(unittest.TestCase):
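    """Tests for iap.Positive (forces samples to be positive)."""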
def setUp(self):
reseed()
def test_many_samples_reroll(self):
param = iap.Positive(iap.Deterministic(-1),
mode="reroll",
reroll_count_max=1)
samples = param.draw_samples((100,))
assert samples.shape == (100,)
assert np.all(samples == 1)
class TestNegative(unittest.TestCase):
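    """Tests for iap.Negative (forces samples to be negative)."""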
def setUp(self):
reseed()
def test_many_samples_reroll(self):
param = iap.Negative(iap.Deterministic(1),
mode="reroll",
reroll_count_max=1)
samples = param.draw_samples((100,))
assert samples.shape == (100,)
assert np.all(samples == -1)
class TestIterativeNoiseAggregator(unittest.TestCase):
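    """Tests for iap.IterativeNoiseAggregator (repeated draws aggregated via min/avg/max)."""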
def setUp(self):
reseed()
def test___init__(self):
param = iap.IterativeNoiseAggregator(iap.Deterministic(0),
iterations=(1, 3),
aggregation_method="max")
assert (
param.__str__()
== param.__repr__()
== (
"IterativeNoiseAggregator("
"Deterministic(int 0), "
"DiscreteUniform(Deterministic(int 1), "
"Deterministic(int 3)"
"), "
"Deterministic(max)"
")"
)
)
def test_value_is_deterministic_max_1_iter(self):
param = iap.IterativeNoiseAggregator(iap.Deterministic(1),
iterations=1,
aggregation_method="max")
sample = param.draw_sample()
samples = param.draw_samples((2, 4))
assert sample.shape == tuple()
assert samples.shape == (2, 4)
assert sample == 1
assert np.all(samples == 1)
def test_value_is_stochastic_avg_200_iter(self):
param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]),
iterations=200,
aggregation_method="avg")
sample = param.draw_sample()
samples = param.draw_samples((2, 4))
assert sample.shape == tuple()
assert samples.shape == (2, 4)
assert 25 - 10 < sample < 25 + 10
assert np.all(np.logical_and(25 - 10 < samples, samples < 25 + 10))
def test_value_is_stochastic_max_100_iter(self):
param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]),
iterations=100,
aggregation_method="max")
sample = param.draw_sample()
samples = param.draw_samples((2, 4))
assert sample.shape == tuple()
assert samples.shape == (2, 4)
assert sample == 50
assert np.all(samples == 50)
def test_value_is_stochastic_min_100_iter(self):
param = iap.IterativeNoiseAggregator(iap.Choice([0, 50]),
iterations=100,
aggregation_method="min")
sample = param.draw_sample()
samples = param.draw_samples((2, 4))
assert sample.shape == tuple()
assert samples.shape == (2, 4)
assert sample == 0
assert np.all(samples == 0)
def test_value_is_stochastic_avg_or_max_100_iter_evaluate_counts(self):
seen = [0, 0, 0, 0]
for _ in sm.xrange(100):
param = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]),
iterations=100,
aggregation_method=["avg", "max"])
samples = param.draw_samples((1, 1))
diff_0 = abs(0 - samples[0, 0])
diff_25 = abs(25 - samples[0, 0])
diff_50 = abs(50 - samples[0, 0])
if diff_25 < 10.0:
seen[0] += 1
elif diff_50 < _eps(samples):
seen[1] += 1
elif diff_0 < _eps(samples):
seen[2] += 1
else:
seen[3] += 1
assert seen[2] <= 2 # around 0.0
assert seen[3] <= 2 # 0.0+eps <= x < 15.0 or 35.0 < x < 50.0 or >50.0
assert 50 - 20 < seen[0] < 50 + 20
assert 50 - 20 < seen[1] < 50 + 20
def test_value_is_stochastic_avg_tuple_as_iter_evaluate_histograms(self):
# iterations as tuple
param = iap.IterativeNoiseAggregator(
iap.Uniform(-1.0, 1.0),
iterations=(1, 100),
aggregation_method="avg")
diffs = []
for _ in sm.xrange(100):
samples = param.draw_samples((1, 1))
diff = abs(samples[0, 0] - 0.0)
diffs.append(diff)
nb_bins = 3
hist, _ = np.histogram(diffs, bins=nb_bins, range=(-1.0, 1.0),
density=False)
assert hist[1] > hist[0]
assert hist[1] > hist[2]
def test_value_is_stochastic_max_list_as_iter_evaluate_counts(self):
# iterations as list
seen = [0, 0]
for _ in sm.xrange(400):
param = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]),
iterations=[1, 100],
aggregation_method=["max"])
samples = param.draw_samples((1, 1))
diff_0 = abs(0 - samples[0, 0])
diff_50 = abs(50 - samples[0, 0])
if diff_50 < _eps(samples):
seen[0] += 1
elif diff_0 < _eps(samples):
seen[1] += 1
else:
assert False
assert 300 - 50 < seen[0] < 300 + 50
assert 100 - 50 < seen[1] < 100 + 50
def test_value_is_stochastic_all_100_iter(self):
# test ia.ALL as aggregation_method
# note that each method individually and list of methods are already
# tested, so no in depth test is needed here
param = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]), iterations=100, aggregation_method=ia.ALL)
assert isinstance(param.aggregation_method, iap.Choice)
assert len(param.aggregation_method.a) == 3
        assert all(
            [v in param.aggregation_method.a for v in ["min", "avg", "max"]])
def test_value_is_stochastic_max_2_iter(self):
param = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]), iterations=2, aggregation_method="max")
samples = param.draw_samples((2, 1000))
nb_0 = np.sum(samples == 0)
nb_50 = np.sum(samples == 50)
assert nb_0 + nb_50 == 2 * 1000
assert 0.25 - 0.05 < nb_0 / (2 * 1000) < 0.25 + 0.05
def test_samples_same_values_for_same_seeds(self):
param = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]), iterations=5, aggregation_method="avg")
samples1 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
assert samples1.shape == (100, 10)
assert samples2.shape == (100, 10)
assert np.allclose(samples1, samples2)
def test_stochastic_param_as_aggregation_method(self):
param = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]),
iterations=5,
aggregation_method=iap.Deterministic("max"))
assert isinstance(param.aggregation_method, iap.Deterministic)
assert param.aggregation_method.value == "max"
def test_bad_datatype_for_aggregation_method(self):
with self.assertRaises(Exception) as context:
_ = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]), iterations=5, aggregation_method=False)
self.assertTrue(
"Expected aggregation_method to be" in str(context.exception))
def test_bad_datatype_for_iterations(self):
with self.assertRaises(Exception) as context:
_ = iap.IterativeNoiseAggregator(
iap.Choice([0, 50]),
iterations=False,
aggregation_method="max")
self.assertTrue("Expected iterations to be" in str(context.exception))
class TestSigmoid(unittest.TestCase):
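    """Tests for iap.Sigmoid (applies a sigmoid to another parameter's samples)."""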
def setUp(self):
reseed()
def test___init__(self):
param = iap.Sigmoid(
iap.Deterministic(0),
threshold=(-10, 10),
activated=True,
mul=1,
add=0)
assert (
param.__str__()
== param.__repr__()
== (
"Sigmoid("
"Deterministic(int 0), "
"Uniform("
"Deterministic(int -10), "
"Deterministic(int 10)"
"), "
"Deterministic(int 1), "
"1, "
"0)"
)
)
def test_activated_is_true(self):
param = iap.Sigmoid(
iap.Deterministic(5),
add=0,
mul=1,
threshold=0.5,
activated=True)
expected = 1 / (1 + np.exp(-(5 * 1 + 0 - 0.5)))
sample = param.draw_sample()
samples = param.draw_samples((5, 10))
assert sample.shape == tuple()
assert samples.shape == (5, 10)
assert expected - _eps(sample) < sample < expected + _eps(sample)
assert np.all(
np.logical_and(
expected - _eps(samples) < samples,
samples < expected + _eps(samples)
)
)
def test_activated_is_false(self):
param = iap.Sigmoid(
iap.Deterministic(5),
add=0,
mul=1,
threshold=0.5,
activated=False)
expected = 5
sample = param.draw_sample()
samples = param.draw_samples((5, 10))
assert sample.shape == tuple()
assert samples.shape == (5, 10)
assert expected - _eps(sample) < sample < expected + _eps(sample)
assert np.all(
np.logical_and(
expected - _eps(sample) < samples,
samples < expected + _eps(sample)
)
)
def test_activated_is_probabilistic(self):
param = iap.Sigmoid(
iap.Deterministic(5),
add=0,
mul=1,
threshold=0.5,
activated=0.5)
expected_first = 5
expected_second = 1 / (1 + np.exp(-(5 * 1 + 0 - 0.5)))
seen = [0, 0]
for _ in sm.xrange(1000):
sample = param.draw_sample()
diff_first = abs(sample - expected_first)
diff_second = abs(sample - expected_second)
if diff_first < _eps(sample):
seen[0] += 1
elif diff_second < _eps(sample):
seen[1] += 1
else:
assert False
assert 500 - 150 < seen[0] < 500 + 150
assert 500 - 150 < seen[1] < 500 + 150
def test_value_is_stochastic_param(self):
param = iap.Sigmoid(
iap.Choice([1, 10]),
add=0,
mul=1,
threshold=0.5,
activated=True)
expected_first = 1 / (1 + np.exp(-(1 * 1 + 0 - 0.5)))
expected_second = 1 / (1 + np.exp(-(10 * 1 + 0 - 0.5)))
seen = [0, 0]
for _ in sm.xrange(1000):
sample = param.draw_sample()
diff_first = abs(sample - expected_first)
diff_second = abs(sample - expected_second)
if diff_first < _eps(sample):
seen[0] += 1
elif diff_second < _eps(sample):
seen[1] += 1
else:
assert False
assert 500 - 150 < seen[0] < 500 + 150
assert 500 - 150 < seen[1] < 500 + 150
def test_mul_add_threshold_with_various_fixed_values(self):
muls = [0.1, 1, 10.3]
adds = [-5.7, -0.0734, 0, 0.0734, 5.7]
vals = [-1, -0.7, 0, 0.7, 1]
threshs = [-5.7, -0.0734, 0, 0.0734, 5.7]
for mul, add, val, thresh in itertools.product(muls, adds, vals,
threshs):
with self.subTest(mul=mul, add=add, val=val, threshold=thresh):
param = iap.Sigmoid(
iap.Deterministic(val),
add=add,
mul=mul,
threshold=thresh)
sample = param.draw_sample()
samples = param.draw_samples((2, 3))
dt = sample.dtype
val_ = np.array([val], dtype=dt)
mul_ = np.array([mul], dtype=dt)
add_ = np.array([add], dtype=dt)
thresh_ = np.array([thresh], dtype=dt)
expected = (
1 / (
1 + np.exp(
-(val_ * mul_ + add_ - thresh_)
)
)
)
assert sample.shape == tuple()
assert samples.shape == (2, 3)
assert (
expected - 5*_eps(sample)
< sample <
expected + 5*_eps(sample)
)
assert np.all(
np.logical_and(
expected - 5*_eps(sample) < samples,
samples < expected + 5*_eps(sample)
)
)
def test_samples_same_values_for_same_seeds(self):
param = iap.Sigmoid(
iap.Choice([1, 10]),
add=0,
mul=1,
threshold=0.5,
activated=True)
samples1 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
samples2 = param.draw_samples((100, 10),
random_state=iarandom.RNG(1234))
assert samples1.shape == (100, 10)
assert samples2.shape == (100, 10)
assert np.array_equal(samples1, samples2)
| 34.712048 | 83 | 0.557204 | 143,054 | 0.993051 | 0 | 0 | 200 | 0.001388 | 0 | 0 | 7,815 | 0.05425 |
9600225ca5edde94d999985a5e32bc3c498cea99 | 1,731 | py | Python | ml_snek/datasets/jsnek_dataset.py | joram/ml-snek | e1ed8aa831a4683dfe51a6af0cb25a44c3978903 | ["MIT"] | null | null | null | ml_snek/datasets/jsnek_dataset.py | joram/ml-snek | e1ed8aa831a4683dfe51a6af0cb25a44c3978903 | ["MIT"] | 13 | 2019-12-25T21:04:49.000Z | 2020-01-04T20:25:05.000Z | ml_snek/datasets/jsnek_dataset.py | joram/ml-snek | e1ed8aa831a4683dfe51a6af0cb25a44c3978903 | ["MIT"] | null | null | null |
"""
jsnek_saved_games_dataset that returns flat (vectorized) data
"""
from .jsnek_base_dataset import JSnekBaseDataset
from .. import utils
class JSnekDataset(JSnekBaseDataset):
"""Represents a board state in the following way:
board_state: `torch.Tensor`
Board state in torch.Tensor format. Board state can either be
C x H x W
or
(C*H*W) if board_state_as_vector = True
direction: `torch.Tensor`
Direction taken in one-hot format
"""
def __init__(
self, board_state_as_vector=False, direction_as_index=False, max_frames=-1
):
super().__init__(max_frames=max_frames)
self.board_state_as_vector = board_state_as_vector
self.direction_as_index = direction_as_index
def __getitem__(self, index):
"""
Parameters
----------
index : int
Index of datum
Returns
-------
board_state: `torch.Tensor`
Board state in torch.Tensor format. Board state can either be
C x H x W
or
(C*H*W) if board_state_as_vector = True
direction: `torch.Tensor`
Direction taken in one-hot format
or
Index if direction_as_index = True
"""
frame, winner_id, direction = super().__getitem__(index)
board_state = utils.frame_to_image(frame, winner_id)
if self.board_state_as_vector:
board_state = board_state.view([board_state.numel()])
if self.direction_as_index:
direction = utils.direction_to_index(direction)
else:
direction = utils.direction_to_onehot(direction)
return board_state, direction
| 26.630769 | 82 | 0.622184 | 1,587 | 0.916811 | 0 | 0 | 0 | 0 | 0 | 0 | 855 | 0.493934 |
960075d5d481ca0949f159a6dd4c4e2e599c3197 | 391 | py | Python | src/posts/migrations/0007_recipe_preface.py | eduardkh/matkonim2 | d836b16403d7fce0db88dd39dac2ba24575e6fca | ["MIT"] | null | null | null | src/posts/migrations/0007_recipe_preface.py | eduardkh/matkonim2 | d836b16403d7fce0db88dd39dac2ba24575e6fca | ["MIT"] | null | null | null | src/posts/migrations/0007_recipe_preface.py | eduardkh/matkonim2 | d836b16403d7fce0db88dd39dac2ba24575e6fca | ["MIT"] | null | null | null |
# Generated by Django 3.2.7 on 2021-09-15 15:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0006_auto_20210914_0910'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='preface',
field=models.TextField(blank=True, null=True),
),
]
| 20.578947 | 58 | 0.595908 | 298 | 0.762148 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.245524 |
96021a52c512a37d56b88bb769ca1d2cad4e3a5c | 490 | py | Python | app/database/db.py | flych3r/spotify-tracker | 306d549da6a57866ea480c85286d870e7653a1eb | ["MIT"] | 2 | 2021-06-25T00:24:13.000Z | 2021-07-10T13:00:39.000Z | app/database/db.py | flych3r/spotify-tracker | 306d549da6a57866ea480c85286d870e7653a1eb | ["MIT"] | null | null | null | app/database/db.py | flych3r/spotify-tracker | 306d549da6a57866ea480c85286d870e7653a1eb | ["MIT"] | 2 | 2021-05-16T01:40:39.000Z | 2021-07-10T12:59:07.000Z |
import os
import databases
import sqlalchemy
DB_CONNECTOR = os.getenv('APP_DB_CONNECTOR')
DB_USERNAME = os.getenv('APP_DB_USERNAME')
DB_PASSWORD = os.getenv('APP_DB_PASSWORD')
DB_HOST = os.getenv('APP_DB_HOST')
DB_PORT = os.getenv('APP_DB_PORT')
DB_DATABASE = os.getenv('APP_DB_DATABASE')
DB_URL = f'{DB_CONNECTOR}://{DB_USERNAME}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_DATABASE}'
db: databases.Database = databases.Database(DB_URL)
metadata: sqlalchemy.MetaData = sqlalchemy.MetaData()
| 28.823529 | 90 | 0.777551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 176 | 0.359184 |
96023217ef1c244003018d7cd3aa5cc748e1d708 | 7,631 | py | Python | examples/stl10/main_info.py | hehaodele/align_uniform | 898b9fed960316d4cab6f8b6080490125fc362cd | ["MIT"] | null | null | null | examples/stl10/main_info.py | hehaodele/align_uniform | 898b9fed960316d4cab6f8b6080490125fc362cd | ["MIT"] | null | null | null | examples/stl10/main_info.py | hehaodele/align_uniform | 898b9fed960316d4cab6f8b6080490125fc362cd | ["MIT"] | null | null | null |
import os
import time
import argparse
import torchvision
import torch
import torch.nn as nn
from util import AverageMeter, TwoAugUnsupervisedDataset
from encoder import SmallAlexNet
from align_uniform import align_loss, uniform_loss
import json
def parse_option():
parser = argparse.ArgumentParser('STL-10 Representation Learning with Alignment and Uniformity Losses')
parser.add_argument('--align_w', type=float, default=1, help='Alignment loss weight')
parser.add_argument('--unif_w', type=float, default=1, help='Uniformity loss weight')
parser.add_argument('--align_alpha', type=float, default=2, help='alpha in alignment loss')
parser.add_argument('--unif_t', type=float, default=2, help='t in uniformity loss')
parser.add_argument('--batch_size', type=int, default=768, help='Batch size')
parser.add_argument('--epochs', type=int, default=200, help='Number of training epochs')
parser.add_argument('--lr', type=float, default=None,
help='Learning rate. Default is linear scaling 0.12 per 256 batch size')
parser.add_argument('--lr_decay_rate', type=float, default=0.1, help='Learning rate decay rate')
parser.add_argument('--lr_decay_epochs', default=[155, 170, 185], nargs='*', type=int,
help='When to decay learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='SGD momentum')
parser.add_argument('--weight_decay', type=float, default=1e-4, help='L2 weight decay')
parser.add_argument('--feat_dim', type=int, default=128, help='Feature dimensionality')
parser.add_argument('--num_workers', type=int, default=20, help='Number of data loader workers to use')
parser.add_argument('--log_interval', type=int, default=40, help='Number of iterations between logs')
parser.add_argument('--gpus', default=[0], nargs='*', type=int,
help='List of GPU indices to use, e.g., --gpus 0 1 2 3')
parser.add_argument('--data_folder', type=str, default='./data', help='Path to data')
parser.add_argument('--result_folder', type=str, default='./results', help='Base directory to save model')
parser.add_argument('--suffix', type=str, default='info', help='Name Suffix')
opt = parser.parse_args()
opt.data_folder = '/afs/csail.mit.edu/u/h/hehaodele/radar/Hao/datasets'
opt.result_folder = '/afs/csail.mit.edu/u/h/hehaodele/radar/Hao/projects/align_uniform/results'
if opt.lr is None:
opt.lr = 0.12 * (opt.batch_size / 256)
print(json.dumps(vars(opt), indent=2, default=lambda o: o.__dict__))
opt.gpus = list(map(lambda x: torch.device('cuda', x), opt.gpus))
exp_name = f"align{opt.align_w:g}alpha{opt.align_alpha:g}_unif{opt.unif_w:g}t{opt.unif_t:g}"
if len(opt.suffix) > 0:
exp_name += f'_{opt.suffix}'
opt.save_folder = os.path.join(
opt.result_folder,
exp_name,
)
os.makedirs(opt.save_folder, exist_ok=True)
return opt
def get_data_loader(opt):
from util import RandomResizedCropWithBox, TwoAugUnsupervisedDatasetWithBox
transform_crop = RandomResizedCropWithBox(64, scale=(0.08, 1))
transform_others = torchvision.transforms.Compose([
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ColorJitter(0.4, 0.4, 0.4, 0.4),
torchvision.transforms.RandomGrayscale(p=0.2),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.44087801806139126, 0.42790631331699347, 0.3867879370752931),
(0.26826768628079806, 0.2610450402318512, 0.26866836876860795),
),
])
dataset = TwoAugUnsupervisedDatasetWithBox(
torchvision.datasets.STL10(opt.data_folder, 'train+unlabeled', download=True), transform_crop, transform_others)
return torch.utils.data.DataLoader(dataset, batch_size=opt.batch_size, num_workers=opt.num_workers,
shuffle=True, pin_memory=True)
def get_rate(x):
return sum(x) / len(x) * 100
def main():
opt = parse_option()
print(f'Optimize: {opt.align_w:g} * loss_align(alpha={opt.align_alpha:g}) + {opt.unif_w:g} * loss_uniform(t={opt.unif_t:g})')
torch.cuda.set_device(opt.gpus[0])
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
encoder = nn.DataParallel(SmallAlexNet(feat_dim=opt.feat_dim).to(opt.gpus[0]), opt.gpus)
optim = torch.optim.SGD(encoder.parameters(), lr=opt.lr,
momentum=opt.momentum, weight_decay=opt.weight_decay)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optim, gamma=opt.lr_decay_rate,
milestones=opt.lr_decay_epochs)
loader = get_data_loader(opt)
align_meter = AverageMeter('align_loss')
unif_meter = AverageMeter('uniform_loss')
loss_meter = AverageMeter('total_loss')
it_time_meter = AverageMeter('iter_time')
info_rate_meter = AverageMeter('info_rate')
noni_rate_meter = AverageMeter('noni_rate')
for epoch in range(opt.epochs):
align_meter.reset()
unif_meter.reset()
loss_meter.reset()
it_time_meter.reset()
t0 = time.time()
for ii, (im_x, info_x, im_y, info_y) in enumerate(loader):
optim.zero_grad()
x, y = encoder(torch.cat([im_x.to(opt.gpus[0]), im_y.to(opt.gpus[0])])).chunk(2)
align_loss_val = align_loss(x, y, alpha=opt.align_alpha)
unif_loss_val = (uniform_loss(x, t=opt.unif_t) + uniform_loss(y, t=opt.unif_t)) / 2
loss = align_loss_val * opt.align_w + unif_loss_val * opt.unif_w
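            # Additional terms driven by the per-crop scores (info_x/info_y)
            # returned by TwoAugUnsupervisedDatasetWithBox: pairs where both
            # scores exceed 0.5 get an extra alignment term, while crops with
            # scores below 0.2 get an extra uniformity term.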
info_x, info_y = info_x.to(opt.gpus[0]), info_y.to(opt.gpus[0])
info_x_idx, noni_x_idx = info_x > 0.5, info_x < 0.2
info_y_idx, noni_y_idx = info_y > 0.5, info_y < 0.2
info_pair_idx = info_x_idx & info_y_idx
if info_pair_idx.any():
align_loss_info = align_loss(x[info_pair_idx], y[info_pair_idx], alpha=opt.align_alpha)
else:
align_loss_info = 0
uniform_loss_noninfo = 0
if noni_x_idx.any():
uniform_loss_noninfo += uniform_loss(x[noni_x_idx], t=opt.unif_t)
if noni_y_idx.any():
uniform_loss_noninfo += uniform_loss(y[noni_y_idx], t=opt.unif_t)
uniform_loss_noninfo /= 2
loss_info = align_loss_info * opt.align_w + uniform_loss_noninfo * opt.unif_w
loss = loss + loss_info
align_meter.update(align_loss_val, x.shape[0])
unif_meter.update(unif_loss_val)
loss_meter.update(loss, x.shape[0])
info_rate_meter.update((get_rate(info_x_idx)+get_rate(info_y_idx))/2)
noni_rate_meter.update((get_rate(noni_x_idx)+get_rate(noni_y_idx))/2)
loss.backward()
optim.step()
it_time_meter.update(time.time() - t0)
if ii % opt.log_interval == 0:
print(f"Epoch {epoch}/{opt.epochs}\tIt {ii}/{len(loader)}\t" +
f"{align_meter}\t{unif_meter}\t{loss_meter}\t{it_time_meter}\t{info_rate_meter}\t{noni_rate_meter}")
t0 = time.time()
scheduler.step()
if epoch % 40 == 0:
ckpt_file = os.path.join(opt.save_folder, f'encoder-ep{epoch}.pth')
torch.save(encoder.module.state_dict(), ckpt_file)
ckpt_file = os.path.join(opt.save_folder, 'encoder.pth')
torch.save(encoder.module.state_dict(), ckpt_file)
print(f'Saved to {ckpt_file}')
if __name__ == '__main__':
main()
| 43.357955 | 129 | 0.657974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,482 | 0.194208 |
96024e0d78c0a224ad13e044ee7fc8d5953df2e6 | 259 | py | Python | app/__init__.py | nic-mon/IAIOLab | b8c4a23c95ee722938b393e4824b7fc94447f17c | ["MIT"] | null | null | null | app/__init__.py | nic-mon/IAIOLab | b8c4a23c95ee722938b393e4824b7fc94447f17c | ["MIT"] | null | null | null | app/__init__.py | nic-mon/IAIOLab | b8c4a23c95ee722938b393e4824b7fc94447f17c | ["MIT"] | 1 | 2018-04-11T00:34:09.000Z | 2018-04-11T00:34:09.000Z |
from flask import Flask
""" 1. Creating a flask application instance, the name argument is passed to flask
application constructor. It's used to determine the root path"""
app = Flask(__name__)
app.config.from_object('config')
from app import views, models
| 28.777778 | 82 | 0.776062 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.598456 |
96037b162a17a26e6138061ce184f323626f7486 | 5,305 | py | Python | ptf/tests/linerate/qos_metrics.py | dariusgrassi/upf-epc | aef4648db118d6e1bdb23a07e4774177bd58fc50 | ["Apache-2.0"] | null | null | null | ptf/tests/linerate/qos_metrics.py | dariusgrassi/upf-epc | aef4648db118d6e1bdb23a07e4774177bd58fc50 | ["Apache-2.0"] | 13 | 2021-12-15T18:39:52.000Z | 2022-03-31T00:08:21.000Z | ptf/tests/linerate/qos_metrics.py | dariusgrassi/upf-epc | aef4648db118d6e1bdb23a07e4774177bd58fc50 | ["Apache-2.0"] | null | null | null |
# SPDX-License-Identifier: Apache-2.0
# Copyright(c) 2021 Open Networking Foundation
import time
from ipaddress import IPv4Address
from pprint import pprint
from trex_test import TrexTest
from grpc_test import *
from trex_stl_lib.api import (
STLVM,
STLPktBuilder,
STLStream,
STLTXCont,
)
import ptf.testutils as testutils
UPF_DEST_MAC = "0c:c4:7a:19:6d:ca"
# Port setup
TREX_SENDER_PORT = 0
TREX_RECEIVER_PORT = 1
BESS_SENDER_PORT = 2
BESS_RECEIVER_PORT = 3
# Test specs
DURATION = 10
RATE = 100_000 # 100 Kpps
UE_COUNT = 10_000 # 10k UEs
GTPU_PORT = 2152
PKT_SIZE = 64
class PerFlowQosMetricsTest(TrexTest, GrpcTest):
"""
    Generates 100 Kpps downlink traffic for 10k dest UE IP addresses. Uses
BESS-UPF QoS metrics to verify baseline packet loss, latency, and jitter
results.
"""
@autocleanup
def runTest(self):
n3TEID = 0
startIP = IPv4Address('16.0.0.1')
endIP = startIP + UE_COUNT - 1
accessIP = IPv4Address('10.128.13.29')
enbIP = IPv4Address('10.27.19.99') # arbitrary ip for non-existent eNodeB for gtpu encap
# program UPF for downlink traffic by installing PDRs and FARs
print("Installing PDRs and FARs...")
for i in range(UE_COUNT):
# install N6 DL PDR to match UE dst IP
pdrDown = self.createPDR(
srcIface = CORE,
dstIP = int(startIP + i),
srcIfaceMask = 0xFF,
dstIPMask = 0xFFFFFFFF,
precedence = 255,
fseID = n3TEID + i + 1, # start from 1
ctrID = 0,
farID = i,
qerIDList = [N6, 1],
needDecap = 0,
)
self.addPDR(pdrDown)
# install N6 DL FAR for encap
farDown = self.createFAR(
farID = i,
fseID = n3TEID + i + 1, # start from 1
applyAction = ACTION_FORWARD,
dstIntf = DST_ACCESS,
tunnelType = 0x1,
tunnelIP4Src = int(accessIP),
tunnelIP4Dst = int(enbIP), # only one eNB to send to downlink
tunnelTEID = 0,
tunnelPort = GTPU_PORT,
)
self.addFAR(farDown)
# install N6 DL/UL application QER
qer = self.createQER(
gate = GATE_UNMETER,
qerID = N6,
fseID = n3TEID + i + 1, # start from 1
qfi = 9,
ulGbr = 0,
ulMbr = 0,
dlGbr = 0,
dlMbr = 0,
burstDurationMs = 10,
)
self.addApplicationQER(qer)
# set up trex to send traffic thru UPF
print("Setting up TRex client...")
vm = STLVM()
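        # Field-engine program: pick a random destination IP from the UE
        # address range for every generated packet.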
vm.var(
name="dst",
min_value=str(startIP),
max_value=str(endIP),
size=4,
op="random",
)
vm.write(fv_name="dst", pkt_offset="IP.dst")
vm.fix_chksum()
pkt = testutils.simple_udp_packet(
pktlen=PKT_SIZE,
eth_dst=UPF_DEST_MAC,
with_udp_chksum=False,
)
stream = STLStream(
packet=STLPktBuilder(pkt=pkt, vm=vm),
mode=STLTXCont(pps=RATE),
)
self.trex_client.add_streams(stream, ports=[BESS_SENDER_PORT])
print("Running traffic...")
s_time = time.time()
self.trex_client.start(
ports=[BESS_SENDER_PORT], mult="1", duration=DURATION
)
# FIXME: pull QoS metrics at end instead of while traffic running
time.sleep(DURATION - 5)
if self.trex_client.is_traffic_active():
stats = self.getSessionStats(q=[90, 99, 99.9], quiet=True)
preQos = stats["preQos"]
postDlQos = stats["postDlQos"]
postUlQos = stats["postUlQos"]
self.trex_client.wait_on_traffic(ports=[BESS_SENDER_PORT])
print(f"Duration was {time.time() - s_time}")
trex_stats = self.trex_client.get_stats()
sent_packets = trex_stats['total']['opackets']
recv_packets = trex_stats['total']['ipackets']
# 0% packet loss
self.assertEqual(
sent_packets,
recv_packets,
f"Didn't receive all packets; sent {sent_packets}, received {recv_packets}",
)
for fseid in postDlQos:
lat = fseid['latency']['percentileValuesNs']
jitter = fseid['jitter']['percentileValuesNs']
# 99th %ile latency < 100 us
self.assertLessEqual(
int(lat[1]) / 1000,
100,
f"99th %ile latency was higher than 100 us! Was {int(lat[1]) / 1000} us"
)
# 99.9th %ile latency < 200 us
self.assertLessEqual(
int(lat[2]) / 1000,
200,
f"99.9th %ile latency was higher than 200 us! Was {int(lat[2]) / 1000} us"
)
# 99th% jitter < 100 us
self.assertLessEqual(
int(jitter[1]) / 1000,
100,
f"99th %ile jitter was higher than 100 us! Was {int(jitter[1]) / 1000} us"
)
return
| 30.314286 | 96 | 0.535344 | 4,709 | 0.887653 | 0 | 0 | 4,477 | 0.843921 | 0 | 0 | 1,407 | 0.265221 |
9604a31aa1a2fd0161bb919247c6389804233e2e | 6,209 | py | Python | archives_app/documents_serializers.py | DITGO/2021.1-PC-GO1-Archives | d9f28bb29dbe96331b6e2d0beb7ca37875d61300 | ["MIT"] | 1 | 2021-08-22T13:39:56.000Z | 2021-08-22T13:39:56.000Z | archives_app/documents_serializers.py | DITGO/2021.1-PC-GO1-Archives | d9f28bb29dbe96331b6e2d0beb7ca37875d61300 | ["MIT"] | 36 | 2021-09-01T19:12:17.000Z | 2022-03-18T23:43:13.000Z | archives_app/documents_serializers.py | DITGO/2021.1-PC-GO1-Archives | d9f28bb29dbe96331b6e2d0beb7ca37875d61300 | ["MIT"] | 5 | 2021-09-10T21:01:07.000Z | 2021-09-17T16:35:21.000Z |
from rest_framework import serializers
from archives_app.documents_models import (FrequencyRelation, BoxArchiving,
AdministrativeProcess, OriginBox,
FrequencySheet, DocumentTypes)
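# Shared base serializer for the frequency serializers below; provides the
# document type name lookup used by FrequencyRelationSerializer and
# FrequencySheetSerializer.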
class FrequencySupport(serializers.ModelSerializer):
def get_document_type(self, obj):
if obj.document_type_id is not None:
return obj.document_type_id.document_name
return None
class BoxArchivingSerializer(serializers.ModelSerializer):
def get_shelf_number(self, obj):
if obj.shelf_id is not None:
return obj.shelf_id.number
return None
def get_rack_number(self, obj):
if obj.rack_id is not None:
return obj.rack_id.number
return None
def get_abbreviation_name(self, obj):
if obj.abbreviation_id is not None:
return obj.abbreviation_id.name
return ""
def get_sender_unity(self, obj):
if obj.sender_unity is not None:
return obj.sender_unity.unity_name
return ""
    def get_doc_types(self, obj):
        if obj.document_types is not None:
            # use a distinct loop variable to avoid shadowing `obj`
            return [doc.document_type_id.document_name
                    for doc in obj.document_types.all()]
        return ""
    def get_temporalities(self, obj):
        if obj.document_types is not None:
            return [doc.temporality_date
                    for doc in obj.document_types.all()]
        return None
shelf_number = serializers.SerializerMethodField('get_shelf_number')
rack_number = serializers.SerializerMethodField('get_rack_number')
abbreviation_name = serializers.SerializerMethodField('get_abbreviation_name')
sender_unity_name = serializers.SerializerMethodField('get_sender_unity')
document_type_name = serializers.SerializerMethodField('get_doc_types')
temporality_date = serializers.SerializerMethodField('get_temporalities')
class Meta:
model = BoxArchiving
fields = (
"id",
"process_number",
"sender_unity",
"notes",
"received_date",
"document_url",
"cover_sheet",
"filer_user",
"abbreviation_name",
"shelf_number",
"rack_number",
"origin_box_id",
"abbreviation_id",
"shelf_id",
"rack_id",
"document_types",
"sender_unity_name",
"document_type_name",
"temporality_date"
)
class FrequencyRelationSerializer(FrequencySupport):
def get_sender_unity(self, obj):
if obj.sender_unity is not None:
return obj.sender_unity.unity_name
return ""
document_type_name = serializers.SerializerMethodField(
'get_document_type'
)
sender_unity_name = serializers.SerializerMethodField('get_sender_unity')
class Meta:
model = FrequencyRelation
fields = (
"id",
"process_number",
"notes",
"document_date",
"received_date",
"temporality_date",
"reference_period",
"filer_user",
"sender_unity",
"document_type_id",
"document_type_name",
"sender_unity_name"
)
class AdministrativeProcessSerializer(serializers.ModelSerializer):
def get_document_subject(self, obj):
if obj.subject_id is not None:
return obj.subject_id.subject_name
return None
def get_sender_unity(self, obj):
if obj.sender_unity is not None:
return obj.sender_unity.unity_name
return ""
def get_sender_user(self, obj):
if obj.sender_user is not None:
return obj.sender_user.name
return ""
sender_unity_name = serializers.SerializerMethodField('get_sender_unity')
sender_user_name = serializers.SerializerMethodField('get_sender_user')
document_subject_name = serializers.SerializerMethodField(
'get_document_subject'
)
class Meta:
model = AdministrativeProcess
fields = ("id",
"process_number",
"notes",
"filer_user",
"notice_date",
"interested",
"cpf_cnpj",
"reference_month_year",
"sender_user",
"sender_user_name",
"archiving_date",
"is_filed",
"is_eliminated",
"temporality_date",
"send_date",
"administrative_process_number",
"sender_unity",
"subject_id",
"dest_unity_id",
"unity_id",
"document_subject_name",
"sender_unity_name"
)
class OriginBoxSerializer(serializers.ModelSerializer):
class Meta:
model = OriginBox
fields = '__all__'
class DocumentTypesSerializer(serializers.ModelSerializer):
class Meta:
model = DocumentTypes
fields = '__all__'
class FrequencySheetSerializer(FrequencySupport):
def get_person_name(self, obj):
if obj.person_id is not None:
return obj.person_id.name
return ""
document_type_name = serializers.SerializerMethodField(
'get_document_type'
)
person_name = serializers.SerializerMethodField('get_person_name')
class Meta:
model = FrequencySheet
fields = ("id",
"person_id",
"person_name",
"cpf",
"role",
"category",
"workplace",
"municipal_area",
"reference_period",
"notes",
"process_number",
"document_type_id",
"temporality_date",
"document_type_name"
)
| 29.995169 | 82 | 0.573522 | 5,922 | 0.953777 | 0 | 0 | 0 | 0 | 0 | 0 | 1,207 | 0.194395 |
960632beca7334764b877e64f50cf461743b9b2b | 7,132 | py | Python | src/fparser/common/tests/test_base_classes.py | sturmianseq/fparser | bf3cba3f31a72671d4d4a93b6ef4f9832006219f | ["BSD-3-Clause"] | 33 | 2017-08-18T16:31:27.000Z | 2022-03-28T09:43:50.000Z | src/fparser/common/tests/test_base_classes.py | sturmianseq/fparser | bf3cba3f31a72671d4d4a93b6ef4f9832006219f | ["BSD-3-Clause"] | 319 | 2017-01-12T14:22:07.000Z | 2022-03-23T20:53:25.000Z | src/fparser/common/tests/test_base_classes.py | sturmianseq/fparser | bf3cba3f31a72671d4d4a93b6ef4f9832006219f | ["BSD-3-Clause"] | 17 | 2017-10-13T07:12:28.000Z | 2022-02-11T14:42:18.000Z |
# -*- coding: utf-8 -*-
##############################################################################
# Copyright (c) 2017 Science and Technology Facilities Council
#
# All rights reserved.
#
# Modifications made as part of the fparser project are distributed
# under the following license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##############################################################################
# Modified M.Hambley, UK Met Office
##############################################################################
'''
Test battery associated with fparser.common.base_classes package.
'''
import re
import pytest
import fparser.common.base_classes
import fparser.common.readfortran
import fparser.common.sourceinfo
import fparser.common.utils
from fparser import api
def test_statement_logging(log, monkeypatch):
'''
Tests the Statement class' logging methods.
'''
class DummyParser(object):
'''
Null parser harness.
'''
def __init__(self, reader):
self.reader = reader
reader = fparser.common.readfortran.FortranStringReader("dummy = 1")
parser = DummyParser(reader)
monkeypatch.setattr(fparser.common.base_classes.Statement,
'process_item', lambda x: None, raising=False)
unit_under_test = fparser.common.base_classes.Statement(parser, None)
unit_under_test.error('Scary biscuits')
assert(log.messages == {'critical': [],
'debug': [],
'error': ['Scary biscuits'],
'info': [],
'warning': []})
log.reset()
unit_under_test.warning('Trepidacious Cetations')
assert(log.messages == {'critical': [],
'debug': [],
'error': [],
'info': [],
'warning': ['Trepidacious Cetations']})
log.reset()
unit_under_test.info('Hilarious Ontologies')
assert(log.messages == {'critical': [],
'debug': [],
'error': [],
'info': ['Hilarious Ontologies'],
'warning': []})
def test_log_comment_mix(log):
'''
Tests that unexpected Fortran 90 comment in fixed format source is logged.
'''
class EndDummy(fparser.common.base_classes.EndStatement):
'''
Dummy EndStatement.
'''
match = re.compile(r'\s*end(\s*thing\s*\w*|)\s*\Z', re.I).match
class BeginHarness(fparser.common.base_classes.BeginStatement):
'''
Dummy BeginStatement.
'''
end_stmt_cls = EndDummy
classes = []
match = re.compile(r'\s*thing\s+(\w*)\s*\Z', re.I).match
def get_classes(self):
'''
Returns an empty list of contained statements.
'''
return []
code = ' x=1 ! Cheese'
parent = fparser.common.readfortran.FortranStringReader(
code, ignore_comments=False)
parent.set_format(fparser.common.sourceinfo.FortranFormat(False, True))
item = fparser.common.readfortran.Line(code, (1, 1), None, None, parent)
with pytest.raises(fparser.common.utils.AnalyzeError):
__ = BeginHarness(parent, item)
expected = ' 1: x=1 ! Cheese <== ' \
+ 'no parse pattern found for "x=1 ! cheese" ' \
+ "in 'BeginHarness' block, " \
+ 'trying to remove inline comment (not in Fortran 77).'
result = log.messages['warning'][0].split('\n')[1]
assert result == expected
def test_log_unexpected(log):
'''
Tests that an unexpected thing between begin and end statements logs an
event.
'''
class EndThing(fparser.common.base_classes.EndStatement):
'''
Dummy EndStatement class.
'''
isvalid = True
match = re.compile(r'\s*end(\s+thing(\s+\w+)?)?\s*$', re.I).match
class BeginThing(fparser.common.base_classes.BeginStatement):
'''
Dummy BeginStatement class.
'''
end_stmt_cls = EndThing
classes = []
match = re.compile(r'\s*thing\s+(\w+)?\s*$', re.I).match
def get_classes(self):
'''
Returns an empty list of contained classes.
'''
return []
code = [' jumper', ' end thing']
parent = fparser.common.readfortran.FortranStringReader('\n'.join(code))
parent.set_format(fparser.common.sourceinfo.FortranFormat(False, True))
item = fparser.common.readfortran.Line(code[0], (1, 1), None, None, parent)
with pytest.raises(fparser.common.utils.AnalyzeError):
__ = BeginThing(parent, item)
expected = ' 1: jumper <== no parse pattern found for "jumper" ' \
"in 'BeginThing' block."
result = log.messages['warning'][0].split('\n')[1]
assert result == expected
def test_space_after_enddo():
'''Make sure that there is no space after an 'END DO' without name,
but there is a space if there is a name after 'END DO'.
'''
# Unnamed loop:
source_str = '''\
subroutine foo
integer i, r
do i = 1,100
r = r + 1
end do
end subroutine foo
'''
tree = api.parse(source_str, isfree=True, isstrict=False)
assert "END DO " not in tree.tofortran()
# Named loop:
source_str = '''\
subroutine foo
integer i, r
loop1: do i = 1,100
r = r + 1
end do loop1
end subroutine foo
'''
tree = api.parse(source_str, isfree=True, isstrict=False)
assert "END DO loop1" in tree.tofortran()
| 35.839196 | 79 | 0.599411 | 1,302 | 0.182557 | 0 | 0 | 0 | 0 | 0 | 0 | 3,760 | 0.527201 |
96065ad383494de22a076bf5a911760ad23ad0e8 | 87 | py | Python | pyvecorg/__main__.py | torsava/pyvec.org | 809812395e4bffdb0522a52c6a7f7468ffc7ccd6 | ["MIT"] | 3 | 2016-09-08T09:28:02.000Z | 2019-08-25T11:56:26.000Z | pyvecorg/__main__.py | torsava/pyvec.org | 809812395e4bffdb0522a52c6a7f7468ffc7ccd6 | ["MIT"] | 97 | 2016-08-20T17:11:34.000Z | 2022-03-29T07:52:13.000Z | pyvecorg/__main__.py | torsava/pyvec.org | 809812395e4bffdb0522a52c6a7f7468ffc7ccd6 | ["MIT"] | 7 | 2016-11-26T20:38:29.000Z | 2021-08-20T11:11:47.000Z |
from elsa import cli
from pyvecorg import app
cli(app, base_url='http://pyvec.org')
| 12.428571 | 37 | 0.735632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.206897 |
96072e15a870bb0da5695f16be671c56e832f75e | 10,397 | py | Python | ppython/input_handler.py | paberr/ppython | 0c59d503cbd1ca619ad51b627614ae2dd9549c38 | ["MIT"] | 1 | 2016-06-15T17:21:22.000Z | 2016-06-15T17:21:22.000Z | ppython/input_handler.py | paberr/ppython | 0c59d503cbd1ca619ad51b627614ae2dd9549c38 | ["MIT"] | null | null | null | ppython/input_handler.py | paberr/ppython | 0c59d503cbd1ca619ad51b627614ae2dd9549c38 | ["MIT"] | null | null | null |
import curtsies.events as ev
import sys
DELIMITERS = ' .'
WHITESPACE = ' '
def print_console(txt, npadding=0, newline=False, flush=True):
"""
Prints txt without newline, cursor positioned at the end.
:param txt: The text to print
    :param npadding: Number of trailing spaces appended after txt
    :param newline: If True, a newline character will be appended
    :param flush: If True, stdout is flushed after writing
:return:
"""
sys.stdout.write('\r{0}{1}'.format(txt, WHITESPACE * npadding))
if newline:
sys.stdout.write('\n')
if flush:
sys.stdout.flush()
def move_next_line():
sys.stdout.write('\n')
sys.stdout.flush()
def find_next_in_list(lst, what, start=0, reverse=False):
"""
Finds the next occurrence of what in lst starting at start.
:param lst: The list to search
:param what: The item to find, should be an iterable
:param start: The starting position in the list
:param reverse: Set this to True in order to traverse the list towards 0
:return: False if no occurrence found, index otherwise
"""
if start < 0 or start >= len(lst):
return False
end = -1 if reverse else len(lst)
step = -1 if reverse else 1
for i in range(start, end, step):
if lst[i] in what:
return i
return False
class InputHandler:
def __init__(self, history):
self._input = []
self._position = 0
self._handlers = {}
self._highlight = None
self._max_length = 0
self._complete = None
self._history = history
self._prefix = ''
def process_input(self, c):
"""
Processes the input captured by curtsies.
:param c: the input, either a curtsies keystroke or an event
:return: False if program should stop, the current line otherwise
"""
if isinstance(c, ev.Event):
return self._process_event(c)
else:
return self._process_char(c)
def register_handler(self, key, handler):
if key not in self._handlers:
self._handlers[key] = []
self._handlers[key].append(handler)
def set_highlighter(self, highlight):
self._highlight = highlight
def set_completer(self, complete):
self._complete = complete
def set_prefix(self, prefix):
self._prefix = prefix
def _process_char(self, c):
"""
Processes keystrokes internally, may call handlers as well.
:param c: The curtsies keystroke
:return: The current line
"""
if len(c) == 1:
self._insert(c)
elif c == '<LEFT>':
self._left()
elif c == '<RIGHT>':
self._right()
elif c == '<UP>':
self._hist_up()
elif c == '<DOWN>':
self._hist_down()
elif c == '<SPACE>':
self._insert(' ')
elif c == '<TAB>':
if not self._tab_completion():
self._insert(' ')
elif c == '<BACKSPACE>':
self._back()
elif c == '<Ctrl-w>':
self._delete_last_word()
elif c == '<DELETE>':
self._delete()
elif c == '<HOME>' or c == '<Ctrl-a>':
self._home()
elif c == '<END>' or c == '<Ctrl-e>':
self._end()
elif c == '<Ctrl-u>':
self._delete_before()
elif c == '<Ctrl-k>':
self._delete_after()
elif c == '<Esc+f>':
self._move_word_forwards()
elif c == '<Esc+b>':
self._move_word_backwards()
elif c == '<Ctrl-r>':
pass # history search mode
elif c == '<ESC>':
pass # history search mode
elif c == '<Ctrl-j>':
old_line = self._newline()
if c in self._handlers:
for handler in self._handlers[c]:
handler(old_line)
elif c == '<Ctrl-c>' or c == '<Ctrl-d>':
return False
# new lines are handled differently
if c in self._handlers and c != '<Ctrl-j>':
# call handlers if necessary
for handler in self._handlers[c]:
handler(self._curline())
return self._curline()
def _process_event(self, e):
"""
Processes events internally.
:param e: The event
:return: False in case of SigInt, the input otherwise
"""
if isinstance(e, ev.SigIntEvent):
return False
elif isinstance(e, ev.PasteEvent):
for c in e.events:
self.process_input(c)
return self._curline()
def _line_changed(self):
self._history.edit(self._curline())
def _hist_up(self):
"""
Moves up in the history object.
:return:
"""
self._input = list(self._history.move_up())
self._position = len(self._input)
self.draw()
def _hist_down(self):
"""
Moves down in the history object.
:return:
"""
self._input = list(self._history.move_down())
self._position = len(self._input)
self.draw()
def _curline(self):
"""
Returns the current line.
:return: current line
"""
return ''.join(self._input)
def _insert(self, c):
"""
Inserts a character at current position, moves cursor forward and redraws.
:param c: character
:return:
"""
if len(c) > 1:
# only insert single characters
for cc in c:
self._insert(cc)
return
self._input.insert(self._position, c)
self._position += 1
self._line_changed()
self.draw()
def _left(self):
"""
Moves cursor back and redraws.
:return:
"""
if self._position > 0:
self._position -= 1
self.draw()
def _home(self):
"""
Moves cursor home and redraws.
:return:
"""
self._position = 0
self.draw()
def _right(self):
"""
Moves cursor forward and redraws.
:return:
"""
if self._position < len(self._input):
self._position += 1
self.draw()
def _end(self):
"""
Moves cursor to end and redraws.
:return:
"""
self._position = len(self._input)
self.draw()
def _move_word_forwards(self):
"""
Moves cursor towards the next delimiter.
:return:
"""
next_del = find_next_in_list(self._input, DELIMITERS, start=self._position+1)
if next_del is False:
self._end()
else:
self._position = next_del
self.draw()
def _move_word_backwards(self):
"""
Moves cursor towards the next delimiter.
:return:
"""
next_del = find_next_in_list(self._input, DELIMITERS, start=self._position-2, reverse=True)
if next_del is False:
self._home()
else:
self._position = next_del + 1
self.draw()
def _delete_last_word(self):
"""
Deletes until last delimiter.
:return:
"""
next_del = find_next_in_list(self._input, DELIMITERS, start=self._position - 2, reverse=True)
if next_del is False:
next_del = 0
else:
next_del += 1
del self._input[next_del:self._position]
self._position = next_del
self._line_changed()
self.draw()
def _back(self):
"""
Removes element in front of cursor, moves cursor back and redraws.
:return:
"""
if self._position > 0:
del self._input[self._position - 1]
self._position -= 1
self._line_changed()
self.draw()
def _delete(self):
"""
Removes element behind cursor and redraws.
:return:
"""
if self._position < len(self._input):
del self._input[self._position]
self._line_changed()
self.draw()
def _delete_before(self):
"""
Deletes everything in front of the cursor.
:return:
"""
self._input = self._input[self._position:]
self._position = 0
self._line_changed()
self.draw()
def _delete_after(self):
"""
Deletes everything after the cursor.
:return:
"""
self._input = self._input[:self._position]
self._line_changed()
self.draw()
def _newline(self):
"""
Creates a new line and returns the old one.
:return: old line
"""
self._history.commit()
old_line = self._curline()
self._position = 0
self._max_length = 0
self._input = []
move_next_line()
return old_line
def draw(self):
"""
Draws input with cursor at right position.
:return:
"""
whole_line = self._curline()
cursor_line = whole_line[:self._position]
# add prefix
whole_line = self._prefix + whole_line
cursor_line = self._prefix + cursor_line
self._max_length = max(len(whole_line), self._max_length)
# highlight texts
if self._highlight is not None:
whole_line_h = self._highlight(whole_line).strip()
cursor_line_h = self._highlight(cursor_line).strip()
else:
whole_line_h = whole_line
cursor_line_h = cursor_line
# first print whole line
npadding = max(0, self._max_length - len(whole_line))
print_console(whole_line_h, npadding=npadding, flush=False)
# then print for cursor position
print_console(cursor_line_h)
def _tab_completion(self):
"""
Calls completion function. If possible insert completion.
:return: True if completion was successful
"""
if self._complete is not None:
# try completing
completion = self._complete(self._curline()[:self._position])
if completion is not False:
# if successful, insert the completion
for c in completion:
self._insert(c)
return True
return False
| 28.1 | 101 | 0.541406 | 9,103 | 0.875541 | 0 | 0 | 0 | 0 | 0 | 0 | 3,192 | 0.307012 |
960742a391af9a30c0acaaa433fd60815de5da1f | 1,601 | py | Python | pycon_graphql/events/tests/test_models.py | CarlosMart626/graphql-workshop-pycon.co2019 | 466e56052efcfc7455336a0ac5c6637c68fcb3b9 | [
"MIT"
]
| 1 | 2019-02-10T12:35:14.000Z | 2019-02-10T12:35:14.000Z | pycon_graphql/events/tests/test_models.py | CarlosMart626/graphql-workshop-pycon.co2019 | 466e56052efcfc7455336a0ac5c6637c68fcb3b9 | [
"MIT"
]
| null | null | null | pycon_graphql/events/tests/test_models.py | CarlosMart626/graphql-workshop-pycon.co2019 | 466e56052efcfc7455336a0ac5c6637c68fcb3b9 | [
"MIT"
]
| 1 | 2019-02-10T15:02:30.000Z | 2019-02-10T15:02:30.000Z | from django.core.exceptions import ValidationError
from django.utils import timezone
from django.test import TestCase
from events.models import Event, Invitee
from users.tests.factories import UserFactory
from users.models import get_sentinel_user
class EventModelTestCase(TestCase):
def setUp(self):
self.main_event = Event.objects.create(
title="Pycon 2019 - GraphQL Workshop",
description="Descripción del evento",
invitee_capacity=100,
event_day=timezone.now().date(),
initial_hour="13:00",
end_hour="15:00",
place_name="Universidad Javeriana",
latitude='4.62844',
longitude='-74.06508',
zoom=19,
)
self.platform_users = UserFactory.create_batch(10)
for user in self.platform_users:
self.main_event.enroll_user(user)
def test_event_model(self):
self.assertEqual(str(self.main_event), "Pycon 2019 - GraphQL Workshop")
self.assertEqual(self.main_event.invitees_count(), 10)
self.assertEqual(Invitee.objects.filter(event=self.main_event).count(), 10)
def test_error_already_enrolled_user(self):
user = self.platform_users[0]
with self.assertRaises(ValidationError):
self.main_event.enroll_user(user)
def test_delete_enrolled_user(self):
new_user = UserFactory()
invitee = self.main_event.enroll_user(new_user)
new_user.delete()
invitee.refresh_from_db(fields=("user",))
self.assertEqual(invitee.user, get_sentinel_user())
| 36.386364 | 83 | 0.67208 | 1,351 | 0.843321 | 0 | 0 | 0 | 0 | 0 | 0 | 150 | 0.093633 |
9607844773359aa6aa0c7976c01c1f1c73d9292a | 145 | py | Python | cryptos.py | pogoetic/tricero | 6cb60e780bf9056ad9887a84e2ba7d73787ac2fc | ["MIT"] | null | null | null | cryptos.py | pogoetic/tricero | 6cb60e780bf9056ad9887a84e2ba7d73787ac2fc | ["MIT"] | null | null | null | cryptos.py | pogoetic/tricero | 6cb60e780bf9056ad9887a84e2ba7d73787ac2fc | ["MIT"] | null | null | null |
cryptolist = ['ETH','BTC','XRP','EOS','ADA','NEO','STEEM',
'BTS','ZEC','XMR','XVG','XEM','OMG','MIOTA','XTZ','SC',
'CVC','BAT','XLM','ZRX','VEN'] | 48.333333 | 58 | 0.524138 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.744828 |
96085c19f88d75b4448b45a1368f150dc76f3edb | 2,615 | py | Python | python/test/utils/test_sliced_data_iterator.py | kodavatimahendra/nnabla | 72009f670af075f17ffca9c809b07d48cca30bd9 | ["Apache-2.0"] | null | null | null | python/test/utils/test_sliced_data_iterator.py | kodavatimahendra/nnabla | 72009f670af075f17ffca9c809b07d48cca30bd9 | ["Apache-2.0"] | null | null | null | python/test/utils/test_sliced_data_iterator.py | kodavatimahendra/nnabla | 72009f670af075f17ffca9c809b07d48cca30bd9 | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import numpy as np
from nnabla.utils.data_source_loader import load_image
from nnabla.utils.data_iterator import data_iterator_simple
from .test_data_iterator import check_data_iterator_result
@pytest.mark.parametrize("num_of_slices", [2, 3, 5])
@pytest.mark.parametrize("size", [50])
@pytest.mark.parametrize("batch_size", [1, 5, 11])
@pytest.mark.parametrize("shuffle", [False, True])
def test_sliced_data_iterator(test_data_csv_png_10, num_of_slices, size, batch_size, shuffle):
def test_load_func(position):
return np.full((1), position, dtype=np.float32)
di = data_iterator_simple(test_load_func, size,
batch_size, shuffle=shuffle)
    # fractions.gcd was removed in Python 3.9; prefer math.gcd when available.
    try:
        from math import gcd
    except ImportError:
        from fractions import gcd

    def lcm(a, b): return abs(a * b) // gcd(a, b) if a and b else 0

    # Number of epochs after which batch boundaries realign with the dataset start.
    max_epoch = lcm(batch_size, size) / size
all_data = []
for slice_pos in range(num_of_slices):
sliced_di = di.slice(num_of_slices=num_of_slices, slice_pos=slice_pos)
sliced_data = {}
while True:
current_epoch = sliced_di.epoch
if current_epoch > max_epoch + 1:
break
data = sliced_di.next()
if current_epoch not in sliced_data:
sliced_data[current_epoch] = []
for dat in data:
for d in dat:
sliced_data[current_epoch].append(d)
all_data.append(sliced_data)
epochs = {}
for slice_pos, sliced_data in enumerate(all_data):
for epoch in sorted(sliced_data.keys()):
if epoch not in epochs:
epochs[epoch] = []
epochs[epoch].append(set(sliced_data[epoch]))
for epoch in sorted(epochs.keys()):
x0 = epochs[epoch][0]
acceptable_size = batch_size
amount = size // num_of_slices
if acceptable_size < amount:
acceptable_size = amount
for dup in [x0 & x for x in epochs[epoch][1:]]:
assert len(dup) < amount
| 34.866667 | 94 | 0.66348 | 0 | 0 | 0 | 0 | 1,785 | 0.6826 | 0 | 0 | 635 | 0.24283 |
96090a33ab17b3ef5237b33e54e263f6d813f39f | 819 | py | Python | python/leetcode/646.py | ParkinWu/leetcode | b31312bdefbb2be795f3459e1a76fbc927cab052 | ["MIT"] | null | null | null | python/leetcode/646.py | ParkinWu/leetcode | b31312bdefbb2be795f3459e1a76fbc927cab052 | ["MIT"] | null | null | null | python/leetcode/646.py | ParkinWu/leetcode | b31312bdefbb2be795f3459e1a76fbc927cab052 | ["MIT"] | null | null | null |
# You are given n pairs of numbers. In every pair, the first number is always smaller than the second.
#
# We define a "follow" relation: the pair (c, d) can follow (a, b) if and only if b < c.
# A chain of pairs is built by repeatedly applying this relation.
#
# Given a collection of pairs, find the length of the longest chain that can be formed.
# You do not need to use all of the pairs, and you may pick them in any order.
#
# Example:
#
# Input: [[1,2], [2,3], [3,4]]
# Output: 2
# Explanation: the longest chain is [1,2] -> [3,4]
# Note:
#
# The number of pairs is in the range [1, 1000].
#
# Source: LeetCode (leetcode-cn)
# Link: https://leetcode-cn.com/problems/maximum-length-of-pair-chain
# Copyright belongs to LeetCode; contact them for commercial licensing and credit the source for non-commercial reuse.
from typing import List
class Solution:
def findLongestChain(self, pairs: List[List[int]]) -> int:
pairs.sort(key=lambda p: p[1])
tmp = pairs[0]
ans = 1
for p in pairs:
if p[0] > tmp[1]:
tmp = p
ans += 1
return ans
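# Why the greedy works (sketch): after sorting by right endpoints, always taking
# the next pair whose left endpoint is strictly greater than the current chain's
# right endpoint is the classic interval-scheduling argument; any optimal chain
# can be exchanged pair by pair into the greedy one. Runtime: O(n log n) sort + O(n) scan.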
if __name__ == '__main__':
s = Solution()
assert s.findLongestChain([[1, 2], [2, 3], [3, 4]]) == 2 | 22.75 | 70 | 0.57265 | 278 | 0.223833 | 0 | 0 | 0 | 0 | 0 | 0 | 821 | 0.661031 |
960b4705f7f9212fc6fe401b9f516bcb627b27a2 | 19,044 | py | Python | FactorTestMain.py | WeiYouyi/FactorTest | fc23e23252614ce4ed8973416f7fbb2d0dbb5ccc | ["MIT"] | null | null | null | FactorTestMain.py | WeiYouyi/FactorTest | fc23e23252614ce4ed8973416f7fbb2d0dbb5ccc | ["MIT"] | null | null | null | FactorTestMain.py | WeiYouyi/FactorTest | fc23e23252614ce4ed8973416f7fbb2d0dbb5ccc | ["MIT"] | null | null | null |
from FactorTest.FactorTestPara import *
from FactorTest.FactorTestBox import *
class FactorTest():
def __init__(self):
self.startdate=20000101
self.enddate=21000101
self.factorlist=[]
self.FactorDataBase={'v':pd.DataFrame(columns=['time','code'])}
self.filterStockDF='FULL'
        self.retData = getRetData()  # standardized to time/code columns; time is int, code is str
self.ICList={}
self.portfolioList={}
self.ICAns={}
self.portfolioAns={}
self.portfolioGroup = pd.DataFrame(columns=['time', 'code'])
self.annualTurnover = {}
self.year_performance={}
self.WR={}
self.PL={}
        pd.options.mode.use_inf_as_na = True  # treat inf values as NaN
self.dataProcess=dataProcess(self.FactorDataBase)
def getFactor(self,Factor):
        # Note: frequency is standardized to monthly; SQL-style data
if('month' in Factor):
Factor.rename(columns={'month':'time'},inplace=True)
if('date' in Factor):
Factor.rename(columns={'date':'time'},inplace=True)
factorList=Factor.columns
if(len(factorList)<=2):
print('error')
return Factor
else:
factorList=getfactorname(Factor,['code','time'])
for factorname in factorList:
            if(factorname in self.factorlist):  # if the factor already exists, drop the old data first and reload it
rest=pd.Series(self.factorlist)
self.factorlist=rest[rest!=factorname].tolist()
del self.FactorDataBase['v'][factorname]
self.FactorDataBase['v']=self.FactorDataBase['v'].merge(Factor[['time','code',factorname]],on=['time','code'],how='outer')
self.factorlist=self.factorlist+[factorname]
def calcIC(self,factorlist='',startMonth='',endMonth=''):
if(factorlist==''):
factorlist=self.factorlist
if(type(factorlist)==str):
factorlist=[factorlist]
if(startMonth==''):
startMonth=int(str(self.startdate)[:6])
if(endMonth==''):
endMonth=int(str(self.enddate)[:6])
RetData=self.retData
RetData=RetData[RetData.time>=startMonth]
RetData=RetData[RetData.time<=endMonth]
if(type(self.filterStockDF)==pd.DataFrame):
RetData=setStockPool(RetData,self.filterStockDF)
for facname in factorlist:
Mer=self.FactorDataBase['v'][['time','code',facname]].merge(RetData,on=['time','code'],how='outer').dropna()
self.ICList[facname],self.ICAns[facname]=calcIC(Mer,facname)
if(len(factorlist)==1):
print(facname+':')
print(self.ICAns[facname])
if(len(factorlist)>1):
print(self.ICDF)
def calcLongShort(self,factorlist='',startMonth='',endMonth='',t=5,asc=''):
if(factorlist==''):
factorlist=self.factorlist
if(type(factorlist)==str):
factorlist=[factorlist]
if(startMonth==''):
startMonth=int(str(self.startdate)[:6])
if(endMonth==''):
endMonth=int(str(self.enddate)[:6])
RetData=self.retData
RetData=RetData[RetData.time>=startMonth]
RetData=RetData[RetData.time<=endMonth]
if(type(self.filterStockDF)==pd.DataFrame):
RetData=setStockPool(RetData,self.filterStockDF)
for facname in factorlist:
Mer=self.FactorDataBase['v'][['time','code',facname]].merge(RetData,on=['time','code'],how='outer').dropna()
if(asc!=''):
ascloc=asc
else:
ascloc=False
if(facname in self.ICAns):
if(self.ICAns[facname]['IC:']<0):
ascloc=True
Mer = Mer.groupby('time').apply(lambda x: isinGroupT(x, facname, asc=ascloc, t=t)).reset_index(drop=True)
ls_ret = calcGroupRet(Mer,facname,RetData)
            ls_ret['多空组合'] = ls_ret[1] - ls_ret[t]  # long-short spread ('多空组合'): group 1 minus group t
            if (facname in self.portfolioGroup.columns):  # if the factor already exists, drop the old column first and reload
self.portfolioGroup = self.portfolioGroup.drop(columns=facname)
self.portfolioGroup = self.portfolioGroup.merge(Mer[['time','code',facname]], on=['time', 'code'], how='outer')
self.portfolioList[facname]=ls_ret
self.portfolioAns[facname]=evaluatePortfolioRet(ls_ret[1]-ls_ret[t])
self.annualTurnover[facname] = calcAnnualTurnover(self.portfolioGroup, facname)
if(len(factorlist)==1):
print(facname+':')
ls_ret1=ls_ret.reset_index().copy()
ls_ret1['time']=ls_ret1['time'].apply(lambda x:str(x))
ls_ret1.set_index('time').apply(lambda x:x+1).cumprod().plot()
print(self.portfolioAns[facname])
plt.show()
if(len(factorlist)>1):
print(self.portfolioDF)
def doubleSorting(self,factor_list,method='cond',startMonth=200001,endMonth=210001,t1=5,t2=5,asc=''):
'''
        Parameters
        ----------
        factor_list : list
            Must be a list ['fac1','fac2'], meaning a double sort of fac2
            conditional on fac1; the result is named 'fac2|fac1'.
        method : str, 'cond' or 'idp'
            'cond' is a conditional double sort, 'idp' an independent double sort
        t1, t2 : int
            Number of groups for fac1 and fac2 respectively, default 5
        Returns
        -------
        The first return value is the t1*t2 matrix of annualized returns.
        The second return value is the t1*t2 matrix of information ratios.
        portfolioList and portfolioGroup are updated accordingly.
'''
data = self.FactorDataBase['v'][['time','code']+factor_list].copy()
data = data[data.time>=startMonth]
data = data[data.time<=endMonth]
RetData=self.retData
RetData=RetData[RetData.time>=startMonth]
RetData=RetData[RetData.time<=endMonth]
if(asc!=''):
ascloc=asc
else:
ascloc=False
if method=='cond':
data = data.merge(RetData, on=['time','code'], how='outer').dropna()
data = data.groupby('time').apply(isinGroupT, factor_list[0], asc=ascloc, t=t1).reset_index(drop=True)
data = data.groupby(['time',factor_list[0]]).apply(isinGroupT, factor_list[1], asc=ascloc, t=t2).reset_index(drop=True)
facname=('%s|%s'%(factor_list[1], factor_list[0]))
            data[facname] = data[factor_list[0]].apply(lambda x: str(x))+data[factor_list[1]].apply(lambda x: str(x))  # conditional group label
            ls_ret = calcGroupRet(data,facname,RetData)  # conditional group returns
fac2_ls_ret = calcGroupRet(data,factor_list[1],RetData)
ls_ret['多空组合'] = fac2_ls_ret[1] - fac2_ls_ret[t2]
self.portfolioList[facname]=ls_ret
self.portfolioGroup = self.portfolioGroup.merge(data[['time','code',facname]], on=['time', 'code'], how='outer')
        # Annualized return ('年化收益率') and information ratio ('信息比率') per group.
        # Note: ret_mean hard-codes 12 periods per year even though t is a parameter.
        def ARIR(Rev_seq,t=12):
            ret_mean=e**(np.log(Rev_seq+1).mean()*12)-1
            ret_sharpe=Rev_seq.mean()*t/Rev_seq.std()/t**0.5
            return pd.DataFrame({'年化收益率':ret_mean, '信息比率':ret_sharpe})
tmp = ARIR(ls_ret.drop('多空组合',axis=1))
tmp_AnlRet,tmp_IR = tmp['年化收益率'].values.reshape((t2,t1)),tmp['信息比率'].values.reshape((t2,t1))
tmp_AnlRet,tmp_IR = pd.DataFrame(tmp_AnlRet,columns=[factor_list[1]+'_'+str(i) for i in range(1,t2+1)],index=[factor_list[0]+'_'+str(i) for i in range(1,t1+1)]),pd.DataFrame(tmp_IR,columns=[factor_list[1]+'_'+str(i) for i in range(1,t2+1)],index=[factor_list[0]+'_'+str(i) for i in range(1,t1+1)])
return tmp_AnlRet,tmp_IR
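    # Hedged usage sketch for doubleSorting (the factor names are placeholders):
    #     ret_mat, ir_mat = Test.doubleSorting(['fac1', 'fac2'], method='cond', t1=5, t2=5)
    #     ret_mat is the t1*t2 annualized-return matrix, ir_mat the information-ratio matrix.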
    # standard test workflow
def autotest(self,factorlist='',startMonth='',endMonth='',t=5,asc=''):
self.calcIC(factorlist,startMonth,endMonth)
self.calcLongShort(factorlist,startMonth,endMonth,t,asc)
    # compute portfolios of the top-K stocks ranked by factor value
def calcTopK(self,factorlist='',startMonth='',endMonth='',k=30,asc='',base=''):
if(factorlist==''):
factorlist=self.factorlist
if(type(factorlist)==str):
factorlist=[factorlist]
if(startMonth==''):
startMonth=int(str(self.startdate)[:6])
if(endMonth==''):
endMonth=int(str(self.enddate)[:6])
RetData=self.retData
RetData=RetData[RetData.time>=startMonth]
RetData=RetData[RetData.time<=endMonth]
if(type(self.filterStockDF)==pd.DataFrame):
RetData=setStockPool(RetData,self.filterStockDF)
if ((base != '') & (base in self.portfolioGroup.columns)):
factorDB = self.portfolioGroup[self.portfolioGroup[base] == 1][['time', 'code']].merge(self.FactorDataBase['v'],on=['time', 'code'],how='outer').dropna()
elif (base == ''):
factorDB = self.FactorDataBase['v']
else:
print('error')
return factorlist
for facname in factorlist:
Mer=factorDB[['time','code',facname]].merge(RetData,on=['time','code'],how='outer').dropna()
if(asc!=''):
ascloc=asc
else:
ascloc=False
if(facname in self.ICAns):
if(self.ICAns[facname]['IC:']<0):
ascloc=True
Mer = Mer.groupby('time').apply(lambda x: isinTopK(x, facname, ascloc, k=k)).reset_index(drop=True)
topk_list = calcGroupRet(Mer,facname,RetData)
            if (facname in self.portfolioGroup.columns):  # if the factor already exists, drop the old column first and reload
self.portfolioGroup = self.portfolioGroup.drop(columns=facname)
            # portfolioGroup is 1 when the stock's factor value (in asc order) is within the top k, and 2 when it is within the bottom k
self.portfolioGroup = self.portfolioGroup.merge(Mer[['time','code',facname]], on=['time', 'code'], how='outer').fillna(0)
self.portfolioList[facname]=topk_list
self.portfolioAns[facname]=evaluatePortfolioRet(topk_list[1]-topk_list[0])
self.annualTurnover[facname] = calcAnnualTurnover(self.portfolioGroup, facname)
if(len(factorlist)==1):
print(facname+':')
topk_list['ls']=topk_list[1]-topk_list[0]
calc_plot(topk_list.apply(lambda x:x+1).cumprod())
print(self.portfolioAns[facname])
plt.show()
if(len(factorlist)>1):
print(self.portfolioDF)
def calcFutureRet(self,factorlist='',startMonth='',endMonth='',L=36,t=5,asc=''):
'''
Parameters
----------
        factorlist : optional
            Factor(s) to test, 'factor1' or ['factor1','factor2']; may be left empty
        startMonth : int, start month such as 201001; may be left empty
        endMonth : int, end month such as 202201; may be left empty
        L : number of months to look ahead, 36 by default
        t : int, number of groups, 5 by default.
        asc : True or False, sort direction; True for ascending, False for descending
        Returns
        -------
        Returns the mean future return over the next 1 to L months for each month,
        stored in Test.FutureRet
'''
if(factorlist==''):
factorlist=self.factorlist
if(type(factorlist)==str):
factorlist=[factorlist]
if(startMonth==''):
startMonth=int(str(self.startdate)[:6])
if(endMonth==''):
endMonth=int(str(self.enddate)[:6])
RetData=self.retData.pivot(index='time',columns='code',values='ret')
self.FutureRet=pd.DataFrame(columns=factorlist)
RetData=RetData.apply(lambda x:np.log(x+1))
for i in tqdm(range(1,L+1)):
Ret_loc=RetData.rolling(window=i).sum().apply(lambda x:np.e**x-1).shift(-1*i+1).dropna(how='all').stack().reset_index()
Ret_loc.columns=['time','code','ret']
Ret_loc=Ret_loc[Ret_loc.time>=startMonth]
Ret_loc=Ret_loc[Ret_loc.time<=endMonth]
if(type(self.filterStockDF)==pd.DataFrame):
Ret_loc=setStockPool(RetData,self.filterStockDF)
for facname in factorlist:
if(asc!=''):
ascloc=asc
else:
ascloc=False
if(facname in self.ICAns):
if(self.ICAns[facname]['IC:']<0):
ascloc=True
Mer=self.FactorDataBase['v'][['time','code',facname]].merge(Ret_loc,on=['time','code'],how='outer').dropna()
Mer=Mer.groupby('time').apply(isinGroupT,facname,asc=ascloc,t=t).reset_index(drop=True)
ls_ret=calcGroupRet(Mer,facname,Ret_loc).reset_index()
                self.FutureRet.loc[i,facname]=(ls_ret[1]-ls_ret[t]).mean()  # group 1 minus group t
self.FutureRet.plot()
    # compute win rate (WR) and profit/loss ratio (PL)
def displayWinRate(self,factorlist=''):
if(factorlist==''):
factorlist=self.portfolioList.keys()
for facname in factorlist:
Mer=self.portfolioGroup[['time','code',facname]].merge(self.retData,on=['time','code'],how='outer').dropna()
L=Mer.groupby(['time']).apply(calcGroupWR,facname,self.retData)
self.WR[facname]=L.mean()['WR']
self.PL[facname]=L.mean()['PL']
print(pd.concat([pd.Series(self.WR,name='WR'),pd.Series(self.PL,name='PL')],axis=1))
    # display annual performance
def displayYearPerformance(self,factorlist='',t=5):
'''
        Prints year by year:
        the performance of group 1 and group t, and for the group 1 minus group t
        spread: return, information ratio, monthly win rate and maximum drawdown
        via FB.evaluatePortfolioRet
'''
if(factorlist==''):
factorlist=self.portfolioList.keys()
if(type(factorlist)==str):
factorlist=[factorlist]
for facname in factorlist:
portfolio=self.portfolioList[facname].reset_index()
portfolio['time']=portfolio['time'].apply(lambda x:str(x)[:4])
portfolioyear=portfolio.groupby('time')
ans=pd.DataFrame()
for year in portfolio.time.sort_values().unique():
portfolio_loc=portfolioyear.get_group(year).set_index('time')
ans1=evaluatePortfolioRet(portfolio_loc['多空组合'])
ans1.loc[1]=(portfolio_loc[1]+1).prod()-1
ans1.loc[t]=(portfolio_loc[t]+1).prod()-1
ans1.name=year
ans=ans.append(ans1)
self.year_performance[facname]=ans
    # compute correlation matrices: 1. factor values 2. IC series
def calcCorrMatrix(self,CorType=stats.spearmanr):
'''
        self.factorCorr holds factor correlations, self.ICCorr holds IC series correlations
        Uses stats.spearmanr by default,
        which can be replaced with stats.pearsonr
        Parameters
        ----------
        CorType : optional
            The correlation function. The default is stats.spearmanr.
Returns
-------
None.
'''
self.factorCorr=pd.DataFrame([],index=self.factorlist,columns=self.factorlist)
self.ICCorr=pd.DataFrame([],index=self.factorlist,columns=self.factorlist)
for i in range(len(self.factorlist)):
for j in range(len(self.factorlist)):
if(i<j):
fac=self.FactorDataBase['v'][['time','code',self.factorlist[i],self.factorlist[j]]].dropna()
fac=fac.groupby('time').apply(lambda x:CorType(x[self.factorlist[i]],x[self.factorlist[j]])[0])
self.factorCorr.loc[self.factorlist[i],self.factorlist[j]]=fac.mean()
if(self.factorlist[i] in self.ICList and self.factorlist[j] in self.ICList):
A=pd.DataFrame(self.ICList[self.factorlist[i]],columns=[1])
A[2]=self.ICList[self.factorlist[j]]
A=A.dropna()
self.ICCorr.loc[self.factorlist[i],self.factorlist[j]]=CorType(A[1],A[2])[0]
        print('Factor correlations:')
print(self.factorCorr)
        print('IC correlations:')
print(self.ICCorr.dropna(how='all').dropna(how='all',axis=1))
    # test correlation with Barra factors
def calcCorrBarra(self,factorlist=''):
if(factorlist==''):
factorlist=self.factorlist
if(type(factorlist)==str):
factorlist=[factorlist]
factor_tmp,Barra_list=addXBarra(self.FactorDataBase['v'][['time','code']+factorlist])
Corr_Mat=pd.DataFrame(index=factorlist,columns=Barra_list)
for fac in factorlist:
for barra in Barra_list:
corr_loc=factor_tmp[['time',fac,barra]].dropna()
Corr_Mat.loc[fac,barra]=corr_loc.groupby('time').apply(lambda x:stats.spearmanr(x[fac],x[barra])[0]).mean()
self.Corr_Mat=Corr_Mat
        print('Correlation with Barra:')
print(self.Corr_Mat.T)
    # compute pure (Barra-neutralized) factors, stored with the suffix '_pure'
def calcPureFactor(self,factorlist=''):
if(factorlist==''):
factorlist=self.factorlist
if(type(factorlist)==str):
factorlist=[factorlist]
for fac in factorlist:
factorDF=calcNeuBarra(self.FactorDataBase['v'], fac)
self.getFactor(factorDF[['time','code',fac+'_pure']].dropna())
@property
def ICDF(self):
return pd.DataFrame(self.ICAns).T
@property
def portfolioDF(self):
return pd.DataFrame(self.portfolioAns).T
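# Hedged usage sketch (a minimal assumed workflow; 'my_factor' is a placeholder
# column in a DataFrame that also has 'time' and 'code' columns):
#
#     Test = FactorTest()
#     Test.getFactor(factor_df)          # load factor values
#     Test.autotest('my_factor')         # IC test plus grouped long-short test
#     Test.displayYearPerformance()      # year-by-year breakdown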
class IndTest(FactorTest):
def __init__(self):
self.startdate=20000101
self.enddate=21000101
self.factorlist=[]
self.FactorDataBase={'v':pd.DataFrame(columns=['time','code'])}
self.filterStockDF='FULL'
        self.retData = getIndRetData()  # standardized to time/code columns; time is int, code is str
self.ICList={}
self.portfolioList={}
self.ICAns={}
self.portfolioAns={}
self.portfolioGroup = pd.DataFrame(columns=['time', 'code'])
self.annualTurnover = {}
self.year_performance={}
self.indStatus=pd.read_csv(filepathtestdata+'sw1.csv').set_index('申万代码')
        pd.options.mode.use_inf_as_na = True  # treat inf values as NaN
self.dataProcess=dataProcess(self.FactorDataBase)
    # convert stock-level data to industry-level data
@staticmethod
def convertStocktoInd(Factor,func=lambda x:x.mean()):
if('month' in Factor):
Factor.rename(columns={'month':'time'},inplace=True)
if('date' in Factor):
Factor.rename(columns={'date':'time'},inplace=True)
factorList=Factor.columns
if(len(factorList)<=2):
print('error')
return Factor
else:
factorList=getfactorname(Factor,['code','time'])
DF=pd.DataFrame(columns=['time','code'])
indStatus=pd.read_csv(filepathtestdata+'sw1.csv').set_index('申万代码')
for facname in factorList:
DataLoc=Factor[['time','code',facname]]
DataLoc=DataLoc.pipe(getSWIndustry,freq='month')
DataLoc=DataLoc.groupby(['time','SWind']).mean().reset_index()
A=DataLoc.groupby('SWind')
for ind in DataLoc['SWind'].unique():
DataLoc.loc[A.get_group(ind)['SWind'].index,'code']=indStatus.loc[ind,'代码']
DF=DF.merge(DataLoc[['time','code',facname]],on=['time','code'],how='outer')
return DF
| 45.342857 | 313 | 0.56548 | 19,895 | 0.994949 | 0 | 0 | 1,293 | 0.064663 | 0 | 0 | 3,783 | 0.189188 |
960b6014f14f9123b0ec09ae60429c45aaf956f5 | 3,094 | py | Python | src/qm/terachem/terachem.py | hkimaf/unixmd | 616634c720d0589fd600e3268afab9da957e18bb | ["MIT"] | null | null | null | src/qm/terachem/terachem.py | hkimaf/unixmd | 616634c720d0589fd600e3268afab9da957e18bb | ["MIT"] | null | null | null | src/qm/terachem/terachem.py | hkimaf/unixmd | 616634c720d0589fd600e3268afab9da957e18bb | ["MIT"] | null | null | null |
from __future__ import division
from qm.qm_calculator import QM_calculator
from misc import call_name
import os
class TeraChem(QM_calculator):
""" Class for common parts of TeraChem
:param string basis_set: Basis set information
:param string functional: Exchange-correlation functional information
:param string precision: Precision in the calculations
:param string root_path: Path for TeraChem root directory
:param integer ngpus: Number of GPUs
:param integer,list gpu_id: ID of used GPUs
:param string version: Version of TeraChem
"""
def __init__(self, functional, basis_set, root_path, ngpus, \
gpu_id, precision, version):
# Save name of QM calculator and its method
super().__init__()
# Initialize TeraChem common variables
self.functional = functional
self.basis_set = basis_set
self.root_path = root_path
if (not os.path.isdir(self.root_path)):
error_message = "Root directory for TeraChem binary not found!"
error_vars = f"root_path = {self.root_path}"
raise FileNotFoundError (f"( {self.qm_method}.{call_name()} ) {error_message} ( {error_vars} )")
self.qm_path = os.path.join(self.root_path, "bin")
# Set the environmental variables for TeraChem
lib_dir = os.path.join(self.root_path, "lib")
os.environ["TeraChem"] = self.root_path
os.environ["LD_LIBRARY_PATH"] += os.pathsep + os.path.join(lib_dir)
self.ngpus = ngpus
self.gpu_id = gpu_id
        if (self.gpu_id is None):
error_message = "GPU ID must be set in running script!"
error_vars = f"gpu_id = {self.gpu_id}"
raise ValueError (f"( {self.qm_method}.{call_name()} ) {error_message} ( {error_vars} )")
if (isinstance(self.gpu_id, list)):
if (len(self.gpu_id) != self.ngpus):
error_message = "Number of elements for GPU ID must be equal to number of GPUs!"
error_vars = f"len(gpu_id) = {len(self.gpu_id)}, ngpus = {self.ngpus}"
raise ValueError (f"( {self.qm_method}.{call_name()} ) {error_message} ( {error_vars} )")
else:
error_message = "Type of GPU ID must be list consisting of integer!"
error_vars = f"gpu_id = {self.gpu_id}"
raise TypeError (f"( {self.qm_method}.{call_name()} ) {error_message} ( {error_vars} )")
self.precision = precision
self.version = version
if (isinstance(self.version, str)):
if (self.version != "1.93"):
error_message = "Other versions not implemented!"
error_vars = f"version = {self.version}"
raise ValueError (f"( {self.qm_method}.{call_name()} ) {error_message} ( {error_vars} )")
else:
error_message = "Type of version must be string!"
error_vars = f"version = {self.version}"
raise TypeError (f"( {self.qm_method}.{call_name()} ) {error_message} ( {error_vars} )")
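# Hedged usage sketch (all paths, GPU IDs and settings below are assumptions):
#
#     qm = TeraChem(functional="b3lyp", basis_set="6-31g*",
#                   root_path="/opt/terachem", ngpus=2, gpu_id=[0, 1],
#                   precision="dynamic", version="1.93")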
| 43.577465 | 108 | 0.61894 | 2,978 | 0.962508 | 0 | 0 | 0 | 0 | 0 | 0 | 1,507 | 0.487072 |
960c00e5d06118cad7de3e170d517ce0e7416494 | 11,668 | py | Python | tests/unit/modules/test_reg_win.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | ["Apache-2.0"] | null | null | null | tests/unit/modules/test_reg_win.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | ["Apache-2.0"] | null | null | null | tests/unit/modules/test_reg_win.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
'''
:synopsis: Unit Tests for Windows Registry Module 'module.reg'
:platform: Windows
:maturity: develop
:codeauthor: Damon Atkins <https://github.com/damon-atkins>
versionadded:: 2016.11.0
'''
# Import Python future libs
from __future__ import absolute_import
from __future__ import unicode_literals
# Import Python Libs
import sys
import time
# Import Salt Testing Libs
from tests.support.unit import TestCase, skipIf
from tests.support.helpers import destructiveTest
# Import Salt Libs
import salt.modules.reg as win_mod_reg
from salt.ext import six
try:
from salt.ext.six.moves import winreg as _winreg # pylint: disable=import-error,no-name-in-module
NO_WINDOWS_MODULES = False
except ImportError:
NO_WINDOWS_MODULES = True
PY2 = sys.version_info[0] == 2
# The following used to make sure we are not
# testing already existing data
# Note strftime returns a str, so we need to make it unicode
TIMEINT = int(time.time())
if PY2:
TIME_INT_UNICODE = six.text_type(TIMEINT)
TIMESTR = time.strftime('%X %x %Z').decode('utf-8')
else:
TIMESTR = time.strftime('%X %x %Z')
TIME_INT_UNICODE = str(TIMEINT) # pylint: disable=R0204
# we do not need to prefix this with u, as we are
# using from __future__ import unicode_literals
UNICODETEST_WITH_SIGNS = 'Testing Unicode \N{COPYRIGHT SIGN},\N{TRADE MARK SIGN},\N{REGISTERED SIGN} '+TIMESTR
UNICODETEST_WITHOUT_SIGNS = 'Testing Unicode'+TIMESTR
UNICODE_TEST_KEY = 'UnicodeKey \N{TRADE MARK SIGN} '+TIME_INT_UNICODE
UNICODE_TEST_KEY_DEL = 'Delete Me \N{TRADE MARK SIGN} '+TIME_INT_UNICODE
@skipIf(NO_WINDOWS_MODULES, 'requires Windows OS to test Windows registry')
class RegWinTestCase(TestCase):
'''
Test cases for salt.modules.reg
'''
@skipIf(not sys.platform.startswith("win"), "requires Windows OS")
def test_read_reg_plain(self):
'''
        Test - Read a registry value from a subkey using Python 2 strings or
        Python 3 bytes
'''
if not PY2:
            self.skipTest('Test requires Python 2')
subkey = b'Software\\Microsoft\\Windows NT\\CurrentVersion'
vname = b'PathName'
handle = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
subkey,
0,
_winreg.KEY_ALL_ACCESS
)
(current_vdata, dummy_current_vtype) = _winreg.QueryValueEx(handle, vname)
_winreg.CloseKey(handle)
test_vdata = win_mod_reg.read_value(b'HKEY_LOCAL_MACHINE', subkey, vname)[b'vdata']
self.assertEqual(
test_vdata, current_vdata)
@skipIf(not sys.platform.startswith("win"), "requires Windows OS")
def test_read_reg_unicode(self):
'''
        Test - Read a registry value from a subkey using Python 2 unicode
        or Python 3 str, i.e. unicode
'''
subkey = 'Software\\Microsoft\\Windows NT\\CurrentVersion'
vname = 'PathName'
handle = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
subkey,
0,
_winreg.KEY_ALL_ACCESS
)
(current_vdata, dummy_current_vtype) = _winreg.QueryValueEx(handle, vname)
_winreg.CloseKey(handle)
test_vdata = win_mod_reg.read_value(
'HKEY_LOCAL_MACHINE',
subkey,
vname)['vdata']
self.assertEqual(test_vdata, current_vdata)
@skipIf(not sys.platform.startswith("win"), "requires Windows OS")
def test_list_keys_fail(self):
'''
        Test - List the keys under a subkey which does not exist.
'''
subkey = 'ThisIsJunkItDoesNotExistIhope'
test_list = win_mod_reg.list_keys('HKEY_LOCAL_MACHINE', subkey)
# returns a tuple with first item false, and second item a reason
test = isinstance(test_list, tuple) and (not test_list[0])
self.assertTrue(test)
@skipIf(not sys.platform.startswith("win"), "requires Windows OS")
def test_list_keys(self):
'''
        Test - List the keys under a subkey
'''
subkey = 'Software\\Microsoft\\Windows NT\\CurrentVersion'
test_list = win_mod_reg.list_keys('HKEY_LOCAL_MACHINE', subkey)
        test = len(test_list) > 5  # There should be a lot more than 5 items
self.assertTrue(test)
@skipIf(not sys.platform.startswith("win"), "requires Windows OS")
def test_list_values_fail(self):
'''
Test - List the values under a subkey which does not exist.
'''
subkey = 'ThisIsJunkItDoesNotExistIhope'
test_list = win_mod_reg.list_values('HKEY_LOCAL_MACHINE', subkey)
# returns a tuple with first item false, and second item a reason
test = isinstance(test_list, tuple) and (not test_list[0])
self.assertTrue(test)
@skipIf(not sys.platform.startswith("win"), "requires Windows OS")
def test_list_values(self):
'''
Test - List the values under a subkey.
'''
subkey = r'Software\Microsoft\Windows NT\CurrentVersion'
test_list = win_mod_reg.list_values('HKEY_LOCAL_MACHINE', subkey)
test = len(test_list) > 5 # There should be a lot more than 5 items
self.assertTrue(test)
# Not considering this destructive as its writing to a private space
@skipIf(not sys.platform.startswith("win"), "requires Windows OS")
def test_set_value_unicode(self):
'''
Test - set a registry plain text subkey name to a unicode string value
'''
        vname = 'TestUnicodeString'
subkey = 'Software\\SaltStackTest'
test1_success = False
test2_success = False
test1_success = win_mod_reg.set_value(
'HKEY_LOCAL_MACHINE',
subkey,
vname,
UNICODETEST_WITH_SIGNS
)
# Now use _winreg direct to see if it worked as expected
if test1_success:
handle = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
subkey,
0,
_winreg.KEY_ALL_ACCESS
)
(current_vdata, dummy_current_vtype) = _winreg.QueryValueEx(handle, vname)
_winreg.CloseKey(handle)
test2_success = (current_vdata == UNICODETEST_WITH_SIGNS)
self.assertTrue(test1_success and test2_success)
@skipIf(not sys.platform.startswith("win"), "requires Windows OS")
def test_set_value_unicode_key(self):
'''
Test - set a registry Unicode subkey name with unicode characters within
        to an integer
'''
test_success = win_mod_reg.set_value(
'HKEY_LOCAL_MACHINE',
'Software\\SaltStackTest',
UNICODE_TEST_KEY,
TIMEINT,
'REG_DWORD'
)
self.assertTrue(test_success)
@skipIf(not sys.platform.startswith("win"), "requires Windows OS")
def test_del_value(self):
'''
        Test - Create a registry value directly and delete it with salt
'''
subkey = 'Software\\SaltStackTest'
vname = UNICODE_TEST_KEY_DEL
vdata = 'I will be deleted'
if PY2:
handle = _winreg.CreateKeyEx(
_winreg.HKEY_LOCAL_MACHINE,
subkey.encode('mbcs'),
0,
_winreg.KEY_ALL_ACCESS
)
_winreg.SetValueEx(
handle,
vname.encode('mbcs'),
0,
_winreg.REG_SZ,
vdata.encode('mbcs')
)
else:
handle = _winreg.CreateKeyEx(
_winreg.HKEY_LOCAL_MACHINE,
subkey,
0,
_winreg.KEY_ALL_ACCESS
)
_winreg.SetValueEx(handle, vname, 0, _winreg.REG_SZ, vdata)
_winreg.CloseKey(handle)
# time.sleep(15) # delays for 15 seconds
test_success = win_mod_reg.delete_value(
'HKEY_LOCAL_MACHINE',
subkey,
vname
)
self.assertTrue(test_success)
@skipIf(not sys.platform.startswith("win"), "requires Windows OS")
def test_del_key_recursive_user(self):
'''
        Test - Create a key/value pair directly and delete it recursively with salt
'''
subkey = 'Software\\SaltStackTest'
vname = UNICODE_TEST_KEY_DEL
vdata = 'I will be deleted recursive'
if PY2:
handle = _winreg.CreateKeyEx(
_winreg.HKEY_CURRENT_USER,
subkey.encode('mbcs'),
0,
_winreg.KEY_ALL_ACCESS
)
_winreg.SetValueEx(
handle,
vname.encode('mbcs'),
0,
_winreg.REG_SZ,
vdata.encode('mbcs')
)
else:
handle = _winreg.CreateKeyEx(
_winreg.HKEY_CURRENT_USER,
subkey,
0,
_winreg.KEY_ALL_ACCESS
)
_winreg.SetValueEx(handle, vname, 0, _winreg.REG_SZ, vdata)
_winreg.CloseKey(handle)
# time.sleep(15) # delays for 15 seconds so you can run regedit & watch it happen
test_success = win_mod_reg.delete_key_recursive('HKEY_CURRENT_USER', subkey)
self.assertTrue(test_success)
@skipIf(not sys.platform.startswith("win"), "requires Windows OS")
@destructiveTest
def test_del_key_recursive_machine(self):
'''
        This is a DESTRUCTIVE TEST: it creates a new registry entry
        and then destroys the registry entry recursively. However, it is contained in its own space
        within the registry. We mark this as destructiveTest as it has the potential
        to destroy a machine if the salt reg code has a large error in it.
'''
subkey = 'Software\\SaltStackTest'
vname = UNICODE_TEST_KEY_DEL
vdata = 'I will be deleted recursive'
if PY2:
handle = _winreg.CreateKeyEx(
_winreg.HKEY_LOCAL_MACHINE,
subkey.encode('mbcs'),
0,
_winreg.KEY_ALL_ACCESS
)
_winreg.SetValueEx(
handle,
vname.encode('mbcs'),
0,
_winreg.REG_SZ,
vdata.encode('mbcs')
)
else:
handle = _winreg.CreateKeyEx(
_winreg.HKEY_LOCAL_MACHINE,
subkey,
0,
_winreg.KEY_ALL_ACCESS
)
_winreg.SetValueEx(handle, vname, 0, _winreg.REG_SZ, vdata)
_winreg.CloseKey(handle)
# time.sleep(15) # delays for 15 seconds so you can run regedit and watch it happen
test_success = win_mod_reg.delete_key_recursive('HKEY_LOCAL_MACHINE', subkey)
self.assertTrue(test_success)
# pylint: disable=W0511
# TODO: Test other hives, other than HKEY_LOCAL_MACHINE and HKEY_CURRENT_USER
| 38.508251 | 110 | 0.573706 | 9,979 | 0.855245 | 0 | 0 | 10,055 | 0.861759 | 0 | 0 | 3,925 | 0.33639 |
960c27eda1d8cb31a885faeca6a1d05da5d1bc43 | 9,197 | py | Python | glacier/glacierexception.py | JeffAlyanak/amazon-glacier-cmd-interface | f9e50cbc49156233a87f1975323e315370aeeabe | ["MIT"] | 166 | 2015-01-01T14:14:56.000Z | 2022-02-20T21:59:45.000Z | glacier/glacierexception.py | JeffAlyanak/amazon-glacier-cmd-interface | f9e50cbc49156233a87f1975323e315370aeeabe | ["MIT"] | 31 | 2015-01-04T13:18:02.000Z | 2022-01-10T18:40:52.000Z | glacier/glacierexception.py | JeffAlyanak/amazon-glacier-cmd-interface | f9e50cbc49156233a87f1975323e315370aeeabe | ["MIT"] | 75 | 2015-01-03T10:33:41.000Z | 2022-02-22T21:21:47.000Z |
import traceback
import re
import sys
import logging
"""
**********
Note by wvmarle:
This file contains the complete code from chained_exception.py plus the
error handling code from GlacierWrapper.py, allowing it to be used in other
modules like glaciercorecalls as well.
**********
"""
class GlacierException(Exception):
"""
An extension of the built-in Exception class, this handles
an additional cause keyword argument, adding it as cause
attribute to the exception message.
It logs the error message (amount of information depends on the log
level) and passes it on to a higher level to handle.
Furthermore it allows for the upstream handler to call for a
complete stack trace or just a simple error and cause message.
TODO: describe usage.
"""
ERRORCODE = {'InternalError': 127, # Library internal error.
'UndefinedErrorCode': 126, # Undefined code.
'NoResults': 125, # Operation yielded no results.
'GlacierConnectionError': 1, # Can not connect to Glacier.
'SdbConnectionError': 2, # Can not connect to SimpleDB.
'CommandError': 3, # Command line is invalid.
'VaultNameError': 4, # Invalid vault name.
'DescriptionError': 5, # Invalid archive description.
'IdError': 6, # Invalid upload/archive/job ID given.
'RegionError': 7, # Invalid region given.
'FileError': 8, # Error related to reading/writing a file.
'ResumeError': 9, # Problem resuming a multipart upload.
'NotReady': 10, # Requested download is not ready yet.
'BookkeepingError': 11, # Bookkeeping not available.
'SdbCommunicationError': 12, # Problem reading/writing SimpleDB data.
'ResourceNotFoundException': 13, # Glacier can not find the requested resource.
'InvalidParameterValueException': 14, # Parameter not accepted.
'DownloadError': 15, # Downloading an archive failed.
'SNSConnectionError': 126, # Can not connect to SNS
'SNSConfigurationError': 127, # Problem with configuration file
'SNSParameterError':128, # Problem with arguments passed to SNS
}
def __init__(self, message, code=None, cause=None):
"""
Constructor. Logs the error.
:param message: the error message.
:type message: str
:param code: the error code.
:type code: str
:param cause: explanation on what caused the error.
:type cause: str
"""
self.logger = logging.getLogger(self.__class__.__name__)
self.exitcode = self.ERRORCODE[code] if code in self.ERRORCODE else 254
self.code = code
if cause:
self.logger.error('ERROR: %s'% cause)
self.cause = cause if isinstance(cause, tuple) else (cause,)
self.stack = traceback.format_stack()[:-2]
else:
self.logger.error('An error occurred, exiting.')
self.cause = ()
# Just wrap up a cause-less exception.
# Get the stack trace for this exception.
self.stack = (
traceback.format_stack()[:-2] +
traceback.format_tb(sys.exc_info()[2]))
# ^^^ let's hope the information is still there; caller must take
# care of this.
self.message = message
self.logger.info(self.fetch(message=True))
self.logger.debug(self.fetch(stack=True))
if self.exitcode == 254:
self.logger.debug('Unknown error code: %s.'% code)
# Works as a generator to help get the stack trace and the cause
# written out.
def causeTree(self, indentation=' ', alreadyMentionedTree=[], stack=False, message=False):
"""
Returns a complete stack tree, an error message, or both.
Returns a warning if neither stack or message are True.
"""
if stack:
yield "Traceback (most recent call last):\n"
ellipsed = 0
for i, line in enumerate(self.stack):
if (ellipsed is not False
and i < len(alreadyMentionedTree)
and line == alreadyMentionedTree[i]):
ellipsed += 1
else:
if ellipsed:
yield " ... (%d frame%s repeated)\n" % (
ellipsed,
"" if ellipsed == 1 else "s")
ellipsed = False # marker for "given out"
yield line
if message:
exc = self if self.message is None else self.message
for line in traceback.format_exception_only(exc.__class__, exc):
yield line
if self.cause:
yield ("Caused by: %d exception%s\n" %
(len(self.cause), "" if len(self.cause) == 1 else "s"))
for causePart in self.cause:
if hasattr(causePart,"causeTree"):
for line in causePart.causeTree(indentation, self.stack):
yield re.sub(r'([^\n]*\n)', indentation + r'\1', line)
else:
for line in traceback.format_exception_only(causePart.__class__, causePart):
yield re.sub(r'([^\n]*\n)', indentation + r'\1', line)
if not message and not stack:
yield ('No output. Specify message=True and/or stack=True \
to get output when calling this function.\n')
def write(self, stream=None, indentation=' ', message=False, stack=False):
"""
Writes the error details to sys.stderr or a stream.
"""
stream = sys.stderr if stream is None else stream
for line in self.causeTree(indentation, message=message, stack=stack):
stream.write(line)
def fetch(self, indentation=' ', message=False, stack=False):
"""
Fetches the error details and returns them as string.
"""
out = ''
for line in self.causeTree(indentation, message=message, stack=stack):
out += line
return out
class InputException(GlacierException):
"""
Exception that is raised when there is someting wrong with the
user input.
"""
VaultNameError = 1
VaultDescriptionError = 2
def __init__(self, message, code=None, cause=None):
""" Handles the exception.
:param message: the error message.
:type message: str
:param code: the error code.
:type code:
:param cause: explanation on what caused the error.
:type cause: str
"""
GlacierException.__init__(self, message, code=code, cause=cause)
class ConnectionException(GlacierException):
"""
Exception that is raised when there is something wrong with
the connection.
"""
GlacierConnectionError = 1
SdbConnectionError = 2
def __init__(self, message, code=None, cause=None):
""" Handles the exception.
:param message: the error message.
:type message: str
:param code: the error code.
:type code:
:param cause: explanation on what caused the error.
:type cause: str
"""
GlacierException.__init__(self, message, code=code, cause=cause)
class CommunicationException(GlacierException):
"""
Exception that is raised when there is something wrong in
the communication with an external library like boto.
"""
def __init__(self, message, code=None, cause=None):
""" Handles the exception.
:param message: the error message.
:type message: str
:param code: the error code.
:type code:
:param cause: explanation on what caused the error.
:type cause: str
"""
GlacierException.__init__(self, message, code=code, cause=cause)
class ResponseException(GlacierException):
"""
Exception that is raised when there is an http response error.
"""
def __init__(self, message, code=None, cause=None):
GlacierException.__init__(self, message, code=code, cause=cause)
if __name__ == '__main__':
class ChildrenException(Exception):
def __init__(self, message):
Exception.__init__(self, message)
class ParentException(GlacierException):
def __init__(self, message, cause=None):
if cause:
GlacierException.__init__(self, message, cause=cause)
else:
GlacierException.__init__(self, message)
try:
try:
raise ChildrenException("parent")
        except ChildrenException as e:  # "except X as e" works on Python 2.6+ and 3
            raise ParentException("children", cause=e)
    except ParentException as e:
e.write(indentation='|| ')
| 38.805907 | 100 | 0.577145 | 8,630 | 0.938349 | 1,937 | 0.210612 | 0 | 0 | 0 | 0 | 4,075 | 0.443079 |
960d016ae24c4293c672a990c11ba81afe431984 | 29,912 | py | Python | modes/import_corpus.py | freingruber/JavaScript-Raider | d1c1fff2fcfc60f210b93dbe063216fa1a83c1d0 | ["Apache-2.0"] | 91 | 2022-01-24T07:32:34.000Z | 2022-03-31T23:37:15.000Z | modes/import_corpus.py | zeusguy/JavaScript-Raider | d1c1fff2fcfc60f210b93dbe063216fa1a83c1d0 | ["Apache-2.0"] | null | null | null | modes/import_corpus.py | zeusguy/JavaScript-Raider | d1c1fff2fcfc60f210b93dbe063216fa1a83c1d0 | ["Apache-2.0"] | 11 | 2022-01-24T14:21:12.000Z | 2022-03-31T23:37:23.000Z |
# Copyright 2022 @ReneFreingruber
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This mode can be started by passing the "--import_corpus_mode" flag to the fuzzer
# or by starting the fuzzer the first time (when no OUTPUT directory exists yet).
#
# The script imports new testcases into the current corpus.
# Please note that the progress of the script is not linear (especially when creating an initial corpus).
# The script will start slow (because it will find a lot of testcases with new behavior, and this requires
# standardization, minimization & state creation).
# These operations are slow because they require restarting the JS engine multiple times,
# and therefore it will take a longer time. After some time, the import mode will be faster because it finds fewer files
# with new coverage. At the end, the mode will again be slow (or maybe very slow) because it's processing the
# bigger testcases (testcases are sorted based on file size and handled from small files to big files).
# State creation for big input files is extremely slow.
# It may be better to skip these big testcases and continue, because later testcases can maybe further be
# minimized (which would then be fast again). => I created my initial corpus with a different script;
# skipping the big testcases is therefore not implemented here yet (and must manually be done).
# TODO: In my original code I also removed v8 native functions because they quickly lead to crashes,
# but I couldn't find the code anymore. I guess this should be implemented in this file somewhere at
# the end? (A hedged sketch of such a filter is included right after the imports below.)
# This affect at least the functions:
# %ProfileCreateSnapshotDataBlob
# %LiveEditPatchScript
# %IsWasmCode
# %IsAsmWasmCode
# %ConstructConsString
# %HaveSameMap
# %IsJSReceiver
# %HasSmiElements
# %HasObjectElements
# %HasDoubleElements
# %HasDictionaryElements
# %HasHoleyElements
# %HasSloppyArgumentsElements
# %HaveSameMap
# %HasFastProperties
# %HasPackedElements
#
# More information can be found in my master thesis page 115.
import utils
import os
import config as cfg
import native_code.speed_optimized_functions as speed_optimized_functions
from native_code.executor import Execution_Status
import sys
import random
import string
import re
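# Hedged sketch for the TODO in the header comment: a simple filter that strips
# the v8 runtime-test calls (the "%..." functions listed above) from imported
# testcases. The exact function set and the regex are assumptions, not the
# original (lost) implementation; nested parentheses in arguments are not handled.
def remove_v8_native_test_functions(content):
    v8_native_functions = [
        "%ProfileCreateSnapshotDataBlob", "%LiveEditPatchScript", "%IsWasmCode",
        "%IsAsmWasmCode", "%ConstructConsString", "%HaveSameMap", "%IsJSReceiver",
        "%HasSmiElements", "%HasObjectElements", "%HasDoubleElements",
        "%HasDictionaryElements", "%HasHoleyElements", "%HasSloppyArgumentsElements",
        "%HasFastProperties", "%HasPackedElements"]
    for native_func in v8_native_functions:
        # Replace the whole call with a harmless truthy literal so expressions
        # such as assertTrue(%HaveSameMap(a, b)) still parse.
        content = re.sub(re.escape(native_func) + r"\s*\([^()]*\)", "true", content)
    return content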
code_prefix = "function my_opt_func() {\n"
code_suffix1 = """
}
%OptimizeFunctionOnNextCall(my_opt_func);
my_opt_func();
"""
code_suffix2 = """
}
%PrepareFunctionForOptimization(my_opt_func);
%OptimizeFunctionOnNextCall(my_opt_func);
my_opt_func();
"""
code_suffix3 = """
}
my_opt_func();
%PrepareFunctionForOptimization(my_opt_func);
%OptimizeFunctionOnNextCall(my_opt_func);
my_opt_func();
"""
# These are just used for debugging
debugging_number_exceptions = 0
debugging_number_success = 0
debugging_number_new_coverage = 0
def import_corpus_mode(input_dir_to_import):
global code_prefix, code_suffix1, code_suffix2, code_suffix3
utils.msg("[i] Going to import another corpus to the current corpus...")
utils.msg("[i] Corpus dir which will be imported is: %s" % input_dir_to_import)
files_to_handle = []
already_seen_file_hashes = set()
utils.msg("[i] Going to read all files in directory... (this can take some time)")
for filename_to_import in os.listdir(input_dir_to_import):
if filename_to_import.endswith(".js"):
input_file_to_import = os.path.join(input_dir_to_import, filename_to_import)
# Just get file size
with open(input_file_to_import, 'r') as fobj:
content = fobj.read().rstrip()
sample_hash = utils.calc_hash(content)
if sample_hash not in already_seen_file_hashes:
# new file
files_to_handle.append((input_file_to_import, len(content)))
already_seen_file_hashes.add(sample_hash)
utils.msg("[i] Finished reading files. Going to sort files based on file size...")
# Sort based on filesize => start with small files => this ensures that the minimizer is faster
files_to_handle.sort(key=lambda x: x[1])
utils.msg("[i] Finished sorting, going to start importing...")
# Now start to import file by file
cfg.my_status_screen.set_current_operation("Importing")
total_number_files_to_import = len(files_to_handle)
number_files_already_imported = 0
for entry in files_to_handle:
(input_file_to_import, filesize) = entry
number_files_already_imported += 1
utils.msg("[i] Importing file (%d/%d): %s" % (number_files_already_imported, total_number_files_to_import, input_file_to_import))
with open(input_file_to_import, 'r') as fobj:
content = fobj.read().rstrip()
if len(content) > 200000: # 200 KB
continue # big files are too slow and are bad for mutation, so skip them
if '\x00' in content:
            continue  # ignore files with null bytes for the moment because the Python to C conversion does not support this
# Check normal execution:
check_if_testcase_triggers_new_behavior(content)
# Check adapted execution (e.g. with removed testsuite functions)
samples = preprocess_testcase(content)
for sample in samples:
check_if_testcase_triggers_new_behavior(sample)
# Now check if it triggers more coverage if the code gets compiled:
check_if_testcase_triggers_new_behavior(code_prefix + sample + code_suffix1)
check_if_testcase_triggers_new_behavior(code_prefix + sample + code_suffix2)
check_if_testcase_triggers_new_behavior(code_prefix + sample + code_suffix3)
if cfg.deterministic_preprocessing_enabled:
# And now start to preprocess all imported files! This can take a VERY long runtime
# => I would not recommend running this because it can easily take several weeks of runtime.
# It maybe makes sense for the first small testcases
cfg.deterministically_preprocess_queue_func()
return total_number_files_to_import
def check_if_testcase_triggers_new_behavior(content):
if len(content) > 10000: # 10 KB
# big files are too slow and are bad for mutation, so skip them
# Side note: I'm checking here for 10KB and in the above function for 200KB
        # because this function may also be invoked on smaller sub-parts of a testcase
        # from the main script
return
previous_stats_new_behavior = cfg.my_status_screen.get_stats_new_behavior()
# Restart the engine so that every testcase starts in a new v8 process
# (=> this slows down the process but having a good input corpus is important)
# If you want to be faster, you can maybe skip the engine restart here
cfg.exec_engine.restart_engine()
cfg.perform_execution_func(content, state=None)
current_stats_new_behavior = cfg.my_status_screen.get_stats_new_behavior()
if current_stats_new_behavior == previous_stats_new_behavior:
# File didn't result in new coverage and was therefore not imported (importing would be done by perform_execution() )!
        # Just to make sure that there was not a flawed execution, I try it again here
cfg.perform_execution_func(content, state=None)
# This is a debug version of the above one.
# The above one does all the required calculations (standardization, minimization, state creation),
# which is very slow. But if I just want to quickly check how many files I can import,
# then I'm using this debugging version (which skips all these steps).
# This version also does not restart the exec engine.
# To use it, just replace the call with this function.
def check_if_testcase_triggers_new_behavior_debugging(content):
global debugging_number_exceptions, debugging_number_success, debugging_number_new_coverage
if len(content) > 10000: # 10 KB
return
result = cfg.exec_engine.execute_safe(content)
if result.status == Execution_Status.SUCCESS:
debugging_number_success += 1
if result.num_new_edges > 0:
debugging_number_new_coverage += 1
# Dump the new coverage statistics
number_triggered_edges, total_number_possible_edges = cfg.exec_engine.get_number_triggered_edges()
if total_number_possible_edges == 0:
total_number_possible_edges = 1 # avoid division by zero
triggered_edges_in_percent = (100 * number_triggered_edges) / float(total_number_possible_edges)
utils.msg("[i] Found new coverage! (%d success, %d exceptions, %d new coverage); New Coverage: %.4f %%" % (debugging_number_success, debugging_number_exceptions, debugging_number_new_coverage, triggered_edges_in_percent))
elif result.status == Execution_Status.EXCEPTION_THROWN:
debugging_number_exceptions += 1
# TODO: This is pretty old code and needs a lot of refactoring/improvement ...
# TODO: Also better implement this whole "\t" and " " and "\n" checking...
# One testcase file can contain multiple testcases.
# That's why this function returns a list of samples.
def preprocess_testcase(code):
ret = []
tmp = ""
for line in code.split("\n"):
line_check = line.strip()
if line_check.startswith("import ") \
or line_check.startswith("import(") \
or line_check.startswith("export ") \
or line_check.startswith("loaded++") \
or line_check.startswith("await import"):
continue # remove import and export statements
tmp += line + "\n"
code = tmp
    # All the following function replacements were manually found
# The replacements can be found by starting this script and
# dumping all testcases which trigger an exception
# Then the testcases can manually be analyzed to understand
# why they lead to an exception. By doing this, the following
# functions were identified which are not defined as default
# JavaScript functions (in v8).
# Identification of these functions took a long time and corpus
# coverage can still greatly be improved by identifying more such
# functions. However, this is a time consuming task.
# Example: Replace wscript.echo() function calls with console.log()
pattern = re.compile("wscript.echo", re.IGNORECASE)
code = pattern.sub("console.log", code)
pattern = re.compile("CollectGarbage", re.IGNORECASE)
code = pattern.sub("gc", code)
code = code.replace("writeLine", "console.log")
code = code.replace("WScript.SetTimeout", "setTimeout")
code = code.replace("helpers.writeln", "console.log")
code = code.replace("$ERROR", "console.log")
code = code.replace("helpers.printObject", "console.log")
code = code.replace("WScript.Arguments", "[]")
code = code.replace("assert.unreachable()", "")
code = code.replace("assertUnreachable()", "")
code = code.replace("$DONOTEVALUATE()", "")
code = code.replace("assertStmt", "eval")
code = code.replace("inSection", "Number")
code = code.replace("numberOfDFGCompiles", "Number")
code = code.replace("optimizeNextInvocation", "%OptimizeFunctionOnNextCall")
code = code.replace("printBugNumber", "console.log")
code = code.replace("printStatus", "console.log")
code = code.replace("saveStack()", "0")
code = code.replace("gcPreserveCode()", "gc()")
code = code.replace("platformSupportsSamplingProfiler()", "true")
# Example:
# var OProxy = $262.createRealm().global.Proxy;
# =>
# var OProxy = Proxy;
code = code.replace("$262.createRealm().global.", "")
# Quit() is detected as a crash because v8 is closed, therefore I remove it
# However, there can be functions like test_or_quit() where it could incorrectly remove quit()
# Therefore I check for a space or a tab before. This is not a perfect solution, but filters
# out some crashes
# TODO: I now implemented better JavaScript parsing and should use the fuzzer functionality to replace it..
code = code.replace(" quit()", "")
code = code.replace("\tquit()", "")
code = code.replace("\nquit()", "\n")
code = code.replace(" quit(0)", "")
code = code.replace("\tquit(0)", "")
code = code.replace("\nquit(0)", "\n")
code = code.replace("trueish", "true") # it seems like SpiderMonkey accepts "trueish" as argument to asserEq oder reportCompare functions...
code = remove_function_call(code, "this.WScript.LoadScriptFile")
code = remove_function_call(code, "wscript.loadscriptfile")
code = code.replace("WScript.LoadScript(", "eval(")
code = code.replace("evalcx(", "eval(") # from SpiderMonkey, however, it can have a 2nd argument for the context; so this modification is not 100% correct
code = remove_function_call(code, "WScript.LoadModuleFile")
code = remove_function_call(code, "WScript.LoadModule")
code = remove_function_call(code, "WScript.Attach")
code = remove_function_call(code, "WScript.Detach")
code = remove_function_call(code, "saveStack") # I already removed "saveStack()" but this here is to remove saveStack calls where an argument is passed
code = remove_function_call(code, "WScript.FalseFile")
code = remove_function_call(code, "assert.fail")
code = remove_function_call(code, "assert.isUndefined")
code = remove_function_call(code, "description")
code = remove_function_call(code, "assertOptimized")
code = remove_function_call(code, "assertDoesNotThrow")
code = remove_function_call(code, "assertUnoptimized")
code = remove_function_call(code, "assertPropertiesEqual")
code = remove_function_call(code, "$DONE")
code = code.replace("$DONE", "1")
code = remove_function_call(code, "assertParts")
code = remove_function_call(code, "verifyProperty")
code = remove_function_call(code, "verifyWritable")
code = remove_function_call(code, "verifyNotWritable")
code = remove_function_call(code, "verifyEnumerable")
code = remove_function_call(code, "verifyNotEnumerable")
code = remove_function_call(code, "verifyConfigurable")
code = remove_function_call(code, "verifyNotConfigurable")
code = remove_function_call(code, "assertThrowsInstanceOf")
code = remove_function_call(code, "testOption")
code = remove_function_call(code, "assert.calls")
code = remove_function_call(code, "generateBinaryTests")
code = remove_function_call(code, "crash") # TODO , does this detect too many functions which end with "crash"?
# can also be code like =>crash("foo");
# This is a special function in SpiderMonkey which supports fuzzing (?)
code = remove_function_call(code, "offThreadCompileScript")
code = remove_function_call(code, "startgc") # maybe I should change it with the gc() function? But then I need to remove the startgc() argument
code = remove_function_call(code, "gczeal") # some other garbage collection related stuff in SpiderMonkey
code = remove_function_call(code, "gcslice")
code = remove_function_call(code, "schedulezone")
code = remove_function_call(code, "schedulegc")
code = remove_function_call(code, "unsetgczeal")
code = remove_function_call(code, "gcstate")
# The following is for checks like:
# if (this.WScript && this.WScript.LoadScriptFile) {
# Which should become:
# if (False && False) {
code = code.replace("WScript.LoadScriptFile", "False")
code = code.replace("WScript.LoadScript", "False")
code = code.replace("WScript.LoadModuleFile", "False")
code = code.replace("WScript.LoadModule", "False")
code = code.replace("this.WScript", "False")
code = code.replace("this.False", "False")
code = code.replace("WScript", "False")
code = code.replace("$MAX_ITERATIONS", "5")
code = remove_function_call(code, "utils.load")
if " load" not in code and "\tload" not in code:
# Little hack, I want to remove load function calls at the start of a file which load other JS files
        # But if load is used as a function, e.g. in code like:
# function load(a) {
# I don't want to remove it
code = remove_function_call(code, "load")
code = remove_function_call(code, "assert.isnotundefined")
code = remove_function_call(code, "assert.isdefined")
code = remove_function_call(code, "assert.throws")
code = remove_function_call(code, "assert_throws")
code = remove_function_call(code, "assertThrows")
code = remove_function_call(code, "assertDoesNotThrow")
code = remove_function_call(code, "shouldThrow")
code = remove_function_call(code, "assertNull")
code = remove_function_call(code, "shouldBeEqualToString")
code = remove_function_call(code, "assertThrowsEquals")
code = remove_function_call(code, "new BenchmarkSuite") # This is not a function but it works
code = remove_function_call(code, "assertNoEntry")
code = remove_function_call(code, "assertEntry")
code = remove_function_call(code, " timeout")
code = remove_function_call(code, "\ttimeout")
code = remove_function_call(code, "\ntimeout")
code = remove_function_call(code, "testFailed")
code = remove_function_call(code, "finishJSTest")
code = remove_function_call(code, "assertIteratorDone")
code = remove_function_call(code, "assertIteratorNext")
code = remove_function_call(code, "assertThrowsValue")
code = remove_function_call(code, "Assertion")
code = remove_function_call(code, "assertStackLengthEq")
code = remove_function_call(code, "noInline")
code = remove_function_call(code, "enableGeckoProfiling")
code = remove_function_call(code, "enableSingleStepProfiling")
code = remove_function_call(code, "enableSingleStepProfiling")
code = remove_function_call(code, "disableSingleStepProfiling")
code = remove_function_call(code, "enableGeckoProfilingWithSlowAssertions")
code = remove_function_call(code, "assertThrownErrorContains")
code = remove_function_call(code, "assertDecl") # can maybe be fixed better
code = remove_function_call(code, "assertExpr")
code = remove_function_call(code, "assert.compareIterator")
code = remove_function_call(code, "$DETACHBUFFER")
code = remove_function_call(code, "checkSpeciesAccessorDescriptor")
code = remove_function_call(code, "assertPropertyExists")
code = remove_function_call(code, "assertPropertyDoesNotExist")
code = remove_function_call(code, "assert_equal_to_array")
code = replace_assert_function(code, "assert.sameValue", "==")
code = replace_assert_function(code, "reportCompare", "==")
code = replace_assert_function(code, "assert.areNotEqual", "!=")
code = replace_assert_function(code, "assert.areEqual", "==")
code = replace_assert_function(code, "assert.equals", "==")
code = replace_assert_function(code, "assert.strictEqual", "===")
code = replace_assert_function(code, "assert_equals", "==")
code = replace_assert_function(code, "assertMatches", "==")
code = replace_assert_function(code, "assertSame", "==")
code = replace_assert_function(code, "assertEqualsDelta", "==")
code = replace_assert_function(code, "assertNotEquals", "!=")
code = replace_assert_function(code, "assert.notSameValue", "!=")
code = replace_assert_function(code, "assertEq", "==")
code = replace_assert_function(code, "verifyEqualTo", "==")
code = replace_assert_function(code, "assert.compareArray", "==")
code = replace_assert_function(code, "compareArray", "==")
code = replace_assert_function(code, "assertDeepEq", "==")
code = replace_assert_function(code, "assertArrayEquals", "==")
code = replace_assert_function(code, "assertArray", "==")
code = replace_assert_function(code, "assertEqArray", "==")
    # These must not be patched if only v8 is checked; they don't lead to a crash.
    # Only the static asserts lead to a crash.
# code = replace_assert_function(code, "%StrictEqual", "===")
# code = replace_assert_function(code, "%StrictNotEqual", "!==")
# code = replace_assert_function(code, "%Equal", "==")
# %GreaterThanOrEqual
# %LessThan
# %GreaterThan
# %LessThanOrEqual
#
# TODO:
# patching "assertIteratorResult" is more complicated..
# TODO: More complicated :
# verifySymbolSplitResult
# TODO WebKit:
# assert.var fhgjeduyko=array[i];
# => var fhgjeduyko=array[i];
code = replace_assert_function(code, "assertInstanceof", "instanceof")
code = replace_assert_function(code, "assertEquals", "==")
code = replace_assert_function(code, "assertNotSame", "!=") # assertNotSame(Atomics.wake, Atomics.notify);
# The remove_assert_function() calls are for assert functions which just have 1 argument
code = remove_assert_function(code, "assert.isTrue")
code = remove_assert_function(code, "assert.isFalse")
code = remove_assert_function(code, "assert.assertFalse")
code = remove_assert_function(code, "assertFalse")
code = remove_assert_function(code, "assertTrue")
code = remove_assert_function(code, "assert_true")
code = remove_assert_function(code, "%TurbofanStaticAssert")
code = remove_assert_function(code, "assert.shouldBeTrue")
code = remove_assert_function(code, "assert.shouldBeFalse")
code = remove_assert_function(code, "assert.shouldBe")
code = remove_assert_function(code, "assert.assertNotNull")
code = remove_assert_function(code, "shouldBeTrue")
code = remove_assert_function(code, "shouldBeFalse")
code = remove_assert_function(code, "shouldBe")
code = remove_assert_function(code, "assertNotNull")
code = remove_assert_function(code, "testJSON")
code = remove_assert_function(code, "assertNativeFunction")
code = remove_assert_function(code, "assert_malformed")
code = remove_assert_function(code, "assertIteratorResult")
code = remove_assert_function(code, "assert.doesNotThrow")
code = remove_assert_function(code, "assert") # This should be one of the last replacements!
    # This is a stupid last hack: in some cases assert.throws is not correctly detected because it's inside a string
    # which is later evaluated. That means the logic to detect the end of the function call does not work correctly,
    # so it's not removed above. Here I just replace it with a call to Number to ensure that it does not crash.
code = code.replace("assert.throws", "Number")
if "testRunner.run" in code:
# TODO I also need to add function definitions from the start
# E.g.: WebKit testcase: tc50725.js
# or tc1061.js from ChakraCore
start_testcases = ["body: function () {", "body() {"]
while True:
finished = True
for start_testcase in start_testcases:
if start_testcase in code:
finished = False
if finished:
break
for start_testcase in start_testcases:
if start_testcase not in code:
continue
idx = code.index(start_testcase)
rest = code[idx + len(start_testcase):]
idx_end = speed_optimized_functions.get_index_of_next_symbol_not_within_string(rest, "}")
testcase_code = rest[:idx_end]
code = rest[idx_end + 1:]
ret.append(testcase_code)
elif "oomTest" in code:
code = "function oomTest(func_name) { func_name(); }\n" + code
ret.append(code)
elif "runtest" in code:
code = "function runtest(func_name) { func_name(); }\n" + code
ret.append(code)
else:
# Just add it
ret.append(code)
return ret
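
# Illustrative example of what preprocess_testcase() does (shown as comments;
# the exact output depends on the helper functions defined below):
#
#   Input testcase:
#       import("helper.js");
#       assert.sameValue(typeof f, 'function');
#   Output (single sample in the returned list):
#       typeof f == 'function';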
def remove_function_call(code, function_call_str):
if function_call_str[-1] != "(":
function_call_str = function_call_str + "("
function_call_str = function_call_str.lower()
while True:
code_lowered = code.lower()
if function_call_str not in code_lowered:
return code
idx = code_lowered.index(function_call_str)
if idx != 0:
previous_char = code[idx-1]
if previous_char != "\n" and previous_char != " " and previous_char != "\t":
return code
before = code[:idx]
tmp = code[idx + len(function_call_str):]
idx_end = speed_optimized_functions.get_index_of_next_symbol_not_within_string(tmp, ")")
if idx_end == -1:
# print("TODO Internal error in remove_function_call():")
# print("function_call_str:")
# print(function_call_str)
# print("code:")
# print(code)
# sys.exit(-1)
return code
try:
after = tmp[idx_end+1:]
except:
# The ")" symbol was the last symbol in the string
after = ""
code = before+after
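
# Illustrative example: remove_function_call(code, "load") turns
#
#   foo()
#   load("other_testcase.js")
#   bar()
#
# into
#
#   foo()
#
#   bar()
#
# Matching is case-insensitive and only applies when the call is preceded by
# a newline, space or tab; a trailing ";" of the removed call is left behind.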
def replace_assert_function(code, assert_function_str, comparison_str):
if assert_function_str[-1] != "(":
assert_function_str = assert_function_str + "("
original_code = code
original_code_len = len(original_code)
while True:
if len(code) > original_code_len:
            # This means the last iteration contained a bug
# E.g.: if I replaced something like reportCompare(1,2) but the
# actual JavaScript code didn't contain a second argument =>
# reportCompare(1)
# Then this code can be incorrect and start to create bigger samples
# I catch this here and just return the unmodified code
# Another option is that a regex string is not correctly detected
return original_code
if assert_function_str not in code:
return code
# Examples:
# assert.sameValue(typeof f, 'function');
# assert.sameValue(f(), 'function declaration');
idx = code.index(assert_function_str)
before = code[:idx]
rest = code[idx + len(assert_function_str):]
idx_end = speed_optimized_functions.get_index_of_next_symbol_not_within_string(rest, ",")
part1 = rest[:idx_end]
rest = rest[idx_end + 1:]
idx_end = speed_optimized_functions.get_index_of_next_symbol_not_within_string(rest, ")")
if idx_end == -1:
return code # return the unmodified code; this is most likely because the regex string was not correctly detected
# and inside the regex string a symbol from another string was used...
idx_command = speed_optimized_functions.get_index_of_next_symbol_not_within_string(rest, ",")
if idx_command == -1:
idx_command = idx_end
elif idx_command > idx_end:
idx_command = idx_end
if idx_end == 0:
return code # some buggy case
part2 = rest[:idx_command]
rest = rest[idx_end + 1:]
if len(rest) == 0:
# can happen with some funny unicode testcases
return original_code
if rest[0] == ";":
rest = rest[1:] # remove the ";"
code = before + part1.strip() + " " + comparison_str + " " + part2.strip() + ";" + rest
else:
code = before + part1.strip() + " " + comparison_str + " " + part2.strip() + " " + rest
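
# Illustrative example: replace_assert_function(code, "assert.sameValue", "==")
# rewrites
#
#   assert.sameValue(f(), 'function declaration');
#
# into
#
#   f() == 'function declaration';
#
# i.e. the assert call becomes a plain comparison expression that can still be
# executed (and mutated) without requiring the assert API.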
def remove_assert_function(code, assert_function_str):
if assert_function_str[-1] != "(":
assert_function_str = assert_function_str + "("
while True:
if assert_function_str not in code:
return code
# Examples:
# assert.isTrue(/error in callback/.test(frames[0]), `Invalid first frame "${frames[0]}" for ${builtin.name}`);
idx = code.index(assert_function_str)
before = code[:idx]
rest = code[idx + len(assert_function_str):]
idx_end = speed_optimized_functions.get_index_of_next_symbol_not_within_string(rest, ")")
idx_command = speed_optimized_functions.get_index_of_next_symbol_not_within_string(rest, ",")
if idx_end == -1:
# print("TODO Internal coding error in remove_assert_function():")
# print("assert_function_str: %s" % assert_function_str)
# print(code)
# print("----------------------")
# print("Rest:")
# print(rest)
# sys.exit(-1)
return code
if idx_command == -1:
idx_command = idx_end
elif idx_command > idx_end:
idx_command = idx_end
assert_statement = rest[:idx_command]
rest = rest[idx_end+1:]
# I add here a var *varname* statement because functions can not be standalone.
# E.g.:
# assert.doesNotThrow(function() { Object.defineProperty(obj, key, { value: 'something', enumerable: true }); }, "Object.defineProperty uses ToPropertyKey. Property is added to the object");
# would result in:
# function() { ....}
# this would throw an exception, but
# var xyz = function() { ... }
# doesn't throw
random_variable_name = ''.join(random.sample(string.ascii_lowercase, 10))
if rest[0] == ";":
rest = rest[1:] # remove the ";"
code = before + "var " + random_variable_name + "=" + assert_statement.strip() + ";" + rest
else:
code = before + "var " + random_variable_name + "=" + assert_statement.strip() + " " + rest
| 46.7375 | 233 | 0.688587 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13,886 | 0.464213 |
960d83d2c94c5959a98a0bd8469e0e2f1a880ff6 | 5,590 | py | Python | crazyflie_demo/scripts/mapping/mapper.py | wydmynd/crazyflie_tom | 0d1cc63dcd0f055d78da82515729ce2098e086cf | ["MIT"] | null | null | null | crazyflie_demo/scripts/mapping/mapper.py | wydmynd/crazyflie_tom | 0d1cc63dcd0f055d78da82515729ce2098e086cf | ["MIT"] | null | null | null | crazyflie_demo/scripts/mapping/mapper.py | wydmynd/crazyflie_tom | 0d1cc63dcd0f055d78da82515729ce2098e086cf | ["MIT"] | null | null | null |
#!/usr/bin/env python
""" Simple occupancy-grid-based mapping without localization.
Subscribed topics:
/scan
Published topics:
/map
/map_metadata
Author: Nathan Sprague
Version: 2/13/14
"""
import rospy
from nav_msgs.msg import OccupancyGrid, MapMetaData
from geometry_msgs.msg import Pose, Point, Quaternion
from sensor_msgs.msg import LaserScan
import numpy as np
class Map(object):
"""
The Map class stores an occupancy grid as a two dimensional
numpy array.
Public instance variables:
width -- Number of columns in the occupancy grid.
height -- Number of rows in the occupancy grid.
resolution -- Width of each grid square in meters.
origin_x -- Position of the grid cell (0,0) in
origin_y -- in the map coordinate system.
grid -- numpy array with height rows and width columns.
Note that x increases with increasing column number and y increases
with increasing row number.
"""
def __init__(self, origin_x=-2.5, origin_y=-2.5, resolution=.1,
width=50, height=50):
""" Construct an empty occupancy grid.
Arguments: origin_x,
origin_y -- The position of grid cell (0,0) in the
map coordinate frame.
resolution-- width and height of the grid cells
in meters.
width,
height -- The grid will have height rows and width
columns cells. width is the size of
the x-dimension and height is the size
of the y-dimension.
The default arguments put (0,0) in the center of the grid.
"""
self.origin_x = origin_x
self.origin_y = origin_y
self.resolution = resolution
self.width = width
self.height = height
self.grid = np.zeros((height, width))
def to_message(self):
""" Return a nav_msgs/OccupancyGrid representation of this map. """
grid_msg = OccupancyGrid()
# Set up the header.
grid_msg.header.stamp = rospy.Time.now()
grid_msg.header.frame_id = "map"
# .info is a nav_msgs/MapMetaData message.
grid_msg.info.resolution = self.resolution
grid_msg.info.width = self.width
grid_msg.info.height = self.height
# Rotated maps are not supported... quaternion represents no
# rotation.
grid_msg.info.origin = Pose(Point(self.origin_x, self.origin_y, 0),
Quaternion(0, 0, 0, 1))
# Flatten the numpy array into a list of integers from 0-100.
# This assumes that the grid entries are probalities in the
# range 0-1. This code will need to be modified if the grid
# entries are given a different interpretation (like
# log-odds).
flat_grid = self.grid.reshape((self.grid.size,)) * 100
grid_msg.data = list(np.round(flat_grid))
return grid_msg
    def set_cell(self, x, y, val):
        """ Set the value of a cell in the grid.

        Arguments:
            x, y - This is a point in the map coordinate frame.
            val - This is the value that should be assigned to the
                  grid cell that contains (x, y).

        Points that land outside of the grid are thrown out.
        """
        # Convert the map-frame point into (column, row) grid indices.
        col = int((x - self.origin_x) / self.resolution)
        row = int((y - self.origin_y) / self.resolution)
        if 0 <= col < self.width and 0 <= row < self.height:
            self.grid[row, col] = val
class Mapper(object):
"""
The Mapper class creates a map from laser scan data.
"""
def __init__(self):
""" Start the mapper. """
rospy.init_node('mapper')
self._map = Map()
# Setting the queue_size to 1 will prevent the subscriber from
# buffering scan messages. This is important because the
# callback is likely to be too slow to keep up with the scan
# messages. If we buffer those messages we will fall behind
# and end up processing really old scans. Better to just drop
# old scans and always work with the most recent available.
rospy.Subscriber('scan',
LaserScan, self.scan_callback, queue_size=1)
# Latched publishers are used for slow changing topics like
# maps. Data will sit on the topic until someone reads it.
self._map_pub = rospy.Publisher('map', OccupancyGrid, latch=True)
self._map_data_pub = rospy.Publisher('map_metadata',
MapMetaData, latch=True)
rospy.spin()
def scan_callback(self, scan):
""" Update the map on every scan callback. """
# Fill some cells in the map just so we can see that something is
# being published.
self._map.grid[0, 0] = 1.0
self._map.grid[0, 1] = .9
self._map.grid[0, 2] = .7
self._map.grid[1, 0] = .5
self._map.grid[2, 0] = .3
# Now that the map is updated, publish it!
rospy.loginfo("Scan is processed, publishing updated map.")
self.publish_map()
def publish_map(self):
""" Publish the map. """
grid_msg = self._map.to_message()
self._map_data_pub.publish(grid_msg.info)
self._map_pub.publish(grid_msg)
if __name__ == '__main__':
try:
m = Mapper()
except rospy.ROSInterruptException:
pass
| 33.878788 | 75 | 0.581038 | 5,101 | 0.912522 | 0 | 0 | 0 | 0 | 0 | 0 | 3,251 | 0.581574 |
960dcc8a44c5847743443e7deb1bcd0169e59d72 | 469 | py | Python | flags.py | oaxiom/glbase3 | 9d3fc1efaad58ffb97e5b8126c2a96802daf9bac | ["MIT"] | 8 | 2019-06-11T02:13:20.000Z | 2022-02-22T09:27:23.000Z | flags.py | JackNg88/glbase3 | 4af190d06b89ef360dcba201d9e4e81f41ef8379 | ["MIT"] | 6 | 2020-12-18T15:08:14.000Z | 2021-05-22T00:31:57.000Z | flags.py | JackNg88/glbase3 | 4af190d06b89ef360dcba201d9e4e81f41ef8379 | ["MIT"] | 2 | 2020-05-06T04:27:03.000Z | 2022-02-22T09:28:25.000Z |
"""
flags.py
. should be renamed helpers...
. This file is scheduled for deletion
"""
"""
valid accessory tags:
"any_tag": {"code": "code_insert_as_string"} # execute arbitrary code to construct this key.
"dialect": csv.excel_tab # dialect of the file, default = csv, set this to use tsv. or sniffer
"skip_lines": number # number of lines to skip at the head of the file.
"skiptill": skip until I see the first instance of <str>
"""
# lists of format-specifiers.
| 23.45 | 94 | 0.712154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 463 | 0.987207 |
960deebf26b738896cbcd2ee2bd2d46605e19141 | 2,106 | py | Python | packages/jet_bridge/jet_bridge/app.py | goncalomi/jet-bridge | ed968ac3407affdc99059faafb86ec67ac995838 | ["MIT"] | 2 | 2020-04-18T14:34:44.000Z | 2020-04-18T14:34:47.000Z | packages/jet_bridge/jet_bridge/app.py | goncalomi/jet-bridge | ed968ac3407affdc99059faafb86ec67ac995838 | ["MIT"] | null | null | null | packages/jet_bridge/jet_bridge/app.py | goncalomi/jet-bridge | ed968ac3407affdc99059faafb86ec67ac995838 | ["MIT"] | null | null | null |
import os
import tornado.ioloop
import tornado.web
from jet_bridge.handlers.temporary_redirect import TemporaryRedirectHandler
from jet_bridge_base import settings as base_settings
from jet_bridge_base.views.api import ApiView
from jet_bridge_base.views.image_resize import ImageResizeView
from jet_bridge_base.views.file_upload import FileUploadView
from jet_bridge_base.views.message import MessageView
from jet_bridge_base.views.model import ModelViewSet
from jet_bridge_base.views.model_description import ModelDescriptionView
from jet_bridge_base.views.register import RegisterView
from jet_bridge_base.views.reload import ReloadView
from jet_bridge_base.views.sql import SqlView
from jet_bridge import settings, media
from jet_bridge.handlers.view import view_handler
from jet_bridge.handlers.not_found import NotFoundHandler
from jet_bridge.router import Router
def make_app():
router = Router()
router.register('/api/models/(?P<model>[^/]+)/', view_handler(ModelViewSet))
urls = [
(r'/', TemporaryRedirectHandler, {'url': "/api/"}),
(r'/register/', view_handler(RegisterView)),
(r'/api/', view_handler(ApiView)),
(r'/api/register/', view_handler(RegisterView)),
(r'/api/model_descriptions/', view_handler(ModelDescriptionView)),
(r'/api/sql/', view_handler(SqlView)),
(r'/api/messages/', view_handler(MessageView)),
(r'/api/file_upload/', view_handler(FileUploadView)),
(r'/api/image_resize/', view_handler(ImageResizeView)),
        (r'/api/reload/', view_handler(ReloadView)),
    ]

    urls += router.urls

    # Only serve /media/ from the local filesystem when file storage is configured.
    if settings.MEDIA_STORAGE == media.MEDIA_STORAGE_FILE:
        urls.append((r'/media/(.*)', tornado.web.StaticFileHandler, {'path': settings.MEDIA_ROOT}))
return tornado.web.Application(
handlers=urls,
debug=settings.DEBUG,
default_handler_class=NotFoundHandler,
template_path=os.path.join(base_settings.BASE_DIR, 'templates'),
autoreload=settings.DEBUG
)
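
# Minimal sketch of how this application factory is typically served
# (illustrative only; the real entry point lives elsewhere in jet-bridge):
#
# if __name__ == '__main__':
#     make_app().listen(8888)
#     tornado.ioloop.IOLoop.current().start()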
| 39 | 99 | 0.738367 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 248 | 0.117759 |
960e05f94b044cbb96eace708beb765aa68c9708 | 1,553 | py | Python | openslides_backend/services/media/adapter.py | FinnStutzenstein/openslides-backend | fffc152f79d3446591e07a6913d9fdf30b46f577 | [
"MIT"
]
| null | null | null | openslides_backend/services/media/adapter.py | FinnStutzenstein/openslides-backend | fffc152f79d3446591e07a6913d9fdf30b46f577 | [
"MIT"
]
| null | null | null | openslides_backend/services/media/adapter.py | FinnStutzenstein/openslides-backend | fffc152f79d3446591e07a6913d9fdf30b46f577 | [
"MIT"
]
| null | null | null | import requests
from ...shared.exceptions import MediaServiceException
from ...shared.interfaces.logging import LoggingModule
from .interface import MediaService
class MediaServiceAdapter(MediaService):
"""
Adapter to connect to media service.
"""
def __init__(self, media_url: str, logging: LoggingModule) -> None:
self.logger = logging.getLogger(__name__)
self.media_url = media_url + "/"
def _upload(self, file: str, id: int, mimetype: str, subpath: str) -> None:
url = self.media_url + subpath + "/"
payload = {"file": file, "id": id, "mimetype": mimetype}
self.logger.debug("Starting upload of file")
try:
response = requests.post(url, json=payload)
except requests.exceptions.ConnectionError:
msg = "Connect to mediaservice failed."
self.logger.debug("Upload of file: " + msg)
raise MediaServiceException(msg)
if response.status_code != 200:
msg = f"Mediaservice Error: {str(response.content)}"
self.logger.debug("Upload of file: " + msg)
raise MediaServiceException(msg)
self.logger.debug("File successfully uploaded to the media service")
def upload_mediafile(self, file: str, id: int, mimetype: str) -> None:
subpath = "upload_mediafile"
self._upload(file, id, mimetype, subpath)
def upload_resource(self, file: str, id: int, mimetype: str) -> None:
subpath = "upload_resource"
self._upload(file, id, mimetype, subpath)
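
# Minimal usage sketch (all values are hypothetical; `logging` must be the
# LoggingModule used across openslides-backend):
#
# adapter = MediaServiceAdapter("http://media:9006", logging)
# adapter.upload_mediafile(base64_encoded_content, 42, "application/pdf")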
| 37.878049 | 79 | 0.647778 | 1,387 | 0.89311 | 0 | 0 | 0 | 0 | 0 | 0 | 302 | 0.194462 |
960faa636c63399c1988c58ce0e7c98b90dc797e | 169 | py | Python | Lib/async/test/test_echoupper.py | pyparallel/pyparallel | 11e8c6072d48c8f13641925d17b147bf36ee0ba3 | [
"PSF-2.0"
]
| 652 | 2015-07-26T00:00:17.000Z | 2022-02-24T18:30:04.000Z | Lib/async/test/test_echoupper.py | tpn/pyparallel | 11e8c6072d48c8f13641925d17b147bf36ee0ba3 | [
"PSF-2.0"
]
| 8 | 2015-09-07T03:38:19.000Z | 2021-05-23T03:18:51.000Z | Lib/async/test/test_echoupper.py | tpn/pyparallel | 11e8c6072d48c8f13641925d17b147bf36ee0ba3 | [
"PSF-2.0"
]
| 40 | 2015-07-24T19:45:08.000Z | 2021-11-01T14:54:56.000Z | import async
from async.services import EchoUpperData
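# Note: "async" here is PyParallel's async module (this test lives in the
# PyParallel source tree), not the stdlib asyncio or a third-party package.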
server = async.server('10.211.55.3', 20007)
async.register(transport=server, protocol=EchoUpperData)
async.run()
| 21.125 | 56 | 0.792899 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.076923 |
960fe6f4df41a131c506151d154738d3ea6e3c53 | 533 | py | Python | alerter/src/alerter/alert_code/node/evm_alert_code.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | ["Apache-2.0"] | 41 | 2019-08-23T12:40:42.000Z | 2022-03-28T11:06:02.000Z | alerter/src/alerter/alert_code/node/evm_alert_code.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | ["Apache-2.0"] | 147 | 2019-08-30T22:09:48.000Z | 2022-03-30T08:46:26.000Z | alerter/src/alerter/alert_code/node/evm_alert_code.py | SimplyVC/panic | 2f5c327ea0d14b6a49dc8f4599a255048bc2ff6d | ["Apache-2.0"] | 3 | 2019-09-03T21:12:28.000Z | 2021-08-18T14:27:56.000Z |
from ..alert_code import AlertCode
class EVMNodeAlertCode(AlertCode):
NoChangeInBlockHeight = 'evm_node_alert_1'
BlockHeightUpdatedAlert = 'evm_node_alert_2'
BlockHeightDifferenceIncreasedAboveThresholdAlert = 'evm_node_alert_3'
BlockHeightDifferenceDecreasedBelowThresholdAlert = 'evm_node_alert_4'
InvalidUrlAlert = 'evm_node_alert_5'
ValidUrlAlert = 'evm_node_alert_6'
NodeWentDownAtAlert = 'evm_node_alert_7'
NodeBackUpAgainAlert = 'evm_node_alert_8'
NodeStillDownAlert = 'evm_node_alert_9'
| 38.071429 | 74 | 0.806754 | 495 | 0.928705 | 0 | 0 | 0 | 0 | 0 | 0 | 162 | 0.30394 |
96106fecaab4ad8d3cfef08e2a652f7ab8fec921 | 422 | py | Python | blaze/compute/tests/test_pmap.py | jdmcbr/blaze | 79515a8f0d25a0ff7f87a4cfbed615858241c832 | ["BSD-3-Clause"] | 1 | 2015-05-17T23:17:12.000Z | 2015-05-17T23:17:12.000Z | blaze/compute/tests/test_pmap.py | jreback/blaze | 85c39335cac4ef7f2921a7f621bc13525880fc44 | ["BSD-3-Clause"] | null | null | null | blaze/compute/tests/test_pmap.py | jreback/blaze | 85c39335cac4ef7f2921a7f621bc13525880fc44 | ["BSD-3-Clause"] | null | null | null |
from blaze import compute, resource, symbol, discover
from blaze.utils import example
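# A one-element list lets mymap() below flip this flag from inside the wrapper
# without needing a ``global`` declaration.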
flag = [False]
def mymap(func, *args):
flag[0] = True
return map(func, *args)
def test_map_called_on_resource_star():
r = resource(example('accounts_*.csv'))
s = symbol('s', discover(r))
flag[0] = False
a = compute(s.count(), r)
b = compute(s.count(), r, map=mymap)
assert a == b
assert flag[0]
| 21.1 | 53 | 0.637441 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.045024 |
9610832f6a592c17ec9781319d909b5b964100ab | 15,186 | py | Python | mwtab/mwschema.py | MoseleyBioinformaticsLab/mwtab | 1bc1e3715538348b29a5760a9c3184fe04f568a6 | ["BSD-3-Clause-Clear"] | 7 | 2018-02-02T07:50:20.000Z | 2021-03-14T22:46:58.000Z | mwtab/mwschema.py | MoseleyBioinformaticsLab/mwtab | 1bc1e3715538348b29a5760a9c3184fe04f568a6 | ["BSD-3-Clause-Clear"] | 2 | 2019-02-14T08:38:54.000Z | 2020-02-19T08:08:02.000Z | mwtab/mwschema.py | MoseleyBioinformaticsLab/mwtab | 1bc1e3715538348b29a5760a9c3184fe04f568a6 | ["BSD-3-Clause-Clear"] | 1 | 2019-10-12T23:38:44.000Z | 2019-10-12T23:38:44.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
mwtab.mwschema
~~~~~~~~~~~~~~
This module provides schema definitions for different sections of the
``mwTab`` Metabolomics Workbench format.
"""
import sys
from schema import Schema, Optional, Or
if sys.version_info.major == 2:
str = unicode
metabolomics_workbench_schema = Schema(
{
"VERSION": str,
"CREATED_ON": str,
Optional("STUDY_ID"): str,
Optional("ANALYSIS_ID"): str,
Optional("PROJECT_ID"): str,
Optional("HEADER"): str,
Optional("DATATRACK_ID"): str
}
)
project_schema = Schema(
{
"PROJECT_TITLE": str,
Optional("PROJECT_TYPE"): str,
"PROJECT_SUMMARY": str,
"INSTITUTE": str,
Optional("DEPARTMENT"): str,
Optional("LABORATORY"): str,
"LAST_NAME": str,
"FIRST_NAME": str,
"ADDRESS": str,
"EMAIL": str,
"PHONE": str,
Optional("FUNDING_SOURCE"): str,
Optional("PROJECT_COMMENTS"): str,
Optional("PUBLICATIONS"): str,
Optional("CONTRIBUTORS"): str,
Optional("DOI"): str
}
)
study_schema = Schema(
{
"STUDY_TITLE": str,
Optional("STUDY_TYPE"): str,
"STUDY_SUMMARY": str,
"INSTITUTE": str,
Optional("DEPARTMENT"): str,
Optional("LABORATORY"): str,
"LAST_NAME": str,
"FIRST_NAME": str,
"ADDRESS": str,
"EMAIL": str,
"PHONE": str,
Optional("NUM_GROUPS"): str,
Optional("TOTAL_SUBJECTS"): str,
Optional("NUM_MALES"): str,
Optional("NUM_FEMALES"): str,
Optional("STUDY_COMMENTS"): str,
Optional("PUBLICATIONS"): str, # assumed
Optional("SUBMIT_DATE"): str # assumed
}
)
subject_schema = Schema(
{
"SUBJECT_TYPE": str,
"SUBJECT_SPECIES": str,
Optional("TAXONOMY_ID"): str,
Optional("GENOTYPE_STRAIN"): str,
Optional("AGE_OR_AGE_RANGE"): str,
Optional("WEIGHT_OR_WEIGHT_RANGE"): str,
Optional("HEIGHT_OR_HEIGHT_RANGE"): str,
Optional("GENDER"): str,
Optional("HUMAN_RACE"): str,
Optional("HUMAN_ETHNICITY"): str,
Optional("HUMAN_TRIAL_TYPE"): str,
Optional("HUMAN_LIFESTYLE_FACTORS"): str,
Optional("HUMAN_MEDICATIONS"): str,
Optional("HUMAN_PRESCRIPTION_OTC"): str,
Optional("HUMAN_SMOKING_STATUS"): str,
Optional("HUMAN_ALCOHOL_DRUG_USE"): str,
Optional("HUMAN_NUTRITION"): str,
Optional("HUMAN_INCLUSION_CRITERIA"): str,
Optional("HUMAN_EXCLUSION_CRITERIA"): str,
Optional("ANIMAL_ANIMAL_SUPPLIER"): str,
Optional("ANIMAL_HOUSING"): str,
Optional("ANIMAL_LIGHT_CYCLE"): str,
Optional("ANIMAL_FEED"): str,
Optional("ANIMAL_WATER"): str,
Optional("ANIMAL_INCLUSION_CRITERIA"): str,
Optional("CELL_BIOSOURCE_OR_SUPPLIER"): str,
Optional("CELL_STRAIN_DETAILS"): str,
Optional("SUBJECT_COMMENTS"): str,
Optional("CELL_PRIMARY_IMMORTALIZED"): str,
Optional("CELL_PASSAGE_NUMBER"): str,
Optional("CELL_COUNTS"): str,
Optional("SPECIES_GROUP"): str
}
)
subject_sample_factors_schema = Schema(
[
{
"Subject ID": str,
"Sample ID": str,
"Factors": dict,
Optional("Additional sample data"): {
Optional("RAW_FILE_NAME"): str,
Optional(str): str
}
}
]
)
collection_schema = Schema(
{
"COLLECTION_SUMMARY": str,
Optional("COLLECTION_PROTOCOL_ID"): str,
Optional("COLLECTION_PROTOCOL_FILENAME"): str,
Optional("COLLECTION_PROTOCOL_COMMENTS"): str,
Optional("SAMPLE_TYPE"): str, # assumed optional due to large number of files without
Optional("COLLECTION_METHOD"): str,
Optional("COLLECTION_LOCATION"): str,
Optional("COLLECTION_FREQUENCY"): str,
Optional("COLLECTION_DURATION"): str,
Optional("COLLECTION_TIME"): str,
Optional("VOLUMEORAMOUNT_COLLECTED"): str,
Optional("STORAGE_CONDITIONS"): str,
Optional("COLLECTION_VIALS"): str,
Optional("STORAGE_VIALS"): str,
Optional("COLLECTION_TUBE_TEMP"): str,
Optional("ADDITIVES"): str,
Optional("BLOOD_SERUM_OR_PLASMA"): str,
Optional("TISSUE_CELL_IDENTIFICATION"): str,
Optional("TISSUE_CELL_QUANTITY_TAKEN"): str
}
)
treatment_schema = Schema(
{
"TREATMENT_SUMMARY": str,
Optional("TREATMENT_PROTOCOL_ID"): str,
Optional("TREATMENT_PROTOCOL_FILENAME"): str,
Optional("TREATMENT_PROTOCOL_COMMENTS"): str,
Optional("TREATMENT"): str,
Optional("TREATMENT_COMPOUND"): str,
Optional("TREATMENT_ROUTE"): str,
Optional("TREATMENT_DOSE"): str,
Optional("TREATMENT_DOSEVOLUME"): str,
Optional("TREATMENT_DOSEDURATION"): str,
Optional("TREATMENT_VEHICLE"): str,
Optional("ANIMAL_VET_TREATMENTS"): str,
Optional("ANIMAL_ANESTHESIA"): str,
Optional("ANIMAL_ACCLIMATION_DURATION"): str,
Optional("ANIMAL_FASTING"): str,
Optional("ANIMAL_ENDP_EUTHANASIA"): str,
Optional("ANIMAL_ENDP_TISSUE_COLL_LIST"): str,
Optional("ANIMAL_ENDP_TISSUE_PROC_METHOD"): str,
Optional("ANIMAL_ENDP_CLINICAL_SIGNS"): str,
Optional("HUMAN_FASTING"): str,
Optional("HUMAN_ENDP_CLINICAL_SIGNS"): str,
Optional("CELL_STORAGE"): str,
Optional("CELL_GROWTH_CONTAINER"): str,
Optional("CELL_GROWTH_CONFIG"): str,
Optional("CELL_GROWTH_RATE"): str,
Optional("CELL_INOC_PROC"): str,
Optional("CELL_MEDIA"): str,
Optional("CELL_ENVIR_COND"): str,
Optional("CELL_HARVESTING"): str,
Optional("PLANT_GROWTH_SUPPORT"): str,
Optional("PLANT_GROWTH_LOCATION"): str,
Optional("PLANT_PLOT_DESIGN"): str,
Optional("PLANT_LIGHT_PERIOD"): str,
Optional("PLANT_HUMIDITY"): str,
Optional("PLANT_TEMP"): str,
Optional("PLANT_WATERING_REGIME"): str,
Optional("PLANT_NUTRITIONAL_REGIME"): str,
Optional("PLANT_ESTAB_DATE"): str,
Optional("PLANT_HARVEST_DATE"): str,
Optional("PLANT_GROWTH_STAGE"): str,
Optional("PLANT_METAB_QUENCH_METHOD"): str,
Optional("PLANT_HARVEST_METHOD"): str,
Optional("PLANT_STORAGE"): str,
Optional("CELL_PCT_CONFLUENCE"): str,
Optional("CELL_MEDIA_LASTCHANGED"): str
}
)
sampleprep_schema = Schema(
{
"SAMPLEPREP_SUMMARY": str,
Optional("SAMPLEPREP_PROTOCOL_ID"): str,
Optional("SAMPLEPREP_PROTOCOL_FILENAME"): str,
Optional("SAMPLEPREP_PROTOCOL_COMMENTS"): str,
Optional("PROCESSING_METHOD"): str,
Optional("PROCESSING_STORAGE_CONDITIONS"): str,
Optional("EXTRACTION_METHOD"): str,
Optional("EXTRACT_CONCENTRATION_DILUTION"): str,
Optional("EXTRACT_ENRICHMENT"): str,
Optional("EXTRACT_CLEANUP"): str,
Optional("EXTRACT_STORAGE"): str,
Optional("SAMPLE_RESUSPENSION"): str,
Optional("SAMPLE_DERIVATIZATION"): str,
Optional("SAMPLE_SPIKING"): str,
Optional("ORGAN"): str,
Optional("ORGAN_SPECIFICATION"): str,
Optional("CELL_TYPE"): str,
Optional("SUBCELLULAR_LOCATION"): str
}
)
chromatography_schema = Schema(
{
Optional("CHROMATOGRAPHY_SUMMARY"): str,
"CHROMATOGRAPHY_TYPE": str,
"INSTRUMENT_NAME": str,
"COLUMN_NAME": str,
Optional("FLOW_GRADIENT"): str,
Optional("FLOW_RATE"): str,
Optional("COLUMN_TEMPERATURE"): str,
Optional("METHODS_FILENAME"): str,
Optional("SOLVENT_A"): str,
Optional("SOLVENT_B"): str,
Optional("METHODS_ID"): str,
Optional("COLUMN_PRESSURE"): str,
Optional("INJECTION_TEMPERATURE"): str,
Optional("INTERNAL_STANDARD"): str,
Optional("INTERNAL_STANDARD_MT"): str,
Optional("RETENTION_INDEX"): str,
Optional("RETENTION_TIME"): str,
Optional("SAMPLE_INJECTION"): str,
Optional("SAMPLING_CONE"): str,
Optional("ANALYTICAL_TIME"): str,
Optional("CAPILLARY_VOLTAGE"): str,
Optional("MIGRATION_TIME"): str,
Optional("OVEN_TEMPERATURE"): str,
Optional("PRECONDITIONING"): str,
Optional("RUNNING_BUFFER"): str,
Optional("RUNNING_VOLTAGE"): str,
Optional("SHEATH_LIQUID"): str,
Optional("TIME_PROGRAM"): str,
Optional("TRANSFERLINE_TEMPERATURE"): str,
Optional("WASHING_BUFFER"): str,
Optional("WEAK_WASH_SOLVENT_NAME"): str,
Optional("WEAK_WASH_VOLUME"): str,
Optional("STRONG_WASH_SOLVENT_NAME"): str,
Optional("STRONG_WASH_VOLUME"): str,
Optional("TARGET_SAMPLE_TEMPERATURE"): str,
Optional("SAMPLE_LOOP_SIZE"): str,
Optional("SAMPLE_SYRINGE_SIZE"): str,
Optional("RANDOMIZATION_ORDER"): str,
Optional("CHROMATOGRAPHY_COMMENTS"): str
}
)
analysis_schema = Schema(
{
"ANALYSIS_TYPE": str,
Optional("LABORATORY_NAME"): str,
Optional("OPERATOR_NAME"): str,
Optional("DETECTOR_TYPE"): str,
Optional("SOFTWARE_VERSION"): str,
Optional("ACQUISITION_DATE"): str,
Optional("ANALYSIS_PROTOCOL_FILE"): str,
Optional("ACQUISITION_PARAMETERS_FILE"): str,
Optional("PROCESSING_PARAMETERS_FILE"): str,
Optional("DATA_FORMAT"): str,
# not specified in mwTab specification (assumed)
Optional("ACQUISITION_ID"): str,
Optional("ACQUISITION_TIME"): str,
Optional("ANALYSIS_COMMENTS"): str,
Optional("ANALYSIS_DISPLAY"): str,
Optional("INSTRUMENT_NAME"): str,
Optional("INSTRUMENT_PARAMETERS_FILE"): str,
Optional("NUM_FACTORS"): str,
Optional("NUM_METABOLITES"): str,
Optional("PROCESSED_FILE"): str,
Optional("RANDOMIZATION_ORDER"): str,
Optional("RAW_FILE"): str,
}
)
ms_schema = Schema(
{
"INSTRUMENT_NAME": str,
"INSTRUMENT_TYPE": str,
"MS_TYPE": str,
"ION_MODE": str,
"MS_COMMENTS": str, # changed to required
Optional("CAPILLARY_TEMPERATURE"): str,
Optional("CAPILLARY_VOLTAGE"): str,
Optional("COLLISION_ENERGY"): str,
Optional("COLLISION_GAS"): str,
Optional("DRY_GAS_FLOW"): str,
Optional("DRY_GAS_TEMP"): str,
Optional("FRAGMENT_VOLTAGE"): str,
Optional("FRAGMENTATION_METHOD"): str,
Optional("GAS_PRESSURE"): str,
Optional("HELIUM_FLOW"): str,
Optional("ION_SOURCE_TEMPERATURE"): str,
Optional("ION_SPRAY_VOLTAGE"): str,
Optional("IONIZATION"): str,
Optional("IONIZATION_ENERGY"): str,
Optional("IONIZATION_POTENTIAL"): str,
Optional("MASS_ACCURACY"): str,
Optional("PRECURSOR_TYPE"): str,
Optional("REAGENT_GAS"): str,
Optional("SOURCE_TEMPERATURE"): str,
Optional("SPRAY_VOLTAGE"): str,
Optional("ACTIVATION_PARAMETER"): str,
Optional("ACTIVATION_TIME"): str,
Optional("ATOM_GUN_CURRENT"): str,
Optional("AUTOMATIC_GAIN_CONTROL"): str,
Optional("BOMBARDMENT"): str,
Optional("CDL_SIDE_OCTOPOLES_BIAS_VOLTAGE"): str,
Optional("CDL_TEMPERATURE"): str,
Optional("DATAFORMAT"): str,
Optional("DESOLVATION_GAS_FLOW"): str,
Optional("DESOLVATION_TEMPERATURE"): str,
Optional("INTERFACE_VOLTAGE"): str,
Optional("IT_SIDE_OCTOPOLES_BIAS_VOLTAGE"): str,
Optional("LASER"): str,
Optional("MATRIX"): str,
Optional("NEBULIZER"): str,
Optional("OCTPOLE_VOLTAGE"): str,
Optional("PROBE_TIP"): str,
Optional("RESOLUTION_SETTING"): str,
Optional("SAMPLE_DRIPPING"): str,
Optional("SCAN_RANGE_MOVERZ"): str,
Optional("SCANNING"): str,
Optional("SCANNING_CYCLE"): str,
Optional("SCANNING_RANGE"): str,
Optional("SKIMMER_VOLTAGE"): str,
Optional("TUBE_LENS_VOLTAGE"): str,
Optional("MS_RESULTS_FILE"): Or(str, dict)
}
)
nmr_schema = Schema(
{
"INSTRUMENT_NAME": str,
"INSTRUMENT_TYPE": str,
"NMR_EXPERIMENT_TYPE": str,
Optional("NMR_COMMENTS"): str,
Optional("FIELD_FREQUENCY_LOCK"): str,
Optional("STANDARD_CONCENTRATION"): str,
"SPECTROMETER_FREQUENCY": str,
Optional("NMR_PROBE"): str,
Optional("NMR_SOLVENT"): str,
Optional("NMR_TUBE_SIZE"): str,
Optional("SHIMMING_METHOD"): str,
Optional("PULSE_SEQUENCE"): str,
Optional("WATER_SUPPRESSION"): str,
Optional("PULSE_WIDTH"): str,
Optional("POWER_LEVEL"): str,
Optional("RECEIVER_GAIN"): str,
Optional("OFFSET_FREQUENCY"): str,
Optional("PRESATURATION_POWER_LEVEL"): str,
Optional("CHEMICAL_SHIFT_REF_CPD"): str,
Optional("TEMPERATURE"): str,
Optional("NUMBER_OF_SCANS"): str,
Optional("DUMMY_SCANS"): str,
Optional("ACQUISITION_TIME"): str,
Optional("RELAXATION_DELAY"): str,
Optional("SPECTRAL_WIDTH"): str,
Optional("NUM_DATA_POINTS_ACQUIRED"): str,
Optional("REAL_DATA_POINTS"): str,
Optional("LINE_BROADENING"): str,
Optional("ZERO_FILLING"): str,
Optional("APODIZATION"): str,
Optional("BASELINE_CORRECTION_METHOD"): str,
Optional("CHEMICAL_SHIFT_REF_STD"): str,
Optional("BINNED_INCREMENT"): str,
Optional("BINNED_DATA_NORMALIZATION_METHOD"): str,
Optional("BINNED_DATA_PROTOCOL_FILE"): str,
Optional("BINNED_DATA_CHEMICAL_SHIFT_RANGE"): str,
Optional("BINNED_DATA_EXCLUDED_RANGE"): str
}
)
data_schema = Schema(
[
{
Or("Metabolite", "Bin range(ppm)", only_one=True): str,
Optional(str): str,
},
]
)
extended_schema = Schema(
[
{
"Metabolite": str,
Optional(str): str,
"sample_id": str
},
]
)
ms_metabolite_data_schema = Schema(
{
"Units": str,
"Data": data_schema,
"Metabolites": data_schema,
Optional("Extended"): extended_schema
}
)
nmr_binned_data_schema = Schema(
{
"Units": str,
"Data": data_schema
}
)
section_schema_mapping = {
"METABOLOMICS WORKBENCH": metabolomics_workbench_schema,
"PROJECT": project_schema,
"STUDY": study_schema,
"ANALYSIS": analysis_schema,
"SUBJECT": subject_schema,
"SUBJECT_SAMPLE_FACTORS": subject_sample_factors_schema,
"COLLECTION": collection_schema,
"TREATMENT": treatment_schema,
"SAMPLEPREP": sampleprep_schema,
"CHROMATOGRAPHY": chromatography_schema,
"MS": ms_schema,
"NM": nmr_schema,
"MS_METABOLITE_DATA": ms_metabolite_data_schema,
"NMR_METABOLITE_DATA": ms_metabolite_data_schema,
"NMR_BINNED_DATA": nmr_binned_data_schema,
}
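
# Minimal validation sketch (hypothetical section data; Schema.validate()
# raises schema.SchemaError on invalid input):
#
# mw_section = {"VERSION": "1", "CREATED_ON": "2019-01-01"}
# section_schema_mapping["METABOLOMICS WORKBENCH"].validate(mw_section)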
| 34.049327 | 94 | 0.61965 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,224 | 0.409851 |
9610da1cf47afbf95b11be72f8e2780125e49449 | 27,544 | py | Python | functions/asmm_xml.py | EUFAR/asmm-eufar | 69ede7a24f757392e63f04091e86c50ab129016f | ["BSD-3-Clause"] | null | null | null | functions/asmm_xml.py | EUFAR/asmm-eufar | 69ede7a24f757392e63f04091e86c50ab129016f | ["BSD-3-Clause"] | 2 | 2015-06-12T09:28:29.000Z | 2015-06-12T09:34:16.000Z | functions/asmm_xml.py | eufarn7sp/asmm-eufar | 69ede7a24f757392e63f04091e86c50ab129016f | ["BSD-3-Clause"] | null | null | null |
import datetime
import xml.dom.minidom
import logging
from PyQt5 import QtCore, QtWidgets
from functions.button_functions import add_read
NAMESPACE_URI = 'http://www.eufar.net/ASMM'
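
# Note: the XML helpers used below (add_element, add_check_elements,
# get_element, get_element_value, set_text_value, set_check_values) are
# defined further down in this module; add_element is assumed to create a
# child element under `parent` and, when given a fourth argument, to attach
# it as the element's text content.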
def create_asmm_xml(self, out_file_name):
logging.debug('asmm_xml.py - create_asmm_xml - out_file_name ' + out_file_name)
doc = xml.dom.minidom.Document()
doc_root = add_element(doc, "MissionMetadata", doc)
doc_root.setAttribute("xmlns:asmm", NAMESPACE_URI)
current_date = datetime.date.isoformat(datetime.date.today())
if not self.create_date:
self.create_date = current_date
add_element(doc, "CreationDate", doc_root, self.create_date)
add_element(doc, "RevisionDate", doc_root, current_date)
############################
# Flight Information
############################
flightInformation = add_element(doc, "FlightInformation", doc_root)
add_element(doc, "FlightNumber", flightInformation, self.flightNumber_ln.text())
add_element(doc, "Date", flightInformation, self.date_dt.date().toString(QtCore.Qt.ISODate))
add_element(doc, "ProjectAcronym", flightInformation, self.projectAcronym_ln.text())
add_element(doc, "MissionScientist", flightInformation, self.missionSci_ln.text())
add_element(doc, "FlightManager", flightInformation, self.flightManager_ln.text())
operator = self.operator_cb.currentText()
aircraft = self.aircraft_cb.currentText()
country = ''
manufacturer = ''
registration = ''
if operator == 'Other...':
operator = self.newOperator_ln.text()
aircraft = self.newAircraft_ln.text()
registration = self.newRegistration_ln.text()
manufacturer = self.newManufacturer_ln.text()
if self.newCountry_cb.currentText() != 'Make a choice...':
country = self.newCountry_cb.currentText()
elif operator != 'Make a choice...':
if aircraft != 'Make a choice...':
            index = aircraft.find(' - ')
            if index != -1:
registration = aircraft[index + 3:]
if len(registration) > 3:
aircraft = aircraft[0:index]
for i in range(len(self.new_operators_aircraft)):
if registration != '' and len(registration) > 3:
if registration == self.new_operators_aircraft[i][2]:
                        index = self.new_operators_aircraft[i][1].find(', ')
manufacturer = self.new_operators_aircraft[i][1][: index]
country = self.new_operators_aircraft[i][3]
break
else:
                    index = self.new_operators_aircraft[i][1].find(', ')
aircraft_from_table = self.new_operators_aircraft[i][1][index + 2:]
if aircraft == aircraft_from_table:
manufacturer = self.new_operators_aircraft[i][1][: index]
country = self.new_operators_aircraft[i][3]
registration = self.new_operators_aircraft[i][2]
break
else:
aircraft = ''
else:
operator = ''
aircraft = ''
for key, value in self.new_country_code.items():
if value == country:
country = key
break
add_element(doc, "Platform", flightInformation, aircraft)
add_element(doc, "Operator", flightInformation, operator)
add_element(doc, "OperatorCountry", flightInformation, country)
add_element(doc, "Manufacturer", flightInformation, manufacturer)
add_element(doc, "RegistrationNumber", flightInformation, registration)
if self.location_cb.currentText() == "Make a choice...":
add_element(doc, "Localisation", flightInformation, "")
elif self.detailList.currentText() == "Make a choice...":
add_element(doc, "Localisation", flightInformation, "")
else:
add_element(doc, "Localisation", flightInformation, self.detailList.currentText())
###########################
# Metadata Contact Info
###########################
contactInfo = add_element(doc, "ContactInfo", doc_root)
add_element(doc, "ContactName", contactInfo, self.contactName_ln.text())
if self.contact_cb.currentText() == 'Make a choice...':
add_element(doc, "ContactRole", contactInfo, '')
else:
add_element(doc, "ContactRole", contactInfo, self.contact_cb.currentText())
add_element(doc, "ContactEmail", contactInfo, self.contactEmail_ln.text())
############################
# Scientific Aims
############################
scientificAims = add_element(doc, "ScientificAims", doc_root)
add_check_elements(doc, self.scientific_aims_check_dict, "SA_Code", scientificAims)
if self.sa_ck_list:
for i in range(self.gridLayout_5.count()):
if isinstance(self.gridLayout_5.itemAt(i).widget(), QtWidgets.QCheckBox):
if self.gridLayout_5.itemAt(i).widget().isChecked():
add_element(doc,"SA_User", scientificAims, self.gridLayout_5.itemAt(i).widget().
text())
add_element(doc, "SA_Other", scientificAims, self.SAOtherTextBox.toPlainText())
############################
# Geographical Region
############################
geographicalRegion = add_element(doc, "GeographicalRegion", doc_root)
geographicBoundingBox = add_element(doc, "GeographicBoundingBox", geographicalRegion)
add_element(doc, "westBoundLongitude", geographicBoundingBox, self.westBoundLongitudeLine.text())
add_element(doc, "eastBoundLongitude", geographicBoundingBox, self.eastBoundLongitudeLine.text())
add_element(doc, "northBoundLatitude", geographicBoundingBox, self.northBoundLatitudeLine.text())
add_element(doc, "southBoundLatitude", geographicBoundingBox, self.southBoundLatitudeLine.text())
add_element(doc, "minAltitude", geographicBoundingBox, self.minAltitudeLine.text())
add_element(doc, "maxAltitude", geographicBoundingBox, self.maxAltitudeLine.text())
add_check_elements(doc, self.geographical_region_check_dict, "GR_Code", geographicalRegion)
if self.gr_ck_list:
for i in range(self.gridLayout_8.count()):
if isinstance(self.gridLayout_8.itemAt(i).widget(), QtWidgets.QCheckBox):
if self.gridLayout_8.itemAt(i).widget().isChecked():
add_element(doc,"GR_User", geographicalRegion, self.gridLayout_8.itemAt(i).
widget().text())
add_element(doc, "GR_Other", geographicalRegion, self.GROtherTextBox.toPlainText())
############################
# Atmospheric Features
############################
atmosphericFeatures = add_element(doc, "AtmosFeatures", doc_root)
add_check_elements(doc, self.atmospheric_features_check_dict, "AF_Code", atmosphericFeatures)
if self.af_ck_list:
for i in range(self.gridLayout_9.count()):
if isinstance(self.gridLayout_9.itemAt(i).widget(), QtWidgets.QCheckBox):
if self.gridLayout_9.itemAt(i).widget().isChecked():
add_element(doc,"AF_User", atmosphericFeatures, self.gridLayout_9.itemAt(i).
widget().text())
add_element(doc, "AF_Other", atmosphericFeatures, self.AFOtherTextBox.toPlainText())
############################
# Cloud Types
############################
cloudTypes = add_element(doc, "CloudTypes", doc_root)
add_check_elements(doc, self.cloud_types_check_dict, "CT_Code", cloudTypes)
if self.ct_ck_list:
for i in range(self.gridLayout_10.count()):
if isinstance(self.gridLayout_10.itemAt(i).widget(), QtWidgets.QCheckBox):
if self.gridLayout_10.itemAt(i).widget().isChecked():
add_element(doc,"CT_User", cloudTypes, self.gridLayout_10.itemAt(i).widget().
text())
add_element(doc, "CT_Other", cloudTypes, self.CTOtherTextBox.toPlainText())
############################
# Particles Sampled
############################
particlesSampled = add_element(doc, "ParticlesSampled", doc_root)
add_check_elements(doc, self.particles_sampled_check_dict, "PS_Code", particlesSampled)
if self.ps_ck_list:
for i in range(self.gridLayout_11.count()):
if isinstance(self.gridLayout_11.itemAt(i).widget(), QtWidgets.QCheckBox):
if self.gridLayout_11.itemAt(i).widget().isChecked():
add_element(doc,"PS_User", particlesSampled, self.gridLayout_11.itemAt(i).
widget().text())
add_element(doc, "PS_Other", particlesSampled, self.PSOtherTextBox.toPlainText())
############################
# Surfaces Overflown
############################
surfacesOverflown = add_element(doc, "SurfacesOverflown", doc_root)
add_check_elements(doc, self.surfaces_overflown_check_dict, "SO_Code", surfacesOverflown)
if self.so_ck_list:
for i in range(self.gridLayout_13.count()):
if isinstance(self.gridLayout_13.itemAt(i).widget(), QtWidgets.QCheckBox):
if self.gridLayout_13.itemAt(i).widget().isChecked():
add_element(doc,"SO_User", surfacesOverflown, self.gridLayout_13.itemAt(i).
widget().text())
add_element(doc, "SO_Other", surfacesOverflown, self.SOOtherTextBox.toPlainText())
############################
# Altitude Ranges
############################
altitudeRanges = add_element(doc, "AltitudeRanges", doc_root)
add_check_elements(doc, self.altitude_ranges_check_dict, "AR_Code", altitudeRanges)
if self.ar_ck_list:
for i in range(self.gridLayout_14.count()):
if isinstance(self.gridLayout_14.itemAt(i).widget(), QtWidgets.QCheckBox):
if self.gridLayout_14.itemAt(i).widget().isChecked():
add_element(doc,"AR_User", altitudeRanges, self.gridLayout_14.itemAt(i).
widget().text())
add_element(doc, "AR_Other", altitudeRanges, self.AROtherTextBox.toPlainText())
############################
# Flight Types
############################
flightTypes = add_element(doc, "FlightTypes", doc_root)
add_check_elements(doc, self.flight_types_check_dict, "FT_Code", flightTypes)
if self.fm_ck_list:
for i in range(self.gridLayout_15.count()):
if isinstance(self.gridLayout_15.itemAt(i).widget(), QtWidgets.QCheckBox):
if self.gridLayout_15.itemAt(i).widget().isChecked():
add_element(doc,"FT_User", flightTypes, self.gridLayout_15.itemAt(i).widget().
text())
add_element(doc, "FT_Other", flightTypes, self.FTOtherTextBox.toPlainText())
############################
# Satellite coordination
############################
satelliteCoordination = add_element(doc, "SatelliteCoordination", doc_root)
add_check_elements(doc, self.satellite_coordination_check_dict, "SC_Code", satelliteCoordination)
if self.sc_ck_list:
for i in range(self.gridLayout_25.count()):
if isinstance(self.gridLayout_25.itemAt(i).widget(), QtWidgets.QCheckBox):
if self.gridLayout_25.itemAt(i).widget().isChecked():
add_element(doc,"SC_User", satelliteCoordination, self.gridLayout_25.itemAt(i).
widget().text())
add_element(doc, "SC_Other", satelliteCoordination, self.SCOtherTextBox.toPlainText())
############################
# Surface Observations
############################
surfaceObs = add_element(doc, "SurfaceObs", doc_root)
for item in self.ground_site_list:
add_element(doc, "GroundSite", surfaceObs, item)
for item in self.research_vessel_list:
add_element(doc, "ResearchVessel", surfaceObs, item)
for item in self.arm_site_list:
add_element(doc, "ArmSite", surfaceObs, item)
for item in self.arm_mobile_list:
add_element(doc, "ArmMobile", surfaceObs, item)
############################
# Other Comments
############################
if self.OtherCommentsTextBox.toPlainText():
add_element(doc, "OtherComments", doc_root, self.OtherCommentsTextBox.toPlainText())
############################
# File Creation
############################
f = open(out_file_name, 'w')
f.write(doc.toprettyxml())
f.close()
self.saved = True
self.modified = False
logging.debug('asmm_xml.py - create_asmm_xml - file created successfully')
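
# Typical call from the GUI code (illustrative; `self` is the main-window
# instance holding the Qt widgets read above):
#
# create_asmm_xml(main_window, "asmm_flight_metadata.xml")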
def read_asmm_xml(self, in_file_name):
logging.debug('asmm_xml.py - read_asmm_xml - out_file_name ' + in_file_name)
self.reset_all_fields()
f = open(in_file_name, 'r')
doc = xml.dom.minidom.parse(f)
############################
# Flight Information
############################
self.create_date = get_element_value(doc, "CreationDate")
flightInformation = get_element(doc, "FlightInformation")
set_text_value(self.flightNumber_ln, flightInformation, "FlightNumber")
date = get_element_value(flightInformation, "Date")
self.date_dt.setDate(QtCore.QDate.fromString(date, QtCore.Qt.ISODate))
set_text_value(self.projectAcronym_ln, flightInformation, "ProjectAcronym")
set_text_value(self.missionSci_ln, flightInformation, "MissionScientist")
set_text_value(self.flightManager_ln, flightInformation, "FlightManager")
operator = get_element_value(flightInformation, "Operator")
aircraft = get_element_value(flightInformation, "Platform")
registration = get_element_value(flightInformation, "RegistrationNumber")
aircraft_found = False
if registration:
for i in range(len(self.new_operators_aircraft)):
if registration == self.new_operators_aircraft[i][2]:
aircraft_found = True
self.operator_cb.setCurrentIndex(self.operator_cb.findText(operator))
self.operator_changed()
index = self.aircraft_cb.findText(aircraft)
if index != -1:
self.aircraft_cb.setCurrentIndex(index)
else:
index = self.aircraft_cb.findText(aircraft + ' - ' + registration)
self.aircraft_cb.setCurrentIndex(index)
break
if not aircraft_found:
self.operator_cb.setCurrentIndex(1)
self.operator_changed()
self.newOperator_ln.setText(operator)
self.newAircraft_ln.setText(aircraft)
self.newRegistration_ln.setText(registration)
self.newManufacturer_ln.setText(get_element_value(flightInformation, "Manufacturer"))
if get_element_value(flightInformation, "OperatorCountry"):
self.newCountry_cb.setCurrentIndex(self.newCountry_cb.findText(get_element_value(flightInformation, "OperatorCountry")))
else:
self.operator_cb.setCurrentIndex(1)
self.operator_changed()
self.newOperator_ln.setText(operator)
self.newAircraft_ln.setText(aircraft)
self.newRegistration_ln.setText(registration)
self.newManufacturer_ln.setText(get_element_value(flightInformation, "Manufacturer"))
if get_element_value(flightInformation, "OperatorCountry"):
index = self.newCountry_cb.findText(get_element_value(flightInformation, "OperatorCountry"))
if index != -1:
self.newCountry_cb.setCurrentIndex(index)
combo_text = get_element_value(flightInformation, "Localisation")
        if combo_text is not None:
if combo_text in self.countries:
self.location_cb.setCurrentIndex(self.location_cb.findText("Countries"))
self.detailList.clear()
self.detailList.setEnabled(True)
self.detailList.addItems(self.countries)
self.detailList.setCurrentIndex(self.detailList.findText(combo_text))
elif combo_text in self.continents:
self.location_cb.setCurrentIndex(self.location_cb.findText("Continents"))
self.detailList.clear()
self.detailList.setEnabled(True)
self.detailList.addItems(self.continents)
self.detailList.setCurrentIndex(self.detailList.findText(combo_text))
elif combo_text in self.oceans:
self.location_cb.setCurrentIndex(self.location_cb.findText("Oceans"))
self.detailList.clear()
self.detailList.setEnabled(True)
self.detailList.addItems(self.oceans)
self.detailList.setCurrentIndex(self.detailList.findText(combo_text))
elif combo_text in self.regions:
self.location_cb.setCurrentIndex(self.location_cb.findText("Regions"))
self.detailList.clear()
self.detailList.setEnabled(True)
self.detailList.addItems(self.regions)
self.detailList.setCurrentIndex(self.detailList.findText(combo_text))
#############################
# Metadata Contact Info
#############################
contactInfo = get_element(doc, "ContactInfo")
set_text_value(self.contactName_ln, contactInfo, "ContactName")
set_text_value(self.contactEmail_ln, contactInfo, "ContactEmail")
combo_text = get_element_value(contactInfo, "ContactRole")
        if combo_text is not None:
self.contact_cb.setCurrentIndex(self.contact_cb.findText(combo_text))
#############################
# Scientific Aims
#############################
scientificAims = get_element(doc, "ScientificAims")
try:
set_check_values(self.scientific_aims_check_dict, scientificAims, "SA_Code")
except IndexError:
set_check_values(self.old_scientific_aims_check_dict, scientificAims, "SA_Code")
set_text_value(self.SAOtherTextBox, scientificAims, "SA_Other")
values = get_element_values(scientificAims, "SA_User")
for item in values:
add_read(self, "SA", item)
#############################
# Geographical Region
#############################
geographicalRegion = get_element(doc, "GeographicalRegion")
geographicBoundingBox = get_element(geographicalRegion, "GeographicBoundingBox")
set_text_value_coord(self, self.westBoundLongitudeLine, geographicBoundingBox, "westBoundLongitude")
set_text_value_coord(self, self.eastBoundLongitudeLine, geographicBoundingBox, "eastBoundLongitude")
set_text_value_coord(self, self.northBoundLatitudeLine, geographicBoundingBox, "northBoundLatitude")
set_text_value_coord(self, self.southBoundLatitudeLine, geographicBoundingBox, "southBoundLatitude")
set_text_value_coord(self, self.minAltitudeLine, geographicBoundingBox, "minAltitude")
set_text_value_coord(self, self.maxAltitudeLine, geographicBoundingBox, "maxAltitude")
try:
set_check_values(self.geographical_region_check_dict, geographicalRegion, "GR_Code")
except IndexError:
set_check_values(self.old_geographical_region_check_dict, geographicalRegion, "GR_Code")
set_text_value(self.GROtherTextBox, geographicalRegion, "GR_Other")
values = get_element_values(geographicalRegion, "GR_User")
for item in values:
add_read(self, "GR", item)
#############################
# Atmospheric Features
#############################
atmosphericFeatures = get_element(doc, "AtmosFeatures")
try:
set_check_values(self.atmospheric_features_check_dict, atmosphericFeatures, "AF_Code")
except IndexError:
set_check_values(self.old_atmospheric_features_check_dict, atmosphericFeatures, "AF_Code")
set_text_value(self.AFOtherTextBox, atmosphericFeatures, "AF_Other")
values = get_element_values(atmosphericFeatures, "AF_User")
for item in values:
add_read(self, "AF", item)
#############################
# Cloud Types
#############################
cloudTypes = get_element(doc, "CloudTypes")
try:
set_check_values(self.cloud_types_check_dict, cloudTypes, "CT_Code")
except IndexError:
set_check_values(self.old_cloud_types_check_dict, cloudTypes, "CT_Code")
set_text_value(self.CTOtherTextBox, cloudTypes, "CT_Other")
values = get_element_values(cloudTypes, "CT_User")
for item in values:
add_read(self, "CT", item)
#############################
# Particles Sampled
#############################
particlesSampled = get_element(doc, "ParticlesSampled")
try:
set_check_values(self.particles_sampled_check_dict, particlesSampled, "PS_Code")
except IndexError:
set_check_values(self.old_particles_sampled_check_dict, particlesSampled, "PS_Code")
set_text_value(self.PSOtherTextBox, particlesSampled, "PS_Other")
values = get_element_values(particlesSampled, "PS_User")
for item in values:
add_read(self, "PS", item)
#############################
# Surfaces Overflown
#############################
surfacesOverflown = get_element(doc, "SurfacesOverflown")
try:
set_check_values(self.surfaces_overflown_check_dict, surfacesOverflown, "SO_Code")
except IndexError:
set_check_values(self.old_surfaces_overflown_check_dict, surfacesOverflown, "SO_Code")
set_text_value(self.SOOtherTextBox, surfacesOverflown, "SO_Other")
values = get_element_values(surfacesOverflown, "SO_User")
for item in values:
add_read(self, "SO", item)
#############################
# Altitude Ranges
#############################
altitudeRanges = get_element(doc, "AltitudeRanges")
try:
set_check_values(self.altitude_ranges_check_dict, altitudeRanges, "AR_Code")
except IndexError:
set_check_values(self.old_altitude_ranges_check_dict, altitudeRanges, "AR_Code")
set_text_value(self.AROtherTextBox, altitudeRanges, "AR_Other")
values = get_element_values(altitudeRanges, "AR_User")
for item in values:
add_read(self, "AR", item)
#############################
# Flight Types
#############################
flightTypes = get_element(doc, "FlightTypes")
try:
set_check_values(self.flight_types_check_dict, flightTypes, "FT_Code")
except IndexError:
set_check_values(self.old_flight_types_check_dict, flightTypes, "FT_Code")
set_text_value(self.FTOtherTextBox, flightTypes, "FT_Other")
values = get_element_values(flightTypes, "FT_User")
for item in values:
add_read(self, "FM", item)
#############################
# Satellite Coordination
#############################
satelliteCoordination = get_element(doc, "SatelliteCoordination")
try:
set_check_values(self.satellite_coordination_check_dict, satelliteCoordination, "SC_Code")
except IndexError:
set_check_values(self.old_satellite_coordination_check_dict, satelliteCoordination, "SC_Code")
set_text_value(self.SCOtherTextBox, satelliteCoordination, "SC_Other")
values = get_element_values(satelliteCoordination, "SC_User")
for item in values:
add_read(self, "SC", item)
#############################
# Surface Observations
#############################
surfaceObservations = get_element(doc, "SurfaceObs")
self.ground_site_list = get_element_values(surfaceObservations, "GroundSite")
self.groundListWidget.addItems(self.ground_site_list)
self.research_vessel_list = get_element_values(surfaceObservations, "ResearchVessel")
self.vesselListWidget.addItems(self.research_vessel_list)
self.arm_site_list = get_element_values(surfaceObservations, "ArmSite")
self.armListWidget.addItems(self.arm_site_list)
self.arm_mobile_list = get_element_values(surfaceObservations, "ArmMobile")
self.armMobileListWidget.addItems(self.arm_mobile_list)
##############################
# Other Comments
##############################
set_text_value(self.OtherCommentsTextBox, doc, "OtherComments")
        logging.debug('asmm_xml.py - read_asmm_xml - file read successfully')
def get_element(parent, element_name):
logging.debug('asmm_xml.py - get_element - parent ' + str(parent) + ' ; element_name ' + str(element_name))
return parent.getElementsByTagNameNS(NAMESPACE_URI, element_name)[0]
def get_element_value(parent, element_name):
logging.debug('asmm_xml.py - get_element_value - parent ' + str(parent) + ' ; element_name ' + str(element_name))
elements = parent.getElementsByTagNameNS(NAMESPACE_URI, element_name)
if elements:
element = elements[0]
nodes = element.childNodes
for node in nodes:
if node.nodeType == node.TEXT_NODE:
return node.data.strip()
def get_element_values(parent, element_name):
logging.debug('asmm_xml.py - get_element_values - parent ' + str(parent) + ' ; element_name ' + str(element_name))
value_list = []
elements = parent.getElementsByTagNameNS(NAMESPACE_URI, element_name)
for element in elements:
value_list.append(element.childNodes[0].data.strip())
return value_list
def set_check_values(check_dict, parent, element_name):
logging.debug('asmm_xml.py - set_check_values - parent ' + str(parent) + ' ; element_name ' + str(element_name))
elements = parent.getElementsByTagNameNS(NAMESPACE_URI, element_name)
for element in elements:
check_widget = find_key(check_dict, element.childNodes[0].data.strip())
if check_widget is not None:
check_widget.setChecked(True)
def set_text_value(text_widget, parent, element_name):
logging.debug('asmm_xml.py - set_text_value - parent ' + str(parent) + ' ; element_name ' + str(element_name))
node_data = get_element_value(parent, element_name)
if node_data:
text_widget.setText(node_data)
def set_text_value_coord(self, text_widget, parent, element_name):
logging.debug('asmm_xml.py - set_text_value_coord - parent ' + str(parent) + ' ; element_name ' + str(element_name))
node_data = get_element_value(parent, element_name)
if node_data:
text_widget.setText(clean_coordinate_string(self, node_data))
def add_element(doc, element_name, parent, value=None):
logging.debug('asmm_xml.py - add_element - parent ' + str(parent) + ' ; element_name ' + str(element_name) + ' ; value ' + str(value))
new_element = doc.createElementNS(NAMESPACE_URI, "asmm:" + element_name)
if value:
new_text = doc.createTextNode(value)
new_element.appendChild(new_text)
parent.appendChild(new_element)
return new_element
def add_check_elements(doc, check_dict, code_name, parent):
logging.debug('asmm_xml.py - add_check_elements - parent ' + str(parent) + ' ; element_name ' + str(code_name))
for key, val in iter(check_dict.items()):
if key.isChecked():
add_element(doc, code_name, parent, val)
def find_key(dic, val):
    # return the first key mapping to val, or None when absent (callers test for None)
    matches = [k for k, v in iter(dic.items()) if v == val]
    return matches[0] if matches else None
def clean_coordinate_string(self, string):
logging.debug('asmm_xml.py - clean_coordinate_string - string ' + string)
for key, val in self.coordinate_units_list.items():
try:
string = string[:string.index(key)]
if val < 0:
string = '-' + string
break
except ValueError:
pass
return string
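# --- illustrative example (not part of the original module) ---
# A minimal sketch of how the XML helpers above fit together: add_element()
# writes a namespaced text element, get_element_value() reads it back.
# The element name "FlightNumber" and its value are invented for illustration;
# NAMESPACE_URI and xml.dom.minidom are assumed imported earlier in this module.
def _example_xml_round_trip():
    doc = xml.dom.minidom.Document()
    root = doc.createElementNS(NAMESPACE_URI, "asmm:Example")
    doc.appendChild(root)
    add_element(doc, "FlightNumber", root, "AB123")
    return get_element_value(doc, "FlightNumber")  # -> 'AB123'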
| 46.37037 | 138 | 0.647255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,994 | 0.18131 |
9610eaf838ce8599d05cfd89f28acb8943b4bb46 | 191 | py | Python | github/models.py | pyprism/Hiren-Git-Commit-Reminder | 253ba078f63cc9bf3f39a5b735a783c4846b5ba7 | [
"MIT"
]
| null | null | null | github/models.py | pyprism/Hiren-Git-Commit-Reminder | 253ba078f63cc9bf3f39a5b735a783c4846b5ba7 | [
"MIT"
]
| null | null | null | github/models.py | pyprism/Hiren-Git-Commit-Reminder | 253ba078f63cc9bf3f39a5b735a783c4846b5ba7 | [
"MIT"
]
| null | null | null | from django.db import models
# Create your models here.
class Hiren(models.Model):
access_token = models.CharField(max_length=200)
authorized = models.BooleanField(default=False)
| 19.1 | 51 | 0.759162 | 130 | 0.680628 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.136126 |
961296a2dbd17acbbeca5341d04b5200b3df15a3 | 4,973 | py | Python | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Axon/idGen.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
]
| 1 | 2017-03-28T06:41:51.000Z | 2017-03-28T06:41:51.000Z | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Axon/idGen.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
]
| null | null | null | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Axon/idGen.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
]
| 1 | 2016-12-13T21:08:58.000Z | 2016-12-13T21:08:58.000Z | #!/usr/bin/python
#
# Copyright (C) 2004 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: [email protected]
# to discuss alternative licensing.
# -------------------------------------------------------------------------
"""\
====================
Unique ID generation
====================
The methods of the idGen class are used to generate unique IDs in various forms
(numbers, strings, etc) which are used to give microprocesses and other Axon
objects a unique identifier and name.
* Every Axon.Microprocess.microprocess gets a unique ID
* Axon.ThreadedComponent.threadedcomponent uses unique IDs to identify threads
Generating a new unique ID
--------------------------
Do not use the idGen class defined in this module directly. Instead, use any
of these module methods to obtain a unique ID:
* **Axon.idGen.newId(thing)** - returns a unique identifier as a string based on
the class name of the object provided
* **Axon.idGen.strId(thing)** - returns a unique identifier as a string based on
the class name of the object provided
* **Axon.idGen.numId()** - returns a unique identifier as a number
* **Axon.idGen.tupleId(thing)** - returns both the numeric and string versions
of a new unique id as a tuple (where the string version is based on the class
name of the object provided)
Calling tupleId(thing) is *not* equivalent to calling numId() then strId(thing)
because doing that would return two different id values!
Examples::
>>> x=Component.component()
>>> idGen.newId(x)
'Component.component_4'
>>> idGen.strId(x)
'Component.component_5'
>>> idGen.numId()
6
>>> idGen.tupleId(x)
(7, 'Component.component_7')
"""
import debug
debugger = debug.debug()
debugger.useConfig()
Debug = debugger.debug
# idGen - A class to provide Unique Identifiers
#
# Ids can provide be provided as numerical, string or a tuple.
#
# numerical ids are integers allocated on a "next integer" basis.
# eg object 1, apple 2, orange 3. (Not object 1, apple 2, orange 3)
#
# string ids consist of the '__str__' of the object, with the numerical
# id tacked on the end.
#
# tuple ids consists : '(the numerical id, the string id)'
#
class idGen(object):
"""\
Unique ID creator.
Use numId(), strId(), and tupleId() methods to obtain unique IDs.
"""
lowestAllocatedId = 0
def nextId(self):
"""\
**INTERNAL**
Returns the next unique id, incrementing the private class variable
"""
idGen.lowestAllocatedId = idGen.lowestAllocatedId +1
return idGen.lowestAllocatedId
next = nextId # pseudonym
def idToString(self,thing,aNumId):
"""\
**INTERNAL**
Combines the 'str()' of the object's class with the id to form a string id
"""
# This next line takes <class '__main__.foo'>
# and chops out the __main__.foo part
r = str(thing.__class__)[8:][:-2] + "_" + str(aNumId)
return r
def numId(self):
"""Allocates & returns the next available id"""
result = self.nextId()
assert Debug("idGen.numId", 1, "idGen.numId:", result)
return result
def strId(self,thing):
"""\
Allocates & returns the next available id combined with the object's
class name, in string form
"""
theId = self.nextId()
strid = self.idToString(thing,theId)
assert Debug("idGen.strId", 1, "idGen.strId:", strid)
return strid
def tupleId(self,thing):
"""\
Allocates the next available id and returns it both as a tuple (num,str)
containing both the numeric version and a string version where it is
combined with the object's class name.
"""
theId = self.nextId()
strId = self.idToString(thing,theId)
assert Debug("idGen.tupleId", 1, "idGen.tupleId:", theId, strId)
return theId, strId
newId = idGen().strId
strId=idGen().strId
numId=idGen().numId
tupleId=idGen().tupleId
if __name__ == '__main__':
class foo: pass
class bar: pass
class bibble: pass
print newId(foo())
print newId(bar())
print newId(bibble())
| 31.474684 | 83 | 0.646893 | 1,763 | 0.354514 | 0 | 0 | 0 | 0 | 0 | 0 | 3,797 | 0.763523 |
961374e180229cec23558c1850e6a56b8464ae8b | 63,005 | py | Python | pyCEvNS/flux.py | athompson-tamu/pyCEvNS | feb3f83c706e6604608eae83c50ac79ced9140bf | [
"MIT"
]
| null | null | null | pyCEvNS/flux.py | athompson-tamu/pyCEvNS | feb3f83c706e6604608eae83c50ac79ced9140bf | [
"MIT"
]
| null | null | null | pyCEvNS/flux.py | athompson-tamu/pyCEvNS | feb3f83c706e6604608eae83c50ac79ced9140bf | [
"MIT"
]
| null | null | null | """
flux related class and functions
"""
from scipy.integrate import quad
import pandas as pd
from .helper import LinearInterp, polar_to_cartesian, lorentz_boost, lorentz_matrix
from .oscillation import survival_solar
from .parameters import *
def _invs(ev):
return 1/ev**2
class FluxBaseContinuous:
def __init__(self, ev, flux, norm=1):
self.norm = norm
self.ev = ev
self.fx = flux
self.ev_min = self.ev[0]
self.ev_max = self.ev[-1]
self.binw = self.ev[1:] - self.ev[:-1]
self.precalc = {None: self.binw*(self.fx[1:]+self.fx[:-1])/2}
def __call__(self, ev):
if ev == self.ev_min:
return self.fx[0] * self.norm
if ev == self.ev_max:
return self.fx[-1] * self.norm
if self.ev_min < ev < self.ev_max:
idx = self.ev.searchsorted(ev)
l1 = ev - self.ev[idx-1]
l2 = self.ev[idx] - ev
h1 = self.fx[idx-1]
h2 = self.fx[idx]
return (l1*h2 + l2*h1) / (l1 + l2) * self.norm
return 0
def integrate(self, ea, eb, weight_function=None):
if eb <= ea:
return 0
res = 0
        if weight_function not in self.precalc:
            # cache per-bin trapezoid areas of the weighted flux, keyed by the weight function object
            weighted = weight_function(self.ev)*self.fx
            self.precalc[weight_function] = self.binw * (weighted[1:]+weighted[:-1]) / 2
eb = min(eb, self.ev_max)
ea = max(ea, self.ev_min)
idxmin = self.ev.searchsorted(ea, side='right')
idxmax = self.ev.searchsorted(eb, side='left')
if idxmin == idxmax:
l1 = ea - self.ev[idxmin - 1]
l2 = self.ev[idxmin] - ea
h1 = self.fx[idxmin - 1] * weight_function(self.ev[idxmin - 1]) \
if weight_function is not None else self.fx[idxmin - 1]
h2 = self.fx[idxmin] * weight_function(self.ev[idxmin]) \
if weight_function is not None else self.fx[idxmin]
ha = (l1*h2+l2*h1)/(l1+l2)
l1 = eb - self.ev[idxmax - 1]
l2 = self.ev[idxmax] - eb
hb = (l1*h2+l2*h1)/(l1+l2)
return (ha + hb) * (eb - ea) / 2 * self.norm
res += np.sum(self.precalc[weight_function][idxmin:idxmax-1])
l1 = ea - self.ev[idxmin-1]
l2 = self.ev[idxmin] - ea
h1 = self.fx[idxmin-1]*weight_function(self.ev[idxmin-1]) \
if weight_function is not None else self.fx[idxmin-1]
h2 = self.fx[idxmin]*weight_function(self.ev[idxmin]) \
if weight_function is not None else self.fx[idxmin]
res += ((l1*h2+l2*h1)/(l1+l2)+h2)*l2/2
l1 = eb - self.ev[idxmax - 1]
l2 = self.ev[idxmax] - eb
h1 = self.fx[idxmax - 1] * weight_function(self.ev[idxmax - 1]) \
if weight_function is not None else self.fx[idxmax-1]
h2 = self.fx[idxmax] * weight_function(self.ev[idxmax]) \
if weight_function is not None else self.fx[idxmax]
res += ((l1 * h2 + l2 * h1) / (l1 + l2) + h1) * l1 / 2
return res * self.norm
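# --- illustrative example (not part of the original module) ---
# A minimal sketch of FluxBaseContinuous: build it from a toy 1/E^2 spectrum
# on a MeV grid, then interpolate at a point and integrate over a window.
# The grid, spectral shape and normalization below are illustrative assumptions.
def _example_flux_base_continuous():
    ev = np.linspace(1.0, 50.0, 200)      # neutrino energies in MeV
    fx = 1 / ev ** 2                      # toy spectral shape
    fl = FluxBaseContinuous(ev, fx, norm=1.0)
    point = fl(10.0)                      # linear interpolation at 10 MeV
    plain = fl.integrate(5.0, 30.0)       # trapezoidal integral over [5, 30] MeV
    weighted = fl.integrate(5.0, 30.0, weight_function=_invs)  # extra 1/E^2 weight
    return point, plain, weighted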
class Flux:
"""
    flux class:
    neutrino flux at the source
"""
def __init__(self, fl_name, delimiter=',', fl_unc=0):
"""
        initialize the flux; can take in a user-provided flux
        restrictions: user-provided data must have 7 columns;
        the first column is neutrino energy in MeV,
        the other columns are neutrino fluxes in /(cm^2 s MeV), ordered as enu, munu, taunu, enubar, munubar, taunubar
:param fl_name: name of the flux or path to the file or array of neutrino flux
:param delimiter: delimiter of the input file, default is ','
:param fl_unc: uncertainty of flux
"""
if isinstance(fl_name, str):
self.fl_name = fl_name.lower()
else:
self.fl_name = 'default'
if self.fl_name == 'reactor':
self.evMin = 0.0
self.evMax = 30 # MeV
self.flUn = 0.02
fpers = 3.0921 * (10 ** 16) # antineutrinos per fission
nuperf = 6.14102
self.__nuflux1m = nuperf * fpers / (4 * np.pi) * (meter_by_mev ** 2)
elif self.fl_name in ['sns', 'prompt', 'delayed']:
self.evMin = 0
self.evMax = 52 # MeV
self.flUn = 0.1
self.__norm = 1.13 * (10 ** 11) * (meter_by_mev ** 2)
elif self.fl_name in ['solar', 'b8', 'f17', 'n13', 'o15', 'pp', 'hep']:
f = np.genfromtxt(pkg_resources.resource_filename(__name__, 'data/' + self.fl_name + '.csv'), delimiter=',')
self.flUn = 0
self.evMin = f[0, 0]
self.evMax = f[-1, 0]
self.__nue = LinearInterp(f[:, 0], f[:, 1] * ((100 * meter_by_mev) ** 2))
else:
if isinstance(fl_name, np.ndarray):
f = fl_name
else:
f = np.genfromtxt(fl_name, delimiter=delimiter)
self.evMin = np.amin(f[:, 0])
self.evMax = np.amax(f[:, 0])
self.flUn = fl_unc
self.__nue = LinearInterp(f[:, 0], f[:, 1] * ((100 * meter_by_mev) ** 2))
self.__numu = LinearInterp(f[:, 0], f[:, 2] * ((100 * meter_by_mev) ** 2))
self.__nutau = LinearInterp(f[:, 0], f[:, 3] * ((100 * meter_by_mev) ** 2))
self.__nuebar = LinearInterp(f[:, 0], f[:, 4] * ((100 * meter_by_mev) ** 2))
self.__numubar = LinearInterp(f[:, 0], f[:, 5] * ((100 * meter_by_mev) ** 2))
self.__nutaubar = LinearInterp(f[:, 0], f[:, 6] * ((100 * meter_by_mev) ** 2))
def flux(self, ev, flavor='e', f=None, **kwargs):
"""
differential neutrino flux at the detector, unit MeV^-3*s^-1
        :param ev: neutrino energy
        :param flavor: neutrino flavor
        :param f: function that convolves with the neutrino flux, typically neutrino oscillation;
        the first argument must be neutrino energy,
        the last two arguments must be the input flavor nui and the output flavor nuf
        :param kwargs: parameters with keys that go into function f
:return: neutrino flux
"""
if self.fl_name == 'reactor':
# Phys.Rev.D39, 11 Vogel
# 5.323608902707208 = Integrate[Exp[.870 - .16*e - .091*e^2], {e, 0, 10}]
            # reactor neutrinos are actually antineutrinos; this may cause problems when doing electron scattering
if flavor == 'ebar':
if f is not None:
return np.exp(0.87 - 0.16 * ev - 0.091 * (ev ** 2)) / 5.323608902707208 * \
f(ev, nui='ebar', nuf=flavor, **kwargs)
return np.exp(0.87 - 0.16 * ev - 0.091 * (ev ** 2)) / 5.323608902707208 * self.__nuflux1m
elif flavor[-1] == 'r':
if f is not None:
return np.exp(0.87 - 0.16 * ev - 0.091 * (ev ** 2)) / 5.323608902707208 * \
f(ev, nui='ebar', nuf=flavor, **kwargs)
return 0
else:
return 0
elif self.fl_name in ['sns', 'delayed']:
if flavor[-1] != 'r':
if f is not None:
return (3 * ((ev / (2 / 3 * 52)) ** 2) - 2 * ((ev / (2 / 3 * 52)) ** 3)) / 29.25 * self.__norm * \
f(ev, nui='e', nuf=flavor, **kwargs)
return (3 * ((ev / (2 / 3 * 52)) ** 2) - 2 * ((ev / (2 / 3 * 52)) ** 3)) / 29.25 * self.__norm \
if flavor == 'e' else 0
else:
if f is not None:
return (3 * ((ev / 52) ** 2) - 2 * ((ev / 52) ** 3)) / 26 * self.__norm * \
f(ev, nui='mubar', nuf=flavor, **kwargs)
return (3 * ((ev / 52) ** 2) - 2 * ((ev / 52) ** 3)) / 26 * self.__norm if flavor == 'mubar' else 0
elif self.fl_name == 'prompt':
return 0
elif self.fl_name in ['solar', 'b8', 'f17', 'n13', 'o15', 'pp', 'hep']:
if flavor[-1] != 'r':
if f is None:
f = survival_solar
return self.__nue(ev) * f(ev, nui='e', nuf=flavor, **kwargs)
return 0
else:
if flavor[-1] != 'r':
if f is None:
if flavor == 'e':
return self.__nue(ev)
elif flavor == 'mu':
return self.__numu(ev)
elif flavor == 'tau':
return self.__nutau(ev)
else:
return 0
return self.__nue(ev) * f(ev, nui='e', nuf=flavor, **kwargs) + \
self.__numu(ev) * f(ev, nui='mu', nuf=flavor, **kwargs) + \
self.__nutau(ev) * f(ev, nui='tau', nuf=flavor, **kwargs)
else:
if f is None:
if flavor == 'ebar':
return self.__nuebar(ev)
elif flavor == 'mubar':
return self.__numubar(ev)
elif flavor == 'taubar':
return self.__nutaubar(ev)
else:
return 0
return self.__nuebar(ev) * f(ev, nui='ebar', nuf=flavor, **kwargs) + \
self.__numubar(ev) * f(ev, nui='mubar', nuf=flavor, **kwargs) + \
self.__nutaubar(ev) * f(ev, nui='taubar', nuf=flavor, **kwargs)
def fint(self, er, m, flavor='e', f=None, **kwargs):
"""
flux integration over the range that can produce a recoil energy er
:param er: recoil energy
:param m: mass of the target, it can be an array
:param flavor: neutrino flavor
:param f: function that convolves with neutrino flux, typically neutrino oscillation,
the first argument must be neutrino energy,
        the last two arguments must be the input flavor nui and the output flavor nuf
        :param kwargs: parameters with keys that go into function f
:return: the result of integration, it can be an array
"""
emin = 0.5 * (np.sqrt(er ** 2 + 2 * er * m) + er)
def fx(ev):
return self.flux(ev, flavor, f, **kwargs)
if not isinstance(emin, np.ndarray):
res = quad(fx, emin, self.evMax)[0] # no need to check range, because outside evMin and evMax are 0
if self.fl_name == 'solar':
if f is None:
f = survival_solar
# pep
res += 1.44e8 * ((100 * meter_by_mev) ** 2) * f(1.439, nui='e', nuf=flavor, **kwargs) \
if emin < 1.439 else 0
# be7
res += 5e9 * ((100 * meter_by_mev) ** 2) * f(0.8613, nui='e', nuf=flavor, **kwargs) \
if emin < 0.8613 else 0
elif self.fl_name in ['sns', 'prompt']:
if f is None and flavor == 'mu':
# prompt neutrino
res += self.__norm if emin <= 29 else 0
elif f is not None and flavor[-1] != 'r':
res += self.__norm * f(29, nui='mu', nuf=flavor, **kwargs) if emin <= 29 else 0
else:
res = np.zeros_like(emin)
for i in range(emin.shape[0]):
res[i] = quad(fx, emin[i], self.evMax)[0]
if self.fl_name == 'solar':
if f is None:
f = survival_solar
# pep
res[i] += 1.44e8 * ((100 * meter_by_mev) ** 2) * f(1.439, nui='e', nuf=flavor, **kwargs) \
if emin[i] < 1.439 else 0
# be7
res[i] += 5e9 * ((100 * meter_by_mev) ** 2) * f(0.8613, nui='e', nuf=flavor, **kwargs) \
if emin[i] < 0.8613 else 0
elif self.fl_name in ['sns', 'prompt']:
if f is None and flavor == 'mu':
# prompt neutrino
res[i] += self.__norm if emin[i] <= 29 else 0
elif f is not None and flavor[-1] != 'r':
res[i] += self.__norm * f(29, nui='mu', nuf=flavor, **kwargs) if emin[i] <= 29 else 0
return res
def fintinv(self, er, m, flavor='e', f=None, **kwargs):
"""
flux/ev integration over the range that can produce a recoil energy er
:param er: recoil energy
:param m: mass of the target, it can be an array
:param flavor: neutrino flavor
:param f: function that convolves with neutrino flux, typically neutrino oscillation,
the first argument must be neutrino energy,
        the last two arguments must be the input flavor nui and the output flavor nuf
        :param kwargs: parameters with keys that go into function f
:return: the result of integration, it can be an array
"""
emin = 0.5 * (np.sqrt(er ** 2 + 2 * er * m) + er)
def finv(ev):
"""
flux/ev
"""
return self.flux(ev, flavor, f, **kwargs) / ev
if not isinstance(emin, np.ndarray):
res = quad(finv, emin, self.evMax)[0]
if self.fl_name == 'solar':
if f is None:
f = survival_solar
# pep
res += 1.44e8 * ((100 * meter_by_mev) ** 2) * f(1.439, nui='e', nuf=flavor, **kwargs) / 1.439 \
if emin < 1.439 else 0
# be7
res += 5e9 * ((100 * meter_by_mev) ** 2) * f(0.8613, nui='e', nuf=flavor, **kwargs) / 0.8613 \
if emin < 0.8613 else 0
elif self.fl_name in ['sns', 'prompt']:
if f is None and flavor == 'mu':
# prompt neutrino
res += self.__norm / 29 if emin <= 29 else 0
elif f is not None and flavor[-1] != 'r':
res += self.__norm / 29 * f(29, nui='mu', nuf=flavor, **kwargs) if emin <= 29 else 0
else:
res = np.zeros_like(emin)
for i in range(emin.shape[0]):
res[i] = quad(finv, emin[i], self.evMax)[0]
if self.fl_name == 'solar':
if f is None:
f = survival_solar
# pep
res[i] += 1.44e8 * ((100 * meter_by_mev) ** 2) * f(1.439, nui='e', nuf=flavor, **kwargs) / \
1.439 if emin[i] < 1.439 else 0
# be7
res[i] += 5e9 * ((100 * meter_by_mev) ** 2) * f(0.8613, nui='e', nuf=flavor, **kwargs) / \
0.8613 if emin[i] < 0.8613 else 0
elif self.fl_name in ['sns', 'prompt']:
if f is None and flavor == 'mu':
# prompt neutrino
res[i] += self.__norm / 29 if emin[i] <= 29 else 0
elif f is not None and flavor[-1] != 'r':
res[i] += self.__norm / 29 * f(29, nui='mu', nuf=flavor, **kwargs) \
if emin[i] <= 29 else 0
return res
def fintinvs(self, er, m, flavor='e', f=None, **kwargs):
"""
flux/ev^2 integration over the range that can produce a recoil energy er
:param er: recoil energy
:param m: mass of the target, it can be an array
:param flavor: neutrino flavor
:param f: function that convolves with neutrino flux, typically neutrino oscillation,
the first argument must be neutrino energy,
        the last two arguments must be the input flavor nui and the output flavor nuf
        :param kwargs: parameters with keys that go into function f
:return: the result of integration, it can be an array
"""
emin = 0.5 * (np.sqrt(er ** 2 + 2 * er * m) + er)
def finvs(ev):
"""
flux/ev^2
"""
return self.flux(ev, flavor, f, **kwargs) / (ev ** 2)
if not isinstance(emin, np.ndarray):
res = quad(finvs, emin, self.evMax)[0]
if self.fl_name == 'solar':
if f is None:
f = survival_solar
# pep
res += 1.44e8 * ((100 * meter_by_mev) ** 2) * f(1.439, nui='e', nuf=flavor, **kwargs) / 1.439**2\
if emin < 1.439 else 0
# be7
res += 5e9 * ((100 * meter_by_mev) ** 2) * f(0.8613, nui='e', nuf=flavor, **kwargs) / 0.8613**2 \
if emin < 0.8613 else 0
elif self.fl_name in ['sns', 'prompt']:
if f is None and flavor == 'mu':
# prompt neutrino
res += self.__norm / 29**2 if emin <= 29 else 0
elif f is not None and flavor[-1] != 'r':
res += self.__norm / 29**2 * f(29, nui='mu', nuf=flavor, **kwargs) if emin <= 29 else 0
else:
res = np.zeros_like(emin)
for i in range(emin.shape[0]):
res[i] = quad(finvs, emin[i], self.evMax)[0]
if self.fl_name == 'solar':
if f is None:
f = survival_solar
# pep
res[i] += 1.44e8 * ((100 * meter_by_mev) ** 2) * f(1.439, nui='e', nuf=flavor, **kwargs) / \
1.439**2 if emin[i] < 1.439 else 0
# be7
res[i] += 5e9 * ((100 * meter_by_mev) ** 2) * f(0.8613, nui='e', nuf=flavor, **kwargs) / \
0.8613**2 if emin[i] < 0.8613 else 0
elif self.fl_name in ['sns', 'prompt']:
if f is None and flavor == 'mu':
# prompt neutrino
res[i] += self.__norm / 29**2 if emin[i] <= 29 else 0
elif f is not None and flavor[-1] != 'r':
res[i] += self.__norm / 29**2 * f(29, nui='mu', nuf=flavor, **kwargs) \
if emin[i] <= 29 else 0
return res
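# --- illustrative example (not part of the original module) ---
# A minimal sketch of the Flux class: the built-in 'sns' flux evaluated at a
# point and integrated above the minimum neutrino energy that can produce a
# given nuclear recoil. The recoil energy and target mass are illustrative
# numbers (roughly a CsI-scale nucleus), not experiment values.
def _example_flux_usage():
    fl = Flux('sns')
    diff = fl.flux(30.0, flavor='mubar')           # differential flux at 30 MeV
    er = 0.01                                      # recoil energy in MeV (assumed)
    m_target = 1.2e5                               # target nuclear mass in MeV (assumed)
    total = fl.fint(er, m_target, flavor='mubar')  # integrated flux above threshold
    return diff, total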
class NeutrinoFluxFactory:
def __init__(self):
        self.flux_list = ['solar', 'solar_b8', 'solar_f17', 'solar_hep', 'solar_n13', 'solar_o15', 'solar_pp',
                          'solar_pep', 'solar_be7', 'coherent', 'coherent_prompt', 'coherent_delayed',
                          'far_beam_nu', 'far_beam_nubar', 'atmospheric', 'jsns', 'jsns_prompt', 'jsns_delayed',
                          'jsns_prompt_continuous', 'near_beam_nu', 'near_beam_nubar']
def print_available(self):
print(self.flux_list)
def interp_flux(self, nrg, data):
return np.interp(nrg, data[:,0], data[:,1])
def get(self, flux_name, **kwargs):
if flux_name not in self.flux_list:
print('flux name not in current list: ', self.flux_list)
raise Exception('flux not found.')
if flux_name in ['solar_b8', 'solar_f17', 'solar_hep', 'solar_n13', 'solar_o15', 'solar_pp']:
f = np.genfromtxt(pkg_resources.resource_filename(__name__, 'data/' + flux_name[6:] + '.csv'), delimiter=',')
return NeutrinoFlux(continuous_fluxes={'ev': f[:, 0], 'e': f[:, 1]})
if flux_name == 'solar':
f = np.genfromtxt(pkg_resources.resource_filename(__name__, 'data/' + flux_name + '.csv'), delimiter=',')
return NeutrinoFlux(continuous_fluxes={'ev': f[:, 0], 'e': f[:, 1]}, delta_fluxes={'e': [(1.439, 1.44e8), (0.8613, 5e9)]})
        if flux_name == 'solar_pep':
return NeutrinoFlux(delta_fluxes={'e': [(1.439, 1.44e8), ]})
        if flux_name == 'solar_be7':
return NeutrinoFlux(delta_fluxes={'e': [(0.8613, 5e9), ]})
if flux_name == 'coherent':
def de(evv):
return (3 * ((evv / (2 / 3 * 52)) ** 2) - 2 * ((evv / (2 / 3 * 52)) ** 3)) / 29.25
def dmubar(evv):
return (3 * ((evv / 52) ** 2) - 2 * ((evv / 52) ** 3)) / 26
ev = np.linspace(0.001, 52, 100)
return NeutrinoFlux(continuous_fluxes={'ev': ev, 'e': de(ev), 'mubar': dmubar(ev)},
delta_fluxes={'mu': [(29, 1)]}, norm=1.13 * (10 ** 7)) ## default unit is /(cm^2*s)
if flux_name == 'coherent_delayed':
def de(evv):
return (3 * ((evv / (2 / 3 * 52)) ** 2) - 2 * ((evv / (2 / 3 * 52)) ** 3)) / 29.25
def dmubar(evv):
return (3 * ((evv / 52) ** 2) - 2 * ((evv / 52) ** 3)) / 26
ev = np.linspace(0.001, 52, kwargs['npoints'] if 'npoints' in kwargs else 100)
return NeutrinoFlux(continuous_fluxes={'ev': ev, 'e': de(ev), 'mubar': dmubar(ev)}, norm=1.13 * (10 ** 7))
if flux_name == 'coherent_prompt':
return NeutrinoFlux(delta_fluxes={'mu': [(29, 1)]}, norm=1.13 * (10 ** 7))
if flux_name == 'jsns':
nu_e = np.genfromtxt(pkg_resources.resource_filename(__name__, "data/jsns2/jsns_nu_e.txt"), delimiter=',')
nu_mu = np.genfromtxt(pkg_resources.resource_filename(__name__, "data/jsns2/jsns_nu_mu_nodelta.txt"), delimiter=',')
nubar_mu = np.genfromtxt(pkg_resources.resource_filename(__name__, "data/jsns2/jsns_nubar_mu.txt"), delimiter=',')
norm_nu_e = quad(self.interp_flux, 0, 300, args=(nu_e,))[0]
norm_nu_mu = quad(self.interp_flux, 0, 300, args=(nu_mu,))[0]
norm_nubar_mu = quad(self.interp_flux, 0, 300, args=(nubar_mu,))[0]
def numuPDF(energy):
return self.interp_flux(energy, nu_mu) / norm_nu_mu
def nuePDF(energy):
return self.interp_flux(energy, nu_e) / norm_nu_e
def nubarmuPDF(energy):
return self.interp_flux(energy, nubar_mu) / norm_nubar_mu
edges = np.arange(0, 302, 2) # energy bin edges
ev = (edges[:-1] + edges[1:]) / 2
return NeutrinoFlux(continuous_fluxes={'ev': ev, 'e': nuePDF(ev), 'mubar': nubarmuPDF(ev), 'mu': numuPDF(ev)},
delta_fluxes={'mu': [(29, 1),(236, 0.013)]}, norm=4.9 * (10 ** 7)) ## default unit is /(cm^2*s)
if flux_name == 'jsns_delayed':
nu_e = np.genfromtxt(pkg_resources.resource_filename(__name__, "data/jsns2/jsns_nu_e.txt"), delimiter=',')
nubar_mu = np.genfromtxt(pkg_resources.resource_filename(__name__, "data/jsns2/jsns_nubar_mu.txt"), delimiter=',')
norm_nu_e = quad(self.interp_flux, 0, 300, args=(nu_e,))[0]
norm_nubar_mu = quad(self.interp_flux, 0, 300, args=(nubar_mu,))[0]
def nuePDF(energy):
return self.interp_flux(energy, nu_e) / norm_nu_e
def nubarmuPDF(energy):
return self.interp_flux(energy, nubar_mu) / norm_nubar_mu
edges = np.arange(0, 302, 2) # energy bin edges
ev = (edges[:-1] + edges[1:]) / 2
return NeutrinoFlux(continuous_fluxes={'ev': ev, 'e': nuePDF(ev), 'mubar': nubarmuPDF(ev)}, norm=3 * (10 ** 7))
if flux_name == 'jsns_prompt':
return NeutrinoFlux(delta_fluxes={'mu': [(29, 1),(236, 0.013)]}, norm=1.85 * (10 ** 7))
if flux_name == 'jsns_prompt_continuous':
nu_mu = np.genfromtxt(pkg_resources.resource_filename(__name__, "data/jsns2/jsns_nu_mu_nodelta.txt"), delimiter=',')
norm_nu_mu = quad(self.interp_flux, 0, 300, args=(nu_mu,))[0]
def numuPDF(energy):
return self.interp_flux(energy, nu_mu) / norm_nu_mu
edges = np.arange(0, 302, 2) # energy bin edges
ev = (edges[:-1] + edges[1:]) / 2
return NeutrinoFlux(continuous_fluxes={'ev': ev, 'mu': numuPDF(ev)},
norm=1.85 * (10 ** 4))
if flux_name == 'far_beam_nu':
far_beam_txt = 'data/dune_beam_fd_nu_flux_120GeVoptimized.txt'
f_beam = np.genfromtxt(pkg_resources.resource_filename(__name__, far_beam_txt), delimiter=',')
nu = {'ev': f_beam[:, 0],
'e': f_beam[:, 1],
'mu': f_beam[:, 2],
'ebar': f_beam[:, 4],
'mubar': f_beam[:, 5]}
return NeutrinoFlux(continuous_fluxes=nu)
if flux_name == 'far_beam_nubar':
far_beam_txt = 'data/dune_beam_fd_antinu_flux_120GeVoptimized.txt'
f_beam = np.genfromtxt(pkg_resources.resource_filename(__name__, far_beam_txt), delimiter=',')
nu = {'ev': f_beam[:, 0],
'e': f_beam[:, 1],
'mu': f_beam[:, 2],
'ebar': f_beam[:, 4],
'mubar': f_beam[:, 5]}
return NeutrinoFlux(continuous_fluxes=nu)
if flux_name == 'near_beam_nu':
far_beam_txt = 'data/dune_beam_nd_nu_flux_120GeVoptimized.txt'
f_beam = np.genfromtxt(pkg_resources.resource_filename(__name__, far_beam_txt))
nu = {'ev': f_beam[:, 0],
'e': f_beam[:, 1],
'mu': f_beam[:, 2],
'ebar': f_beam[:, 4],
'mubar': f_beam[:, 5]}
return NeutrinoFlux(continuous_fluxes=nu)
if flux_name == 'near_beam_nubar':
far_beam_txt = 'data/dune_beam_nd_antinu_flux_120GeVoptimized.txt'
f_beam = np.genfromtxt(pkg_resources.resource_filename(__name__, far_beam_txt))
nu = {'ev': f_beam[:, 0],
'e': f_beam[:, 1],
'mu': f_beam[:, 2],
'ebar': f_beam[:, 4],
'mubar': f_beam[:, 5]}
return NeutrinoFlux(continuous_fluxes=nu)
if flux_name == 'atmospheric':
if 'zenith' not in kwargs:
raise Exception('please specify zenith angle')
zen = np.round(kwargs['zenith'], decimals=3)
zen_list = np.round(np.linspace(-0.975, 0.975, 40), decimals=3)
if zen not in zen_list:
print('available choice of zenith angle: ', zen_list)
raise Exception('zenith angle not available')
idx = (0.975 - zen) / 0.05 * 61
f_atmos = np.genfromtxt(pkg_resources.resource_filename(__name__, 'data/atmos.txt'), delimiter=',')
nu = {'ev': f_atmos[int(round(idx)):int(round(idx))+61, 0],
'e': f_atmos[int(round(idx)):int(round(idx))+61, 2],
'mu': f_atmos[int(round(idx)):int(round(idx))+61, 3],
'ebar': f_atmos[int(round(idx)):int(round(idx))+61, 5],
'mubar': f_atmos[int(round(idx)):int(round(idx))+61, 6]}
return NeutrinoFlux(continuous_fluxes=nu)
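# --- illustrative example (not part of the original module) ---
# A minimal sketch of the factory: list the available flux names, then build
# the COHERENT flux (prompt delta at 29 MeV plus continuous delayed spectra)
# and integrate one flavor over an illustrative energy window.
def _example_flux_factory():
    factory = NeutrinoFluxFactory()
    factory.print_available()
    flux = factory.get('coherent')
    return flux.integrate(5.0, 52.0, flavor='mubar')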
class NeutrinoFlux:
def __init__(self, continuous_fluxes=None, delta_fluxes=None, norm=1):
self.norm = norm * ((100 * meter_by_mev) ** 2)
self.ev_min = None
self.ev_max = None
if continuous_fluxes is None:
self.nu = None
elif isinstance(continuous_fluxes, dict):
self.ev = continuous_fluxes['ev']
sorted_idx = np.argsort(self.ev)
self.ev = self.ev[sorted_idx]
self.ev_min = self.ev[0]
self.ev_max = self.ev[-1]
if self.ev_min == 0:
                raise Exception('flux with neutrino energy equal to zero is not supported. '
'please consider using a small value for your lower bound.')
self.nu = {'e': continuous_fluxes['e'][sorted_idx] if 'e' in continuous_fluxes else None,
'mu': continuous_fluxes['mu'][sorted_idx] if 'mu' in continuous_fluxes else None,
'tau': continuous_fluxes['tau'][sorted_idx] if 'tau' in continuous_fluxes else None,
'ebar': continuous_fluxes['ebar'][sorted_idx] if 'ebar' in continuous_fluxes else None,
'mubar': continuous_fluxes['mubar'][sorted_idx] if 'mubar' in continuous_fluxes else None,
'taubar': continuous_fluxes['taubar'][sorted_idx] if 'taubar' in continuous_fluxes else None}
self.binw = self.ev[1:] - self.ev[:-1]
self.precalc = {None: {flr: self.binw*(flx[1:]+flx[:-1])/2 if flx is not None else None for flr, flx in self.nu.items()}}
else:
raise Exception('only support dict as input.')
if delta_fluxes is None:
self.delta_nu = None
elif isinstance(delta_fluxes, dict):
self.delta_nu = {'e': delta_fluxes['e'] if 'e' in delta_fluxes else None,
'mu': delta_fluxes['mu'] if 'mu' in delta_fluxes else None,
'tau': delta_fluxes['tau'] if 'tau' in delta_fluxes else None,
'ebar': delta_fluxes['ebar'] if 'ebar' in delta_fluxes else None,
'mubar': delta_fluxes['mubar'] if 'mubar' in delta_fluxes else None,
'taubar': delta_fluxes['taubar'] if 'taubar' in delta_fluxes else None}
for flavor in self.delta_nu: # grab the maximum energy of the delta fluxes
if self.delta_nu[flavor] is None:
continue
energies = [self.delta_nu[flavor][i][0] for i in range(len(self.delta_nu[flavor]))]
if self.ev_max is None or max(energies) > self.ev_max:
self.ev_max = max(energies)
else:
raise Exception("'delta_fluxes' must be a dictionary of a list of tuples! e.g. {'e': [(12, 4), (14, 15)], ...}")
def __call__(self, ev, flavor):
if self.nu is None or self.nu[flavor] is None:
return 0
if ev == self.ev_min:
return self.nu[flavor][0] * self.norm
if ev == self.ev_max:
return self.nu[flavor][-1] * self.norm
if self.ev_min < ev < self.ev_max:
idx = self.ev.searchsorted(ev)
l1 = ev - self.ev[idx - 1]
l2 = self.ev[idx] - ev
h1 = self.nu[flavor][idx - 1]
h2 = self.nu[flavor][idx]
return (l1*h2+l2*h1)/(l1+l2) * self.norm
return 0
def integrate(self, ea, eb, flavor, weight_function=None):
"""
        integrate the flux over [ea, eb] for one flavor, optionally weighted.
        avoid passing a lambda as weight_function: results are cached per
        weight-function object, so a fresh lambda on every call defeats the
        cache and grows memory.
        :param ea: lower bound of the neutrino energy integration, in MeV
        :param eb: upper bound of the neutrino energy integration, in MeV
        :param flavor: neutrino flavor, e.g. 'e', 'mu', 'taubar'
        :param weight_function: optional weight applied to the flux; must accept numpy arrays
        :return: the weighted integral of the flux
"""
if eb <= ea:
return 0
res = 0
if self.delta_nu is not None and self.delta_nu[flavor] is not None:
for deltas in self.delta_nu[flavor]:
if ea < deltas[0] <= eb: # self.ev_max should be included with <=
res += deltas[1] if weight_function is None else deltas[1]*weight_function(deltas[0])
if self.nu is not None and self.nu[flavor] is not None:
if weight_function not in self.precalc:
weight = weight_function(self.ev)
self.precalc[weight_function] = {flr: self.binw*((flx*weight)[1:]+(flx*weight)[:-1])/2
if flx is not None else None for flr, flx in self.nu.items()}
eb = min(eb, self.ev_max)
ea = max(ea, self.ev_min)
idxmin = self.ev.searchsorted(ea, side='right')
idxmax = self.ev.searchsorted(eb, side='left')
if idxmin == idxmax:
l1 = ea - self.ev[idxmin - 1]
l2 = self.ev[idxmin] - ea
h1 = self.nu[flavor][idxmin - 1] * weight_function(self.ev[idxmin - 1]) \
if weight_function is not None else self.nu[flavor][idxmin - 1]
h2 = self.nu[flavor][idxmin] * weight_function(self.ev[idxmin]) \
if weight_function is not None else self.nu[flavor][idxmin]
ha = (l1*h2+l2*h1)/(l1+l2)
l1 = eb - self.ev[idxmax - 1]
l2 = self.ev[idxmax] - eb
hb = (l1*h2+l2*h1)/(l1+l2)
return (ha + hb) * (eb - ea) / 2 * self.norm
res += np.sum(self.precalc[weight_function][flavor][idxmin:idxmax-1])
l1 = ea - self.ev[idxmin-1]
l2 = self.ev[idxmin] - ea
h1 = self.nu[flavor][idxmin-1]*weight_function(self.ev[idxmin-1]) \
if weight_function is not None else self.nu[flavor][idxmin-1]
h2 = self.nu[flavor][idxmin]*weight_function(self.ev[idxmin]) \
if weight_function is not None else self.nu[flavor][idxmin]
res += ((l1*h2+l2*h1)/(l1+l2)+h2)*l2/2
l1 = eb - self.ev[idxmax - 1]
l2 = self.ev[idxmax] - eb
h1 = self.nu[flavor][idxmax - 1] * weight_function(self.ev[idxmax - 1]) \
if weight_function is not None else self.nu[flavor][idxmax-1]
h2 = self.nu[flavor][idxmax] * weight_function(self.ev[idxmax]) \
if weight_function is not None else self.nu[flavor][idxmax]
res += ((l1 * h2 + l2 * h1) / (l1 + l2) + h1) * l1 / 2
return res * self.norm
def change_parameters(self):
pass
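# --- illustrative example (not part of the original module) ---
# A minimal sketch of NeutrinoFlux.integrate with a weight function. Because
# integration results are cached per weight-function object, the weight is
# defined once at module level instead of as an inline lambda (a fresh lambda
# on every call would defeat the cache). The toy spectrum and the 29 MeV
# delta line below are illustrative assumptions.
def _example_weight(ev):
    return 1 / ev  # toy 1/E weighting; accepts numpy arrays
def _example_neutrino_flux():
    ev = np.linspace(1.0, 52.0, 100)
    flux = NeutrinoFlux(continuous_fluxes={'ev': ev, 'mu': 1 / ev ** 2},
                        delta_fluxes={'mu': [(29.0, 1.0)]}, norm=1.0)
    return flux.integrate(2.0, 40.0, 'mu', weight_function=_example_weight)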
class DMFlux:
"""
Dark matter flux at COHERENT
"""
def __init__(self, dark_photon_mass, life_time, coupling_quark, dark_matter_mass,
detector_distance=19.3, pot_mu=0.75, pot_sigma=0.25, size=100000, mono_energy=None):
"""
initialize and generate flux
:param dark_photon_mass: dark photon mass
:param life_time: life time of dark photon in rest frame, unit in micro second
:param coupling_quark: dark photon coupling to quarks
:param dark_matter_mass: mass of dark matter, unit in MeV
:param detector_distance: distance from the detector to the Hg target
        :param pot_mu: mean of the gaussian distribution of protons on target, unit in microseconds
        :param pot_sigma: std of the gaussian distribution of protons on target, unit in microseconds
:param size: size of sampling dark photons
"""
self.dp_m = dark_photon_mass
self.dm_m = dark_matter_mass
self.epsi_quark = coupling_quark
self.det_dist = detector_distance / meter_by_mev
self.dp_life = life_time * 1e-6 * c_light / meter_by_mev
self.pot_mu = pot_mu * 1e-6 * c_light / meter_by_mev
self.pot_sigma = pot_sigma * 1e-6 * c_light / meter_by_mev
if mono_energy is None:
self.timing, self.energy = self._generate(size)
else:
self.timing, self.energy = self._mono_flux(mono_energy, pot_mu)
self.ed_min = self.energy.min()
self.ed_max = self.energy.max()
self.dm_norm = self.epsi_quark**2*0.23*1e20 / (4*np.pi*(detector_distance**2)*24*3600) * (meter_by_mev**2) * \
self.timing.shape[0] * 2 / size
def _generate(self, size=1000000):
"""
generate dark matter flux at COHERENT
:param size: size of sampling dark photons
:return: time and energy histogram of dark matter
"""
dp_m = self.dp_m
dp_e = ((massofpi+massofp)**2 - massofn**2 + dp_m**2)/(2*(massofpi+massofp))
dp_p = np.sqrt(dp_e ** 2 - dp_m ** 2)
dp_v = dp_p / dp_e
gamma = dp_e / dp_m
tau = self.dp_life * gamma
tf = np.random.normal(self.pot_mu, self.pot_sigma, size) # POT
t = np.random.exponential(tau, size) # life time of each dark photon
cs = np.random.uniform(-1, 1, size) # direction of each dark photon
# in rest frame
estar = dp_m / 2
pstar = np.sqrt(estar ** 2 - self.dm_m ** 2)
pstarx = pstar * cs
pstary = pstar * np.sqrt(1 - cs ** 2)
# boost to lab frame
elab = gamma * (estar + dp_v * pstarx)
plabx = gamma * (pstarx + dp_v * estar)
plaby = pstary
vx = plabx / elab
vy = plaby / elab
timing = []
energy = []
for i in range(size):
a = vx[i] ** 2 + vy[i] ** 2
b = 2 * vx[i] * t[i] * dp_v
cc = dp_v ** 2 * t[i] ** 2 - self.det_dist ** 2
if b ** 2 - 4 * a * cc >= 0:
if (-b - np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) > 0:
timing.append((-b - np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) + t[i] + tf[i])
energy.append(elab[i])
if (-b + np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) > 0:
timing.append((-b + np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) + t[i] + tf[i])
energy.append(elab[i])
return np.array(timing) / c_light * meter_by_mev * 1e6, np.array(energy)
def _mono_flux(self, e_chi, t_trig, size=1000):
return np.random.normal(loc=t_trig, scale=0.01*t_trig, size=size), np.random.normal(loc=e_chi, scale=0.005*e_chi, size=size)
def flux(self, ev):
"""
dark matter flux
:param ev: dark matter energy
:return: dark matter flux
"""
return 1/(self.ed_max-self.ed_min)*self.dm_norm if self.ed_min <= ev <= self.ed_max else 0
def fint(self, er, m, **kwargs):
"""
flux/(ex^2-mx^2) integration
:param er: recoil energy in MeV
:param m: target nucleus mass in MeV
:param kwargs: other argument
:return: flux/(ex^2-mx^2) integration
"""
emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        emin = 0.0 * emin  # lower bound zeroed (flux vanishes below ed_min anyway); preserves scalar/array type
def integrand(ex):
return self.flux(ex)/(ex**2 - self.dm_m**2)
if not isinstance(emin, np.ndarray):
res = quad(integrand, emin, self.ed_max)[0]
else:
res = np.zeros_like(emin)
for i in range(emin.shape[0]):
res[i] = quad(integrand, emin[i], self.ed_max)[0]
return res
def fint1(self, er, m, **kwargs):
"""
flux*ex/(ex^2-mx^2) integration
:param er: recoil energy in MeV
:param m: target nucleus mass in MeV
:param kwargs: other argument
:return: flux*ex/(ex^2-mx^2) integration
"""
emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        emin = 0.0 * emin  # lower bound zeroed (flux vanishes below ed_min anyway); preserves scalar/array type
def integrand(ex):
return self.flux(ex) * ex / (ex ** 2 - self.dm_m ** 2)
if not isinstance(emin, np.ndarray):
res = quad(integrand, emin, self.ed_max)[0]
else:
res = np.zeros_like(emin)
for i in range(emin.shape[0]):
res[i] = quad(integrand, emin[i], self.ed_max)[0]
return res
def fint2(self, er, m, **kwargs):
"""
flux*ex^2/(ex^2-mx^2) integration
:param er: recoil energy in MeV
:param m: target nucleus mass in MeV
:param kwargs: other argument
:return: flux*ex^2/(ex^2-mx^2) integration
"""
emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
        emin = 0.0 * emin  # lower bound zeroed (flux vanishes below ed_min anyway); preserves scalar/array type
def integrand(ex):
return self.flux(ex) * ex**2 / (ex ** 2 - self.dm_m ** 2)
if not isinstance(emin, np.ndarray):
res = quad(integrand, emin, self.ed_max)[0]
else:
res = np.zeros_like(emin)
for i in range(emin.shape[0]):
res[i] = quad(integrand, emin[i], self.ed_max)[0]
return res
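# --- illustrative example (not part of the original module) ---
# A minimal sketch of DMFlux: sample a dark-photon population decaying to dark
# matter and evaluate the flux and one weighted integral. The masses, lifetime
# and coupling are illustrative benchmark numbers, not fitted values.
def _example_dm_flux():
    dm = DMFlux(dark_photon_mass=75, life_time=0.001, coupling_quark=1e-4,
                dark_matter_mass=25, size=10000)
    mid_energy = 0.5 * (dm.ed_min + dm.ed_max)
    return dm.flux(mid_energy), dm.fint(er=0.01, m=1.2e5)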
class DMFluxIsoPhoton(FluxBaseContinuous):
def __init__(self, photon_distribution, dark_photon_mass, coupling, dark_matter_mass, life_time=0.001,
detector_distance=19.3, pot_rate=5e20, pot_sample=100000, brem_suppress=True,
pot_mu=0.7, pot_sigma=0.15, sampling_size=100, nbins=20, verbose=False):
self.nbins = nbins
self.photon_flux = photon_distribution
self.dp_m = dark_photon_mass
self.dm_m = dark_matter_mass
self.epsilon = coupling
        self.life_time = life_time  # stored in microseconds; converted to seconds where used
self.det_dist = detector_distance # meters
self.pot_rate = pot_rate # the number of POT/day in the experiment
self.pot_mu = pot_mu
self.pot_sigma = pot_sigma
self.pot_sample = pot_sample # the number of POT in photon_distribution
self.time = []
self.energy = []
self.weight = []
self.norm = 1
self.sampling_size = sampling_size
self.supp = brem_suppress # add phase space suppression
self.verbose = verbose
for photon_events in photon_distribution:
if self.verbose:
print("getting photons from E =", photon_events[0], "Size =", photon_events[1])
self._generate_single(photon_events, self.sampling_size)
normalization = self.epsilon ** 2 * (self.pot_rate / self.pot_sample) \
/ (4 * np.pi * (self.det_dist ** 2) * 24 * 3600) * (meter_by_mev**2)
self.norm = normalization
self.weight = [x * self.norm for x in self.weight]
self.timing = np.array(self.time) * 1e6
hist, bin_edges = np.histogram(self.energy, bins=nbins, weights=self.weight, density=True)
super().__init__((bin_edges[:-1] + bin_edges[1:]) / 2, hist, norm=np.sum(self.weight))
def getScaledWeights(self):
wgt = self.weight
wgt = [x * self.norm * 24 * 3600 / (meter_by_mev**2) for x in wgt]
return wgt
def simulate(self):
self.time = []
self.energy = []
self.weight = []
normalization = self.epsilon ** 2 * (self.pot_rate / self.pot_sample) \
/ (4 * np.pi * (self.det_dist ** 2) * 24 * 3600) * (meter_by_mev**2)
self.norm = normalization
for photon_events in self.photon_flux:
if self.verbose:
print("getting photons from E =", photon_events[0], "Size =", photon_events[1])
self._generate_single(photon_events, self.sampling_size)
self.weight = [x * self.norm for x in self.weight]
self.timing = np.array(self.time) * 1e6
hist, bin_edges = np.histogram(self.energy, bins=self.nbins, weights=self.weight, density=True)
super().__init__((bin_edges[:-1] + bin_edges[1:]) / 2, hist, norm=np.sum(self.weight))
def _generate_single(self, photon_events, nsamples):
# Initiate photon position, energy and momentum.
if photon_events[0]**2 < self.dp_m**2:
return
dp_m = self.dp_m
dp_e = photon_events[0]
dp_p = np.sqrt(dp_e ** 2 - self.dp_m ** 2)
dp_momentum = np.array([dp_e, 0, 0, dp_p])
# dark photon to dark matter
dm_m = self.dm_m
dm_e = self.dp_m / 2
dm_p = np.sqrt(dm_e ** 2 - dm_m ** 2)
# Directional sampling.
dp_wgt = photon_events[1] / nsamples # Event weight
# Brem suppression
if self.supp == True:
el_e = 1.0773*dp_e + 13.716 # most likely electron energy that produced this dark photon
supp_fact = min(1, 1154 * np.exp(-24.42 * np.power(dp_m/el_e, 0.3174)))
dp_wgt *= supp_fact
## optimize
#pos = np.zeros(3) ## optimize
t = np.random.normal(self.pot_mu * 1e-6, self.pot_sigma * 1e-6, nsamples)
t_dp = np.random.exponential(1e-6 * self.life_time * dp_momentum[0] / dp_m, nsamples)
t += t_dp
csd = np.random.uniform(-1, 1, nsamples)
phid = np.random.uniform(0, 2 * np.pi, nsamples)
boost_matr = lorentz_matrix(np.array([-dp_momentum[1] / dp_momentum[0],
-dp_momentum[2] / dp_momentum[0],
-dp_momentum[3] / dp_momentum[0]]))
pos_z = c_light * t_dp * dp_momentum[3] / dp_momentum[0] # position is along z by construction
for i in range(nsamples):
dm_momentum = np.array([dm_e, dm_p * np.sqrt(1 - csd[i] ** 2) * np.cos(phid[i]),
dm_p * np.sqrt(1 - csd[i] ** 2) * np.sin(phid[i]), dm_p * csd[i]])
dm_momentum = boost_matr @ dm_momentum
# dark matter arrives at detector, assuming azimuthal symmetric
# append the time and energy spectrum of the DM.
# DM particle 1
v = dm_momentum[1:] / dm_momentum[0] * c_light
a = v[0]*v[0] + v[1]*v[1] + v[2]*v[2] #np.sum(v ** 2)
b = 2*v[2]*pos_z[i] # dot product is along z by construction
c = pos_z[i]**2 - self.det_dist ** 2
if b ** 2 - 4 * a * c >= 0:
t_dm = (-b - np.sqrt(b ** 2 - 4 * a * c)) / (2 * a)
if t_dm >= 0:
if self.verbose:
print("adding weight", dp_wgt)
self.time.append(t[i]+t_dm)
self.energy.append(dm_momentum[0])
self.weight.append(dp_wgt)
t_dm = (-b + np.sqrt(b ** 2 - 4 * a * c)) / (2 * a)
if t_dm >= 0:
if self.verbose:
print("adding weight", dp_wgt)
self.time.append(t[i]+t_dm)
self.energy.append(dm_momentum[0])
self.weight.append(dp_wgt)
# DM particle 2
v = (dp_momentum - dm_momentum)[1:] / (dp_momentum - dm_momentum)[0] * c_light
a = v[0]*v[0] + v[1]*v[1] + v[2]*v[2] #np.sum(v ** 2)
            b = 2*v[2]*pos_z[i]
c = pos_z[i]**2 - self.det_dist ** 2
if b ** 2 - 4 * a * c >= 0:
t_dm = (-b - np.sqrt(b ** 2 - 4 * a * c)) / (2 * a)
if t_dm >= 0:
if self.verbose:
print("adding weight", dp_wgt)
self.time.append(t[i]+t_dm)
self.energy.append((dp_momentum - dm_momentum)[0])
self.weight.append(dp_wgt)
t_dm = (-b + np.sqrt(b ** 2 - 4 * a * c)) / (2 * a)
if t_dm >= 0:
if self.verbose:
print("adding weight", dp_wgt)
self.time.append(t[i]+t_dm)
self.energy.append((dp_momentum - dm_momentum)[0])
self.weight.append(dp_wgt)
def fint(self, er, m):
if np.isscalar(m):
m = np.array([m])
emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
res = np.zeros_like(emin)
for i in range(emin.shape[0]):
res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f0)
return res
def fint1(self, er, m):
if np.isscalar(m):
m = np.array([m])
emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
res = np.zeros_like(emin)
for i in range(emin.shape[0]):
res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f1)
return res
def fint2(self, er, m):
if np.isscalar(m):
m = np.array([m])
emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
res = np.zeros_like(emin)
for i in range(emin.shape[0]):
res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f2)
return res
def f0(self, ev):
return 1/(ev**2 - self.dm_m**2)
def f1(self, ev):
return ev/(ev**2 - self.dm_m**2)
def f2(self, ev):
return ev**2 / (ev**2 - self.dm_m**2)
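# --- illustrative example (not part of the original module) ---
# A minimal sketch of DMFluxIsoPhoton: the photon spectrum is a short invented
# list of (energy in MeV, counts) pairs standing in for a simulated beam-dump
# photon distribution; masses, coupling and sampling size are illustrative.
def _example_dm_flux_iso_photon():
    toy_photons = [(75.0, 1000.0), (150.0, 400.0), (300.0, 50.0)]
    dm = DMFluxIsoPhoton(toy_photons, dark_photon_mass=30, coupling=1e-4,
                         dark_matter_mass=5, sampling_size=50, nbins=10)
    return dm.fint(er=0.01, m=1.2e5)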
class DMFluxFromPiMinusAbsorption:
r"""
Dark matter flux from pi^- + p -> A^\prime + n -> \chi + \chi + n
"""
def __init__(self, dark_photon_mass, coupling_quark, dark_matter_mass, life_time=0.001,
detector_distance=19.3, pot_rate=5e20, pot_mu=0.7, pot_sigma=0.15, pion_rate=18324/500000,
sampling_size=100000):
"""
initialize and generate flux
default values are COHERENT experiment values
:param dark_photon_mass: dark photon mass
:param life_time: life time of dark photon in rest frame, unit in micro second
:param coupling_quark: dark photon coupling to quarks divided by electron charge
:param dark_matter_mass: mass of dark matter, unit in MeV
:param detector_distance: distance from the detector to the target
:param pot_rate: proton on target rate, unit POT/day
        :param pot_mu: mean of the gaussian distribution of protons on target, unit in microseconds
        :param pot_sigma: std of the gaussian distribution of protons on target, unit in microseconds
:param pion_rate: pi^- production rate
:param sampling_size: size of sampling dark photons
"""
self.dp_m = dark_photon_mass
self.dm_m = dark_matter_mass
self.epsi_quark = coupling_quark
self.det_dist = detector_distance / meter_by_mev
        self.life_time = life_time  # stored in microseconds; converted to seconds where used
self.pot_mu = pot_mu
self.pot_sigma = pot_sigma
self.pot_rate = pot_rate
self.pion_rate = pion_rate
self.sampling_size = sampling_size
self.timing = []
self.energy = []
self.ed_min = None
self.ed_max = None
self.norm = None
self.simulate()
self.ev_min = self.ed_min
self.ev_max = self.ed_max
def get_lifetime(self, g, m):
return ((16 * np.pi ** 2) / ((g ** 2) * m)) * mev_per_hz
def simulate(self):
"""
generate dark matter flux
"""
# First check that the dp mass is less than the pi- mass.
if self.dp_m > massofpi:
self.norm = 0.0
return
dp_m = self.dp_m
dp_e = ((massofpi + massofp) ** 2 - massofn ** 2 + dp_m ** 2) / (2 * (massofpi + massofp))
dp_p = np.sqrt(dp_e ** 2 - dp_m ** 2)
dp_v = dp_p / dp_e
gamma = dp_e / dp_m
tau = (self.life_time * 1e-6 * c_light / meter_by_mev) * gamma
tf = np.random.normal(self.pot_mu * 1e-6 * c_light / meter_by_mev,
self.pot_sigma * 1e-6 * c_light / meter_by_mev,
self.sampling_size) # POT
t = np.random.exponential(tau, self.sampling_size) # life time of each dark photon
cs = np.random.uniform(-1, 1, self.sampling_size) # direction of each dark photon
# in rest frame
estar = dp_m / 2
pstar = np.sqrt(estar ** 2 - self.dm_m ** 2)
pstarx = pstar * cs
pstary = pstar * np.sqrt(1 - cs ** 2)
# boost to lab frame
elab = gamma * (estar + dp_v * pstarx)
plabx = gamma * (pstarx + dp_v * estar)
plaby = pstary
vx = plabx / elab
vy = plaby / elab
timing = []
energy = []
for i in range(self.sampling_size):
a = vx[i] ** 2 + vy[i] ** 2
b = 2 * vx[i] * t[i] * dp_v
cc = dp_v ** 2 * t[i] ** 2 - self.det_dist ** 2
if b ** 2 - 4 * a * cc >= 0:
if (-b - np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) > 0:
timing.append((-b - np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) + t[i] + tf[i])
energy.append(elab[i])
if (-b + np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) > 0:
timing.append((-b + np.sqrt(b ** 2 - 4 * a * cc)) / (2 * a) + t[i] + tf[i])
energy.append(elab[i])
self.timing = np.array(timing) / c_light * meter_by_mev * 1e6
self.energy = np.array(energy)
self.ed_min = min(energy)
self.ed_max = max(energy)
self.ev_min = self.ed_min
self.ev_max = self.ed_max
self.norm = self.epsi_quark ** 2 * self.pot_rate * self.pion_rate / (4 * np.pi * (self.det_dist ** 2) * 24 * 3600) * \
self.timing.shape[0] * 2 / self.sampling_size
def __call__(self, ev):
"""
        dark matter flux; the spectrum is flat because the decay is isotropic
:param ev: dark matter energy
:return: dark matter flux
"""
return 1 / (self.ed_max - self.ed_min) * self.norm if self.ed_min <= ev <= self.ed_max else 0
def integrate(self, ea, eb, weight_function=None):
"""
        adaptive quadrature can achieve almost linear time on simple weight functions, so no precalculation is needed
:param ea: lowerbound
:param eb: upperbound
:param weight_function: weight function
:return: integration of the flux, weighted by the weight function
"""
if eb <= ea:
return 0
eb = min(eb, self.ed_max)
ea = max(ea, self.ed_min)
if weight_function is None:
return (eb - ea) / (self.ed_max - self.ed_min) * self.norm
return quad(weight_function, ea, eb, epsrel=1e-3)[0] / (self.ed_max - self.ed_min) * self.norm
def change_parameters(self, dark_photon_mass=None, life_time=None, coupling_quark=None, dark_matter_mass=None,
detector_distance=None, pot_rate=None, pot_mu=None, pot_sigma=None, pion_rate=None, sampling_size=None):
self.dp_m = dark_photon_mass if dark_photon_mass is not None else self.dp_m
        self.life_time = life_time if life_time is not None else self.life_time  # stored in mus; converted in simulate()
self.epsi_quark = coupling_quark if coupling_quark is not None else self.epsi_quark
self.dm_m = dark_matter_mass if dark_matter_mass is not None else self.dm_m
self.det_dist = detector_distance / meter_by_mev if detector_distance is not None else self.det_dist
self.pot_rate = pot_rate if pot_rate is not None else self.pot_rate
        # pot_mu/pot_sigma stay in mus; simulate() applies the unit conversion
        self.pot_mu = pot_mu if pot_mu is not None else self.pot_mu
        self.pot_sigma = pot_sigma if pot_sigma is not None else self.pot_sigma
        self.pion_rate = pion_rate if pion_rate is not None else self.pion_rate
self.sampling_size = sampling_size if sampling_size is not None else self.sampling_size
self.simulate()
def fint(self, er, m):
if np.isscalar(m):
m = np.array([m])
emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
res = np.zeros_like(emin)
for i in range(emin.shape[0]):
res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f0)
return res
def fint1(self, er, m):
if np.isscalar(m):
m = np.array([m])
emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
res = np.zeros_like(emin)
for i in range(emin.shape[0]):
res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f1)
return res
def fint2(self, er, m):
if np.isscalar(m):
m = np.array([m])
emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
res = np.zeros_like(emin)
for i in range(emin.shape[0]):
res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f2)
return res
def f0(self, ev):
return 1/(ev**2 - self.dm_m**2)
def f1(self, ev):
return ev/(ev**2 - self.dm_m**2)
def f2(self, ev):
return ev**2 / (ev**2 - self.dm_m**2)
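# Minimal usage sketch for DMFluxFromPiMinusAbsorption (illustrative values
# only; the module-level constants massofpi, massofp, massofn, c_light and
# meter_by_mev are assumed to be defined earlier in this file):
#
#   flux = DMFluxFromPiMinusAbsorption(dark_photon_mass=75.0,
#                                      coupling_quark=1e-4,
#                                      dark_matter_mass=25.0)
#   phi = flux(0.5 * (flux.ev_min + flux.ev_max))   # flat spectrum value
#   total = flux.integrate(flux.ev_min, flux.ev_max)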
class DMFluxFromPi0Decay(FluxBaseContinuous):
"""
z direction is the direction of the beam
"""
def __init__(self, pi0_distribution, dark_photon_mass, coupling_quark, dark_matter_mass, meson_mass=massofpi0, life_time=0.001,
detector_distance=19.3, detector_direction=0, detector_width=0.1, pot_rate=5e20, pot_mu=0.7,
pot_sigma=0.15, pion_rate=52935/500000, nbins=20):
self.pi0_distribution = pi0_distribution
self.dp_m = dark_photon_mass
        self.life_time = life_time  # input in mus
        self.epsilon = coupling_quark
self.dm_m = dark_matter_mass
self.meson_mass = meson_mass
self.det_dist = detector_distance
self.det_direc = detector_direction
self.det_width = detector_width
self.pot_rate = pot_rate
self.pot_mu = pot_mu
self.pot_sigma = pot_sigma
self.pion_rate = pion_rate
self.time = []
self.energy = []
self.nbins = nbins
for pi0_events in pi0_distribution: # must be in the form [azimuth, cos(zenith), kinetic energy]
self._generate_single(pi0_events)
self.timing = np.array(self.time)*1e6
hist, bin_edges = np.histogram(self.energy, bins=nbins, density=True)
ps_factor = np.heaviside(self.meson_mass - self.dp_m, 0.0) * 2 * self.epsilon**2 * (1 - (self.dp_m / self.meson_mass)**2)**3
super().__init__((bin_edges[:-1]+bin_edges[1:])/2, hist,
norm=ps_factor*pot_rate*pion_rate*len(self.time)/len(pi0_distribution)/
(2*np.pi*(min(1.0, detector_direction+detector_width/2)-max(-1.0, detector_direction-detector_width/2))*detector_distance**2*24*3600)
*(meter_by_mev**2))
def get_lifetime(self, g, m):
return ((16 * np.pi ** 2) / ((g ** 2) * m)) * mev_per_hz
def simulate(self):
self.time = []
self.energy = []
for pi0_events in self.pi0_distribution: # must be in the form [azimuth, cos(zenith), kinetic energy]
self._generate_single(pi0_events)
self.timing = np.array(self.time)*1e6
hist, bin_edges = np.histogram(self.energy, bins=self.nbins, density=True)
ps_factor = np.heaviside(self.meson_mass - self.dp_m, 0.0) * 2 * self.epsilon**2 * (1 - (self.dp_m / self.meson_mass)**2)**3
norm = ps_factor * self.pot_rate * self.pion_rate * \
len(self.time)/len(self.pi0_distribution)/ \
(2*np.pi*(min(1.0, self.det_direc+self.det_width/2)-max(-1.0, self.det_direc-self.det_width/2))*self.det_dist**2*24*3600)*(meter_by_mev**2)
super().__init__((bin_edges[:-1]+bin_edges[1:])/2, hist, norm=norm)
def _generate_single(self, pi0_events):
if self.dp_m > self.meson_mass:
return
pos = np.zeros(3)
t = 0
t += np.random.normal(self.pot_mu * 1e-6, self.pot_sigma * 1e-6)
pi_e = self.meson_mass + pi0_events[2]
pi_p = np.sqrt(pi_e**2 - self.meson_mass**2)
pi_v = pi_p / pi_e
t_pi = np.random.exponential(8.4e-17*pi_e/self.meson_mass)
pos += pi_v * polar_to_cartesian(pi0_events[:2]) * t_pi * c_light
t += t_pi
# pi0 to dark photon
dp_m = self.dp_m
dp_e = (self.meson_mass**2 + dp_m**2)/(2*self.meson_mass)
dp_p = (self.meson_mass**2 - dp_m**2)/(2*self.meson_mass)
cs = np.random.uniform(-1, 1)
phi = np.random.uniform(0, 2*np.pi)
dp_momentum = np.array([dp_e, dp_p*np.sqrt(1-cs**2)*np.cos(phi), dp_p*np.sqrt(1-cs**2)*np.sin(phi), dp_p*cs])
dp_momentum = lorentz_boost(dp_momentum, -pi_v*polar_to_cartesian(pi0_events[:2]))
t_dp = np.random.exponential((self.life_time*1e-6)*dp_momentum[0]/dp_m)
pos += c_light*t_dp*np.array([dp_momentum[1]/dp_momentum[0], dp_momentum[2]/dp_momentum[0], dp_momentum[3]/dp_momentum[0]])
t += t_dp
# dark photon to dark matter
dm_m = self.dm_m
dm_e = dp_m / 2
dm_p = np.sqrt(dm_e**2 - dm_m**2)
csd = np.random.uniform(-1, 1)
phid = np.random.uniform(0, 2*np.pi)
dm_momentum = np.array([dm_e, dm_p*np.sqrt(1-csd**2)*np.cos(phid), dm_p*np.sqrt(1-csd**2)*np.sin(phid), dm_p*csd])
dm_momentum = lorentz_boost(dm_momentum, np.array([-dp_momentum[1]/dp_momentum[0],
-dp_momentum[2]/dp_momentum[0],
-dp_momentum[3]/dp_momentum[0]]))
# dark matter arrives at detector, assuming azimuthal symmetric
v = dm_momentum[1:]/dm_momentum[0]*c_light
a = np.sum(v**2)
b = 2*np.sum(v*pos) #2 * v[2] * (c_light * dp_p / dp_e) * t_dp
c = np.sum(pos**2) - self.det_dist**2
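        # Solving |pos + v*t| = det_dist for t (a quadratic in t): the two
        # roots are the times at which this dark matter ray crosses the
        # detector sphere; only non-negative roots count as arrivals.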
if b**2 - 4*a*c >= 0:
t_dm = (-b+np.sqrt(b**2-4*a*c))/(2*a)
if t_dm >= 0: #and self.det_direc-self.det_width/2 <= (pos[2]+v[2]*t_dm)/np.sqrt(np.sum((v*t_dm + pos)**2)) <= self.det_direc+self.det_width/2:
self.time.append(t+t_dm)
self.energy.append(dm_momentum[0])
t_dm = (-b-np.sqrt(b**2-4*a*c))/(2*a)
if t_dm >= 0: #and self.det_direc-self.det_width/2 <= (pos[2]+v[2]*t_dm)/np.sqrt(np.sum((v*t_dm + pos)**2)) <= self.det_direc+self.det_width/2:
self.time.append(t+t_dm)
self.energy.append(dm_momentum[0])
v = (dp_momentum-dm_momentum)[1:]/(dp_momentum-dm_momentum)[0]*c_light
a = np.sum(v**2)
b = 2*np.sum(v*pos)
c = np.sum(pos**2) - self.det_dist**2
if b**2 - 4*a*c >= 0:
t_dm = (-b+np.sqrt(b**2-4*a*c))/(2*a)
if t_dm >= 0: #and self.det_direc-self.det_width/2 <= (pos[2]+v[2]*t_dm)/np.sqrt(np.sum((v*t_dm + pos)**2)) <= self.det_direc+self.det_width/2:
self.time.append(t+t_dm)
self.energy.append((dp_momentum-dm_momentum)[0])
t_dm = (-b-np.sqrt(b**2-4*a*c))/(2*a)
if t_dm >= 0: #and self.det_direc-self.det_width/2 <= (pos[2]+v[2]*t_dm)/np.sqrt(np.sum((v*t_dm + pos)**2)) <= self.det_direc+self.det_width/2:
self.time.append(t+t_dm)
self.energy.append((dp_momentum-dm_momentum)[0])
def to_pandas(self):
return pd.DataFrame({'time': self.time, 'energy': self.energy})
def fint(self, er, m):
if np.isscalar(m):
m = np.array([m])
emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
res = np.zeros_like(emin)
for i in range(emin.shape[0]):
res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f0)
return res
def fint1(self, er, m):
if np.isscalar(m):
m = np.array([m])
emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
res = np.zeros_like(emin)
for i in range(emin.shape[0]):
res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f1)
return res
def fint2(self, er, m):
if np.isscalar(m):
m = np.array([m])
emin = 0.5 * (np.sqrt((er**2*m+2*er*m**2+2*er*self.dm_m**2+4*m*self.dm_m**2)/m) + er)
res = np.zeros_like(emin)
for i in range(emin.shape[0]):
res[i] = self.integrate(emin[i], self.ev_max, weight_function=self.f2)
return res
def f0(self, ev):
return 1/(ev**2 - self.dm_m**2)
def f1(self, ev):
return ev/(ev**2 - self.dm_m**2)
def f2(self, ev):
return ev**2 / (ev**2 - self.dm_m**2)
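# Minimal usage sketch for DMFluxFromPi0Decay (hypothetical pi0 sample; a real
# distribution would come from a beam/target simulation):
#
#   import numpy as np
#   pi0_sample = [[np.random.uniform(0, 2 * np.pi),
#                  np.random.uniform(-1, 1),
#                  np.random.uniform(0, 300)] for _ in range(1000)]
#   flux = DMFluxFromPi0Decay(pi0_sample, dark_photon_mass=75.0,
#                             coupling_quark=1e-4, dark_matter_mass=25.0)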
| 48.09542 | 163 | 0.53164 | 62,689 | 0.994985 | 0 | 0 | 0 | 0 | 0 | 0 | 11,155 | 0.177049 |
9613fedd3e0d7142ca8e288d57dc930f5c14893f | 7,252 | py | Python | enso/contrib/minimessages.py | blackdaemon/enso-launcher-continued | 346f82811e77caf73560619cdeb16afabfbf1fce | ["BSD-3-Clause"] | 7 | 2015-09-19T20:57:32.000Z | 2020-12-31T16:34:42.000Z | enso/contrib/minimessages.py | blackdaemon/enso-launcher-continued | 346f82811e77caf73560619cdeb16afabfbf1fce | ["BSD-3-Clause"] | 21 | 2015-11-03T23:15:25.000Z | 2018-10-11T21:57:45.000Z | enso/contrib/minimessages.py | blackdaemon/enso-launcher-continued | 346f82811e77caf73560619cdeb16afabfbf1fce | ["BSD-3-Clause"] | 4 | 2015-09-15T17:18:00.000Z | 2021-06-16T07:06:06.000Z |
# Author : Pavel Vitis "blackdaemon"
# Email : [email protected]
#
# Copyright (c) 2010, Pavel Vitis <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Enso nor the names of its contributors may
# be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
#
# enso.contrib.minimessages
#
# ----------------------------------------------------------------------------
"""
An Enso plugin that makes all mini-messages related commands available.
Commands:
hide mini messages
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
from xml.sax.saxutils import escape as xml_escape
import enso.messages
from enso.commands import CommandManager, CommandObject
from enso.commands.factories import ArbitraryPostfixFactory
from enso.contrib.scriptotron.ensoapi import EnsoApi
from enso.contrib.scriptotron.tracebacks import safetyNetted
from enso.messages import MessageManager, TimedMiniMessage
ensoapi = EnsoApi()
# ----------------------------------------------------------------------------
# The 'hide mini messages' command
# ---------------------------------------------------------------------------
class HideMiniMessagesCommand(CommandObject):
"""
The 'hide mini messages' command.
"""
NAME = "hide mini messages"
DESCRIPTION = "Hides all mini messages."
def __init__(self):
super(HideMiniMessagesCommand, self).__init__()
self.setDescription(self.DESCRIPTION)
self.setName(self.NAME)
@safetyNetted
def run(self):
MessageManager.get().finishMessages()
# ----------------------------------------------------------------------------
# The 'show mini message' testing command
# ---------------------------------------------------------------------------
class ShowMiniMessageCommand(CommandObject):
"""
The 'show mini message {text}' command.
"""
LOREMIPSUM = u"Lorem ipsum dolor sit amet, consectetur adipiscing elit. "\
"Nunc fringilla ipsum dapibus mi porta et laoreet turpis porta. Class aptent "\
"taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. "\
"Duis commodo massa nec arcu mollis auctor. Nunc et orci quis lacus suscipit "\
"dictum eu vitae est. Donec neque massa, pretium sed venenatis sed, consequat "\
"quis est. Proin auctor consequat euismod. Praesent iaculis placerat libero eu "\
"gravida. Curabitur ullamcorper velit sit amet tortor fermentum fringilla. "\
"Pellentesque non lectus mauris, a iaculis ipsum. Cum sociis natoque penatibus "\
"et magnis dis parturient montes, nascetur ridiculus mus. Vivamus mauris nibh, "\
"ultrices in accumsan in, bibendum sed mi. Ut ut nunc a mi vestibulum luctus. "\
"Sed ornare euismod justo a condimentum."
def __init__(self, postfix):
super(ShowMiniMessageCommand, self).__init__()
self._postfix = postfix
self._msgmanager = MessageManager.get()
@safetyNetted
def run(self):
import random
text = self._postfix
if text and "," in text:
timeout, text = text.split(",")
timeout = max(int(timeout), 0)
else:
timeout = None
if not text:
pos = random.randint(0, self.LOREMIPSUM.count(" ") - 10 + 1)
cnt = random.randint(5, 10)
words = self.LOREMIPSUM.split()
text = " ".join(words[pos:pos + cnt])
if text[0].upper() != text[0]:
text = "..." + text
if text[-1] != ".":
text = text + "..."
if timeout:
caption = "test message (timed %ds)" % timeout
else:
caption = "test message"
msg = xml_escape(text)
caption = xml_escape(caption)
if caption:
xmltext = u"<p>%s</p><caption>%s</caption>" % (msg, caption)
else:
xmltext = u"<p>%s</p>" % (msg)
msg = TimedMiniMessage(
primaryXml=None,
miniXml=xmltext,
waitTime=timeout
)
self._msgmanager.newMessage(msg)
class ShowMiniMessageFactory(ArbitraryPostfixFactory):
"""
Generates a "show mini message {text}" command.
"""
PREFIX = "show mini message "
DESCRIPTION = "Show mini message with given timeout and text, both optional."
HELP_TEXT = "{timeout,text}"
NAME = "%s%s" % (PREFIX, HELP_TEXT)
def _generateCommandObj(self, postfix):
cmd = ShowMiniMessageCommand(postfix)
cmd.setDescription(self.DESCRIPTION)
cmd.setName(self.NAME)
cmd.setHelp(self.HELP_TEXT)
return cmd
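    # Example postfix inputs parsed by ShowMiniMessageCommand.run():
    #   ""        -> random lorem ipsum snippet, no timeout
    #   "5,Hello" -> shows "Hello" as a timed message for 5 seconds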
class ShowRecentMessageCommand(CommandObject):
"""
The 'show recent message' command.
"""
NAME = "show recent message"
DESCRIPTION = "Show recent message."
def __init__(self):
super(ShowRecentMessageCommand, self).__init__()
self.setDescription(self.DESCRIPTION)
self.setName(self.NAME)
@safetyNetted
def run(self):
if not enso.messages.displayRecentMessage():
ensoapi.display_message(u"No recent messages.")
# ----------------------------------------------------------------------------
# Plugin initialization
# ---------------------------------------------------------------------------
def load():
cmdMan = CommandManager.get()
cmdMan.registerCommand(
HideMiniMessagesCommand.NAME,
HideMiniMessagesCommand()
)
cmdMan.registerCommand(
ShowMiniMessageFactory.NAME,
ShowMiniMessageFactory()
)
cmdMan.registerCommand(
ShowRecentMessageCommand.NAME,
ShowRecentMessageCommand()
)
# vim:set tabstop=4 shiftwidth=4 expandtab:
| 34.533333 | 90 | 0.59818 | 3,797 | 0.52358 | 0 | 0 | 1,368 | 0.188638 | 0 | 0 | 3,993 | 0.550607 |
9616192a13cde5beffe85342bdb0bcbe725c8e0a | 3,597 | py | Python | article_curation/test_article_curation.py | mrkarezina/graph-recommendation-api | 8ed3895f7816b095ec27f3c1d972bf5b8e163b34 | ["MIT"] | null | null | null | article_curation/test_article_curation.py | mrkarezina/graph-recommendation-api | 8ed3895f7816b095ec27f3c1d972bf5b8e163b34 | ["MIT"] | null | null | null | article_curation/test_article_curation.py | mrkarezina/graph-recommendation-api | 8ed3895f7816b095ec27f3c1d972bf5b8e163b34 | ["MIT"] | null | null | null |
import pytest
from unittest.mock import Mock
import json
from processor import scrape_article
import main
class ArticleCurationTestCase(unittest.TestCase):
def test_article_fetch(self):
response = scrape_article(
url='https://www.cnn.com/2019/03/25/us/yale-rescinds-student-admissions-scandal/index.html')
self.assertGreater(len(response["text"].split()), 150)
self.assertIn("Yale rescinds", response["title"])
self.assertIn("http", response["img_url"])
# Tricky url, tests if the extended newspaper component works
response = scrape_article(
url='http://www.physiciansnewsnetwork.com/ximed/study-hospital-physician-vertical-integration-has-little-impact-on-quality/article_257c41a0-3a11-11e9-952b-97cc981efd76.html')
self.assertGreater(len(response["text"].split()), 150)
self.assertIn("http", response["img_url"])
def test_article_fetch_endpoint(self):
"""
Test the actual endpoint by simulating the request object
:return:
"""
data = {
"article_url": "https://techcrunch.com/2019/05/01/alexa-in-skill-purchasing-which-lets-developers-make-money-from-voice-apps-launches-internationally"
}
req = Mock(get_json=Mock(return_value=data), args=data)
response, code, headers = main.fetch_article(req)
self.assertEqual(code, 200)
self.assertGreater(len(json.loads(response)["text"].split()), 150)
# Testing a bad url, see error message
data = {
"article_url": "https://example.com/test123"
}
req = Mock(get_json=Mock(return_value=data), args=data)
response, code, headers = main.fetch_article(req)
self.assertEqual(code, 500)
def test_download_rss_endpoint(self):
data = {
"rss_url": "http://rss.cnn.com/rss/cnn_topstories.rss"
}
req = Mock(get_json=Mock(return_value=data), args=data)
response, code, headers = main.download_rss(req)
self.assertEqual(code, 200)
self.assertGreater(len(json.loads(response)), 1)
def test_fetch_rss_endpoint(self):
data = {
"rss_url": "http://rss.cnn.com/rss/cnn_topstories.rss"
}
req = Mock(get_json=Mock(return_value=data), args=data)
response, code, headers = main.fetch_rss(req)
self.assertEqual(code, 200)
self.assertGreater(len(json.loads(response)), 1)
# Test case when rss not in DB
data = {
"rss_url": "http://www.example.com/example.rss"
}
req = Mock(get_json=Mock(return_value=data), args=data)
response, code, headers = main.fetch_rss(req)
self.assertEqual(code, 404)
# def test_get_article_dicts_from_rss_cache(self):
#
# start = time.time()
# for i in range(1000):
# article_dicts = get_article_dicts_from_rss('http://rss.cnn.com/rss/cnn_topstories.rss')
#
# end = time.time()
# total_time = end - start
#
# # Make less than 10 sec, so cache works
# self.assertLess(total_time, 10)
#
# def test_get_article_dicts_from_rss(self):
#
# article_dicts = get_article_dicts_from_rss('http://rss.cnn.com/rss/cnn_topstories.rss')
# self.assertGreater(len(article_dicts), 0)
#
# for article in article_dicts:
# self.assertIn("http", article["img_url"])
#
# # Make sure title has more than 0 characters
# self.assertGreater(len(article["title"]), 0)
| 34.586538 | 186 | 0.633584 | 3,485 | 0.968863 | 0 | 0 | 0 | 0 | 0 | 0 | 1,611 | 0.447873 |
9616936f76e77083ea419e018de9e5eaec39224e | 4,715 | py | Python | test.py | chdre/noise-randomized | c803fd6c6fd641a0b1c0f4880920584a647587bc | ["MIT"] | null | null | null | test.py | chdre/noise-randomized | c803fd6c6fd641a0b1c0f4880920584a647587bc | ["MIT"] | null | null | null | test.py | chdre/noise-randomized | c803fd6c6fd641a0b1c0f4880920584a647587bc | ["MIT"] | 3 | 2021-10-05T09:01:51.000Z | 2021-10-05T09:37:06.000Z |
import unittest
class PerlinTestCase(unittest.TestCase):
def test_perlin_1d_range(self):
from noise import pnoise1
for i in range(-10000, 10000):
x = i * 0.49
n = pnoise1(x)
self.assertTrue(-1.0 <= n <= 1.0, (x, n))
def test_perlin_1d_octaves_range(self):
from noise import pnoise1
for i in range(-1000, 1000):
for o in range(10):
x = i * 0.49
n = pnoise1(x, octaves=o + 1)
self.assertTrue(-1.0 <= n <= 1.0, (x, n))
def test_perlin_1d_base(self):
from noise import pnoise1
self.assertEqual(pnoise1(0.5), pnoise1(0.5, base=0))
self.assertNotEqual(pnoise1(0.5), pnoise1(0.5, base=5))
self.assertNotEqual(pnoise1(0.5, base=5), pnoise1(0.5, base=1))
def test_perlin_2d_range(self):
from noise import pnoise2
for i in range(-10000, 10000):
x = i * 0.49
y = -i * 0.67
n = pnoise2(x, y)
self.assertTrue(-1.0 <= n <= 1.0, (x, y, n))
def test_perlin_2d_octaves_range(self):
from noise import pnoise2
for i in range(-1000, 1000):
for o in range(10):
x = -i * 0.49
y = i * 0.67
n = pnoise2(x, y, octaves=o + 1)
self.assertTrue(-1.0 <= n <= 1.0, (x, n))
def test_perlin_2d_base(self):
from noise import pnoise2
x, y = 0.73, 0.27
self.assertEqual(pnoise2(x, y), pnoise2(x, y, base=0))
self.assertNotEqual(pnoise2(x, y), pnoise2(x, y, base=5))
self.assertNotEqual(pnoise2(x, y, base=5), pnoise2(x, y, base=1))
def test_perlin_3d_range(self):
from noise import pnoise3
for i in range(-10000, 10000):
x = -i * 0.49
y = i * 0.67
z = -i * 0.727
n = pnoise3(x, y, z)
self.assertTrue(-1.0 <= n <= 1.0, (x, y, z, n))
def test_perlin_3d_octaves_range(self):
from noise import pnoise3
for i in range(-1000, 1000):
x = i * 0.22
y = -i * 0.77
z = -i * 0.17
for o in range(10):
n = pnoise3(x, y, z, octaves=o + 1)
self.assertTrue(-1.0 <= n <= 1.0, (x, y, z, n))
def test_perlin_3d_base(self):
from noise import pnoise3
x, y, z = 0.1, 0.7, 0.33
self.assertEqual(pnoise3(x, y, z), pnoise3(x, y, z, base=0))
self.assertNotEqual(pnoise3(x, y, z), pnoise3(x, y, z, base=5))
self.assertNotEqual(pnoise3(x, y, z, base=5), pnoise3(x, y, z, base=1))
class SimplexTestCase(unittest.TestCase):
def test_randomize(self):
from noise import randomize
self.assertTrue(randomize(4096,23490))
def test_simplex_2d_range(self):
from noise import snoise2
for i in range(-10000, 10000):
x = i * 0.49
y = -i * 0.67
n = snoise2(x, y)
self.assertTrue(-1.0 <= n <= 1.0, (x, y, n))
def test_simplex_2d_octaves_range(self):
from noise import snoise2
for i in range(-1000, 1000):
for o in range(10):
x = -i * 0.49
y = i * 0.67
n = snoise2(x, y, octaves=o + 1)
self.assertTrue(-1.0 <= n <= 1.0, (x, n))
def test_simplex_3d_range(self):
from noise import snoise3
for i in range(-10000, 10000):
x = i * 0.31
y = -i * 0.7
z = i * 0.19
n = snoise3(x, y, z)
self.assertTrue(-1.0 <= n <= 1.0, (x, y, z, n))
def test_simplex_3d_octaves_range(self):
from noise import snoise3
for i in range(-1000, 1000):
x = -i * 0.12
y = i * 0.55
z = i * 0.34
for o in range(10):
n = snoise3(x, y, z, octaves=o + 1)
self.assertTrue(-1.0 <= n <= 1.0, (x, y, z, o+1, n))
def test_simplex_4d_range(self):
from noise import snoise4
for i in range(-10000, 10000):
x = i * 0.88
y = -i * 0.11
z = -i * 0.57
w = i * 0.666
n = snoise4(x, y, z, w)
self.assertTrue(-1.0 <= n <= 1.0, (x, y, z, w, n))
def test_simplex_4d_octaves_range(self):
from noise import snoise4
for i in range(-1000, 1000):
x = -i * 0.12
y = i * 0.55
z = i * 0.34
w = i * 0.21
for o in range(10):
n = snoise4(x, y, z, w, octaves=o + 1)
self.assertTrue(-1.0 <= n <= 1.0, (x, y, z, w, o+1, n))
if __name__ == '__main__':
unittest.main()
| 32.972028 | 79 | 0.487381 | 4,644 | 0.984942 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.002121 |
961a0eab590ae86fe03daebff4911d080dc4f38a | 3,829 | py | Python | pipelines/controllers/datasets.py | platiagro/pipeline-generator | d84b9512c39970c469154eaed56f08780ebf21eb | ["Apache-2.0"] | 1 | 2020-05-19T14:57:55.000Z | 2020-05-19T14:57:55.000Z | pipelines/controllers/datasets.py | platiagro/pipelines | d84b9512c39970c469154eaed56f08780ebf21eb | ["Apache-2.0"] | 93 | 2020-04-25T21:10:49.000Z | 2020-12-15T18:25:49.000Z | pipelines/controllers/datasets.py | platiagro/pipelines | d84b9512c39970c469154eaed56f08780ebf21eb | ["Apache-2.0"] | 6 | 2019-09-05T12:37:59.000Z | 2020-08-08T00:08:25.000Z |
# -*- coding: utf-8 -*-
import platiagro
import pandas as pd
from werkzeug.exceptions import NotFound
from pipelines.database import db_session
from pipelines.models import Operator
from pipelines.models.utils import raise_if_experiment_does_not_exist
def get_dataset_name(experiment_id, operator_id):
"""Retrieves a dataset name from experiment.
Args:
experiment_id(str): the experiment uuid
operator_id(str): the operator uuid
Returns:
Dataset name
"""
raise_if_experiment_does_not_exist(experiment_id)
operator = Operator.query.get(operator_id)
if operator is None:
raise NotFound("The specified operator does not exist")
# get dataset name
dataset = operator.parameters.get('dataset')
if dataset is None:
# try to find dataset name in other operators
operators = db_session.query(Operator) \
.filter_by(experiment_id=experiment_id) \
.filter(Operator.uuid != operator_id) \
.all()
for operator in operators:
dataset = operator.parameters.get('dataset')
if dataset:
break
if dataset is None:
raise NotFound()
return dataset
def get_dataset_pagination(application_csv,
name,
operator_id,
page,
page_size,
run_id):
"""Retrieves a dataset.
Args:
        application_csv(bool): whether to return the dataset as CSV
        name(str): the dataset name
        operator_id(str): the operator uuid
        page_size(int): number of records per page
        page(int): page number
run_id (str): the run id.
Returns:
Dataset
"""
try:
metadata = platiagro.stat_dataset(name=name, operator_id=operator_id)
if "run_id" not in metadata:
raise FileNotFoundError()
dataset = platiagro.load_dataset(name=name, operator_id=operator_id, run_id=run_id)
except FileNotFoundError as e:
raise NotFound(str(e))
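    # page_size == -1 is treated as "return the whole dataset in one page"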
if page_size == -1:
if application_csv:
return dataset.to_csv(index=False)
dataset = dataset.to_dict(orient="split")
del dataset["index"]
return dataset
else:
dataset = dataset.to_dict(orient="split")
del dataset["index"]
pdataset = pagination_datasets(page=page, page_size=page_size, dataset=dataset)
if application_csv:
df = pd.DataFrame(columns=pdataset['columns'], data=pdataset['data'])
return df.to_csv(index=False)
return pdataset
def pagination_datasets(page, page_size, dataset):
"""pagination of datasets.
Args:
        page_size(int): number of records per page
page(int): page number
dataset(json): data to be paged
Returns:
Paged dataset
"""
try:
count = 0
new_datasets = []
total_elements = len(dataset['data'])
page = (page * page_size) - page_size
for i in range(page, total_elements):
new_datasets.append(dataset['data'][i])
count += 1
if page_size == count:
response = {
'columns': dataset['columns'],
'data': new_datasets,
'total': len(dataset['data'])
}
return response
if len(new_datasets) == 0:
raise NotFound("The informed page does not contain records")
else:
response = {
'columns': dataset['columns'],
'data': new_datasets,
'total': len(dataset['data'])
}
return response
except RuntimeError:
raise NotFound("The specified page does not exist")
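# Minimal usage sketch for pagination_datasets (the dataset uses the same
# "split" orientation produced by DataFrame.to_dict(orient="split") above):
#
#   dataset = {"columns": ["a", "b"], "data": [[1, 2], [3, 4], [5, 6]]}
#   page_1 = pagination_datasets(page=1, page_size=2, dataset=dataset)
#   # -> {"columns": ["a", "b"], "data": [[1, 2], [3, 4]], "total": 3}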
| 32.176471 | 91 | 0.58527 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,054 | 0.275268 |
961b41ac7e12348d2cd9bb21a06c9a3f33d3b4af | 4,545 | py | Python | tests/test_message.py | jfkinslow/flask-mailing | dda99214b783b60fabc7dfad209fff4438eaf61c | ["MIT"] | null | null | null | tests/test_message.py | jfkinslow/flask-mailing | dda99214b783b60fabc7dfad209fff4438eaf61c | ["MIT"] | null | null | null | tests/test_message.py | jfkinslow/flask-mailing | dda99214b783b60fabc7dfad209fff4438eaf61c | ["MIT"] | null | null | null |
import pytest
from flask_mailing.schemas import Message, MultipartSubtypeEnum
from flask_mailing.msg import MailMsg
import os
CONTENT = "file test content"
def test_initialize():
message = Message(
subject="test subject",
recipients=["[email protected]"],
body="test",
subtype="plain"
)
assert message.subject == "test subject"
def test_recipients_properly_initialized():
message = Message(
subject="test subject",
recipients=[],
body="test",
subtype="plain"
)
assert message.recipients == []
def test_add_recipient_method():
message = Message(
subject="test subject",
recipients=[],
body="test",
subtype="plain"
)
message.add_recipient("[email protected]")
assert message.recipients == ["[email protected]"]
def test_sendto_properly_set():
msg = Message(subject="subject", recipients=["[email protected]", "[email protected]"],
cc=["[email protected]"], bcc=["[email protected]"], reply_to=["[email protected]"])
assert len(msg.recipients) == 2
assert len(msg.cc) == 1
assert len(msg.bcc) == 1
assert len(msg.reply_to) == 1
def test_plain_message():
message = Message(
subject="test subject",
recipients=["[email protected]"],
body="test",
subtype="plain"
)
assert message.body == "test"
def test_charset():
message = Message(
subject="test subject",
recipients=["[email protected]"],
body="test",
subtype="plain"
)
assert message.charset == "utf-8"
def test_message_str():
message = Message(
subject="test subject",
recipients=["[email protected]"],
body="test",
subtype="plain"
)
assert type(message.body) == str
def test_plain_message_with_attachments():
directory = os.getcwd()
attachement = directory + "/files/attachement.txt"
msg = Message(subject="testing",
recipients=["[email protected]"],
attachments=[attachement],
body="test mail body")
with open(attachement, "w") as file:
file.write(CONTENT)
assert len(msg.attachments) == 1
def test_plain_message_with_attach_method():
directory = os.getcwd()
attachement = directory + "/files/attachement_1.txt"
msg = Message(subject="testing",
recipients=["[email protected]"],
body="test mail body")
with open(attachement, "w") as file:
file.write(CONTENT)
with open(attachement, "rb") as fp:
msg.attach("attachement_1.txt", fp.read())
assert len(msg.attachments) == 1
def test_empty_subject_header():
message = Message(
subject="",
recipients=["[email protected]"],
body="test",
subtype="plain"
)
assert len(message.subject) == 0
def test_bcc():
msg = Message(subject="subject", recipients=[],
bcc=["[email protected]"])
assert len(msg.bcc) == 1
assert msg.bcc == ["[email protected]"]
def test_replyto():
msg = Message(subject="subject", recipients=[],
reply_to=["[email protected]"])
assert len(msg.reply_to) == 1
assert msg.reply_to == ["[email protected]"]
def test_cc():
msg = Message(subject="subject", recipients=[],
cc=["[email protected]"])
assert len(msg.cc) == 1
assert msg.cc == ["[email protected]"]
def test_multipart_subtype():
message = Message(
subject="test subject",
recipients=["[email protected]"],
body="test",
subtype="plain"
)
assert message.multipart_subtype == MultipartSubtypeEnum.mixed
@pytest.mark.asyncio
async def test_msgid_header():
message = Message(
subject="test subject",
recipients=["[email protected]"],
body="test",
subtype="plain"
)
msg = MailMsg(**message.dict())
msg_object = await msg._message('[email protected]')
assert msg_object['Message-ID'] is not None
@pytest.mark.asyncio
async def test_message_charset():
message = Message(
subject="test subject",
recipients=["[email protected]"],
body="test",
subtype="plain"
)
msg = MailMsg(**message.dict())
msg_object = await msg._message('[email protected]')
assert msg_object._charset is not None
    assert msg_object._charset == "utf-8"
| 24.175532 | 105 | 0.59824 | 0 | 0 | 0 | 0 | 719 | 0.158196 | 677 | 0.148955 | 958 | 0.210781 |
961ccfb0c6fb46c865492bed7af363f36b450b4b | 1,239 | py | Python | utils/checks.py | JDJGInc/JDBot | 057bcc5c80452c9282606e9bf66219e614aac5e1 | ["MIT"] | 12 | 2021-01-09T06:17:51.000Z | 2022-03-18T06:30:15.000Z | utils/checks.py | JDJGInc/JDBot | 057bcc5c80452c9282606e9bf66219e614aac5e1 | ["MIT"] | 21 | 2021-03-21T16:43:45.000Z | 2022-02-01T16:02:26.000Z | utils/checks.py | JDJGInc/JDBot | 057bcc5c80452c9282606e9bf66219e614aac5e1 | ["MIT"] | 25 | 2021-03-21T16:33:56.000Z | 2022-03-12T16:52:25.000Z |
import discord
def check(ctx):
def inner(m):
return m.author == ctx.author
return inner
def Membercheck(ctx):
def inner(m):
return m.author == ctx.guild.me
return inner
def warn_permission(ctx, Member):
if isinstance(ctx.channel, discord.TextChannel):
return ctx.author.guild_permissions.manage_messages and ctx.author.top_role > Member.top_role and ctx.author.guild_permissions >= Member.guild_permissions
#bug with user with same permissions maybe and other stuff(seems fixed for right now, leaving note just in case.)
if isinstance(ctx.channel, discord.DMChannel):
return True
def cleanup_permission(ctx):
if isinstance(ctx.channel, discord.TextChannel):
return ctx.author.guild_permissions.manage_messages
if isinstance(ctx.channel, discord.DMChannel):
return True
def mutual_guild_check(ctx, user):
mutual_guilds = set(ctx.author.mutual_guilds)
mutual_guilds2 = set(user.mutual_guilds)
return bool(mutual_guilds.intersection(mutual_guilds2))
async def filter_commands(ctx, command_list):
async def check(cmd, ctx):
try:
return await cmd.can_run(ctx)
except:
return False
    return [cmd for cmd in command_list if await check(cmd, ctx)]
| 27.533333 | 158 | 0.742534 | 0 | 0 | 0 | 0 | 0 | 0 | 222 | 0.179177 | 113 | 0.091203 |
961ceec2cadcdefd7771e879e51fe43976210c30 | 46,670 | py | Python | scripts/mgear/rigbits/eye_rigger.py | stormstudios/rigbits | 37ce738952a3cd31ba8a18b8989f5ea491d03bf0 | ["MIT"] | 1 | 2020-08-11T01:17:19.000Z | 2020-08-11T01:17:19.000Z | scripts/mgear/rigbits/eye_rigger.py | stormstudios/rigbits | 37ce738952a3cd31ba8a18b8989f5ea491d03bf0 | ["MIT"] | null | null | null | scripts/mgear/rigbits/eye_rigger.py | stormstudios/rigbits | 37ce738952a3cd31ba8a18b8989f5ea491d03bf0 | ["MIT"] | null | null | null |
"""Rigbits eye rigger tool"""
import json
import traceback
from functools import partial
import mgear.core.pyqt as gqt
import pymel.core as pm
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
from mgear.core import meshNavigation, curve, applyop, node, primitive, icon
from mgear.core import transform, utils, attribute, skin, string
from mgear.vendor.Qt import QtCore, QtWidgets
from pymel.core import datatypes
from mgear import rigbits
##########################################################
# Eye rig constructor
##########################################################
def eyeRig(eyeMesh,
edgeLoop,
blinkH,
namePrefix,
offset,
rigidLoops,
falloffLoops,
headJnt,
doSkin,
parent=None,
ctlName="ctl",
sideRange=False,
customCorner=False,
intCorner=None,
extCorner=None,
ctlGrp=None,
defGrp=None):
"""Create eyelid and eye rig
Args:
        eyeMesh (str): Name of the eyeball mesh
        edgeLoop (str): Comma-separated list of the eyelid edge loop
        blinkH (float): Blink height, from 0 to 1; position of the blink
            line between the lower and upper lids
        namePrefix (str): Name prefix for all the rig elements
        offset (float): Normal offset for the controls shapes
        rigidLoops (int): Number of edge loops with rigid (100%) skinning
        falloffLoops (int): Number of edge loops for the skinning falloff
        headJnt (str): Name of the head joint used as skinning base
        doSkin (bool): If True, computes the topological autoskin
        parent (None, optional): Parent for the eye rig root
        ctlName (str, optional): Controls name extension
        sideRange (bool, optional): Use the Z axis for the wide calculation
            (i.e. horse and fish side-placed eyes)
        customCorner (bool, optional): Use custom vertex corners
        intCorner (None, optional): Internal corner vertex
        extCorner (None, optional): External corner vertex
        ctlGrp (None, optional): Name of the controllers group/set
        defGrp (None, optional): Name of the deformers group/set
    Returns:
        None
"""
# Checkers
if edgeLoop:
edgeLoopList = [pm.PyNode(e) for e in edgeLoop.split(",")]
else:
pm.displayWarning("Please set the edge loop first")
return
if eyeMesh:
try:
eyeMesh = pm.PyNode(eyeMesh)
except pm.MayaNodeError:
pm.displayWarning("The object %s can not be found in the "
"scene" % (eyeMesh))
return
    else:
        pm.displayWarning("Please set the eye mesh first")
        return
if doSkin:
if not headJnt:
pm.displayWarning("Please set the Head Jnt or unCheck "
"Compute Topological Autoskin")
return
# Initial Data
bboxCenter = meshNavigation.bboxCenter(eyeMesh)
extr_v = meshNavigation.getExtremeVertexFromLoop(edgeLoopList, sideRange)
upPos = extr_v[0]
lowPos = extr_v[1]
inPos = extr_v[2]
outPos = extr_v[3]
edgeList = extr_v[4]
vertexList = extr_v[5]
# Detect the side L or R from the x value
if inPos.getPosition(space='world')[0] < 0.0:
side = "R"
inPos = extr_v[3]
outPos = extr_v[2]
normalPos = outPos
npw = normalPos.getPosition(space='world')
normalVec = npw - bboxCenter
else:
side = "L"
normalPos = outPos
npw = normalPos.getPosition(space='world')
normalVec = bboxCenter - npw
# Manual Vertex corners
if customCorner:
if intCorner:
try:
if side == "R":
inPos = pm.PyNode(extCorner)
else:
inPos = pm.PyNode(intCorner)
except pm.MayaNodeError:
pm.displayWarning("%s can not be found" % intCorner)
return
else:
pm.displayWarning("Please set the internal eyelid corner")
return
if extCorner:
try:
normalPos = pm.PyNode(extCorner)
npw = normalPos.getPosition(space='world')
if side == "R":
outPos = pm.PyNode(intCorner)
normalVec = npw - bboxCenter
else:
outPos = pm.PyNode(extCorner)
normalVec = bboxCenter - npw
except pm.MayaNodeError:
pm.displayWarning("%s can not be found" % extCorner)
return
else:
pm.displayWarning("Please set the external eyelid corner")
return
# Check if we have prefix:
if namePrefix:
namePrefix = string.removeInvalidCharacter(namePrefix)
else:
pm.displayWarning("Prefix is needed")
return
def setName(name, ind=None):
namesList = [namePrefix, side, name]
if ind is not None:
namesList[1] = side + str(ind)
name = "_".join(namesList)
return name
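    # e.g. with namePrefix="eye" and side "L": setName("root") -> "eye_L_root",
    # setName("upEyelid_jnt", 3) -> "eye_L3_upEyelid_jnt"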
if pm.ls(setName("root")):
pm.displayWarning("The object %s already exist in the scene. Please "
"choose another name prefix" % setName("root"))
return
# Eye root
eye_root = primitive.addTransform(None, setName("root"))
eyeCrv_root = primitive.addTransform(eye_root, setName("crvs"))
# Eyelid Main crvs
try:
upEyelid = meshNavigation.edgeRangeInLoopFromMid(
edgeList, upPos, inPos, outPos)
upCrv = curve.createCurveFromOrderedEdges(
upEyelid, inPos, setName("upperEyelid"), parent=eyeCrv_root)
upCrv_ctl = curve.createCurveFromOrderedEdges(
upEyelid, inPos, setName("upCtl_crv"), parent=eyeCrv_root)
pm.rebuildCurve(upCrv_ctl, s=2, rt=0, rpo=True, ch=False)
lowEyelid = meshNavigation.edgeRangeInLoopFromMid(
edgeList, lowPos, inPos, outPos)
lowCrv = curve.createCurveFromOrderedEdges(
lowEyelid, inPos, setName("lowerEyelid"), parent=eyeCrv_root)
lowCrv_ctl = curve.createCurveFromOrderedEdges(
lowEyelid,
inPos,
setName("lowCtl_crv"),
parent=eyeCrv_root)
pm.rebuildCurve(lowCrv_ctl, s=2, rt=0, rpo=True, ch=False)
except UnboundLocalError:
if customCorner:
pm.displayWarning("This error is maybe caused because the custom "
"Corner vertex is not part of the edge loop")
pm.displayError(traceback.format_exc())
return
upBlink = curve.createCurveFromCurve(
upCrv, setName("upblink_crv"), nbPoints=30, parent=eyeCrv_root)
lowBlink = curve.createCurveFromCurve(
lowCrv, setName("lowBlink_crv"), nbPoints=30, parent=eyeCrv_root)
upTarget = curve.createCurveFromCurve(
upCrv, setName("upblink_target"), nbPoints=30, parent=eyeCrv_root)
lowTarget = curve.createCurveFromCurve(
lowCrv, setName("lowBlink_target"), nbPoints=30, parent=eyeCrv_root)
midTarget = curve.createCurveFromCurve(
lowCrv, setName("midBlink_target"), nbPoints=30, parent=eyeCrv_root)
rigCrvs = [upCrv,
lowCrv,
upCrv_ctl,
lowCrv_ctl,
upBlink,
lowBlink,
upTarget,
lowTarget,
midTarget]
for crv in rigCrvs:
crv.attr("visibility").set(False)
# localBBOX
localBBox = eyeMesh.getBoundingBox(invisible=True, space='world')
wRadius = abs((localBBox[0][0] - localBBox[1][0]))
dRadius = abs((localBBox[0][1] - localBBox[1][1]) / 1.7)
# Groups
if not ctlGrp:
ctlGrp = "rig_controllers_grp"
try:
ctlSet = pm.PyNode(ctlGrp)
except pm.MayaNodeError:
pm.sets(n=ctlGrp, em=True)
ctlSet = pm.PyNode(ctlGrp)
if not defGrp:
defGrp = "rig_deformers_grp"
try:
defset = pm.PyNode(defGrp)
except pm.MayaNodeError:
pm.sets(n=defGrp, em=True)
defset = pm.PyNode(defGrp)
# Calculate center looking at
averagePosition = ((upPos.getPosition(space='world')
+ lowPos.getPosition(space='world')
+ inPos.getPosition(space='world')
+ outPos.getPosition(space='world'))
/ 4)
    # both sides currently resolve to the same settings
    negate = False
    over_offset = dRadius
    axis = "z-x"
t = transform.getTransformLookingAt(
bboxCenter,
averagePosition,
normalVec,
axis=axis,
negate=negate)
over_npo = primitive.addTransform(
eye_root, setName("center_lookatRoot"), t)
over_ctl = icon.create(over_npo,
setName("over_%s" % ctlName),
t,
icon="square",
w=wRadius,
d=dRadius,
ro=datatypes.Vector(1.57079633, 0, 0),
po=datatypes.Vector(0, 0, over_offset),
color=4)
node.add_controller_tag(over_ctl)
attribute.add_mirror_config_channels(over_ctl)
attribute.setKeyableAttributes(
over_ctl,
params=["tx", "ty", "tz", "ro", "rx", "ry", "rz", "sx", "sy", "sz"])
if side == "R":
over_npo.attr("rx").set(over_npo.attr("rx").get() * -1)
over_npo.attr("ry").set(over_npo.attr("ry").get() + 180)
over_npo.attr("sz").set(-1)
    if not (len(ctlName.split("_")) == 2 and ctlName.split("_")[-1] == "ghost"):
        pm.sets(ctlSet, add=over_ctl)
center_lookat = primitive.addTransform(
over_ctl, setName("center_lookat"), t)
# Tracking
# Eye aim control
t_arrow = transform.getTransformLookingAt(bboxCenter,
averagePosition,
upPos.getPosition(space='world'),
axis="zy", negate=False)
radius = abs((localBBox[0][0] - localBBox[1][0]) / 1.7)
arrow_npo = primitive.addTransform(eye_root, setName("aim_npo"), t_arrow)
arrow_ctl = icon.create(arrow_npo,
setName("aim_%s" % ctlName),
t_arrow,
icon="arrow",
w=1,
po=datatypes.Vector(0, 0, radius),
color=4)
    if not (len(ctlName.split("_")) == 2 and ctlName.split("_")[-1] == "ghost"):
        pm.sets(ctlSet, add=arrow_ctl)
attribute.setKeyableAttributes(arrow_ctl, params=["rx", "ry", "rz"])
# tracking custom trigger
if side == "R":
tt = t_arrow
else:
tt = t
aimTrigger_root = primitive.addTransform(
center_lookat, setName("aimTrigger_root"), tt)
aimTrigger_lvl = primitive.addTransform(
aimTrigger_root, setName("aimTrigger_lvl"), tt)
aimTrigger_lvl.attr("tz").set(1.0)
aimTrigger_ref = primitive.addTransform(
aimTrigger_lvl, setName("aimTrigger_ref"), tt)
aimTrigger_ref.attr("tz").set(0.0)
# connect trigger with arrow_ctl
pm.parentConstraint(arrow_ctl, aimTrigger_ref, mo=True)
# Controls lists
upControls = []
trackLvl = []
# upper eyelid controls
upperCtlNames = ["inCorner", "upInMid", "upMid", "upOutMid", "outCorner"]
cvs = upCrv_ctl.getCVs(space="world")
if side == "R" and not sideRange:
# if side == "R":
cvs = [cv for cv in reversed(cvs)]
for i, cv in enumerate(cvs):
if utils.is_odd(i):
color = 14
wd = .5
icon_shape = "circle"
params = ["tx", "ty", "tz"]
else:
color = 4
wd = .7
icon_shape = "square"
params = ["tx",
"ty",
"tz",
"ro",
"rx",
"ry",
"rz",
"sx",
"sy",
"sz"]
t = transform.setMatrixPosition(t, cvs[i])
npo = primitive.addTransform(center_lookat,
setName("%s_npo" % upperCtlNames[i]),
t)
npoBase = npo
if i == 2:
            # we add an extra level to input the tracking offset values
npo = primitive.addTransform(npo,
setName("%s_trk" % upperCtlNames[i]),
t)
trackLvl.append(npo)
ctl = icon.create(npo,
setName("%s_%s" % (upperCtlNames[i], ctlName)),
t,
icon=icon_shape,
w=wd,
d=wd,
ro=datatypes.Vector(1.57079633, 0, 0),
po=datatypes.Vector(0, 0, offset),
color=color)
attribute.add_mirror_config_channels(ctl)
node.add_controller_tag(ctl, over_ctl)
upControls.append(ctl)
        if not (len(ctlName.split("_")) == 2
                and ctlName.split("_")[-1] == "ghost"):
            pm.sets(ctlSet, add=ctl)
attribute.setKeyableAttributes(ctl, params)
if side == "R":
npoBase.attr("ry").set(180)
npoBase.attr("sz").set(-1)
    # adding parent average constraints to odd controls
for i, ctl in enumerate(upControls):
if utils.is_odd(i):
pm.parentConstraint(upControls[i - 1],
upControls[i + 1],
ctl.getParent(),
mo=True)
# lower eyelid controls
lowControls = [upControls[0]]
lowerCtlNames = ["inCorner",
"lowInMid",
"lowMid",
"lowOutMid",
"outCorner"]
cvs = lowCrv_ctl.getCVs(space="world")
if side == "R" and not sideRange:
cvs = [cv for cv in reversed(cvs)]
for i, cv in enumerate(cvs):
        # we skip the first and last points since they are already in the
        # upper eyelid
if i in [0, 4]:
continue
if utils.is_odd(i):
color = 14
wd = .5
icon_shape = "circle"
params = ["tx", "ty", "tz"]
else:
color = 4
wd = .7
icon_shape = "square"
params = ["tx",
"ty",
"tz",
"ro",
"rx",
"ry",
"rz",
"sx",
"sy",
"sz"]
t = transform.setMatrixPosition(t, cvs[i])
npo = primitive.addTransform(center_lookat,
setName("%s_npo" % lowerCtlNames[i]),
t)
npoBase = npo
if i == 2:
            # we add an extra level to input the tracking offset values
npo = primitive.addTransform(npo,
setName("%s_trk" % lowerCtlNames[i]),
t)
trackLvl.append(npo)
ctl = icon.create(npo,
setName("%s_%s" % (lowerCtlNames[i], ctlName)),
t,
icon=icon_shape,
w=wd,
d=wd,
ro=datatypes.Vector(1.57079633, 0, 0),
po=datatypes.Vector(0, 0, offset),
color=color)
attribute.add_mirror_config_channels(ctl)
lowControls.append(ctl)
        if not (len(ctlName.split("_")) == 2
                and ctlName.split("_")[-1] == "ghost"):
            pm.sets(ctlSet, add=ctl)
attribute.setKeyableAttributes(ctl, params)
        # mirror behaviour on R side controls
if side == "R":
npoBase.attr("ry").set(180)
npoBase.attr("sz").set(-1)
for lctl in reversed(lowControls[1:]):
node.add_controller_tag(lctl, over_ctl)
lowControls.append(upControls[-1])
    # adding parent average constraints to odd controls
for i, ctl in enumerate(lowControls):
if utils.is_odd(i):
pm.parentConstraint(lowControls[i - 1],
lowControls[i + 1],
ctl.getParent(),
mo=True)
# Connecting control crvs with controls
applyop.gear_curvecns_op(upCrv_ctl, upControls)
applyop.gear_curvecns_op(lowCrv_ctl, lowControls)
# adding wires
w1 = pm.wire(upCrv, w=upBlink)[0]
w2 = pm.wire(lowCrv, w=lowBlink)[0]
w3 = pm.wire(upTarget, w=upCrv_ctl)[0]
w4 = pm.wire(lowTarget, w=lowCrv_ctl)[0]
# adding blendshapes
bs_upBlink = pm.blendShape(upTarget,
midTarget,
upBlink,
n="blendShapeUpBlink")
bs_lowBlink = pm.blendShape(lowTarget,
midTarget,
lowBlink,
n="blendShapeLowBlink")
bs_mid = pm.blendShape(lowTarget,
upTarget,
midTarget,
n="blendShapeLowBlink")
# setting blendshape reverse connections
rev_node = pm.createNode("reverse")
pm.connectAttr(bs_upBlink[0].attr(midTarget.name()), rev_node + ".inputX")
pm.connectAttr(rev_node + ".outputX", bs_upBlink[0].attr(upTarget.name()))
rev_node = pm.createNode("reverse")
pm.connectAttr(bs_lowBlink[0].attr(midTarget.name()), rev_node + ".inputX")
pm.connectAttr(rev_node + ".outputX",
bs_lowBlink[0].attr(lowTarget.name()))
rev_node = pm.createNode("reverse")
pm.connectAttr(bs_mid[0].attr(upTarget.name()), rev_node + ".inputX")
pm.connectAttr(rev_node + ".outputX", bs_mid[0].attr(lowTarget.name()))
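    # The reverse nodes keep each blendshape's two target weights
    # complementary, so a single value slides each blink curve between its own
    # lid target and the shared mid (blink line) target.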
# setting default values
bs_mid[0].attr(upTarget.name()).set(blinkH)
# joints root
jnt_root = primitive.addTransformFromPos(
eye_root, setName("joints"), pos=bboxCenter)
# head joint
if headJnt:
try:
headJnt = pm.PyNode(headJnt)
jnt_base = headJnt
except pm.MayaNodeError:
pm.displayWarning(
"Aborted can not find %s " % headJnt)
return
else:
# Eye root
jnt_base = jnt_root
eyeTargets_root = primitive.addTransform(eye_root,
setName("targets"))
eyeCenter_jnt = rigbits.addJnt(arrow_ctl,
jnt_base,
grp=defset,
jntName=setName("center_jnt"))
# Upper Eyelid joints ##################################################
cvs = upCrv.getCVs(space="world")
upCrv_info = node.createCurveInfoNode(upCrv)
# aim constrain targets and joints
upperEyelid_aimTargets = []
upperEyelid_jnt = []
upperEyelid_jntRoot = []
for i, cv in enumerate(cvs):
# aim targets
trn = primitive.addTransformFromPos(eyeTargets_root,
setName("upEyelid_aimTarget", i),
pos=cv)
upperEyelid_aimTargets.append(trn)
# connecting positions with crv
pm.connectAttr(upCrv_info + ".controlPoints[%s]" % str(i),
trn.attr("translate"))
# joints
jntRoot = primitive.addJointFromPos(jnt_root,
setName("upEyelid_jnt_base", i),
pos=bboxCenter)
jntRoot.attr("radius").set(.08)
jntRoot.attr("visibility").set(False)
upperEyelid_jntRoot.append(jntRoot)
applyop.aimCns(jntRoot, trn, axis="zy", wupObject=jnt_root)
jnt_ref = primitive.addJointFromPos(jntRoot,
setName("upEyelid_jnt_ref", i),
pos=cv)
jnt_ref.attr("radius").set(.08)
jnt_ref.attr("visibility").set(False)
jnt = rigbits.addJnt(jnt_ref,
jnt_base,
grp=defset,
jntName=setName("upEyelid_jnt", i))
upperEyelid_jnt.append(jnt)
# Lower Eyelid joints ##################################################
cvs = lowCrv.getCVs(space="world")
lowCrv_info = node.createCurveInfoNode(lowCrv)
# aim constrain targets and joints
lowerEyelid_aimTargets = []
lowerEyelid_jnt = []
lowerEyelid_jntRoot = []
for i, cv in enumerate(cvs):
if i in [0, len(cvs) - 1]:
continue
# aim targets
trn = primitive.addTransformFromPos(eyeTargets_root,
setName("lowEyelid_aimTarget", i),
pos=cv)
lowerEyelid_aimTargets.append(trn)
# connecting positions with crv
pm.connectAttr(lowCrv_info + ".controlPoints[%s]" % str(i),
trn.attr("translate"))
# joints
jntRoot = primitive.addJointFromPos(jnt_root,
setName("lowEyelid_base", i),
pos=bboxCenter)
jntRoot.attr("radius").set(.08)
jntRoot.attr("visibility").set(False)
lowerEyelid_jntRoot.append(jntRoot)
applyop.aimCns(jntRoot, trn, axis="zy", wupObject=jnt_root)
jnt_ref = primitive.addJointFromPos(jntRoot,
setName("lowEyelid_jnt_ref", i),
pos=cv)
jnt_ref.attr("radius").set(.08)
jnt_ref.attr("visibility").set(False)
jnt = rigbits.addJnt(jnt_ref,
jnt_base,
grp=defset,
jntName=setName("lowEyelid_jnt", i))
lowerEyelid_jnt.append(jnt)
# Channels
# Adding and connecting attributes for the blink
up_ctl = upControls[2]
blink_att = attribute.addAttribute(
over_ctl, "blink", "float", 0, minValue=0, maxValue=1)
blinkMult_att = attribute.addAttribute(
over_ctl, "blinkMult", "float", 1, minValue=1, maxValue=2)
midBlinkH_att = attribute.addAttribute(
over_ctl, "blinkHeight", "float", blinkH, minValue=0, maxValue=1)
mult_node = node.createMulNode(blink_att, blinkMult_att)
pm.connectAttr(mult_node + ".outputX",
bs_upBlink[0].attr(midTarget.name()))
pm.connectAttr(mult_node + ".outputX",
bs_lowBlink[0].attr(midTarget.name()))
pm.connectAttr(midBlinkH_att, bs_mid[0].attr(upTarget.name()))
low_ctl = lowControls[2]
# Adding channels for eye tracking
upVTracking_att = attribute.addAttribute(up_ctl,
"vTracking",
"float",
.02,
minValue=0,
maxValue=1,
keyable=False,
channelBox=True)
upHTracking_att = attribute.addAttribute(up_ctl,
"hTracking",
"float",
.01,
minValue=0,
maxValue=1,
keyable=False,
channelBox=True)
lowVTracking_att = attribute.addAttribute(low_ctl,
"vTracking",
"float",
.01,
minValue=0,
maxValue=1,
keyable=False,
channelBox=True)
lowHTracking_att = attribute.addAttribute(low_ctl,
"hTracking",
"float",
.01,
minValue=0,
maxValue=1,
keyable=False,
channelBox=True)
mult_node = node.createMulNode(upVTracking_att, aimTrigger_ref.attr("ty"))
pm.connectAttr(mult_node + ".outputX", trackLvl[0].attr("ty"))
mult_node = node.createMulNode(upHTracking_att, aimTrigger_ref.attr("tx"))
pm.connectAttr(mult_node + ".outputX", trackLvl[0].attr("tx"))
mult_node = node.createMulNode(lowVTracking_att, aimTrigger_ref.attr("ty"))
pm.connectAttr(mult_node + ".outputX", trackLvl[1].attr("ty"))
mult_node = node.createMulNode(lowHTracking_att, aimTrigger_ref.attr("tx"))
pm.connectAttr(mult_node + ".outputX", trackLvl[1].attr("tx"))
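    # Eyelid tracking: the aim trigger's local translation (driven by the aim
    # control through the parent constraint above) is scaled by the per-lid
    # tracking attributes and fed into the mid controls' "trk" offset
    # transforms.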
# Tension on blink
node.createReverseNode(blink_att, w1.scale[0])
node.createReverseNode(blink_att, w3.scale[0])
node.createReverseNode(blink_att, w2.scale[0])
node.createReverseNode(blink_att, w4.scale[0])
###########################################
# Reparenting
###########################################
if parent:
try:
if isinstance(parent, basestring):
parent = pm.PyNode(parent)
parent.addChild(eye_root)
except pm.MayaNodeError:
pm.displayWarning("The eye rig can not be parent to: %s. Maybe "
"this object doesn't exist." % parent)
###########################################
# Auto Skinning
###########################################
if doSkin:
# eyelid vertex rows
totalLoops = rigidLoops + falloffLoops
vertexLoopList = meshNavigation.getConcentricVertexLoop(vertexList,
totalLoops)
vertexRowList = meshNavigation.getVertexRowsFromLoops(vertexLoopList)
# we set the first value 100% for the first initial loop
skinPercList = [1.0]
# we expect to have a regular grid topology
for r in range(rigidLoops):
for rr in range(2):
skinPercList.append(1.0)
increment = 1.0 / float(falloffLoops)
# we invert to smooth out from 100 to 0
inv = 1.0 - increment
for r in range(falloffLoops):
for rr in range(2):
if inv < 0.0:
inv = 0.0
skinPercList.append(inv)
inv -= increment
        # this loop adds extra 0.0 entries to avoid index errors
for r in range(10):
for rr in range(2):
skinPercList.append(0.0)
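        # e.g. rigidLoops=2, falloffLoops=4 yields:
        # [1.0] * 5 + [0.75, 0.5, 0.25, 0.0, 0.0, 0.0, 0.0, 0.0] + [0.0] * 20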
# base skin
geo = pm.listRelatives(edgeLoopList[0], parent=True)[0]
# Check if the object has a skinCluster
objName = pm.listRelatives(geo, parent=True)[0]
skinCluster = skin.getSkinCluster(objName)
if not skinCluster:
skinCluster = pm.skinCluster(headJnt,
geo,
tsb=True,
nw=2,
n='skinClsEyelid')
eyelidJoints = upperEyelid_jnt + lowerEyelid_jnt
pm.progressWindow(title='Auto skinning process',
progress=0,
max=len(eyelidJoints))
firstBoundary = False
for jnt in eyelidJoints:
pm.progressWindow(e=True, step=1, status='\nSkinning %s' % jnt)
skinCluster.addInfluence(jnt, weight=0)
v = meshNavigation.getClosestVertexFromTransform(geo, jnt)
for row in vertexRowList:
if v in row:
it = 0 # iterator
inc = 1 # increment
for i, rv in enumerate(row):
try:
perc = skinPercList[it]
t_val = [(jnt, perc), (headJnt, 1.0 - perc)]
pm.skinPercent(skinCluster,
rv,
transformValue=t_val)
if rv.isOnBoundary():
# we need to compare with the first boundary
# to check if the row have inverted direction
# and offset the value
if not firstBoundary:
firstBoundary = True
firstBoundaryValue = it
else:
if it < firstBoundaryValue:
it -= 1
elif it > firstBoundaryValue:
it += 1
inc = 2
except IndexError:
continue
it = it + inc
pm.progressWindow(e=True, endProgress=True)
# Eye Mesh skinning
skinCluster = skin.getSkinCluster(eyeMesh)
if not skinCluster:
skinCluster = pm.skinCluster(eyeCenter_jnt,
eyeMesh,
tsb=True,
nw=1,
n='skinClsEye')
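# Minimal usage sketch (hypothetical scene object names; the edge loop string
# lists every edge of the eyelid loop, abbreviated here):
#
#   eyeRig(eyeMesh="eye_L_geo",
#          edgeLoop="face_geo.e[100],face_geo.e[101],face_geo.e[102]",
#          blinkH=0.2,
#          namePrefix="eye",
#          offset=0.05,
#          rigidLoops=2,
#          falloffLoops=4,
#          headJnt="head_jnt",
#          doSkin=True)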
##########################################################
# Eye Rig UI
##########################################################
class eyeRigUI(MayaQWidgetDockableMixin, QtWidgets.QDialog):
valueChanged = QtCore.Signal(int)
def __init__(self, parent=None):
super(eyeRigUI, self).__init__(parent)
self.create()
def create(self):
self.setWindowTitle("Rigbits: Eye Rigger")
self.setWindowFlags(QtCore.Qt.Window)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose, 1)
self.create_controls()
self.create_layout()
self.create_connections()
def create_controls(self):
# Geometry input controls
self.geometryInput_group = QtWidgets.QGroupBox("Geometry Input")
self.eyeball_label = QtWidgets.QLabel("Eyeball:")
self.eyeball_lineEdit = QtWidgets.QLineEdit()
self.eyeball_button = QtWidgets.QPushButton("<<")
self.edgeloop_label = QtWidgets.QLabel("Edge Loop:")
self.edgeloop_lineEdit = QtWidgets.QLineEdit()
self.edgeloop_button = QtWidgets.QPushButton("<<")
# Manual corners
self.manualCorners_group = QtWidgets.QGroupBox("Custom Eye Corners")
self.manualCorners_check = QtWidgets.QCheckBox(
"Set Manual Vertex Corners")
self.manualCorners_check.setChecked(False)
self.intCorner_label = QtWidgets.QLabel("Internal Corner")
self.intCorner_lineEdit = QtWidgets.QLineEdit()
self.intCorner_button = QtWidgets.QPushButton("<<")
self.extCorner_label = QtWidgets.QLabel("External Corner")
self.extCorner_lineEdit = QtWidgets.QLineEdit()
self.extCorner_button = QtWidgets.QPushButton("<<")
# Blink height slider
self.blinkHeight_group = QtWidgets.QGroupBox("Blink Height")
self.blinkHeight_value = QtWidgets.QSpinBox()
self.blinkHeight_value.setRange(0, 100)
self.blinkHeight_value.setSingleStep(10)
self.blinkHeight_value.setValue(20)
self.blinkHeight_slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.blinkHeight_slider.setRange(0, 100)
self.blinkHeight_slider.setSingleStep(
self.blinkHeight_slider.maximum() // 10)  # Qt slider steps are ints
self.blinkHeight_slider.setValue(20)
# Name prefix
self.prefix_group = QtWidgets.QGroupBox("Name Prefix")
self.prefix_lineEdit = QtWidgets.QLineEdit()
self.prefix_lineEdit.setText("eye")
self.control_group = QtWidgets.QGroupBox("Control Name Extension")
self.control_lineEdit = QtWidgets.QLineEdit()
self.control_lineEdit.setText("ctl")
# joints
self.joints_group = QtWidgets.QGroupBox("Joints")
self.headJnt_label = QtWidgets.QLabel("Head or Eye area Joint:")
self.headJnt_lineEdit = QtWidgets.QLineEdit()
self.headJnt_button = QtWidgets.QPushButton("<<")
# Topological Autoskin
self.topoSkin_group = QtWidgets.QGroupBox("Skin")
self.rigidLoops_label = QtWidgets.QLabel("Rigid Loops:")
self.rigidLoops_value = QtWidgets.QSpinBox()
self.rigidLoops_value.setRange(0, 30)
self.rigidLoops_value.setSingleStep(1)
self.rigidLoops_value.setValue(2)
self.falloffLoops_label = QtWidgets.QLabel("Falloff Loops:")
self.falloffLoops_value = QtWidgets.QSpinBox()
self.falloffLoops_value.setRange(0, 30)
self.falloffLoops_value.setSingleStep(1)
self.falloffLoops_value.setValue(4)
self.topSkin_check = QtWidgets.QCheckBox(
'Compute Topological Autoskin')
self.topSkin_check.setChecked(True)
# Options
self.options_group = QtWidgets.QGroupBox("Options")
self.parent_label = QtWidgets.QLabel("Rig Parent:")
self.parent_lineEdit = QtWidgets.QLineEdit()
self.parent_button = QtWidgets.QPushButton("<<")
self.ctlShapeOffset_label = QtWidgets.QLabel("Controls Offset:")
self.ctlShapeOffset_value = QtWidgets.QDoubleSpinBox()
self.ctlShapeOffset_value.setRange(0, 10)
self.ctlShapeOffset_value.setSingleStep(.05)
self.ctlShapeOffset_value.setValue(.05)
self.sideRange_check = QtWidgets.QCheckBox(
"Use Z axis for wide calculation (i.e: Horse and fish side eyes)")
self.sideRange_check.setChecked(False)
self.ctlGrp_label = QtWidgets.QLabel("Controls Group:")
self.ctlGrp_lineEdit = QtWidgets.QLineEdit()
self.ctlGrp_button = QtWidgets.QPushButton("<<")
self.deformersGrp_label = QtWidgets.QLabel("Deformers Group:")
self.deformersGrp_lineEdit = QtWidgets.QLineEdit()
self.deformersGrp_button = QtWidgets.QPushButton("<<")
# Build button
self.build_button = QtWidgets.QPushButton("Build Eye Rig")
self.export_button = QtWidgets.QPushButton("Export Config to json")
def create_layout(self):
# Eyeball Layout
eyeball_layout = QtWidgets.QHBoxLayout()
eyeball_layout.setContentsMargins(1, 1, 1, 1)
eyeball_layout.addWidget(self.eyeball_label)
eyeball_layout.addWidget(self.eyeball_lineEdit)
eyeball_layout.addWidget(self.eyeball_button)
# Edge Loop Layout
edgeloop_layout = QtWidgets.QHBoxLayout()
edgeloop_layout.setContentsMargins(1, 1, 1, 1)
edgeloop_layout.addWidget(self.edgeloop_label)
edgeloop_layout.addWidget(self.edgeloop_lineEdit)
edgeloop_layout.addWidget(self.edgeloop_button)
# Geometry Input Layout
geometryInput_layout = QtWidgets.QVBoxLayout()
geometryInput_layout.setContentsMargins(6, 1, 6, 2)
geometryInput_layout.addLayout(eyeball_layout)
geometryInput_layout.addLayout(edgeloop_layout)
self.geometryInput_group.setLayout(geometryInput_layout)
# Blink Height Layout
blinkHeight_layout = QtWidgets.QHBoxLayout()
blinkHeight_layout.setContentsMargins(1, 1, 1, 1)
blinkHeight_layout.addWidget(self.blinkHeight_value)
blinkHeight_layout.addWidget(self.blinkHeight_slider)
self.blinkHeight_group.setLayout(blinkHeight_layout)
# joints Layout
headJnt_layout = QtWidgets.QHBoxLayout()
headJnt_layout.addWidget(self.headJnt_label)
headJnt_layout.addWidget(self.headJnt_lineEdit)
headJnt_layout.addWidget(self.headJnt_button)
joints_layout = QtWidgets.QVBoxLayout()
joints_layout.setContentsMargins(6, 4, 6, 4)
joints_layout.addLayout(headJnt_layout)
self.joints_group.setLayout(joints_layout)
# topological autoskin Layout
skinLoops_layout = QtWidgets.QGridLayout()
skinLoops_layout.addWidget(self.rigidLoops_label, 0, 0)
skinLoops_layout.addWidget(self.falloffLoops_label, 0, 1)
skinLoops_layout.addWidget(self.rigidLoops_value, 1, 0)
skinLoops_layout.addWidget(self.falloffLoops_value, 1, 1)
topoSkin_layout = QtWidgets.QVBoxLayout()
topoSkin_layout.setContentsMargins(6, 4, 6, 4)
topoSkin_layout.addWidget(self.topSkin_check,
alignment=QtCore.Qt.Alignment())
topoSkin_layout.addLayout(skinLoops_layout)
self.topoSkin_group.setLayout(topoSkin_layout)
# Manual Corners Layout
intCorner_layout = QtWidgets.QHBoxLayout()
intCorner_layout.addWidget(self.intCorner_label)
intCorner_layout.addWidget(self.intCorner_lineEdit)
intCorner_layout.addWidget(self.intCorner_button)
extCorner_layout = QtWidgets.QHBoxLayout()
extCorner_layout.addWidget(self.extCorner_label)
extCorner_layout.addWidget(self.extCorner_lineEdit)
extCorner_layout.addWidget(self.extCorner_button)
manualCorners_layout = QtWidgets.QVBoxLayout()
manualCorners_layout.setContentsMargins(6, 4, 6, 4)
manualCorners_layout.addWidget(self.manualCorners_check,
alignment=QtCore.Qt.Alignment())
manualCorners_layout.addLayout(intCorner_layout)
manualCorners_layout.addLayout(extCorner_layout)
self.manualCorners_group.setLayout(manualCorners_layout)
# Options Layout
parent_layout = QtWidgets.QHBoxLayout()
parent_layout.addWidget(self.parent_label)
parent_layout.addWidget(self.parent_lineEdit)
parent_layout.addWidget(self.parent_button)
offset_layout = QtWidgets.QHBoxLayout()
offset_layout.addWidget(self.ctlShapeOffset_label)
offset_layout.addWidget(self.ctlShapeOffset_value)
ctlGrp_layout = QtWidgets.QHBoxLayout()
ctlGrp_layout.addWidget(self.ctlGrp_label)
ctlGrp_layout.addWidget(self.ctlGrp_lineEdit)
ctlGrp_layout.addWidget(self.ctlGrp_button)
deformersGrp_layout = QtWidgets.QHBoxLayout()
deformersGrp_layout.addWidget(self.deformersGrp_label)
deformersGrp_layout.addWidget(self.deformersGrp_lineEdit)
deformersGrp_layout.addWidget(self.deformersGrp_button)
options_layout = QtWidgets.QVBoxLayout()
options_layout.setContentsMargins(6, 1, 6, 2)
options_layout.addLayout(parent_layout)
options_layout.addLayout(offset_layout)
options_layout.addWidget(self.blinkHeight_group)
options_layout.addWidget(self.sideRange_check)
options_layout.addLayout(ctlGrp_layout)
options_layout.addLayout(deformersGrp_layout)
self.options_group.setLayout(options_layout)
# Name prefix
namePrefix_layout = QtWidgets.QVBoxLayout()
namePrefix_layout.setContentsMargins(1, 1, 1, 1)
namePrefix_layout.addWidget(self.prefix_lineEdit)
self.prefix_group.setLayout(namePrefix_layout)
# Control name extension
controlExtension_layout = QtWidgets.QVBoxLayout()
controlExtension_layout.setContentsMargins(1, 1, 1, 1)
controlExtension_layout.addWidget(self.control_lineEdit)
self.control_group.setLayout(controlExtension_layout)
# Main Layout
main_layout = QtWidgets.QVBoxLayout()
main_layout.setContentsMargins(6, 6, 6, 6)
main_layout.addWidget(self.prefix_group)
main_layout.addWidget(self.control_group)
main_layout.addWidget(self.geometryInput_group)
main_layout.addWidget(self.manualCorners_group)
main_layout.addWidget(self.options_group)
main_layout.addWidget(self.joints_group)
main_layout.addWidget(self.topoSkin_group)
main_layout.addWidget(self.build_button)
main_layout.addWidget(self.export_button)
self.setLayout(main_layout)
def create_connections(self):
self.blinkHeight_value.valueChanged[int].connect(
self.blinkHeight_slider.setValue)
self.blinkHeight_slider.valueChanged[int].connect(
self.blinkHeight_value.setValue)
self.eyeball_button.clicked.connect(partial(self.populate_object,
self.eyeball_lineEdit))
self.parent_button.clicked.connect(partial(self.populate_object,
self.parent_lineEdit))
self.headJnt_button.clicked.connect(partial(self.populate_object,
self.headJnt_lineEdit,
1))
self.edgeloop_button.clicked.connect(self.populate_edgeloop)
self.build_button.clicked.connect(self.buildRig)
self.export_button.clicked.connect(self.exportDict)
self.intCorner_button.clicked.connect(partial(self.populate_element,
self.intCorner_lineEdit,
"vertex"))
self.extCorner_button.clicked.connect(partial(self.populate_element,
self.extCorner_lineEdit,
"vertex"))
self.ctlGrp_button.clicked.connect(partial(self.populate_element,
self.ctlGrp_lineEdit,
"objectSet"))
self.deformersGrp_button.clicked.connect(partial(
self.populate_element, self.deformersGrp_lineEdit, "objectSet"))
# SLOTS ##########################################################
def populate_element(self, lEdit, oType="transform"):
if oType == "joint":
oTypeInst = pm.nodetypes.Joint
elif oType == "vertex":
oTypeInst = pm.MeshVertex
elif oType == "objectSet":
oTypeInst = pm.nodetypes.ObjectSet
else:
oTypeInst = pm.nodetypes.Transform
oSel = pm.selected()
if oSel:
if isinstance(oSel[0], oTypeInst):
lEdit.setText(oSel[0].name())
else:
pm.displayWarning(
"The selected element is not a valid %s" % oType)
else:
pm.displayWarning("Please select first one %s." % oType)
def populate_object(self, lEdit, oType=None):
if oType == 1:
oType = pm.nodetypes.Joint
else:
oType = pm.nodetypes.Transform
oSel = pm.selected()
if oSel:
if isinstance(oSel[0], oType):
lEdit.setText(oSel[0].name())
else:
pm.displayWarning("The selected element is not a valid object")
else:
pm.displayWarning("Please select first the object.")
def populate_edgeloop(self):
oSel = pm.selected(fl=1)
if oSel:
edgeList = ""
separator = ""
for e in oSel:
if isinstance(e, pm.MeshEdge):
if edgeList:
separator = ","
edgeList = edgeList + separator + str(e)
if not edgeList:
pm.displayWarning("Please select first the eyelid edge loop.")
elif len(edgeList.split(",")) < 4:
pm.displayWarning("The minimun edge count is 4")
else:
self.edgeloop_lineEdit.setText(edgeList)
else:
pm.displayWarning("Please select first the eyelid edge loop.")
def populateDict(self):
self.buildDict = {}
blinkH = float(self.blinkHeight_value.value()) / 100.0
self.buildDict["eye"] = [self.eyeball_lineEdit.text(),
self.edgeloop_lineEdit.text(),
blinkH,
self.prefix_lineEdit.text(),
self.ctlShapeOffset_value.value(),
self.rigidLoops_value.value(),
self.falloffLoops_value.value(),
self.headJnt_lineEdit.text(),
self.topSkin_check.isChecked(),
self.parent_lineEdit.text(),
self.control_lineEdit.text(),
self.sideRange_check.isChecked(),
self.manualCorners_check.isChecked(),
self.intCorner_lineEdit.text(),
self.extCorner_lineEdit.text(),
self.ctlGrp_lineEdit.text(),
self.deformersGrp_lineEdit.text()]
def buildRig(self):
self.populateDict()
eyeRig(*self.buildDict["eye"])
def exportDict(self):
self.populateDict()
data_string = json.dumps(self.buildDict, indent=4, sort_keys=True)
filePath = pm.fileDialog2(
dialogStyle=2,
fileMode=0,
fileFilter='Eyes Rigger Configuration .eyes (*%s)' % ".eyes")
if not filePath:
return
if not isinstance(filePath, str):  # was basestring (Python 2 only)
filePath = filePath[0]
with open(filePath, 'w') as f:
    f.write(data_string)
# build eyes from a json file:
def eyesFromfile(path):
with open(path) as config_file:
    buildDict = json.load(config_file)
eyeRig(*buildDict["eye"])
def showEyeRigUI(*args):
gqt.showDialog(eyeRigUI)
if __name__ == "__main__":
showEyeRigUI()
# path = "C:\\Users\\miquel\\Desktop\\eye_L.eyes"
# eyesFromfile(path)
# path = "C:\\Users\\miquel\\Desktop\\eye_R.eyes"
# eyesFromfile(path)
| 38.698176 | 79 | 0.541526 | 16,004 | 0.342918 | 0 | 0 | 0 | 0 | 0 | 0 | 6,718 | 0.143947 |
961d4ae687b3642af37cc358422318fe31255362 | 3,260 | py | Python | Python/expert/interact_with_linux/solution.py | fpichl/ProgrammingTasks | da494022455dd77de1c99a6c6e4962616e9764e6 | ["Unlicense"] | 2 | 2018-10-18T16:35:56.000Z | 2019-03-07T06:16:18.000Z | Python/expert/interact_with_linux/solution.py | fpichl/ProgrammingTasks | da494022455dd77de1c99a6c6e4962616e9764e6 | ["Unlicense"] | 2 | 2019-11-13T09:25:54.000Z | 2021-08-19T08:23:32.000Z | Python/expert/interact_with_linux/solution.py | fpichl/ProgrammingTasks | da494022455dd77de1c99a6c6e4962616e9764e6 | ["Unlicense"] | 3 | 2019-05-22T12:20:05.000Z | 2019-08-30T12:57:56.000Z |
#!/usr/bin/env python3
import os
import shutil
import sys
import pathlib
import logging
# I will NEVER EVER use subprocess again
# At least not for something like Popen
try:
from sh import wget
except Exception:
print('[!] Just install sh right now! (pip install --user sh)')
sys.exit(0)
# Dumb Python2 support
if sys.version_info[0] == 2:
input = raw_input
# Path where this python script is located when it's run
curr_dir = pathlib.Path(os.path.dirname(os.path.abspath(__file__)))
# The URL
url = input('[$] Url(none for ema.perfact.de): ')
url = url if url else 'ema.perfact.de'
print('[*] Url: {}\n'.format(url))
# Get name of the directory where the whole page should be saved
dir_name = input('[$] Directory name for the page(none for "1337"): ')
dir_name = dir_name if dir_name else '1337'
page_dir = curr_dir / dir_name
if page_dir.is_dir():
print('[!] {} is already a directory and will be overwritten!'.format(page_dir))
choice = input('[!] Continue? (y/n): ').lower()
if choice != 'y':
sys.exit(0)
print('[*] Directory to save the page: {}\n'.format(dir_name))
# Get name of directory where the files will be saved we actually want to save
save_name = input('[$] Directory name to save findings(none for "saved"): ')
save_name = save_name if save_name else 'saved'
save_dir = curr_dir / save_name
if save_dir.is_dir():
print('[!] {} is already a directory!'.format(save_dir))
choice = input('[!] Delete it? (y/n): ').lower()
if choice == 'y':
shutil.rmtree(save_dir.absolute().as_posix())
else:
sys.exit(0)
os.makedirs(save_dir.absolute().as_posix())
print('[*] Directory to save findings: {}\n'.format(save_name))
# The searchterm (which files we want to copy)
print('[*] Everything with the following substring will be copied')
search_term = input('[$] Files to copy to that directory(none for ".png"): ')
search_term = search_term if search_term else '.png'
print('[*] Searchterm: {}\n'.format(search_term))
input('\n[$] Press Enter to continue...')
# We will hand these exit_codes to the wget call later
# to disable every exit/error exception (it would look horrible otherwise)
exit_codes = tuple(range(9))  # wget exit codes 0-8; a tuple, unlike a generator, survives reuse
# Kicks off the wget -m <url> -P <directory> command
# It's written this way so we can stream the program's output as it runs
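# Rough non-sh equivalent, for reference (an untested sketch):
#   import subprocess
#   subprocess.run(['wget', '-m', url, '-P', dir_name])
# sh's _iter=True yields wget's output line by line, and _ok_code keeps
# sh from raising on wget's non-zero mirroring exit codes.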
try:
for line in wget('-m', url, '-P', dir_name, _iter=True, _err_to_out=True,
_out_bufsize=1, _ok_code=exit_codes):
print(line)
except Exception:
pass
# Copying the files we want to save
try:
# Get every file with the correct searchterm from the folder where the webpage is saved
files = list(page_dir.glob("**/*{}".format(search_term)))
if not files:
print("[!] No matching files found")
else:
print("[*] Copying {} *{} files...".format(len(files), search_term))
for f in files:
shutil.copy(f.absolute().as_posix(), save_dir.absolute().as_posix())
except Exception as e:
print('[!] Something went wrong while copying data')
print(e)
# Deleting the saved webpage, cause we don't need it anymore
print('\n[*] Cleaning up...\n')
if page_dir.is_dir():
shutil.rmtree(page_dir.absolute().as_posix())
print('[*] All done!')
| 33.958333 | 91 | 0.674233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,589 | 0.487423 |
961e5e18627878c209a335c0392cc2286e8803ad | 323 | py | Python | Asap-3.8.4/Projects/NanoparticleMC/misc/viewatomsmc.py | auag92/n2dm | 03403ef8da303b79478580ae76466e374ec9da60 | ["MIT"] | 1 | 2021-10-19T11:35:34.000Z | 2021-10-19T11:35:34.000Z | Asap-3.8.4/Projects/NanoparticleMC/misc/viewatomsmc.py | auag92/n2dm | 03403ef8da303b79478580ae76466e374ec9da60 | ["MIT"] | null | null | null | Asap-3.8.4/Projects/NanoparticleMC/misc/viewatomsmc.py | auag92/n2dm | 03403ef8da303b79478580ae76466e374ec9da60 | ["MIT"] | 3 | 2016-07-18T19:22:48.000Z | 2021-07-06T03:06:42.000Z |
import ase
from ase import Atoms
from ase.atom import Atom
import sys
from ase.visualize import view
import pickle
with open(sys.argv[1], 'rb') as f:  # the .amc file; 'rb' because pickle needs binary mode
    p = pickle.load(f)
positions = p['atomspositions']
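# `p` is assumed to be a dict pickled by the Monte Carlo run, with
# 'atomspositions' holding a sequence of xyz coordinates; each one is
# rebuilt as a gold Atom purely for visualization.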
atms = Atoms()
for p0 in positions:
a = Atom('Au',position=p0)
atms.append(a)
atms.center(vacuum=2)
view(atms)
| 17 | 40 | 0.721362 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.114551 |
961e930045b962f6aec047adbd1d0fd8f14a977a | 453 | py | Python | bot_settings_example.py | nikmedoed/BalanceBot | 731e6d09d71bbf8d7802d0b42a570947343d3ce6 | ["MIT"] | null | null | null | bot_settings_example.py | nikmedoed/BalanceBot | 731e6d09d71bbf8d7802d0b42a570947343d3ce6 | ["MIT"] | null | null | null | bot_settings_example.py | nikmedoed/BalanceBot | 731e6d09d71bbf8d7802d0b42a570947343d3ce6 | ["MIT"] | null | null | null |
# this is the dev environment
TELEGRAM_TOKEN = "..."
RELATIVE_CHAT_IDS = [ "...", '...']
TEXT = {
"bot_info": ('Привет, я бот, который отвечает за равномерное распределение участников по комнатам.\n\n'
'Нажми кнопку, если готов сменить комнату'),
"get_link": "Получить рекомендацию",
"new_room": "Ваша новая комната\n%s",
"nothing_to_change": "На данный момент ничего менять не требуется"
}
def logger(*message):
print(message)
| 30.2 | 107 | 0.655629 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 489 | 0.765258 |
961f8e0ded1739e7f84175c2bdac8bbf64966432 | 8,270 | py | Python | test/xslt/borrowed/sm_20000304.py | zepheira/amara | d3ffe07d6e2266b34d72b012a82d572c8edbf1e7 | ["Apache-2.0"] | 6 | 2015-01-30T03:50:36.000Z | 2022-03-20T16:09:58.000Z | test/xslt/borrowed/sm_20000304.py | zepheira/amara | d3ffe07d6e2266b34d72b012a82d572c8edbf1e7 | ["Apache-2.0"] | 2 | 2015-02-04T17:18:47.000Z | 2019-09-27T23:39:52.000Z | test/xslt/borrowed/sm_20000304.py | zepheira/amara | d3ffe07d6e2266b34d72b012a82d572c8edbf1e7 | ["Apache-2.0"] | 6 | 2015-02-04T16:16:18.000Z | 2019-10-30T20:07:48.000Z |
########################################################################
# test/xslt/borrowed/sm_20000304.py
# Example from Steve Muench <[email protected]>
# to Jon Smirl <[email protected]>
# on 4 March 2000
"""
From: "Steve Muench" <[email protected]>
To: <[email protected]>
Subject: Re: SVG charts and graphs from XML input
Date: Sat, 4 Mar 2000 18:02:53 -0800 (19:02 MST)
This is by no means a bullet-proof, one-size-fits
all charting stylesheet, but it *was* my first foray
into SVG from XSLT.
Given XML results of an Oracle XSQL Page like:
<xsql:query xmlns:xsql="urn:oracle-xsql" connection="demo">
select ename, sal from dept
</xsql:query>
Which under the covers produces a dynamic XML doc like:
[SNIP source]
The following "salchart.xsl" XSLT stylesheet
renders a dynamic bar chart with "cool colors"
for the employees in the department.
You may have to modify the namespace of the
Java extension functions to get it to work in
XT or Saxon or other XSLT engines.
[SNIP stylesheet]
"""
import os
import cStringIO
import unittest
from amara.lib import treecompare
from amara.test import test_main
from amara.test.xslt import xslt_test, filesource, stringsource
### dalke - added to make the imports work
def NumberValue(x):
return x
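# NOTE: NumberValue is stubbed to the identity here; in the original
# 4Suite/Ft.Xml runtime it presumably coerced an XPath object to a float.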
#Extensions
ORACLE_JAVA_NS = 'http://www.oracle.com/XSL/Transform/java'
JAVA_COLOR_NS = ORACLE_JAVA_NS + '/java.awt.Color'
JAVA_INTEGER_NS = ORACLE_JAVA_NS + '/java.lang.Integer'
def Java_Color_GetHSBColor(context, hue, saturation, brightness):
hue = NumberValue(hue)
saturation = NumberValue(saturation)
brightness = NumberValue(brightness)
if saturation == 0:
r = g = b = int(brightness * 255)
else:
r = g = b = 0
h = (hue - int(hue)) * 6.0
f = h - int(h)
p = brightness * (1.0 - saturation)
q = brightness * (1.0 - saturation * f)
t = brightness * (1.0 - (saturation * (1.0 - f)))
h = int(h)
if h == 0:
r = int(brightness * 255)
g = int(t * 255)
b = int(p * 255)
elif h == 1:
r = int(q * 255)
g = int(brightness * 255)
b = int(p * 255)
elif h == 2:
r = int(p * 255)
g = int(brightness * 255)
b = int(t * 255)
elif h == 3:
r = int(p * 255)
g = int(q * 255)
b = int(brightness * 255)
elif h == 4:
r = int(t * 255)
g = int(p * 255)
b = int(brightness * 255)
elif h == 5:
r = int(brightness * 255)
g = int(p * 255)
b = int(q * 255)
return 0xff000000L | (r << 16) | (g << 8) | (b << 0)
def Java_Color_GetRed(context, color):
color = NumberValue(color)
return (long(color) >> 16) & 0xff
def Java_Color_GetGreen(context, color):
color = NumberValue(color)
return (long(color) >> 8) & 0xff
def Java_Color_GetBlue(context, color):
color = NumberValue(color)
return long(color) & 0xff
def Java_Integer_ToHexString(context, number):
return '%X' % NumberValue(number)
ExtFunctions = {
(JAVA_COLOR_NS, 'getHSBColor') : Java_Color_GetHSBColor,
(JAVA_COLOR_NS, 'getRed') : Java_Color_GetRed,
(JAVA_COLOR_NS, 'getGreen') : Java_Color_GetGreen,
(JAVA_COLOR_NS, 'getBlue') : Java_Color_GetBlue,
(JAVA_INTEGER_NS, 'toHexString') : Java_Integer_ToHexString,
}
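# Quick sanity check for the shims above (illustrative, run outside XSLT):
#   c = Java_Color_GetHSBColor(None, 1 / 14.0, 0.6, 0.9)
#   '%02X%02X%02X' % (Java_Color_GetRed(None, c),
#                     Java_Color_GetGreen(None, c),
#                     Java_Color_GetBlue(None, c))
# yields 'E5965B', the fill color of the first bar in the expected
# output below.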
class test_xslt_sm_20000304(xslt_test):
source = stringsource("""<?xml version = '1.0'?>
<ROWSET>
<ROW num="1">
<ENAME>CLARK</ENAME>
<SAL>2450</SAL>
</ROW>
<ROW num="2">
<ENAME>KING</ENAME>
<SAL>3900</SAL>
</ROW>
<ROW num="3">
<ENAME>MILLER</ENAME>
<SAL>1300</SAL>
</ROW>
</ROWSET>
""")
transform = stringsource('''<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:Color="http://www.oracle.com/XSL/Transform/java/java.awt.Color"
xmlns:Integer="http://www.oracle.com/XSL/Transform/java/java.lang.Integer"
exclude-result-prefixes="Color Integer">
<xsl:output media-type="image/svg"/>
<xsl:template match="/">
<svg xml:space="preserve" width="1000" height="1000">
<desc>Salary Chart</desc>
<g style="stroke:#000000;stroke-width:1;font-family:Arial;font-size:16">
<xsl:for-each select="ROWSET/ROW">
<xsl:call-template name="drawBar">
<xsl:with-param name="rowIndex" select="position()"/>
<xsl:with-param name="ename" select="ENAME"/>
<xsl:with-param name="sal" select="number(SAL)"/>
</xsl:call-template>
</xsl:for-each>
</g>
</svg>
</xsl:template>
<xsl:template name="drawBar">
<xsl:param name="rowIndex" select="number(0)"/>
<xsl:param name="ename"/>
<xsl:param name="sal" select="number(0)"/>
<xsl:variable name="xOffset" select="number(100)"/>
<xsl:variable name="yOffset" select="number(20)"/>
<xsl:variable name="barHeight" select="number(25)"/>
<xsl:variable name="gap" select="number(10)"/>
<xsl:variable name="x" select="$xOffset"/>
<xsl:variable name="y" select="$yOffset + $rowIndex * ($barHeight + $gap)"/>
<xsl:variable name="barWidth" select="$sal div number(10)"/>
<rect x="{$x}" y="{$y}" height="{$barHeight}" width="{$barWidth}">
<xsl:attribute name="style">
<xsl:text>fill:#</xsl:text>
<xsl:call-template name="getCoolColorStr" xml:space="default">
<xsl:with-param name="colorIndex" select="$rowIndex"/>
<xsl:with-param name="totalColors" select="number(14)"/>
</xsl:call-template>
<xsl:text> </xsl:text>
</xsl:attribute>
</rect>
<xsl:variable name="fontHeight" select="number(18)"/>
<text x="20" y="{$y + $fontHeight}">
<xsl:value-of select="$ename"/>
</text>
<xsl:variable name="x2" select="$xOffset + $barWidth + 10"/>
<text x="{$x2}" y="{$y + $fontHeight}">
<xsl:value-of select="$sal"/>
</text>
</xsl:template>
<xsl:template name="getCoolColorStr">
<xsl:param name="colorIndex"/>
<xsl:param name="totalColors"/>
<xsl:variable name="SATURATION" select="number(0.6)"/>
<xsl:variable name="BRIGHTNESS" select="number(0.9)"/>
<xsl:variable name="hue" select="$colorIndex div $totalColors"/>
<xsl:variable name="c" select="Color:getHSBColor($hue, $SATURATION, $BRIGHTNESS)"/>
<xsl:variable name="r" select="Color:getRed($c)"/>
<xsl:variable name="g" select="Color:getGreen($c)"/>
<xsl:variable name="b" select="Color:getBlue($c)"/>
<xsl:variable name="rs" select="Integer:toHexString($r)"/>
<xsl:variable name="gs" select="Integer:toHexString($g)"/>
<xsl:variable name="bs" select="Integer:toHexString($b)"/>
<xsl:if test="$r < 16">0</xsl:if><xsl:value-of select="$rs"/>
<xsl:if test="$g < 16">0</xsl:if><xsl:value-of select="$gs"/>
<xsl:if test="$b < 16">0</xsl:if><xsl:value-of select="$bs"/>
</xsl:template>
</xsl:stylesheet>
''')
parameters = {}
expected = """<?xml version='1.0' encoding='UTF-8'?>
<svg height='1000' xml:space='preserve' width='1000'>
<desc>Salary Chart</desc>
<g style='stroke:#000000;stroke-width:1;font-family:Arial;font-size:16'>
<rect height='25' x='100' style='fill:#E5965B ' width='245' y='55'/><text x='20' y='73'>CLARK</text><text x='355' y='73'>2450</text>
<rect height='25' x='100' style='fill:#E5D15B ' width='390' y='90'/><text x='20' y='108'>KING</text><text x='500' y='108'>3900</text>
<rect height='25' x='100' style='fill:#BEE55B ' width='130' y='125'/><text x='20' y='143'>MILLER</text><text x='240' y='143'>1300</text>
</g>
</svg>"""
# def test_transform(self):
# import sys
# from amara.xslt import transform
#
# result = transform(self.source, self.transform, output=io)
#
# #FIXME: the numerics break under Python 2.3
# test_harness.XsltTest(tester, source, [sheet], expected_1,
# extensionModules=[__name__])
#
# self.assert_(treecompare.html_compare(self.expected, io.getvalue()))
#
# return
# Hide the test framework from nose
del xslt_test
if __name__ == '__main__':
test_main()
| 32.687747 | 143 | 0.606167 | 4,739 | 0.573035 | 0 | 0 | 0 | 0 | 0 | 0 | 5,769 | 0.697582 |
961fc04d55a2472f650b925e3c30b289d25af832 | 123 | py | Python | model-server/config.py | campos537/deep-fashion-system | 1de31dd6260cc967e1832cff63ae7e537a3a4e9d | ["Unlicense"] | 1 | 2021-04-06T00:43:26.000Z | 2021-04-06T00:43:26.000Z | model-server/config.py | campos537/deep-fashion-system | 1de31dd6260cc967e1832cff63ae7e537a3a4e9d | ["Unlicense"] | null | null | null | model-server/config.py | campos537/deep-fashion-system | 1de31dd6260cc967e1832cff63ae7e537a3a4e9d | ["Unlicense"] | null | null | null |
import json
def Config(config_path):
with open(config_path) as config_file:
return json.load(config_file)
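# Hypothetical usage (the file name and key are assumptions):
#   settings = Config("config.json")
#   model_path = settings["model_path"]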
| 20.5 | 42 | 0.707317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |