| max_stars_repo_path (string, lengths 4-286) | max_stars_repo_name (string, lengths 5-119) | max_stars_count (int64, 0-191k) | id (string, lengths 1-7) | content (string, lengths 6-1.03M) | content_cleaned (string, lengths 6-1.03M) | language (string, 111 classes) | language_score (float64, 0.03-1) | comments (string, lengths 0-556k) | edu_score (float64, 0.32-5.03) | edu_int_score (int64, 0-5) |
|---|---|---|---|---|---|---|---|---|---|---|
taller_estructuras_de_control_selectivas/ejercicio_13.py | JMosqueraM/algoritmos_y_programacion | 0 | 9700 | # Write a program that receives a person's date of birth
# and, as output, prints the name of the corresponding
# zodiac sign, as well as the person's age
def zodiaco(DD, MM):
    if (((DD >= 22) and (MM == 11)) or ((DD <= 21) and (MM == 12))):
        return "Sagitario"
    if (((DD >= 22) and (MM == 12)) or ((DD <= 20) and (MM == 1))):
        return "Capricornio"
    if (((DD >= 21) and (MM == 1)) or ((DD <= 19) and (MM == 2))):
        return "Acuario"
    if (((DD >= 20) and (MM == 2)) or ((DD <= 19) and (MM == 3))):
        return "Piscis"
    if (((DD >= 21) and (MM == 3)) or ((DD <= 20) and (MM == 4))):
        return "Aries"
    if (((DD >= 21) and (MM == 4)) or ((DD <= 21) and (MM == 5))):
        return "Tauro"
    if (((DD >= 22) and (MM == 5)) or ((DD <= 21) and (MM == 6))):
        return "Geminis"
    if (((DD >= 22) and (MM == 6)) or ((DD <= 22) and (MM == 7))):
        return "Cancer"
    if (((DD >= 23) and (MM == 7)) or ((DD <= 23) and (MM == 8))):
        return "Leo"
    if (((DD >= 24) and (MM == 8)) or ((DD <= 22) and (MM == 9))):
        return "Virgo"
    if (((DD >= 23) and (MM == 9)) or ((DD <= 22) and (MM == 10))):
        return "Libra"
    if (((DD >= 23) and (MM == 10)) or ((DD <= 21) and (MM == 11))):
        return "Escorpion"
fecha_str = input("Ingrese la fecha de nacimiento (DD/MM/AAAA): ")
fecha = fecha_str.split("/")
fecha_int = []
for elemento in fecha:
    fecha_int.append(int(elemento))
dia = fecha_int[0]
mes = fecha_int[1]
ano = fecha_int[2]
signo = zodiaco(dia, mes)
print(f"Siendo que su fecha de nacimiento es {fecha_str}, su signo zodiacal corresponde a {signo} y tiene {abs(ano - 2021)} años") | # Desarrolle un un programa que reciba la fecha de nacimiento
# de una persona, y como salida, indique el nombre del signo del
# zodiaco correspondiente, ademas de su edad
def zodiaco(DD, MM):
if (((DD >= 22) and (MM == 11)) or ((DD <=21) and (MM == 12))):
return("Sagitario")
if (((DD >= 22) and (MM == 12)) or ((DD <=20) and (MM == 1))):
return("Capricornio")
if (((DD >= 21) and (MM == 1)) or ((DD <=19) and (MM == 2))):
return("Acuario")
if (((DD >= 20) and (MM == 2)) or ((DD <=19) and (MM == 3))):
return("Piscis")
if (((DD >= 21) and (MM == 3)) or ((DD <=20) and (MM == 4))):
return("Aries")
if (((DD >= 21) and (MM == 4)) or ((DD <=21) and (MM == 5))):
return("Tauro")
if (((DD >= 22) and (MM == 5)) or ((DD <=21) and (MM == 6))):
return("Geminis")
if (((DD >= 22) and (MM == 6)) or ((DD <=22) and (MM == 7))):
return("Cancer")
if (((DD >= 23) and (MM == 7)) or ((DD <=23) and (MM == 8))):
return("Leo")
if (((DD >= 24) and (MM == 8)) or ((DD <=22) and (MM == 9))):
return("Virgo")
if (((DD >= 23) and (MM == 9)) or ((DD <=22) and (MM == 10))):
return("Libra")
if (((DD >= 23) and (MM == 10)) or ((DD <=21) and (MM == 11))):
return("Escorpion")
fecha_str = input("Ingrese la fecha de nacimiento (DD/MM/AAAA): ")
fecha = fecha_str.split("/")
fecha_int = []
for elemento in fecha:
fecha_int.append(int(elemento))
dia = fecha_int[0]
mes = fecha_int[1]
ano = fecha_int[2]
signo = zodiaco(dia, mes)
print(f"Siendo que su fecha de nacimiento es {fecha_str}, su signo zodiacal corresponde a {signo} y tiene {abs(ano - 2021)} años") | es | 0.969296 | # Desarrolle un un programa que reciba la fecha de nacimiento # de una persona, y como salida, indique el nombre del signo del # zodiaco correspondiente, ademas de su edad | 3.627526 | 4 |
assignment3/crawler/spiders/benchmark_spider.py | vhazali/cs5331 | 8 | 9701 | import re, scrapy
from crawler.items import *
class BenchmarkSpider(scrapy.Spider):
    drop_params = True
    # Spider name, for use with the scrapy crawl command
    name = "benchmarks"
    # Constants to get url parts
    FULL, PROTOCOL, USER, PASSWORD, SUBDOMAIN, DOMAIN, TOP_LEVEL_DOMAIN, PORT_NUM, PATH, PAGE, GET_PARAMS, HASHTAGS = range(12)
    # List of start urls to start crawling
    start_urls = [
        # 'https://app1.com',
        # 'https://app2.com',
        # 'https://app3.com',
        # 'https://app4.com',
        # 'https://app5.com',
        # 'https://app6.com',
        # 'https://app7.com',
        # 'https://app8.com',
        # 'https://app9.com',
        # 'https://app10.com',
        # 'https://app11.com',
        'http://ec2-54-255-215-139.ap-southeast-1.compute.amazonaws.com/'
    ]
    allowed_domains = [
        "app1.com",
        "app2.com",
        "app3.com",
        "app4.com",
        "app5.com",
        "app6.com",
        "app7.com",
        "app8.com",
        "app9.com",
        "app10.com",
        "app11.com",
        "app12.com",
        "app13.com",
        "app14.com",
        "app15.com",
        "app16.com",
        "app17.com",
        "app18.com",
        "app19.com",
        "app20.com",
        "app21.com"
    ]
    # Set to keep track of visited urls
    visited_urls = set(start_urls)
"""
Uses Regex to split up url into components. Groups and what they are:
0 : the full url
1 : Protocol
2 : User
3 : Password
4 : Subdomain
5 : Domain
6 : Top level domain (.com .net etc)
7 : Port number
8 : Path
9 : Page
10: Get parameters
11: Hashtags
"""
def splitUrlIntoParts(self, url, index):
pattern = '(?:([^\:]*)\:\/\/)?(?:([^\:\@]*)(?:\:([^\@]*))?\@)?(?:([^\/\:]*)\.(?=[^\.\/\:]*\.[^\.\/\:]*))?([^\.\/\:]*)(?:\.([^\/\.\:#]*))?(?:\:([0-9]*))?(\/[^\?#]*(?=.*?\/)\/)?([^\?#]*)?(?:\?([^#]*))?(?:#(.*))?'
match = re.search(pattern, url)
if match:
if match.group(index):
return match.group(index)
return ''
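    # Added illustrative note (not in the original): a quick way to sanity-check the
    # group mapping documented above, e.g. from a scrapy shell session.
    #   spider = BenchmarkSpider()
    #   spider.splitUrlIntoParts('https://sub.example.com:8080/dir/page?x=1#top', spider.DOMAIN)      # expected: 'example'
    #   spider.splitUrlIntoParts('https://sub.example.com:8080/dir/page?x=1#top', spider.GET_PARAMS)  # expected: 'x=1'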
    def populateURLItem(self, item, url):
        item['url'] = url
        item['protocol'] = self.splitUrlIntoParts(url, self.PROTOCOL)
        item['domain'] = self.splitUrlIntoParts(url, self.DOMAIN)
        item['path'] = self.splitUrlIntoParts(url, self.PATH)
        item['page'] = self.splitUrlIntoParts(url, self.PAGE)
        item['get_params'] = self.splitUrlIntoParts(url, self.GET_PARAMS)
    def getUrlWithoutParams(self, url):
        # Pattern looks out for a question mark that marks start of params
        # Assumption is that url is already valid
        pattern = '([^? ]+).*'
        match = re.search(pattern, url)
        if match and match.group(1):
            return match.group(1)
        return ''
    def isVisited(self, url):
        if self.drop_params:
            truncated_url = self.getUrlWithoutParams(url)
            return truncated_url in self.visited_urls
        else:
            return url in self.visited_urls
    def markAsVisited(self, url):
        if self.drop_params:
            truncated_url = self.getUrlWithoutParams(url)
            self.visited_urls.add(truncated_url)
        else:
            self.visited_urls.add(url)
    # The default method that's called by scrapy for each url in the start_url list
    def parse(self, response):
        # Get URL item
        item = URLItem()
        # Get parts of URL item
        self.populateURLItem(item, response.url)
        yield item
        # Look for Forms
        # Assumption: forms will have id attribute
        # We will be using this id and url to uniquely identify each form
        forms = response.css('form')
        for form in forms:
            formItem = FormItem()
            formItem['url'] = response.url
            form_id = form.css('::attr(id)').extract_first()
            if form_id is None:
                form_id = ''
            formItem['id_attr'] = form_id
            yield formItem
            inputs = form.css('input')
            for a in inputs:
                inputItem = InputItem()
                inputItem['url'] = response.url
                inputItem['form_id'] = form_id
                inputItem['complete'] = a.extract()
                inputItem['type_attr'] = a.css('::attr(type)').extract_first()
                yield inputItem
        # Get url to visit next
        links = response.css('a::attr(href)').extract()
        for next_page in links:
            # Check that url exist
            if next_page is not None:
                # Handle weirdass cases where hrefs has scheme:///domain
                next_page = next_page.replace("///", "//", 1)
                next_page = response.urljoin(next_page)
                # Check that url is not visited yet
                if not self.isVisited(next_page):
                    self.markAsVisited(next_page)
                    yield scrapy.Request(next_page, callback=self.parse) | [content_cleaned: identical to content, omitted] | en | 0.74076 | [comments: extracted from content, omitted] | 2.709682 | 3
octavia_tempest_plugin/services/load_balancer/v2/listener_client.py | NeCTAR-RC/octavia-tempest-plugin | 0 | 9702 | # Copyright 2017 GoDaddy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_serialization import jsonutils
from tempest import config
from octavia_tempest_plugin.services.load_balancer.v2 import base_client
CONF = config.CONF
Unset = base_client.Unset
class ListenerClient(base_client.BaseLBaaSClient):
root_tag = 'listener'
list_root_tag = 'listeners'
def create_listener(self, protocol, protocol_port, loadbalancer_id,
name=Unset, description=Unset, admin_state_up=Unset,
connection_limit=Unset, timeout_client_data=Unset,
timeout_member_connect=Unset,
timeout_member_data=Unset, timeout_tcp_inspect=Unset,
insert_headers=Unset, default_pool_id=Unset,
default_tls_container_ref=Unset,
sni_container_refs=Unset, client_authentication=Unset,
client_ca_tls_container_ref=Unset,
client_crl_container_ref=Unset,
return_object_only=True):
"""Create a listener.
:param protocol: The protocol for the resource.
:param protocol_port: The protocol port number for the resource.
:param loadbalancer_id: The ID of the load balancer.
:param name: Human-readable name of the resource.
:param description: A human-readable description for the resource.
:param admin_state_up: The administrative state of the resource, which
is up (true) or down (false).
:param connection_limit: The maximum number of connections permitted
for this listener. Default value is -1 which
represents infinite connections.
:param timeout_client_data: Frontend client inactivity timeout in
milliseconds.
:param timeout_member_connect: Backend member connection timeout in
milliseconds.
:param timeout_member_data: Backend member inactivity timeout in
milliseconds.
:param timeout_tcp_inspect: Time, in milliseconds, to wait for
additional TCP packets for content
inspection.
:param insert_headers: A dictionary of optional headers to insert into
the request before it is sent to the backend
member.
:param default_pool_id: The ID of the pool used by the listener if no
L7 policies match.
:param default_tls_container_ref: The URI of the key manager service
secret containing a PKCS12 format
certificate/key bundle for
TERMINATED_TLS listeners.
:param sni_container_refs: A list of URIs to the key manager service
secrets containing PKCS12 format
certificate/key bundles for TERMINATED_TLS
listeners.
:param client_authentication: The TLS client authentication mode. One
of the options NONE, OPTIONAL or
MANDATORY.
:param client_ca_tls_container_ref: The ref of the key manager service
secret containing a PEM format
client CA certificate bundle for
TERMINATED_HTTPS listeners.
:param client_crl_container_ref: The URI of the key manager service
secret containing a PEM format CA
revocation list file for
TERMINATED_HTTPS listeners.
:param return_object_only: If True, the response returns the object
inside the root tag. False returns the full
response from the API.
:raises AssertionError: if the expected_code isn't a valid http success
response code
:raises BadRequest: If a 400 response code is received
:raises Conflict: If a 409 response code is received
:raises Forbidden: If a 403 response code is received
:raises Gone: If a 410 response code is received
:raises InvalidContentType: If a 415 response code is received
:raises InvalidHTTPResponseBody: The response body wasn't valid JSON
:raises InvalidHttpSuccessCode: if the read code isn't an expected
http success code
:raises NotFound: If a 404 response code is received
:raises NotImplemented: If a 501 response code is received
:raises OverLimit: If a 413 response code is received and over_limit is
not in the response body
:raises RateLimitExceeded: If a 413 response code is received and
over_limit is in the response body
:raises ServerFault: If a 500 response code is received
:raises Unauthorized: If a 401 response code is received
:raises UnexpectedContentType: If the content-type of the response
isn't an expected type
:raises UnexpectedResponseCode: If a response code above 400 is
received and it doesn't fall into any
of the handled checks
:raises UnprocessableEntity: If a 422 response code is received and
couldn't be parsed
:returns: A listener object.
"""
kwargs = {arg: value for arg, value in locals().items()
if arg != 'self' and value is not Unset}
return self._create_object(**kwargs)
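# Added usage sketch (not part of the original client; names like `listener_client`
# and `lb_id` are placeholders for whatever the test sets up): inside a tempest test,
# a call would typically look something like
#   listener = listener_client.create_listener(
#       protocol='HTTP', protocol_port=80, loadbalancer_id=lb_id,
#       name='tempest-listener')
#   listener_id = listener['id']
# Only arguments explicitly passed are sent to the API; the dict comprehension above
# filters out anything still set to the Unset sentinel.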
def show_listener(self, listener_id, query_params=None,
return_object_only=True):
"""Get listener details.
:param listener_id: The listener ID to query.
:param query_params: The optional query parameters to append to the
request. Ex. fields=id&fields=name
:param return_object_only: If True, the response returns the object
inside the root tag. False returns the full
response from the API.
:raises AssertionError: if the expected_code isn't a valid http success
response code
:raises BadRequest: If a 400 response code is received
:raises Conflict: If a 409 response code is received
:raises Forbidden: If a 403 response code is received
:raises Gone: If a 410 response code is received
:raises InvalidContentType: If a 415 response code is received
:raises InvalidHTTPResponseBody: The response body wasn't valid JSON
:raises InvalidHttpSuccessCode: if the read code isn't an expected
http success code
:raises NotFound: If a 404 response code is received
:raises NotImplemented: If a 501 response code is received
:raises OverLimit: If a 413 response code is received and over_limit is
not in the response body
:raises RateLimitExceeded: If a 413 response code is received and
over_limit is in the response body
:raises ServerFault: If a 500 response code is received
:raises Unauthorized: If a 401 response code is received
:raises UnexpectedContentType: If the content-type of the response
isn't an expected type
:raises UnexpectedResponseCode: If a response code above 400 is
received and it doesn't fall into any
of the handled checks
:raises UnprocessableEntity: If a 422 response code is received and
couldn't be parsed
:returns: A listener object.
"""
return self._show_object(obj_id=listener_id,
query_params=query_params,
return_object_only=return_object_only)
def list_listeners(self, query_params=None, return_object_only=True):
"""Get a list of listener objects.
:param query_params: The optional query parameters to append to the
request. Ex. fields=id&fields=name
:param return_object_only: If True, the response returns the object
inside the root tag. False returns the full
response from the API.
:raises AssertionError: if the expected_code isn't a valid http success
response code
:raises BadRequest: If a 400 response code is received
:raises Conflict: If a 409 response code is received
:raises Forbidden: If a 403 response code is received
:raises Gone: If a 410 response code is received
:raises InvalidContentType: If a 415 response code is received
:raises InvalidHTTPResponseBody: The response body wasn't valid JSON
:raises InvalidHttpSuccessCode: if the read code isn't an expected
http success code
:raises NotFound: If a 404 response code is received
:raises NotImplemented: If a 501 response code is received
:raises OverLimit: If a 413 response code is received and over_limit is
not in the response body
:raises RateLimitExceeded: If a 413 response code is received and
over_limit is in the response body
:raises ServerFault: If a 500 response code is received
:raises Unauthorized: If a 401 response code is received
:raises UnexpectedContentType: If the content-type of the response
isn't an expected type
:raises UnexpectedResponseCode: If a response code above 400 is
received and it doesn't fall into any
of the handled checks
:raises UnprocessableEntity: If a 422 response code is received and
couldn't be parsed
:returns: A list of listener objects.
"""
return self._list_objects(query_params=query_params,
return_object_only=return_object_only)
def update_listener(self, listener_id, name=Unset, description=Unset,
admin_state_up=Unset, connection_limit=Unset,
timeout_client_data=Unset,
timeout_member_connect=Unset,
timeout_member_data=Unset, timeout_tcp_inspect=Unset,
insert_headers=Unset, default_pool_id=Unset,
default_tls_container_ref=Unset,
sni_container_refs=Unset, client_authentication=Unset,
client_ca_tls_container_ref=Unset,
client_crl_container_ref=Unset,
return_object_only=True):
"""Update a listener.
:param listener_id: The listener ID to update.
:param name: Human-readable name of the resource.
:param description: A human-readable description for the resource.
:param admin_state_up: The administrative state of the resource, which
is up (true) or down (false).
:param connection_limit: The maximum number of connections permitted
for this listener. Default value is -1 which
represents infinite connections.
:param timeout_client_data: Frontend client inactivity timeout in
milliseconds.
:param timeout_member_connect: Backend member connection timeout in
milliseconds.
:param timeout_member_data: Backend member inactivity timeout in
milliseconds.
:param timeout_tcp_inspect: Time, in milliseconds, to wait for
additional TCP packets for content
inspection.
:param insert_headers: A dictionary of optional headers to insert into
the request before it is sent to the backend
member.
:param default_pool_id: The ID of the pool used by the listener if no
L7 policies match.
:param default_tls_container_ref: The URI of the key manager service
secret containing a PKCS12 format
certificate/key bundle for
TERMINATED_TLS listeners.
:param sni_container_refs: A list of URIs to the key manager service
secrets containing PKCS12 format
certificate/key bundles for TERMINATED_TLS
listeners.
:param client_authentication: The TLS client authentication mode. One
of the options NONE, OPTIONAL or
MANDATORY.
:param client_ca_tls_container_ref: The ref of the key manager service
secret containing a PEM format
client CA certificate bundle for
TERMINATED_HTTPS listeners.
:param client_crl_container_ref: The URI of the key manager service
secret containing a PEM format CA
revocation list file for
TERMINATED_HTTPS listeners.
:param return_object_only: If True, the response returns the object
inside the root tag. False returns the full
response from the API.
:raises AssertionError: if the expected_code isn't a valid http success
response code
:raises BadRequest: If a 400 response code is received
:raises Conflict: If a 409 response code is received
:raises Forbidden: If a 403 response code is received
:raises Gone: If a 410 response code is received
:raises InvalidContentType: If a 415 response code is received
:raises InvalidHTTPResponseBody: The response body wasn't valid JSON
:raises InvalidHttpSuccessCode: if the read code isn't an expected
http success code
:raises NotFound: If a 404 response code is received
:raises NotImplemented: If a 501 response code is received
:raises OverLimit: If a 413 response code is received and over_limit is
not in the response body
:raises RateLimitExceeded: If a 413 response code is received and
over_limit is in the response body
:raises ServerFault: If a 500 response code is received
:raises Unauthorized: If a 401 response code is received
:raises UnexpectedContentType: If the content-type of the response
isn't an expected type
:raises UnexpectedResponseCode: If a response code above 400 is
received and it doesn't fall into any
of the handled checks
:raises UnprocessableEntity: If a 422 response code is received and
couldn't be parsed
:returns: A listener object.
"""
kwargs = {arg: value for arg, value in locals().items()
if arg != 'self' and value is not Unset}
kwargs['obj_id'] = kwargs.pop('listener_id')
return self._update_object(**kwargs)
def delete_listener(self, listener_id, ignore_errors=False):
"""Delete a listener.
:param listener_id: The listener ID to delete.
:param ignore_errors: True if errors should be ignored.
:raises AssertionError: if the expected_code isn't a valid http success
response code
:raises BadRequest: If a 400 response code is received
:raises Conflict: If a 409 response code is received
:raises Forbidden: If a 403 response code is received
:raises Gone: If a 410 response code is received
:raises InvalidContentType: If a 415 response code is received
:raises InvalidHTTPResponseBody: The response body wasn't valid JSON
:raises InvalidHttpSuccessCode: if the read code isn't an expected
http success code
:raises NotFound: If a 404 response code is received
:raises NotImplemented: If a 501 response code is received
:raises OverLimit: If a 413 response code is received and over_limit is
not in the response body
:raises RateLimitExceeded: If a 413 response code is received and
over_limit is in the response body
:raises ServerFault: If a 500 response code is received
:raises Unauthorized: If a 401 response code is received
:raises UnexpectedContentType: If the content-type of the response
isn't an expected type
:raises UnexpectedResponseCode: If a response code above 400 is
received and it doesn't fall into any
of the handled checks
:raises UnprocessableEntity: If a 422 response code is received and
couldn't be parsed
:returns: None if ignore_errors is True, the response status code
if not.
"""
return self._delete_obj(obj_id=listener_id,
ignore_errors=ignore_errors)
def get_listener_stats(self, listener_id, query_params=None,
return_object_only=True):
"""Get listener statistics.
:param listener_id: The listener ID to query.
:param query_params: The optional query parameters to append to the
request. Ex. fields=id&fields=name
:param return_object_only: If True, the response returns the object
inside the root tag. False returns the full
response from the API.
:raises AssertionError: if the expected_code isn't a valid http success
response code
:raises BadRequest: If a 400 response code is received
:raises Conflict: If a 409 response code is received
:raises Forbidden: If a 403 response code is received
:raises Gone: If a 410 response code is received
:raises InvalidContentType: If a 415 response code is received
:raises InvalidHTTPResponseBody: The response body wasn't valid JSON
:raises InvalidHttpSuccessCode: if the read code isn't an expected
http success code
:raises NotFound: If a 404 response code is received
:raises NotImplemented: If a 501 response code is received
:raises OverLimit: If a 413 response code is received and over_limit is
not in the response body
:raises RateLimitExceeded: If a 413 response code is received and
over_limit is in the response body
:raises ServerFault: If a 500 response code is received
:raises Unauthorized: If a 401 response code is received
:raises UnexpectedContentType: If the content-type of the response
isn't an expected type
:raises UnexpectedResponseCode: If a response code above 400 is
received and it doesn't fall into any
of the handled checks
:raises UnprocessableEntity: If a 422 response code is received and
couldn't be parsed
:returns: A listener statistics object.
"""
if query_params:
request_uri = '{0}/{1}/stats?{2}'.format(self.uri, listener_id,
query_params)
else:
request_uri = '{0}/{1}/stats'.format(self.uri, listener_id)
response, body = self.get(request_uri)
self.expected_success(200, response.status)
if return_object_only:
return jsonutils.loads(body.decode('utf-8'))['stats']
else:
return jsonutils.loads(body.decode('utf-8'))
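# Added note (not in the original file): unlike the other methods above, which defer
# to the shared _create_object/_show_object/_list_objects/_update_object/_delete_obj
# helpers, get_listener_stats builds the /stats request itself and calls self.get()
# and expected_success() directly, since the stats payload lives under a 'stats' key
# rather than the usual 'listener' root tag.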
| [content_cleaned: identical to content, omitted] | en | 0.872992 | [comments: docstrings and comments extracted from content, omitted] | 1.657333 | 2
ryu/gui/views/router_address_delete.py | isams1/Thesis | 3 | 9703 | import re
import logging
import httplib
import view_base
from models import rt_proxy
LOG = logging.getLogger('ryu.gui')
class RtAddrDel(view_base.ViewBase):
def __init__(self, host, port, dpid, address_id, status=None):
super(RtAddrDel, self).__init__()
self.host = host
self.port = port
self.dpid = dpid
self.address_id = address_id
self.status = status
def run(self):
LOG.debug('Router Address Delete Rule running')
if not self.status:
# set rule
return self._delete_address()
def _delete_address(self):
address = '%s:%s' % (self.host, self.port)
res = {'host': self.host,
'port': self.port,
'status': None}
address_no = {}
address_no['address_id'] = self.address_id
status = rt_proxy.delete_router_address(address, address_no, self.dpid)
if status[0]['command_result']:
command_result = status[0]['command_result']
res['status'] = command_result
else:
res['status'] = status
return self.json_response(res) | import re
import logging
import httplib
import view_base
from models import rt_proxy
LOG = logging.getLogger('ryu.gui')
class RtAddrDel(view_base.ViewBase):
def __init__(self, host, port, dpid, address_id, status=None):
super(RtAddrDel, self).__init__()
self.host = host
self.port = port
self.dpid = dpid
self.address_id = address_id
self.status = status
def run(self):
LOG.debug('Router Address Delete Rule running')
if not self.status:
# set rule
return self._delete_address()
def _delete_address(self):
address = '%s:%s' % (self.host, self.port)
res = {'host': self.host,
'port': self.port,
'status': None}
address_no = {}
address_no['address_id'] = self.address_id
status = rt_proxy.delete_router_address(address, address_no, self.dpid)
if status[0]['command_result']:
command_result = status[0]['command_result']
res['status'] = command_result
else:
res['status'] = status
return self.json_response(res) | en | 0.962632 | # set rule | 2.243698 | 2 |
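A minimal driving sketch for the RtAddrDel view in the record above; the host, port, dpid and address_id values are placeholders, and it assumes the ryu GUI package is importable so that models.rt_proxy can reach a running REST router.

from views.router_address_delete import RtAddrDel  # import path assumed from the record's layout

# Ask the REST router behind 127.0.0.1:8080 to drop address entry 1 on datapath 0x1.
view = RtAddrDel(host='127.0.0.1', port=8080, dpid='0000000000000001', address_id='1')
response = view.run()  # no prior status, so run() goes straight to _delete_address()
print(response)        # JSON response carrying the command_result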
tests/util/test_helper.py | TobiasRasbold/pywrangler | 14 | 9704 | """This module contains tests for the helper module.
"""
from pywrangler.util.helper import get_param_names
def test_get_param_names():
def func():
pass
assert get_param_names(func) == []
def func1(a, b=4, c=6):
pass
assert get_param_names(func1) == ["a", "b", "c"]
assert get_param_names(func1, ["a"]) == ["b", "c"]
| """This module contains tests for the helper module.
"""
from pywrangler.util.helper import get_param_names
def test_get_param_names():
def func():
pass
assert get_param_names(func) == []
def func1(a, b=4, c=6):
pass
assert get_param_names(func1) == ["a", "b", "c"]
assert get_param_names(func1, ["a"]) == ["b", "c"]
| en | 0.431578 | This module contains tests for the helper module. | 2.586443 | 3 |
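A short usage sketch for get_param_names mirroring the behaviour exercised by the tests above; it assumes pywrangler is installed. The optional second argument lists parameter names to drop from the result.

from pywrangler.util.helper import get_param_names

def resample(df, freq="1H", how="mean"):
    pass

print(get_param_names(resample))           # ['df', 'freq', 'how']
print(get_param_names(resample, ["df"]))   # ['freq', 'how']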
Python-Files/model_conversion/convert_to_tflite.py | jcgeo9/ML-For-Fish-Recognition | 0 | 9705 | <reponame>jcgeo9/ML-For-Fish-Recognition
# =============================================================================
# Created By : <NAME>
# Project : Machine Learning for Fish Recognition (Individual Project)
# =============================================================================
# Description : File in order to convert saved models to .tflite instances.
# To be used after the desired models are trained and saved
# How to use : Replace variables in CAPS according to needs of the dataset
# =============================================================================
import tensorflow as tf
model_path='PATH TO SAVED MODEL'
tflite_model_name='NAME OF THE NEWLY CREATED TFLITE MODEL'
#convert the model by loading the saved model to the converter
converter = tf.lite.TFLiteConverter.from_saved_model(model_path)
tflite_model = converter.convert()
#save the tflite model
with open(tflite_model_name+'.tflite', 'wb') as f:
f.write(tflite_model)
| # =============================================================================
# Created By : <NAME>
# Project : Machine Learning for Fish Recognition (Individual Project)
# =============================================================================
# Description : File in order to convert saved models to .tflite instances.
# To be used after the desired models are trained and saved
# How to use : Replace variables in CAPS according to needs of the dataset
# =============================================================================
import tensorflow as tf
model_path='PATH TO SAVED MODEL'
tflite_model_name='NAME OF THE NEWLY CREATED TFLITE MODEL'
#convert the model by loading the saved model to the converter
converter = tf.lite.TFLiteConverter.from_saved_model(model_path)
tflite_model = converter.convert()
#save the tflite model
with open(tflite_model_name+'.tflite', 'wb') as f:
f.write(tflite_model) | en | 0.682351 | # ============================================================================= # Created By : <NAME> # Project : Machine Learning for Fish Recognition (Individual Project) # ============================================================================= # Description : File in order to convert saved models to .tflite instances. # To be used after the desired model are trained and saved # How to use : Replace variables in CAPS according to needs of the dataset # ============================================================================= #convert the model by loading the saved model to the converter #save the tflite model | 3.26676 | 3 |
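As a hedged follow-up to the conversion script above, a sketch that loads the written .tflite file with the TFLite interpreter and runs one dummy inference to confirm the conversion produced a usable model; the file name mirrors the script's placeholder.

import numpy as np
import tensorflow as tf

tflite_model_name = 'NAME OF THE NEWLY CREATED TFLITE MODEL'  # same placeholder as the script

interpreter = tf.lite.Interpreter(model_path=tflite_model_name + '.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Feed a zero-filled batch shaped like the model's input just to confirm it loads and runs.
dummy = np.zeros(input_details[0]['shape'], dtype=input_details[0]['dtype'])
interpreter.set_tensor(input_details[0]['index'], dummy)
interpreter.invoke()
print(interpreter.get_tensor(output_details[0]['index']).shape)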
python3/sparkts/test/test_datetimeindex.py | hedibejaoui/spark-timeseries | 0 | 9706 | <reponame>hedibejaoui/spark-timeseries<gh_stars>0
from .test_utils import PySparkTestCase
from sparkts.datetimeindex import *
import pandas as pd
class DateTimeIndexTestCase(PySparkTestCase):
def test_frequencies(self):
bd = BusinessDayFrequency(1, 1, self.sc)
self.assertEqual(bd.days(), 1)
hf = HourFrequency(4, self.sc)
self.assertEqual(hf.hours(), 4)
def test_uniform(self):
freq = DayFrequency(3, self.sc)
self.assertEqual(freq.days(), 3)
start = '2015-04-10'
index = uniform(start, periods=5, freq=freq, sc=self.sc)
index2 = uniform(start, end='2015-04-22', freq=freq, sc=self.sc)
self.assertEqual(index, index2)
self.assertEqual(len(index), 5)
self.assertEqual(index.first(), pd.to_datetime('2015-04-10'))
self.assertEqual(index.last(), pd.to_datetime('2015-04-22'))
subbydate = index[pd.to_datetime('2015-04-13'):pd.to_datetime('2015-04-19')]
subbyloc = index.islice(1, 4)
self.assertEqual(subbydate, subbyloc)
self.assertEqual(subbydate.first(), pd.to_datetime('2015-04-13'))
self.assertEqual(subbydate.last(), pd.to_datetime('2015-04-19'))
self.assertEqual(subbydate.datetime_at_loc(0), pd.to_datetime('2015-04-13'))
self.assertEqual(subbydate[pd.to_datetime('2015-04-13')], 0)
def test_irregular(self):
pd_index = pd.date_range('2015-04-10', periods=5, freq='3D')
dt_index = irregular(pd_index, self.sc)
self.assertEqual(len(dt_index), 5)
self.assertEqual(dt_index.first(), pd.to_datetime('2015-04-10'))
self.assertEqual(dt_index.last(), pd.to_datetime('2015-04-22'))
subbydate = dt_index[pd.to_datetime('2015-04-13'):pd.to_datetime('2015-04-19')]
subbyloc = dt_index.islice(1, 4)
self.assertEqual(subbydate, subbyloc)
self.assertEqual(subbydate.first(), pd.to_datetime('2015-04-13'))
self.assertEqual(subbydate.last(), pd.to_datetime('2015-04-19'))
self.assertEqual(subbydate.datetime_at_loc(0), pd.to_datetime('2015-04-13'))
self.assertEqual(subbydate[pd.to_datetime('2015-04-13')], 0)
pd_index2 = dt_index.to_pandas_index()
self.assertTrue(pd_index.equals(pd_index2), str(pd_index) + "!=" + str(pd_index2))
| from .test_utils import PySparkTestCase
from sparkts.datetimeindex import *
import pandas as pd
class DateTimeIndexTestCase(PySparkTestCase):
def test_frequencies(self):
bd = BusinessDayFrequency(1, 1, self.sc)
self.assertEqual(bd.days(), 1)
hf = HourFrequency(4, self.sc)
self.assertEqual(hf.hours(), 4)
def test_uniform(self):
freq = DayFrequency(3, self.sc)
self.assertEqual(freq.days(), 3)
start = '2015-04-10'
index = uniform(start, periods=5, freq=freq, sc=self.sc)
index2 = uniform(start, end='2015-04-22', freq=freq, sc=self.sc)
self.assertEqual(index, index2)
self.assertEqual(len(index), 5)
self.assertEqual(index.first(), pd.to_datetime('2015-04-10'))
self.assertEqual(index.last(), pd.to_datetime('2015-04-22'))
subbydate = index[pd.to_datetime('2015-04-13'):pd.to_datetime('2015-04-19')]
subbyloc = index.islice(1, 4)
self.assertEqual(subbydate, subbyloc)
self.assertEqual(subbydate.first(), pd.to_datetime('2015-04-13'))
self.assertEqual(subbydate.last(), pd.to_datetime('2015-04-19'))
self.assertEqual(subbydate.datetime_at_loc(0), pd.to_datetime('2015-04-13'))
self.assertEqual(subbydate[pd.to_datetime('2015-04-13')], 0)
def test_irregular(self):
pd_index = pd.date_range('2015-04-10', periods=5, freq='3D')
dt_index = irregular(pd_index, self.sc)
self.assertEqual(len(dt_index), 5)
self.assertEqual(dt_index.first(), pd.to_datetime('2015-04-10'))
self.assertEqual(dt_index.last(), pd.to_datetime('2015-04-22'))
subbydate = dt_index[pd.to_datetime('2015-04-13'):pd.to_datetime('2015-04-19')]
subbyloc = dt_index.islice(1, 4)
self.assertEqual(subbydate, subbyloc)
self.assertEqual(subbydate.first(), pd.to_datetime('2015-04-13'))
self.assertEqual(subbydate.last(), pd.to_datetime('2015-04-19'))
self.assertEqual(subbydate.datetime_at_loc(0), pd.to_datetime('2015-04-13'))
self.assertEqual(subbydate[pd.to_datetime('2015-04-13')], 0)
pd_index2 = dt_index.to_pandas_index()
self.assertTrue(pd_index.equals(pd_index2), str(pd_index) + "!=" + str(pd_index2)) | none | 1 | 2.410657 | 2 |
|
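A minimal sketch of the same index construction outside the test harness above; it assumes a live SparkContext named sc, which PySparkTestCase normally provides, and uses the uniform/DayFrequency API exactly as the tests do.

from sparkts.datetimeindex import DayFrequency, uniform

freq = DayFrequency(3, sc)                                   # one tick every third day
index = uniform('2015-04-10', periods=5, freq=freq, sc=sc)   # 2015-04-10 .. 2015-04-22
print(len(index), index.first(), index.last())
print(index.islice(1, 4).first())                            # 2015-04-13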
src/listIntersect/inter.py | rajitbanerjee/leetcode | 0 | 9707 | # Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:
seen = set()
curr = headA
while curr:
seen.add(curr)
curr = curr.next
curr = headB
while curr:
if curr in seen:
return curr
curr = curr.next
return None
| # Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:
seen = set()
curr = headA
while curr:
seen.add(curr)
curr = curr.next
curr = headB
while curr:
if curr in seen:
return curr
curr = curr.next
return None
| en | 0.620171 | # Definition for singly-linked list. | 3.581728 | 4 |
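A quick driver for the ListNode/Solution classes in the record above (assumes they are already in scope, e.g. the file has been imported); it builds two lists that share a tail and checks that the first shared node is returned.

# a: 1 -> 2 -> 8 -> 9
# b:      5 -> 8 -> 9   (both lists share the node holding 8)
shared = ListNode(8)
shared.next = ListNode(9)

head_a = ListNode(1)
head_a.next = ListNode(2)
head_a.next.next = shared

head_b = ListNode(5)
head_b.next = shared

node = Solution().getIntersectionNode(head_a, head_b)
print(node.val if node else None)  # 8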
photon_stream_production/tests/test_drs_run_assignment.py | fact-project/photon_stream_production | 0 | 9708 | import numpy as np
import photon_stream as ps
import photon_stream_production as psp
import pkg_resources
import os
runinfo_path = pkg_resources.resource_filename(
'photon_stream_production',
os.path.join('tests', 'resources', 'runinfo_20161115_to_20170103.csv')
)
drs_fRunID_for_obs_run = psp.drs_run._drs_fRunID_for_obs_run
def test_drs_run_assignment():
ri = psp.runinfo.read(runinfo_path)
ro = psp.drs_run.assign_drs_runs(ri)
ri = ri[(ri.fNight > 20161229) & (ri.fNight <= 20170102)]
ro = ro[(ro.fNight > 20161229) & (ro.fNight <= 20170102)]
for i, row in ri.iterrows():
assert row.fNight == ro.loc[i, 'fNight']
assert row.fRunID == ro.loc[i, 'fRunID']
if row.fRunTypeKey == psp.runinfo.OBSERVATION_RUN_TYPE_KEY:
first_method_drs_run_id = drs_fRunID_for_obs_run(
runinfo=ri,
fNight=row.fNight,
fRunID=row.fRunID
)
second_method_drs_run_id = ro.loc[i, 'DrsRunID']
if np.isnan(first_method_drs_run_id):
assert np.isnan(second_method_drs_run_id)
else:
assert first_method_drs_run_id == second_method_drs_run_id
| import numpy as np
import photon_stream as ps
import photon_stream_production as psp
import pkg_resources
import os
runinfo_path = pkg_resources.resource_filename(
'photon_stream_production',
os.path.join('tests', 'resources', 'runinfo_20161115_to_20170103.csv')
)
drs_fRunID_for_obs_run = psp.drs_run._drs_fRunID_for_obs_run
def test_drs_run_assignment():
ri = psp.runinfo.read(runinfo_path)
ro = psp.drs_run.assign_drs_runs(ri)
ri = ri[(ri.fNight > 20161229) & (ri.fNight <= 20170102)]
ro = ro[(ro.fNight > 20161229) & (ro.fNight <= 20170102)]
for i, row in ri.iterrows():
assert row.fNight == ro.loc[i, 'fNight']
assert row.fRunID == ro.loc[i, 'fRunID']
if row.fRunTypeKey == psp.runinfo.OBSERVATION_RUN_TYPE_KEY:
first_method_drs_run_id = drs_fRunID_for_obs_run(
runinfo=ri,
fNight=row.fNight,
fRunID=row.fRunID
)
second_method_drs_run_id = ro.loc[i, 'DrsRunID']
if np.isnan(first_method_drs_run_id):
assert np.isnan(second_method_drs_run_id)
else:
assert first_method_drs_run_id == second_method_drs_run_id
| none | 1 | 2.09793 | 2 |
|
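A hedged sketch of the same helpers used interactively rather than under pytest; it assumes photon_stream_production and its bundled runinfo CSV are available as in the test above, and the fNight/fRunID values are placeholders inside the tested range.

import os
import pkg_resources
import photon_stream_production as psp

runinfo_path = pkg_resources.resource_filename(
    'photon_stream_production',
    os.path.join('tests', 'resources', 'runinfo_20161115_to_20170103.csv')
)

ri = psp.runinfo.read(runinfo_path)
ro = psp.drs_run.assign_drs_runs(ri)          # adds the DrsRunID column

# Cross-check one observation run against the per-run lookup helper.
drs_id = psp.drs_run._drs_fRunID_for_obs_run(runinfo=ri, fNight=20170101, fRunID=1)
print(drs_id)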
accounts/migrations/0001_initial.py | vikifox/CMDB | 16 | 9709 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-18 05:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('cmdb', '0001_initial'),
('appconf', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('username', models.CharField(db_index=True, max_length=40, unique=True)),
('email', models.EmailField(max_length=255)),
('is_active', models.BooleanField(default=False)),
('is_superuser', models.BooleanField(default=False)),
('nickname', models.CharField(blank=True, max_length=64, null=True)),
('ldap_name', models.CharField(blank=True, max_length=64)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='PermissionList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('url', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='RoleList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('delivery', models.ManyToManyField(blank=True, to='appconf.Project')),
('permission', models.ManyToManyField(blank=True, to='accounts.PermissionList')),
('webssh', models.ManyToManyField(blank=True, to='cmdb.HostGroup')),
],
),
migrations.AddField(
model_name='userinfo',
name='role',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='accounts.RoleList'),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-18 05:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('cmdb', '0001_initial'),
('appconf', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserInfo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('username', models.CharField(db_index=True, max_length=40, unique=True)),
('email', models.EmailField(max_length=255)),
('is_active', models.BooleanField(default=False)),
('is_superuser', models.BooleanField(default=False)),
('nickname', models.CharField(blank=True, max_length=64, null=True)),
('ldap_name', models.CharField(blank=True, max_length=64)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='PermissionList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('url', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='RoleList',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('delivery', models.ManyToManyField(blank=True, to='appconf.Project')),
('permission', models.ManyToManyField(blank=True, to='accounts.PermissionList')),
('webssh', models.ManyToManyField(blank=True, to='cmdb.HostGroup')),
],
),
migrations.AddField(
model_name='userinfo',
name='role',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='accounts.RoleList'),
),
]
| en | 0.583576 | # -*- coding: utf-8 -*- # Generated by Django 1.11.20 on 2019-04-18 05:56 | 1.736094 | 2 |
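A hedged sketch of how the models created by this migration might be used through the Django ORM once the accounts app is installed; the import path accounts.models and the query values are assumptions, while the model and field names come from the migration itself.

from accounts.models import UserInfo, RoleList, PermissionList  # assumed module path

# Grant a role access to a URL, then list active users holding that role.
perm = PermissionList.objects.create(name='host-read', url='/cmdb/hosts/')
role = RoleList.objects.create(name='ops')
role.permission.add(perm)

for user in UserInfo.objects.filter(role=role, is_active=True).select_related('role'):
    print(user.username, user.role.name)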
autoscaler/azure.py | gabrieladt/kops-ec2-autoscaler | 0 | 9710 | import http
import logging
from typing import List, Tuple, MutableMapping
from datetime import datetime
import re
from requests.packages.urllib3 import Retry
import autoscaler.utils as utils
from autoscaler.autoscaling_groups import AutoScalingGroup
from autoscaler.azure_api import AzureApi, AzureScaleSet, AzureScaleSetInstance
from autoscaler.utils import TransformingFuture, AllCompletedFuture, CompletedFuture
logger = logging.getLogger(__name__)
_RETRY_TIME_LIMIT = 30
class AzureBoundedRetry(Retry):
"""
XXX: Azure sometimes sends us a Retry-After: 1200, even when we still have quota, causing our client to appear to hang.
Ignore them and just retry after 30secs
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
@staticmethod
def from_retry(retry):
new_retry = AzureBoundedRetry()
new_retry.total = retry.total
new_retry.connect = retry.connect
new_retry.read = retry.read
new_retry.backoff_factor = retry.backoff_factor
new_retry.BACKOFF_MAX = retry.BACKOFF_MAX
new_retry.status_forcelist = retry.status_forcelist
new_retry.method_whitelist = retry.method_whitelist
return new_retry
def get_retry_after(self, response):
retry_after = super().get_retry_after(response)
if response.status != http.HTTPStatus.TOO_MANY_REQUESTS or retry_after <= _RETRY_TIME_LIMIT:
return retry_after
headers = {}
for header in ['Retry-After',
'x-ms-ratelimit-remaining-subscription-reads',
'x-ms-ratelimit-remaining-subscription-writes',
'x-ms-ratelimit-remaining-tenant-reads',
'x-ms-ratelimit-remaining-tenant-writes',
'x-ms-ratelimit-remaining-subscription-resource-requests',
'x-ms-ratelimit-remaining-subscription-resource-entities-read',
'x-ms-ratelimit-remaining-tenant-resource-requests',
'x-ms-ratelimit-remaining-tenant-resource-entities-read']:
value = response.getheader(header)
if value is not None:
headers[header] = value
logger.warn("Azure request throttled: {}".format(headers))
return _RETRY_TIME_LIMIT
class AzureGroups(object):
def __init__(self, resource_groups, slow_scale_classes, client: AzureApi):
self.resource_groups = resource_groups
self.slow_scale_classes = slow_scale_classes
self.client = client
def get_all_groups(self, kube_nodes):
groups = []
if self.client:
for resource_group in self.resource_groups:
scale_sets_by_type = {}
for scale_set in self.client.list_scale_sets(resource_group.name):
scale_sets_by_type.setdefault((scale_set.location, scale_set.instance_type), []).append(scale_set)
for key, scale_sets in scale_sets_by_type.items():
location, instance_type = key
slow_scale = _get_azure_class(instance_type) in self.slow_scale_classes
groups.append(AzureVirtualScaleSet(location, resource_group.name, self.client, instance_type, slow_scale, scale_sets, kube_nodes))
return groups
_CLASS_PAT = re.compile(r'\w+_(?P<class>[A-Z]+).+')
def _get_azure_class(type_):
m = _CLASS_PAT.match(type_)
return m.group('class')
_SCALE_SET_SIZE_LIMIT = 100
# Appears as an unbounded scale set. Currently, Azure Scale Sets have a limit of 100 hosts.
class AzureVirtualScaleSet(AutoScalingGroup):
provider = 'azure'
def __init__(self, region, resource_group, client: AzureApi, instance_type, slow_scale: bool, scale_sets: List[AzureScaleSet], kube_nodes):
self.client = client
self.instance_type = instance_type
self.tags = {}
self.name = 'virtual_scale_set_' + instance_type + '_' + region + '_' + resource_group
self.scale_sets = dict((scale_set.name, scale_set) for scale_set in scale_sets)
self.desired_capacity = sum(scale_set.capacity for scale_set in scale_sets)
self.region = region
self.resource_group = resource_group
self.selectors = dict(self.tags)
# HACK: for matching node selectors
self.selectors['azure/type'] = self.instance_type
self.selectors['azure/class'] = _get_azure_class(self.instance_type)
self.slow_scale = slow_scale
self.min_size = 0
self.max_size = 10000
self.is_spot = False
self.vm_id_to_instance: MutableMapping[str, Tuple[str, AzureScaleSetInstance]] = {}
self.instances = {}
self.timeout_until = None
self.timeout_reason = None
self._global_priority = None
self.no_schedule_taints = {}
for scale_set in scale_sets:
if scale_set.timeout_until is not None:
if self.timeout_until is None or self.timeout_until < scale_set.timeout_until:
self.timeout_until = scale_set.timeout_until
self.timeout_reason = scale_set.name + ": " + scale_set.timeout_reason
if scale_set.priority is not None:
if self._global_priority is None:
self._global_priority = scale_set.priority
else:
self._global_priority = min(scale_set.priority, self._global_priority)
if not self.no_schedule_taints:
self.no_schedule_taints = scale_set.no_schedule_taints
if scale_set.capacity == 0:
continue
for instance in self.client.list_scale_set_instances(scale_set):
self.vm_id_to_instance[instance.vm_id] = (scale_set.name, instance)
self.instances[instance.vm_id] = AzureInstance(instance.vm_id, self.instance_type, instance.launch_time, self.tags)
self.nodes = [node for node in kube_nodes if node.instance_id in self.vm_id_to_instance]
self.unschedulable_nodes = [n for n in self.nodes if n.unschedulable]
self._id = (self.region, self.name)
def is_timed_out(self):
if self.timeout_until and datetime.now(self.timeout_until.tzinfo) < self.timeout_until:
logger.warn("{} is timed out until {} because {}".format(self._id, self.timeout_until, self.timeout_reason))
return True
return False
@property
def global_priority(self):
if self._global_priority is None:
return super().global_priority
return self._global_priority
def get_azure_instances(self):
return self.instances.values()
@property
def instance_ids(self):
return self.vm_id_to_instance.keys()
def set_desired_capacity(self, new_desired_capacity):
"""
sets the desired capacity of the underlying ASG directly.
note that this is for internal control.
for scaling purposes, please use scale() instead.
"""
scale_out = new_desired_capacity - self.desired_capacity
assert scale_out >= 0
if scale_out == 0:
return CompletedFuture(False)
futures = []
for scale_set in sorted(self.scale_sets.values(), key=lambda x: (x.priority, x.name)):
if scale_set.capacity < _SCALE_SET_SIZE_LIMIT:
if self.slow_scale:
new_group_capacity = scale_set.capacity + 1
else:
new_group_capacity = min(_SCALE_SET_SIZE_LIMIT, scale_set.capacity + scale_out)
scale_out -= (new_group_capacity - scale_set.capacity)
if scale_set.provisioning_state == 'Updating':
logger.warn("Update of {} already in progress".format(scale_set.name))
continue
if scale_set.provisioning_state == 'Failed':
logger.error("{} failed provisioning. Skipping it for scaling.".format(scale_set.name))
continue
# Update our cached version
self.scale_sets[scale_set.name].capacity = new_group_capacity
futures.append(self.client.update_scale_set(scale_set, new_group_capacity))
logger.info("Scaling Azure Scale Set {} to {}".format(scale_set.name, new_group_capacity))
if scale_out == 0:
break
if scale_out > 0:
logger.error("Not enough scale sets to reach desired capacity {} for {}".format(new_desired_capacity, self))
self.desired_capacity = new_desired_capacity - scale_out
logger.info("ASG: {} new_desired_capacity: {}".format(self, new_desired_capacity))
return TransformingFuture(True, AllCompletedFuture(futures))
def terminate_instances(self, vm_ids):
vm_ids = list(vm_ids)
instances = {}
for vm_id in vm_ids:
scale_set_name, instance = self.vm_id_to_instance[vm_id]
# Update our cached copy of the Scale Set
self.scale_sets[scale_set_name].capacity -= 1
instances.setdefault(scale_set_name, []).append(instance)
logger.info('Terminated instances %s', vm_ids)
futures = []
for scale_set_name, scale_set_instances in instances.items():
futures.append(self.client.terminate_scale_set_instances(self.scale_sets[scale_set_name], scale_set_instances))
return AllCompletedFuture(futures)
def scale_nodes_in(self, nodes):
"""
scale down asg by terminating the given node.
returns a future indicating when the request completes.
"""
for node in nodes:
self.nodes.remove(node)
return self.terminate_instances(node.instance_id for node in nodes)
def __str__(self):
return 'AzureVirtualScaleSet({name}, {selectors_hash})'.format(name=self.name, selectors_hash=utils.selectors_to_hash(self.selectors))
def __repr__(self):
return str(self)
class AzureInstance(object):
provider = 'azure'
def __init__(self, instance_id, instance_type, launch_time, tags):
self.id = instance_id
self.instance_type = instance_type
self.launch_time = launch_time
self.tags = tags
def __str__(self):
return 'AzureInstance({}, {})'.format(self.id, self.instance_type)
def __repr__(self):
return str(self) | import http
import logging
from typing import List, Tuple, MutableMapping
from datetime import datetime
import re
from requests.packages.urllib3 import Retry
import autoscaler.utils as utils
from autoscaler.autoscaling_groups import AutoScalingGroup
from autoscaler.azure_api import AzureApi, AzureScaleSet, AzureScaleSetInstance
from autoscaler.utils import TransformingFuture, AllCompletedFuture, CompletedFuture
logger = logging.getLogger(__name__)
_RETRY_TIME_LIMIT = 30
class AzureBoundedRetry(Retry):
"""
XXX: Azure sometimes sends us a Retry-After: 1200, even when we still have quota, causing our client to appear to hang.
Ignore them and just retry after 30secs
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
@staticmethod
def from_retry(retry):
new_retry = AzureBoundedRetry()
new_retry.total = retry.total
new_retry.connect = retry.connect
new_retry.read = retry.read
new_retry.backoff_factor = retry.backoff_factor
new_retry.BACKOFF_MAX = retry.BACKOFF_MAX
new_retry.status_forcelist = retry.status_forcelist
new_retry.method_whitelist = retry.method_whitelist
return new_retry
def get_retry_after(self, response):
retry_after = super().get_retry_after(response)
if response.status != http.HTTPStatus.TOO_MANY_REQUESTS or retry_after <= _RETRY_TIME_LIMIT:
return retry_after
headers = {}
for header in ['Retry-After',
'x-ms-ratelimit-remaining-subscription-reads',
'x-ms-ratelimit-remaining-subscription-writes',
'x-ms-ratelimit-remaining-tenant-reads',
'x-ms-ratelimit-remaining-tenant-writes',
'x-ms-ratelimit-remaining-subscription-resource-requests',
'x-ms-ratelimit-remaining-subscription-resource-entities-read',
'x-ms-ratelimit-remaining-tenant-resource-requests',
'x-ms-ratelimit-remaining-tenant-resource-entities-read']:
value = response.getheader(header)
if value is not None:
headers[header] = value
logger.warn("Azure request throttled: {}".format(headers))
return _RETRY_TIME_LIMIT
class AzureGroups(object):
def __init__(self, resource_groups, slow_scale_classes, client: AzureApi):
self.resource_groups = resource_groups
self.slow_scale_classes = slow_scale_classes
self.client = client
def get_all_groups(self, kube_nodes):
groups = []
if self.client:
for resource_group in self.resource_groups:
scale_sets_by_type = {}
for scale_set in self.client.list_scale_sets(resource_group.name):
scale_sets_by_type.setdefault((scale_set.location, scale_set.instance_type), []).append(scale_set)
for key, scale_sets in scale_sets_by_type.items():
location, instance_type = key
slow_scale = _get_azure_class(instance_type) in self.slow_scale_classes
groups.append(AzureVirtualScaleSet(location, resource_group.name, self.client, instance_type, slow_scale, scale_sets, kube_nodes))
return groups
_CLASS_PAT = re.compile(r'\w+_(?P<class>[A-Z]+).+')
def _get_azure_class(type_):
m = _CLASS_PAT.match(type_)
return m.group('class')
_SCALE_SET_SIZE_LIMIT = 100
# Appears as an unbounded scale set. Currently, Azure Scale Sets have a limit of 100 hosts.
class AzureVirtualScaleSet(AutoScalingGroup):
provider = 'azure'
def __init__(self, region, resource_group, client: AzureApi, instance_type, slow_scale: bool, scale_sets: List[AzureScaleSet], kube_nodes):
self.client = client
self.instance_type = instance_type
self.tags = {}
self.name = 'virtual_scale_set_' + instance_type + '_' + region + '_' + resource_group
self.scale_sets = dict((scale_set.name, scale_set) for scale_set in scale_sets)
self.desired_capacity = sum(scale_set.capacity for scale_set in scale_sets)
self.region = region
self.resource_group = resource_group
self.selectors = dict(self.tags)
# HACK: for matching node selectors
self.selectors['azure/type'] = self.instance_type
self.selectors['azure/class'] = _get_azure_class(self.instance_type)
self.slow_scale = slow_scale
self.min_size = 0
self.max_size = 10000
self.is_spot = False
self.vm_id_to_instance: MutableMapping[str, Tuple[str, AzureScaleSetInstance]] = {}
self.instances = {}
self.timeout_until = None
self.timeout_reason = None
self._global_priority = None
self.no_schedule_taints = {}
for scale_set in scale_sets:
if scale_set.timeout_until is not None:
if self.timeout_until is None or self.timeout_until < scale_set.timeout_until:
self.timeout_until = scale_set.timeout_until
self.timeout_reason = scale_set.name + ": " + scale_set.timeout_reason
if scale_set.priority is not None:
if self._global_priority is None:
self._global_priority = scale_set.priority
else:
self._global_priority = min(scale_set.priority, self._global_priority)
if not self.no_schedule_taints:
self.no_schedule_taints = scale_set.no_schedule_taints
if scale_set.capacity == 0:
continue
for instance in self.client.list_scale_set_instances(scale_set):
self.vm_id_to_instance[instance.vm_id] = (scale_set.name, instance)
self.instances[instance.vm_id] = AzureInstance(instance.vm_id, self.instance_type, instance.launch_time, self.tags)
self.nodes = [node for node in kube_nodes if node.instance_id in self.vm_id_to_instance]
self.unschedulable_nodes = [n for n in self.nodes if n.unschedulable]
self._id = (self.region, self.name)
def is_timed_out(self):
if self.timeout_until and datetime.now(self.timeout_until.tzinfo) < self.timeout_until:
logger.warn("{} is timed out until {} because {}".format(self._id, self.timeout_until, self.timeout_reason))
return True
return False
@property
def global_priority(self):
if self._global_priority is None:
return super().global_priority
return self._global_priority
def get_azure_instances(self):
return self.instances.values()
@property
def instance_ids(self):
return self.vm_id_to_instance.keys()
def set_desired_capacity(self, new_desired_capacity):
"""
sets the desired capacity of the underlying ASG directly.
note that this is for internal control.
for scaling purposes, please use scale() instead.
"""
scale_out = new_desired_capacity - self.desired_capacity
assert scale_out >= 0
if scale_out == 0:
return CompletedFuture(False)
futures = []
for scale_set in sorted(self.scale_sets.values(), key=lambda x: (x.priority, x.name)):
if scale_set.capacity < _SCALE_SET_SIZE_LIMIT:
if self.slow_scale:
new_group_capacity = scale_set.capacity + 1
else:
new_group_capacity = min(_SCALE_SET_SIZE_LIMIT, scale_set.capacity + scale_out)
scale_out -= (new_group_capacity - scale_set.capacity)
if scale_set.provisioning_state == 'Updating':
logger.warn("Update of {} already in progress".format(scale_set.name))
continue
if scale_set.provisioning_state == 'Failed':
logger.error("{} failed provisioning. Skipping it for scaling.".format(scale_set.name))
continue
# Update our cached version
self.scale_sets[scale_set.name].capacity = new_group_capacity
futures.append(self.client.update_scale_set(scale_set, new_group_capacity))
logger.info("Scaling Azure Scale Set {} to {}".format(scale_set.name, new_group_capacity))
if scale_out == 0:
break
if scale_out > 0:
logger.error("Not enough scale sets to reach desired capacity {} for {}".format(new_desired_capacity, self))
self.desired_capacity = new_desired_capacity - scale_out
logger.info("ASG: {} new_desired_capacity: {}".format(self, new_desired_capacity))
return TransformingFuture(True, AllCompletedFuture(futures))
def terminate_instances(self, vm_ids):
vm_ids = list(vm_ids)
instances = {}
for vm_id in vm_ids:
scale_set_name, instance = self.vm_id_to_instance[vm_id]
# Update our cached copy of the Scale Set
self.scale_sets[scale_set_name].capacity -= 1
instances.setdefault(scale_set_name, []).append(instance)
logger.info('Terminated instances %s', vm_ids)
futures = []
for scale_set_name, scale_set_instances in instances.items():
futures.append(self.client.terminate_scale_set_instances(self.scale_sets[scale_set_name], scale_set_instances))
return AllCompletedFuture(futures)
def scale_nodes_in(self, nodes):
"""
scale down asg by terminating the given node.
returns a future indicating when the request completes.
"""
for node in nodes:
self.nodes.remove(node)
return self.terminate_instances(node.instance_id for node in nodes)
def __str__(self):
return 'AzureVirtualScaleSet({name}, {selectors_hash})'.format(name=self.name, selectors_hash=utils.selectors_to_hash(self.selectors))
def __repr__(self):
return str(self)
class AzureInstance(object):
provider = 'azure'
def __init__(self, instance_id, instance_type, launch_time, tags):
self.id = instance_id
self.instance_type = instance_type
self.launch_time = launch_time
self.tags = tags
def __str__(self):
return 'AzureInstance({}, {})'.format(self.id, self.instance_type)
def __repr__(self):
return str(self) | en | 0.86913 | XXX: Azure sometimes sends us a Retry-After: 1200, even when we still have quota, causing our client to appear to hang. Ignore them and just retry after 30secs # Appears as an unbounded scale set. Currently, Azure Scale Sets have a limit of 100 hosts. # HACK: for matching node selectors sets the desired capacity of the underlying ASG directly. note that this is for internal control. for scaling purposes, please use scale() instead. # Update our cached version # Update our cached copy of the Scale Set scale down asg by terminating the given node. returns a future indicating when the request completes. | 2.186733 | 2 |
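A sketch of mounting the AzureBoundedRetry defined above onto a plain requests session, so that Azure's oversized Retry-After headers on 429 responses are capped at 30 seconds; the base retry settings here are illustrative only.

import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3 import Retry

from autoscaler.azure import AzureBoundedRetry  # module path taken from this record

base = Retry(total=5, backoff_factor=0.5, status_forcelist=[429, 500, 503])
retry = AzureBoundedRetry.from_retry(base)

session = requests.Session()
session.mount('https://', HTTPAdapter(max_retries=retry))
# e.g. session.get('https://management.azure.com/...') now waits at most 30s between retries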
sort_insertion.py | rachitmishra/45 | 0 | 9711 | """
Insertion Sort
Approach: Loop
Complexity: O(n^2)
"""
def sort_insertion(input_arr):
print("""""""""""""""""""""""""")
print("input " + str(input_arr))
print("""""""""""""""""""""""""")
ln = len(input_arr)
i = 1 # Assuming first element is sorted
while i < ln: # n times
c = input_arr[i]
p = i
while p > 0 and input_arr[p - 1] > c: # n times
input_arr[p] = input_arr[p - 1]
p -= 1
input_arr[p] = c
i += 1
print("pass " + str(i) + " " + str(input_arr))
print("""""""""""""""""""""""""")
print("result " + str(input_arr))
print("""""""""""""""""""""""""")
if __name__ == '__main__':
arr = [21, 4, 1, 3, 9, 20, 25, 6, 21, 14]
sort_insertion(arr)
| """
Insertion Sort
Approach: Loop
Complexity: O(n2)
"""
def sort_insertion(input_arr):
print("""""""""""""""""""""""""")
print("input " + str(input_arr))
print("""""""""""""""""""""""""")
ln = len(input_arr)
i = 1 # Assuming first element is sorted
while i < ln: # n times
c = input_arr[i]
p = i
while p > 0 and input_arr[p - 1] > c: # n times
input_arr[p] = input_arr[p - 1]
p -= 1
input_arr[p] = c
i += 1
print("pass " + str(i) + " " + str(input_arr))
print("""""""""""""""""""""""""")
print("result " + str(input_arr))
print("""""""""""""""""""""""""")
if __name__ == '__main__':
arr = [21, 4, 1, 3, 9, 20, 25, 6, 21, 14]
sort_insertion(arr)
| en | 0.709097 | Insertion Sort Approach: Loop Complexity: O(n2) # Assuming first element is sorted # n times # n times | 4.170901 | 4 |
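A quick correctness check for the sort above against Python's built-in sorted(); it assumes sort_insertion is importable (or already defined) and relies on the fact that it sorts the list in place while printing each pass.

import random

data = random.sample(range(100), 10)
expected = sorted(data)
sort_insertion(data)                     # mutates data in place
assert data == expected, (data, expected)
print("insertion sort agrees with sorted():", data)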
Python2/tareas/tarea_7.py | eveiramirez/python_class | 0 | 9712 | <reponame>eveiramirez/python_class<filename>Python2/tareas/tarea_7.py
"""
NAME
tarea_7.py
VERSION
[1.0]
AUTHOR
<NAME>
CONTACT
<EMAIL>
GITHUB
https://github.com/eveiramirez/python_class/blob/master/Python2/tareas/tarea_7.py
DESCRIPTION
Este programa contiene arrays estructurados para los arrays
creados en el ejercicio 1, los cuales son:
Produccion
Costos
Costos por g/L
CATEGORY
Numpy
"""
import numpy as np
# Crear array con la produccion de cada gen para cada temperatura
production = np.array([("Gen1", 5, 3), ("Gen2", 11, 7),
("Gen3", 4, 9), ("Gen4", 2, 6)],
dtype=[("name", (np.str_, 10)),
("production_cond1", np.int32),
("production_cond2", np.int32)])
# Crear array con los costos de induccion
costs = np.array([("Gen1", 3.5), ("Gen2", 5), ("Gen3", 7),
("Gen4", 4.3)], dtype=[("name", (np.str_, 10)),
("cost", np.float64)])
# Crear array con los costos por g/L para condicion 1
pc_cond1 = production["production_cond1"]/costs["cost"]
# Crear array con los costos por g/L para temperatura 2
pc_cond2 = production["production_cond2"]/costs["cost"]
# Crear lista con los costos por g/L para cada gene guardados en una
# tupla
gene_list = []
for gene in range(0, 4):
gene_list.append((f"Gen{gene+1}", pc_cond1[gene], pc_cond2[gene]))
# Crear array con los costos por g/L
prod_costs = np.array(gene_list, dtype=[("name", (np.str_, 10)),
("pc_cond1", np.float64),
("pc_cond2", np.float64)])
# Imprimir array de los costos por g/L
print(prod_costs)
| """
NAME
tarea_7.py
VERSION
[1.0]
AUTHOR
<NAME>
CONTACT
<EMAIL>
GITHUB
https://github.com/eveiramirez/python_class/blob/master/Python2/tareas/tarea_7.py
DESCRIPTION
Este programa contiene arrays estructurados para los arrays
creados en el ejercicio 1, los cuales son:
Produccion
Costos
Costos por g/L
CATEGORY
Numpy
"""
import numpy as np
# Crear array con la produccion de cada gen para cada temperatura
production = np.array([("Gen1", 5, 3), ("Gen2", 11, 7),
("Gen3", 4, 9), ("Gen4", 2, 6)],
dtype=[("name", (np.str_, 10)),
("production_cond1", np.int32),
("production_cond2", np.int32)])
# Crear array con los costos de induccion
costs = np.array([("Gen1", 3.5), ("Gen2", 5), ("Gen3", 7),
("Gen4", 4.3)], dtype=[("name", (np.str_, 10)),
("cost", np.float64)])
# Crear array con los costos por g/L para condicion 1
pc_cond1 = production["production_cond1"]/costs["cost"]
# Crear array con los costos por g/L para temperatura 2
pc_cond2 = production["production_cond2"]/costs["cost"]
# Crear lista con los costos por g/L para cada gene guardados en una
# tupla
gene_list = []
for gene in range(0, 4):
gene_list.append((f"Gen{gene+1}", pc_cond1[gene], pc_cond2[gene]))
# Crear array con los costos por g/L
prod_costs = np.array(gene_list, dtype=[("name", (np.str_, 10)),
("pc_cond1", np.float64),
("pc_cond2", np.float64)])
# Imprimir array de los costos por g/L
print(prod_costs) | es | 0.833208 | NAME tarea_7.py VERSION [1.0] AUTHOR <NAME> CONTACT <EMAIL> GITHUB https://github.com/eveiramirez/python_class/blob/master/Python2/tareas/tarea_7.py DESCRIPTION Este programa contiene arrays estructurados para los arrays creados en el ejercicio 1, los cuales son: Produccion Costos Costos por g/L CATEGORY Numpy # Crear array con la produccion de cada gen para cada temperatura # Crear array con los costos de induccion # Crear array con los costos por g/L para condicion 1 # Crear array con los costos por g/L para temperatura 2 # Crear lista con los costos por g/L para cada gene guardados en una # tupla # Crear array con los costos por g/L # Imprimir array de los costos por g/L | 2.922862 | 3 |
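A short follow-up sketch showing how the structured array built above can be queried by field name; it assumes the script has just run so that prod_costs is in scope, and uses only standard NumPy structured-array indexing.

print(prod_costs["name"])                    # gene labels
print(prod_costs["pc_cond1"].mean())         # mean production per unit cost, condition 1

# Genes whose production per unit cost improves under condition 2.
better = prod_costs[prod_costs["pc_cond2"] > prod_costs["pc_cond1"]]
print(better["name"])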
iguanas/pipeline/_base_pipeline.py | paypal/Iguanas | 20 | 9713 | <gh_stars>10-100
"""
Base pipeline class. Main rule generator classes inherit from this one.
"""
from copy import deepcopy
from typing import List, Tuple, Union, Dict
from iguanas.pipeline.class_accessor import ClassAccessor
from iguanas.utils.typing import PandasDataFrameType, PandasSeriesType
import iguanas.utils.utils as utils
from iguanas.exceptions import DataFrameSizeError
class _BasePipeline:
"""
Base pipeline class. Main pipeline classes inherit from this one.
Parameters
----------
steps : List[Tuple[str, object]]
The steps to be applied as part of the pipeline.
verbose : int, optional
Controls the verbosity - the higher, the more messages. >0 : gives
the overall progress of the training of the pipeline; >1 : shows the
current step being trained.
Attributes
----------
steps_ : List[Tuple[str, object]]
The steps corresponding to the fitted pipeline.
rules : Rules
The Rules object containing the rules produced from fitting the
pipeline.
"""
def __init__(self,
steps: List[Tuple[str, object]],
verbose: int) -> None:
self.steps = steps
self.verbose = verbose
self.steps_ = None
self.rules = None
def get_params(self) -> dict:
"""
Returns the parameters of each step in the pipeline.
Returns
-------
dict
The parameters of each step in the pipeline.
"""
pipeline_params = {}
steps_ = self.steps if self.steps_ is None else self.steps_
for step_tag, step in steps_:
step_param_dict = deepcopy(step.__dict__)
pipeline_params[step_tag] = step_param_dict
# If step inherits from _BasePipeline, call its get_params to get
# the parameters each class in the pipeline
if issubclass(step.__class__, _BasePipeline):
step_param_dict = step.get_params()
pipeline_params.update(step_param_dict)
return pipeline_params
def _update_kwargs(self,
params: dict) -> None:
"""
Updates the given parameters of the given steps in the pipeline.
Parameters
----------
params : dict
A dictionary where each key corresponds to the tag used for the
pipeline step. Each value should be a dictionary of the parameters
(keys) and their new values (values).
"""
for step_tag, step in self.steps:
# If step inherits from _BasePipeline, call its _update_kwargs
if issubclass(step.__class__, _BasePipeline):
step._update_kwargs(params)
if step_tag in params.keys():
# If a parameter in `params` is not in the keyword arguments
# of the class (excl when kwargs is present), raise exception
for param in params[step_tag].keys():
if param not in step.__dict__.keys() and 'kwargs' not in step.__dict__.keys():
raise ValueError(
f'Parameter `{param}` not found in keyword arguments for class in step `{step_tag}`'
)
step.__dict__.update(params[step_tag])
def _pipeline_fit(self,
step_tag: str,
step: object,
X: Union[PandasDataFrameType, dict],
y: Union[PandasSeriesType, dict],
sample_weight: Union[PandasSeriesType, dict]) -> None:
"""
Runs the following before applying the `fit` method of `step`:
1. Checks the parameters of `step` for `ClassAccessor` objects. If a
`ClassAccessor` object is found, the parameter in `step` is updated
with the class attribute denoted by the `ClassAccessor` object.
2. Checks if `X`, `y` or `sample_weight` are dictionaries. If so,
then the dataset aligned to `step_tag` is extracted.
Parameters
----------
step_tag : str
The tag corresponding to the step.
step : object
The step in the pipeline.
X : Union[PandasDataFrameType, dict]
The dataset or dictionary of datasets for each pipeline step.
y : Union[PandasSeriesType, dict]
The binary target column or dictionary of binary target columns
for each pipeline step.
sample_weight : Union[PandasSeriesType, dict], optional
Row-wise weights or dictionary of row-wise weights for each
pipeline step. Defaults to None.
"""
step = self._check_accessor(step)
X, y, sample_weight = [
utils.return_dataset_if_dict(
step_tag=step_tag, df=df
) for df in (X, y, sample_weight)
]
step.fit(X, y, sample_weight)
def _pipeline_transform(self,
step_tag: str,
step: object,
X: Union[PandasDataFrameType, dict]) -> PandasDataFrameType:
"""
Runs the following before applying the `transform` method of `step`:
1. Checks the parameters of `step` for `ClassAccessor` objects. If a
`ClassAccessor` object is found, the parameter in `step` is updated
with the class attribute denoted by the `ClassAccessor` object.
2. Checks if `X`, `y` or `sample_weight` are dictionaries. If so,
then the dataset aligned to `step_tag` is extracted.
Parameters
----------
step_tag : str
The tag corresponding to the step.
step : object
The step in the pipeline.
X : Union[PandasDataFrameType, dict]
The dataset or dictionary of datasets for each pipeline step.
Returns
-------
PandasDataFrameType
The transformed dataset.
"""
step = self._check_accessor(step)
X = utils.return_dataset_if_dict(step_tag=step_tag, df=X)
X = step.transform(X)
self._exception_if_no_cols_in_X(X, step_tag)
return X
def _pipeline_predict(self,
step: object,
X: Union[PandasDataFrameType, dict]) -> PandasSeriesType:
"""
Runs the following before applying the `predict` method of `step`:
1. Checks the parameters of `step` for `ClassAccessor` objects. If a
`ClassAccessor` object is found, the parameter in `step` is updated
with the class attribute denoted by the `ClassAccessor` object.
Parameters
----------
step : object
The step in the pipeline.
X : Union[PandasDataFrameType, dict]
The dataset or dictionary of datasets for each pipeline step.
Returns
-------
PandasSeriesType
The prediction of the final step.
"""
step = self._check_accessor(step)
return step.predict(X)
def _pipeline_fit_transform(self,
step_tag: str,
step: object,
X: Union[PandasDataFrameType, dict],
y: Union[PandasSeriesType, dict],
sample_weight: Union[PandasSeriesType, dict]) -> PandasDataFrameType:
"""
Runs the following before applying the `fit_transform` method of `step`:
1. Checks the parameters of `step` for `ClassAccessor` objects. If a
`ClassAccessor` object is found, the parameter in `step` is updated
with the class attribute denoted by the `ClassAccessor` object.
2. Checks if `X`, `y` or `sample_weight` are dictionaries. If so,
then the dataset aligned to `step_tag` is extracted.
Parameters
----------
step_tag : str
The tag corresponding to the step.
step : object
The step in the pipeline.
X : Union[PandasDataFrameType, dict]
The dataset or dictionary of datasets for each pipeline step.
y : Union[PandasSeriesType, dict]
The binary target column or dictionary of binary target columns
for each pipeline step.
sample_weight : Union[PandasSeriesType, dict], optional
Row-wise weights or dictionary of row-wise weights for each
pipeline step. Defaults to None.
Returns
-------
PandasDataFrameType
The transformed dataset.
"""
step = self._check_accessor(step)
X, y, sample_weight = [
utils.return_dataset_if_dict(
step_tag=step_tag, df=df
) for df in (X, y, sample_weight)
]
X = step.fit_transform(X, y, sample_weight)
self._exception_if_no_cols_in_X(X, step_tag)
return X
def _check_accessor(self,
step: object) -> object:
"""
Checks whether any of the parameters in the given `step` is of type
ClassAccessor. If so, then it runs the ClassAccessor's `get` method,
which extracts the given attribute from the given step in the pipeline,
and injects it into the parameter.
"""
def _check_accessor_iterable(iterable: Union[list, tuple],
pipeline_params: Dict[str, dict]) -> None:
"""
Iterates through an iterable - if the element is another iterable,
_check_accessor_iterable is called again. If the element is a
ClassAccessor, its `get` method is called (which extracts the given
attribute from the given step in the pipeline) - this attribute is
then assigned in place of the original element.
"""
for idx, value in enumerate(iterable):
if isinstance(value, (list, tuple)):
_check_accessor_iterable(value, pipeline_params)
elif isinstance(value, ClassAccessor):
try:
iterable[idx] = value.get(pipeline_params)
except TypeError:
raise TypeError(
'`ClassAccessor` object must be within a mutable iterable.'
)
step_param_dict = step.__dict__
for param, value in step_param_dict.items():
# If parameter value is an instantiated class, but not a
# ClassAccessor, call _check_accessor again
if hasattr(value, '__dict__') and value.__dict__ and not isinstance(value, ClassAccessor):
self._check_accessor(value)
# If parameter value is a list or tuple, call
# _check_accessor_iterable
elif isinstance(value, (list, tuple)):
pipeline_params = self.get_params()
_check_accessor_iterable(value, pipeline_params)
# If the parameter value is a ClassAccessor, call its get method
elif isinstance(value, ClassAccessor):
pipeline_params = self.get_params()
step.__dict__[param] = value.get(pipeline_params)
return step
@staticmethod
def _exception_if_no_cols_in_X(X: PandasDataFrameType,
step_tag: str) -> Union[None, DataFrameSizeError]:
"""Raises an exception if `X` has no columns."""
if X.shape[1] == 0:
raise DataFrameSizeError(
f'`X` has been reduced to zero columns after the `{step_tag}` step in the pipeline.'
)
| """
Base pipeline class. Main rule generator classes inherit from this one.
"""
from copy import deepcopy
from typing import List, Tuple, Union, Dict
from iguanas.pipeline.class_accessor import ClassAccessor
from iguanas.utils.typing import PandasDataFrameType, PandasSeriesType
import iguanas.utils.utils as utils
from iguanas.exceptions import DataFrameSizeError
class _BasePipeline:
"""
Base pipeline class. Main pipeline classes inherit from this one.
Parameters
----------
steps : List[Tuple[str, object]]
The steps to be applied as part of the pipeline.
verbose : int, optional
Controls the verbosity - the higher, the more messages. >0 : gives
the overall progress of the training of the pipeline; >1 : shows the
current step being trained.
Attributes
----------
steps_ : List[Tuple[str, object]]
The steps corresponding to the fitted pipeline.
rules : Rules
The Rules object containing the rules produced from fitting the
pipeline.
"""
def __init__(self,
steps: List[Tuple[str, object]],
verbose: int) -> None:
self.steps = steps
self.verbose = verbose
self.steps_ = None
self.rules = None
def get_params(self) -> dict:
"""
Returns the parameters of each step in the pipeline.
Returns
-------
dict
The parameters of each step in the pipeline.
"""
pipeline_params = {}
steps_ = self.steps if self.steps_ is None else self.steps_
for step_tag, step in steps_:
step_param_dict = deepcopy(step.__dict__)
pipeline_params[step_tag] = step_param_dict
# If step inherits from _BasePipeline, call its get_params to get
# the parameters each class in the pipeline
if issubclass(step.__class__, _BasePipeline):
step_param_dict = step.get_params()
pipeline_params.update(step_param_dict)
return pipeline_params
def _update_kwargs(self,
params: dict) -> None:
"""
Updates the given parameters of the given steps in the pipeline.
Parameters
----------
params : dict
A dictionary where each key corresponds to the tag used for the
pipeline step. Each value should be a dictionary of the parameters
(keys) and their new values (values).
"""
for step_tag, step in self.steps:
# If step inherits from _BasePipeline, call its _update_kwargs
if issubclass(step.__class__, _BasePipeline):
step._update_kwargs(params)
if step_tag in params.keys():
# If a parameter in `params` is not in the keyword arguments
# of the class (excl when kwargs is present), raise exception
for param in params[step_tag].keys():
if param not in step.__dict__.keys() and 'kwargs' not in step.__dict__.keys():
raise ValueError(
f'Parameter `{param}` not found in keyword arguments for class in step `{step_tag}`'
)
step.__dict__.update(params[step_tag])
def _pipeline_fit(self,
step_tag: str,
step: object,
X: Union[PandasDataFrameType, dict],
y: Union[PandasSeriesType, dict],
sample_weight: Union[PandasSeriesType, dict]) -> None:
"""
Runs the following before applying the `fit` method of `step`:
1. Checks the parameters of `step` for `ClassAccessor` objects. If a
`ClassAccessor` object is found, the parameter in `step` is updated
with the class attribute denoted by the `ClassAccessor` object.
2. Checks if `X`, `y` or `sample_weight` are dictionaries. If so,
then the dataset aligned to `step_tag` is extracted.
Parameters
----------
step_tag : str
The tag corresponding to the step.
step : object
The step in the pipeline.
X : Union[PandasDataFrameType, dict]
The dataset or dictionary of datasets for each pipeline step.
y : Union[PandasSeriesType, dict]
The binary target column or dictionary of binary target columns
for each pipeline step.
sample_weight : Union[PandasSeriesType, dict], optional
Row-wise weights or dictionary of row-wise weights for each
pipeline step. Defaults to None.
"""
step = self._check_accessor(step)
X, y, sample_weight = [
utils.return_dataset_if_dict(
step_tag=step_tag, df=df
) for df in (X, y, sample_weight)
]
step.fit(X, y, sample_weight)
def _pipeline_transform(self,
step_tag: str,
step: object,
X: Union[PandasDataFrameType, dict]) -> PandasDataFrameType:
"""
Runs the following before applying the `transform` method of `step`:
1. Checks the parameters of `step` for `ClassAccessor` objects. If a
`ClassAccessor` object is found, the parameter in `step` is updated
with the class attribute denoted by the `ClassAccessor` object.
2. Checks if `X`, `y` or `sample_weight` are dictionaries. If so,
then the dataset aligned to `step_tag` is extracted.
Parameters
----------
step_tag : str
The tag corresponding to the step.
step : object
The step in the pipeline.
X : Union[PandasDataFrameType, dict]
The dataset or dictionary of datasets for each pipeline step.
Returns
-------
PandasDataFrameType
The transformed dataset.
"""
step = self._check_accessor(step)
X = utils.return_dataset_if_dict(step_tag=step_tag, df=X)
X = step.transform(X)
self._exception_if_no_cols_in_X(X, step_tag)
return X
def _pipeline_predict(self,
step: object,
X: Union[PandasDataFrameType, dict]) -> PandasSeriesType:
"""
Runs the following before applying the `predict` method of `step`:
1. Checks the parameters of `step` for `ClassAccessor` objects. If a
`ClassAccessor` object is found, the parameter in `step` is updated
with the class attribute denoted by the `ClassAccessor` object.
Parameters
----------
step : object
The step in the pipeline.
X : Union[PandasDataFrameType, dict]
The dataset or dictionary of datasets for each pipeline step.
Returns
-------
PandasSeriesType
The prediction of the final step.
"""
step = self._check_accessor(step)
return step.predict(X)
def _pipeline_fit_transform(self,
step_tag: str,
step: object,
X: Union[PandasDataFrameType, dict],
y: Union[PandasSeriesType, dict],
sample_weight: Union[PandasSeriesType, dict]) -> PandasDataFrameType:
"""
Runs the following before applying the `fit_transform` method of `step`:
1. Checks the parameters of `step` for `ClassAccessor` objects. If a
`ClassAccessor` object is found, the parameter in `step` is updated
with the class attribute denoted by the `ClassAccessor` object.
2. Checks if `X`, `y` or `sample_weight` are dictionaries. If so,
then the dataset aligned to `step_tag` is extracted.
Parameters
----------
step_tag : str
The tag corresponding to the step.
step : object
The step in the pipeline.
X : Union[PandasDataFrameType, dict]
The dataset or dictionary of datasets for each pipeline step.
y : Union[PandasSeriesType, dict]
The binary target column or dictionary of binary target columns
for each pipeline step.
sample_weight : Union[PandasSeriesType, dict], optional
Row-wise weights or dictionary of row-wise weights for each
pipeline step. Defaults to None.
Returns
-------
PandasDataFrameType
The transformed dataset.
"""
step = self._check_accessor(step)
X, y, sample_weight = [
utils.return_dataset_if_dict(
step_tag=step_tag, df=df
) for df in (X, y, sample_weight)
]
X = step.fit_transform(X, y, sample_weight)
self._exception_if_no_cols_in_X(X, step_tag)
return X
def _check_accessor(self,
step: object) -> object:
"""
Checks whether any of the parameters in the given `step` is of type
ClassAccessor. If so, then it runs the ClassAccessor's `get` method,
which extracts the given attribute from the given step in the pipeline,
and injects it into the parameter.
"""
def _check_accessor_iterable(iterable: Union[list, tuple],
pipeline_params: Dict[str, dict]) -> None:
"""
Iterates through an iterable - if the element is another iterable,
_check_accessor_iterable is called again. If the element is a
ClassAccessor, its `get` method is called (which extracts the given
attribute from the given step in the pipeline) - this attribute is
then assigned in place of the original element.
"""
for idx, value in enumerate(iterable):
if isinstance(value, (list, tuple)):
_check_accessor_iterable(value, pipeline_params)
elif isinstance(value, ClassAccessor):
try:
iterable[idx] = value.get(pipeline_params)
except TypeError:
raise TypeError(
'`ClassAccessor` object must be within a mutable iterable.'
)
step_param_dict = step.__dict__
for param, value in step_param_dict.items():
# If parameter value is an instantiated class, but not a
# ClassAccessor, call _check_accessor again
if hasattr(value, '__dict__') and value.__dict__ and not isinstance(value, ClassAccessor):
self._check_accessor(value)
# If parameter value is a list or tuple, call
# _check_accessor_iterable
elif isinstance(value, (list, tuple)):
pipeline_params = self.get_params()
_check_accessor_iterable(value, pipeline_params)
# If the parameter value is a ClassAccessor, call its get method
elif isinstance(value, ClassAccessor):
pipeline_params = self.get_params()
step.__dict__[param] = value.get(pipeline_params)
return step
@staticmethod
def _exception_if_no_cols_in_X(X: PandasDataFrameType,
step_tag: str) -> Union[None, DataFrameSizeError]:
"""Raises an exception if `X` has no columns."""
if X.shape[1] == 0:
raise DataFrameSizeError(
f'`X` has been reduced to zero columns after the `{step_tag}` step in the pipeline.'
) | en | 0.617714 | Base pipeline class. Main rule generator classes inherit from this one. Base pipeline class. Main pipeline classes inherit from this one. Parameters ---------- steps : List[Tuple[str, object]] The steps to be applied as part of the pipeline. verbose : int, optional Controls the verbosity - the higher, the more messages. >0 : gives the overall progress of the training of the pipeline; >1 : shows the current step being trained. Attributes ---------- steps_ : List[Tuple[str, object]] The steps corresponding to the fitted pipeline. rules : Rules The Rules object containing the rules produced from fitting the pipeline. Returns the parameters of each step in the pipeline. Returns ------- dict The parameters of each step in the pipeline. # If step inherits from _BasePipeline, call its get_params to get # the parameters each class in the pipeline Updates the given parameters of the given steps in the pipeline. Parameters ---------- params : dict A dictionary where each key corresponds to the tag used for the pipeline step. Each value should be a dictionary of the parameters (keys) and their new values (values). # If step inherits from _BasePipeline, call its _update_kwargs # If a parameter in `params` is not in the keyword arguments # of the class (excl when kwargs is present), raise exception Runs the following before applying the `fit` method of `step`: 1. Checks the parameters of `step` for `ClassAccessor` objects. If a `ClassAccessor` object is found, the parameter in `step` is updated with the class attribute denoted by the `ClassAccessor` object. 2. Checks if `X`, `y` or `sample_weight` are dictionaries. If so, then the dataset aligned to `step_tag` is extracted. Parameters ---------- step_tag : str The tag corresponding to the step. step : object The step in the pipeline. X : Union[PandasDataFrameType, dict] The dataset or dictionary of datasets for each pipeline step. y : Union[PandasSeriesType, dict] The binary target column or dictionary of binary target columns for each pipeline step. sample_weight : Union[PandasSeriesType, dict], optional Row-wise weights or dictionary of row-wise weights for each pipeline step. Defaults to None. Runs the following before applying the `transform` method of `step`: 1. Checks the parameters of `step` for `ClassAccessor` objects. If a `ClassAccessor` object is found, the parameter in `step` is updated with the class attribute denoted by the `ClassAccessor` object. 2. Checks if `X`, `y` or `sample_weight` are dictionaries. If so, then the dataset aligned to `step_tag` is extracted. Parameters ---------- step_tag : str The tag corresponding to the step. step : object The step in the pipeline. X : Union[PandasDataFrameType, dict] The dataset or dictionary of datasets for each pipeline step. Returns ------- PandasDataFrameType The transformed dataset. Runs the following before applying the `predict` method of `step`: 1. Checks the parameters of `step` for `ClassAccessor` objects. If a `ClassAccessor` object is found, the parameter in `step` is updated with the class attribute denoted by the `ClassAccessor` object. Parameters ---------- step : object The step in the pipeline. X : Union[PandasDataFrameType, dict] The dataset or dictionary of datasets for each pipeline step. Returns ------- PandasSeriesType The prediction of the final step. Runs the following before applying the `fit_transform` method of `step`: 1. Checks the parameters of `step` for `ClassAccessor` objects. 
If a `ClassAccessor` object is found, the parameter in `step` is updated with the class attribute denoted by the `ClassAccessor` object. 2. Checks if `X`, `y` or `sample_weight` are dictionaries. If so, then the dataset aligned to `step_tag` is extracted. Parameters ---------- step_tag : str The tag corresponding to the step. step : object The step in the pipeline. X : Union[PandasDataFrameType, dict] The dataset or dictionary of datasets for each pipeline step. y : Union[PandasSeriesType, dict] The binary target column or dictionary of binary target columns for each pipeline step. sample_weight : Union[PandasSeriesType, dict], optional Row-wise weights or dictionary of row-wise weights for each pipeline step. Defaults to None. Returns ------- PandasDataFrameType The transformed dataset. Checks whether the any of the parameters in the given `step` is of type ClassAccessor. If so, then it runs the ClassAccessor's `get` method, which extracts the given attribute from the given step in the pipeline, and injects it into the parameter. Iterates through an iterable - if the element is another iterable, _check_accessor_iterable is called again. If the the element is a CheckAccessor, its `get` method is called (which extracts the given attribute from the given step in the pipeline) - this attribute is then assigned in place of the original element. # If parameter value is an instantiated class, but not a # ClassAccessor, call _check_accessor again # If parameter value is a list or tuple, call # _check_accessor_iterable # If the parameter value is a ClassAccessor, call its get method Raises an exception if `X` has no columns. | 2.562291 | 3 |
test_activity_merger.py | AlexanderMakarov/activitywatch-ets | 0 | 9714 | import unittest
import datetime
from parameterized import parameterized
from activity_merger import Interval
from aw_core.models import Event
from typing import List, Tuple
def _build_datetime(seed: int) -> datetime.datetime:
return datetime.datetime(2000, 1, seed, seed, 0, 0).astimezone(datetime.timezone.utc)
def _build_timedelta(seed: int) -> datetime.timedelta:
return _build_datetime(seed + 1) - _build_datetime(1)
def build_intervals_linked_list(data: List[Tuple[int, bool, int]]) -> Interval:
"""
Builds intervals linked list from the list of tuples. Doesn't check parameters.
:param data: List of tuples (day of start, flag to return `Interval` from the function, duration).
:return: Chosen interval.
"""
result = None
previous = None
for (seed, is_target, duration) in data:
if not previous:
previous = Interval(_build_datetime(seed), _build_datetime(seed + duration))
else:
tmp = Interval(_build_datetime(seed), _build_datetime(seed + duration), previous)
previous.next = tmp
previous = tmp
if is_target:
            assert result is None, f"Wrong parameters - '{seed}' interval is marked as the target but a target was already set."
result = previous
return result
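# Hedged illustration (not an original helper of this test module): a three-interval list
# where the middle tuple is flagged, so that middle Interval is returned; the surrounding
# intervals are chained to it through the `next` pointers assigned above.
def _example_target_interval() -> Interval:
    return build_intervals_linked_list([
        (1, False, 1),
        (3, True, 1),   # flagged as the target -> returned
        (5, False, 1),
    ])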
class TestInterval(unittest.TestCase):
@parameterized.expand([
(
"Simple the only interval",
build_intervals_linked_list([
(1, True, 1)
]),
1
),
(
"The same interval",
build_intervals_linked_list([
(1, False, 1),
(5, True, 1),
(6, False, 1)
]),
5
),
(
"Exact Interval right before",
build_intervals_linked_list([
(5, False, 1),
(6, True, 1),
(7, False, 1)
]),
5
),
(
"Exact Interval right after",
build_intervals_linked_list([
(3, False, 1),
(4, True, 1),
(5, False, 1)
]),
5
),
(
"Exact Interval far after",
build_intervals_linked_list([
(3, True, 1),
(4, False, 1),
(5, False, 1),
(6, False, 1),
]),
5
),
(
"Exact Interval far before",
build_intervals_linked_list([
(4, False, 1),
(5, False, 1),
(6, False, 1),
(7, True, 1),
]),
5
),
])
def test_find_closest_by_start(self, test_name, interval, expected_start_seed):
target = _build_datetime(5)
actual: Interval = interval.find_closest(target, datetime.timedelta(0), False)
expected = _build_datetime(expected_start_seed)
self.assertEqual(actual.start_time, expected, f"'{test_name}' case failed.")
@parameterized.expand([
(
"Simple the only interval",
build_intervals_linked_list([
(1, True, 1)
]),
1
),
(
"The same interval",
build_intervals_linked_list([
(1, False, 1),
(4, True, 1),
(6, False, 1),
]),
4
),
(
"Exact Interval right before",
build_intervals_linked_list([
(4, False, 1),
(6, True, 1),
(7, False, 1),
]),
4
),
(
"Exact Interval right after",
build_intervals_linked_list([
(1, False, 1),
(2, True, 1),
(4, False, 1),
]),
4
),
(
"Exact Interval far after",
build_intervals_linked_list([
(2, True, 1),
(3, False, 1),
(4, False, 1),
(5, False, 1),
]),
4
),
(
"Exact Interval far before",
build_intervals_linked_list([
(3, False, 1),
(4, False, 1),
(6, False, 1),
(7, True, 1),
]),
4
),
])
def test_find_closest_by_end(self, test_name, interval: Interval, expected_start_seed):
target = _build_datetime(5)
actual: Interval = interval.find_closest(target, datetime.timedelta(0), True)
expected = _build_datetime(expected_start_seed)
self.assertEqual(actual.start_time, expected, f"'{test_name}' case failed.")
@parameterized.expand([
(
"Event at middle",
build_intervals_linked_list([
(3, True, 5),
]),
Event(1, _build_datetime(5), _build_timedelta(1)),
build_intervals_linked_list([
(3, True, 2),
(5, False, 1),
(6, False, 2),
]),
),
(
"Event start equal interval start",
build_intervals_linked_list([
(5, True, 5),
]),
Event(1, _build_datetime(5), _build_timedelta(1)),
build_intervals_linked_list([
(5, True, 1),
(6, False, 4),
]),
),
(
"Event end equal interval end",
build_intervals_linked_list([
(4, True, 2),
]),
Event(1, _build_datetime(5), _build_timedelta(1)),
build_intervals_linked_list([
(4, True, 1),
(5, False, 1),
]),
),
])
def test_separate_new_at_middle(self, test_name: str, interval: Interval, event: Event,
expected_interval_offset_2_num_4: Interval):
actual: Interval = interval.separate_new_at_middle(event, datetime.timedelta(0))
self.assertListEqual(actual.get_range(-2, 4), expected_interval_offset_2_num_4.get_range(-2, 4),
f"'{test_name}' case failed.")
if __name__ == '__main__':
unittest.main()
| import unittest
import datetime
from parameterized import parameterized
from activity_merger import Interval
from aw_core.models import Event
from typing import List, Tuple
def _build_datetime(seed: int) -> datetime.datetime:
return datetime.datetime(2000, 1, seed, seed, 0, 0).astimezone(datetime.timezone.utc)
def _build_timedelta(seed: int) -> datetime.timedelta:
return _build_datetime(seed + 1) - _build_datetime(1)
def build_intervals_linked_list(data: List[Tuple[int, bool, int]]) -> Interval:
"""
Builds intervals linked list from the list of tuples. Doesn't check parameters.
:param data: List of tuples (day of start, flag to return `Interval` from the function, duration).
:return: Chosen interval.
"""
result = None
previous = None
for (seed, is_target, duration) in data:
if not previous:
previous = Interval(_build_datetime(seed), _build_datetime(seed + duration))
else:
tmp = Interval(_build_datetime(seed), _build_datetime(seed + duration), previous)
previous.next = tmp
previous = tmp
if is_target:
            assert result is None, f"Wrong parameters - '{seed}' interval is marked as the target but a target was already set."
result = previous
return result
class TestInterval(unittest.TestCase):
@parameterized.expand([
(
"Simple the only interval",
build_intervals_linked_list([
(1, True, 1)
]),
1
),
(
"The same interval",
build_intervals_linked_list([
(1, False, 1),
(5, True, 1),
(6, False, 1)
]),
5
),
(
"Exact Interval right before",
build_intervals_linked_list([
(5, False, 1),
(6, True, 1),
(7, False, 1)
]),
5
),
(
"Exact Interval right after",
build_intervals_linked_list([
(3, False, 1),
(4, True, 1),
(5, False, 1)
]),
5
),
(
"Exact Interval far after",
build_intervals_linked_list([
(3, True, 1),
(4, False, 1),
(5, False, 1),
(6, False, 1),
]),
5
),
(
"Exact Interval far before",
build_intervals_linked_list([
(4, False, 1),
(5, False, 1),
(6, False, 1),
(7, True, 1),
]),
5
),
])
def test_find_closest_by_start(self, test_name, interval, expected_start_seed):
target = _build_datetime(5)
actual: Interval = interval.find_closest(target, datetime.timedelta(0), False)
expected = _build_datetime(expected_start_seed)
self.assertEqual(actual.start_time, expected, f"'{test_name}' case failed.")
@parameterized.expand([
(
"Simple the only interval",
build_intervals_linked_list([
(1, True, 1)
]),
1
),
(
"The same interval",
build_intervals_linked_list([
(1, False, 1),
(4, True, 1),
(6, False, 1),
]),
4
),
(
"Exact Interval right before",
build_intervals_linked_list([
(4, False, 1),
(6, True, 1),
(7, False, 1),
]),
4
),
(
"Exact Interval right after",
build_intervals_linked_list([
(1, False, 1),
(2, True, 1),
(4, False, 1),
]),
4
),
(
"Exact Interval far after",
build_intervals_linked_list([
(2, True, 1),
(3, False, 1),
(4, False, 1),
(5, False, 1),
]),
4
),
(
"Exact Interval far before",
build_intervals_linked_list([
(3, False, 1),
(4, False, 1),
(6, False, 1),
(7, True, 1),
]),
4
),
])
def test_find_closest_by_end(self, test_name, interval: Interval, expected_start_seed):
target = _build_datetime(5)
actual: Interval = interval.find_closest(target, datetime.timedelta(0), True)
expected = _build_datetime(expected_start_seed)
self.assertEqual(actual.start_time, expected, f"'{test_name}' case failed.")
@parameterized.expand([
(
"Event at middle",
build_intervals_linked_list([
(3, True, 5),
]),
Event(1, _build_datetime(5), _build_timedelta(1)),
build_intervals_linked_list([
(3, True, 2),
(5, False, 1),
(6, False, 2),
]),
),
(
"Event start equal interval start",
build_intervals_linked_list([
(5, True, 5),
]),
Event(1, _build_datetime(5), _build_timedelta(1)),
build_intervals_linked_list([
(5, True, 1),
(6, False, 4),
]),
),
(
"Event end equal interval end",
build_intervals_linked_list([
(4, True, 2),
]),
Event(1, _build_datetime(5), _build_timedelta(1)),
build_intervals_linked_list([
(4, True, 1),
(5, False, 1),
]),
),
])
def test_separate_new_at_middle(self, test_name: str, interval: Interval, event: Event,
expected_interval_offset_2_num_4: Interval):
actual: Interval = interval.separate_new_at_middle(event, datetime.timedelta(0))
self.assertListEqual(actual.get_range(-2, 4), expected_interval_offset_2_num_4.get_range(-2, 4),
f"'{test_name}' case failed.")
if __name__ == '__main__':
unittest.main()
| en | 0.652758 | Builds intervals linked list from the list of tuples. Doesn't check parameters. :param data: List of tuples (day of start, flag to return `Interval` from the function, duration). :return: Chosen interval. | 2.305334 | 2 |
pommerman/agents/player_agent.py | alekseynp/playground | 8 | 9715 | """
NOTE:
There are a few minor complications to fluid human control which make this
code a little more involved than trivial.
1. Key press-release cycles can be, and often are, faster than one tick of
the game/simulation, but the player still wants that cycle to count, i.e.
to lay a bomb!
2. When holding down a key, the player expects that action to be repeated,
at least after a slight delay.
3. But when holding a key down (say, move left) and simultaneously doing a
quick press-release cycle (put a bomb), we want the held-down key to keep
being executed, but the cycle should have happened in-between.
The way we solve this problem is by separating key-state and actions-to-do.
We hold the actions that need to be executed in a queue (`self._action_q`) and
a state for all considered keys.
1. When a key is pressed down, we note the time and mark it as down.
2. If it is released quickly thereafter, before a game tick could happen,
we add its action into the queue. This often happens when putting bombs.
3. If it's still pressed down as we enter a game tick, we do some math to see
if it's time for a "repeat" event and, if so, push an action to the queue.
4. Just work off one item from the queue each tick.
This way, the input is "natural" and things like dropping a bomb while doing
a diagonal walk from one end to the other "just work".
"""
from time import time
from . import BaseAgent
from .. import characters
REPEAT_DELAY = 0.2 # seconds
REPEAT_INTERVAL = 0.1
class Keystate:
def __init__(self):
self.keydown_time = time()
self.last_repeat_time = None
self.fired = False
def should_fire(self):
if self.last_repeat_time is None:
# The first repetition:
if time() - self.keydown_time > REPEAT_DELAY:
return True
else:
# A repetition after the first:
if time() - self.last_repeat_time > REPEAT_INTERVAL:
return True
# No repetition yet
return False
def mark_fired(self):
self.last_repeat_time = time()
self.fired = True
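# Minimal sketch, not part of the original agent: how Keystate produces the
# hold-to-repeat behaviour described in the module docstring. The sleep durations
# are illustrative assumptions only.
def _keystate_repeat_sketch():
    from time import sleep
    state = Keystate()                 # key goes down: keydown_time is recorded
    first = state.should_fire()        # immediately after the press: False
    sleep(REPEAT_DELAY + 0.01)         # keep holding past the initial delay
    if state.should_fire():            # first repeat is now due
        state.mark_fired()             # the caller would queue the action here
    sleep(REPEAT_INTERVAL + 0.01)      # still holding
    return first, state.should_fire()  # (False, True): repeats continue each interval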
class PlayerAgent(BaseAgent):
"""The Player Agent that lets the user control a character."""
def __init__(self, character=characters.Bomber, agent_control='arrows'):
super(PlayerAgent, self).__init__(character)
##
# @NOTE: DO NOT move this import outside the constructor. It will
# not work in headless environments like a Docker container
# and prevents Pommerman from running.
#
from pyglet.window import key
CONTROLS = {
'arrows': {
key.UP: 1,
key.DOWN: 2,
key.LEFT: 3,
key.RIGHT: 4,
key.SPACE: 5,
key.M: 6 # In Pommerman, this will freeze the game.
},
'wasd': {
key.W: 1,
key.S: 2,
key.A: 3,
key.D: 4,
key.E: 5,
key.Q: 6 # In Pommerman, this will freeze the game.
}
}
assert agent_control in CONTROLS, "Unknown control: {}".format(
agent_control)
self._key2act = CONTROLS[agent_control]
self._action_q = []
self._keystate = {}
def act(self, obs, action_space):
        # Go through the keys and fire for those that need repetition (because they're held down)
for k, state in self._keystate.items():
if state.should_fire():
self._action_q.append(k)
state.mark_fired()
act = 0
if self._action_q: # Work off the keys that are queued.
act = self._key2act[self._action_q.pop(0)]
return act
@staticmethod
def has_user_input():
return True
def on_key_press(self, k, mod):
# Ignore if we're not handling the key. Avoids "shadowing" ticks in
# multiplayer mode.
if k in self._key2act:
self._keystate[k] = Keystate()
def on_key_release(self, k, mod):
# We only need to act on keys for which we did something in the
# `key_press` event, and ignore any other key releases.
if k in self._keystate:
# Only mark this as a "press" upon release if it was a quick one,
# i.e. not held down and executed already
if not self._keystate[k].fired:
self._action_q.append(k)
del self._keystate[k]
| """
NOTE:
There are a few minor complications to fluid human control which make this
code a little more involved than trivial.
1. Key press-release cycles can be, and often are, faster than one tick of
the game/simulation, but the player still wants that cycle to count, i.e.
to lay a bomb!
2. When holding down a key, the player expects that action to be repeated,
at least after a slight delay.
3. But when holding a key down (say, move left) and simultaneously doing a
quick press-release cycle (put a bomb), we want the held-down key to keep
being executed, but the cycle should have happened in-between.
The way we solve this problem is by separating key-state and actions-to-do.
We hold the actions that need to be executed in a queue (`self._action_q`) and
a state for all considered keys.
1. When a key is pressed down, we note the time and mark it as down.
2. If it is released quickly thereafter, before a game tick could happen,
we add its action into the queue. This often happens when putting bombs.
3. If it's still pressed down as we enter a game tick, we do some math to see
if it's time for a "repeat" event and, if so, push an action to the queue.
4. Just work off one item from the queue each tick.
This way, the input is "natural" and things like dropping a bomb while doing
a diagonal walk from one end to the other "just work".
"""
from time import time
from . import BaseAgent
from .. import characters
REPEAT_DELAY = 0.2 # seconds
REPEAT_INTERVAL = 0.1
class Keystate:
def __init__(self):
self.keydown_time = time()
self.last_repeat_time = None
self.fired = False
def should_fire(self):
if self.last_repeat_time is None:
# The first repetition:
if time() - self.keydown_time > REPEAT_DELAY:
return True
else:
# A repetition after the first:
if time() - self.last_repeat_time > REPEAT_INTERVAL:
return True
# No repetition yet
return False
def mark_fired(self):
self.last_repeat_time = time()
self.fired = True
class PlayerAgent(BaseAgent):
"""The Player Agent that lets the user control a character."""
def __init__(self, character=characters.Bomber, agent_control='arrows'):
super(PlayerAgent, self).__init__(character)
##
# @NOTE: DO NOT move this import outside the constructor. It will
# not work in headless environments like a Docker container
# and prevents Pommerman from running.
#
from pyglet.window import key
CONTROLS = {
'arrows': {
key.UP: 1,
key.DOWN: 2,
key.LEFT: 3,
key.RIGHT: 4,
key.SPACE: 5,
key.M: 6 # In Pommerman, this will freeze the game.
},
'wasd': {
key.W: 1,
key.S: 2,
key.A: 3,
key.D: 4,
key.E: 5,
key.Q: 6 # In Pommerman, this will freeze the game.
}
}
assert agent_control in CONTROLS, "Unknown control: {}".format(
agent_control)
self._key2act = CONTROLS[agent_control]
self._action_q = []
self._keystate = {}
def act(self, obs, action_space):
        # Go through the keys and fire for those that need repetition (because they're held down)
for k, state in self._keystate.items():
if state.should_fire():
self._action_q.append(k)
state.mark_fired()
act = 0
if self._action_q: # Work off the keys that are queued.
act = self._key2act[self._action_q.pop(0)]
return act
@staticmethod
def has_user_input():
return True
def on_key_press(self, k, mod):
# Ignore if we're not handling the key. Avoids "shadowing" ticks in
# multiplayer mode.
if k in self._key2act:
self._keystate[k] = Keystate()
def on_key_release(self, k, mod):
# We only need to act on keys for which we did something in the
# `key_press` event, and ignore any other key releases.
if k in self._keystate:
# Only mark this as a "press" upon release if it was a quick one,
# i.e. not held down and executed already
if not self._keystate[k].fired:
self._action_q.append(k)
del self._keystate[k]
| en | 0.949811 | NOTE: There are a few minor complications to fluid human control which make this code a little more involved than trivial. 1. Key press-release cycles can be, and often are, faster than one tick of the game/simulation, but the player still wants that cycle to count, i.e. to lay a bomb! 2. When holding down a key, the player expects that action to be repeated, at least after a slight delay. 3. But when holding a key down (say, move left) and simultaneously doing a quick press-release cycle (put a bomb), we want the held-down key to keep being executed, but the cycle should have happened in-between. The way we solve this problem is by separating key-state and actions-to-do. We hold the actions that need be executed in a queue (`self._action_q`) and a state for all considered keys. 1. When a key is pressed down, we note the time and mark it as down. 2. If it is released quickly thereafter, before a game tick could happen, we add its action into the queue. This often happens when putting bombs. 3. If it's still pressed down as we enter a game tick, we do some math to see if it's time for a "repeat" event and, if so, push an action to the queue. 4. Just work off one item from the queue each tick. This way, the input is "natural" and things like dropping a bomb while doing a diagonal walk from one end to the other "just work". # seconds # The first repetition: # A repetition after the first: # No repetition yet The Player Agent that lets the user control a character. ## # @NOTE: DO NOT move this import outside the constructor. It will # not work in headless environments like a Docker container # and prevents Pommerman from running. # # In Pommerman, this will freeze the game. # In Pommerman, this will freeze the game. # Go through the keys and fire for those that needs repetition (because they're held down) # Work off the keys that are queued. # Ignore if we're not handling the key. Avoids "shadowing" ticks in # multiplayer mode. # We only need to act on keys for which we did something in the # `key_press` event, and ignore any other key releases. # Only mark this as a "press" upon release if it was a quick one, # i.e. not held down and executed already | 3.976842 | 4 |
tests/rest/test_rest.py | sapshah-cisco/cobra | 93 | 9716 | <filename>tests/rest/test_rest.py
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from builtins import object
import http.client
import os
import pytest
import random
import string
import time
import xml.etree.ElementTree as ET
import logging
from cobra.internal.codec.jsoncodec import toJSONStr, fromJSONStr
from cobra.internal.codec.xmlcodec import toXMLStr, fromXMLStr
import cobra.mit.access
import cobra.mit.request
import cobra.mit.session
cobra = pytest.importorskip("cobra")
cobra.model = pytest.importorskip("cobra.model")
cobra.model.fv = pytest.importorskip("cobra.model.fv")
import cobra.model.pol
import cobra.model.infra
import cobra.services
pytestmark = pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="You must specify at least one --apic " +
"option on the CLI")
slow = pytest.mark.slow
http.client.HTTPConnection.debuglevel = 1
logging.basicConfig(level=logging.DEBUG)
fakeDevicePackageZip = 'Archive.zip'
realDevicePackageZip = 'asa-device-pkg.zip'
@pytest.fixture(params=pytest.config.getvalue('apic'))
def moDir(request):
url, user, password, secure = request.param
secure = False if secure == 'False' else True
session = cobra.mit.session.LoginSession(url, user, password,
secure=secure)
md = cobra.mit.access.MoDirectory(session)
md.login()
return md
class Test_rest_configrequest(object):
def test_createtenant(self, moDir, tenantname):
"""
create a tenant and commit it
"""
dcid = str(time.time()).replace('.', '')
polUni = cobra.model.pol.Uni('')
tenant = cobra.model.fv.Tenant(polUni, tenantname[0])
configRequest = cobra.mit.request.ConfigRequest()
configRequest.addMo(tenant)
configRequest.subtree = 'full'
configRequest.id = dcid
mos = moDir.commit(configRequest)
assert mos
mo = mos[0]
assert len(mos) > 0
assert str(mo.dn) == str(tenant.dn)
assert len(list(mo.children)) >= 1
def test_lookupcreatedtenant(self, moDir, tenantname):
tenant = moDir.lookupByDn('uni/tn-{0}'.format(tenantname[0]))
assert tenant
def test_deletetenant(self, moDir, tenantname):
tenant = moDir.lookupByDn('uni/tn-{0}'.format(tenantname[0]))
tenant.delete()
configRequest = cobra.mit.request.ConfigRequest()
configRequest.addMo(tenant)
r = moDir.commit(configRequest)
assert r == []
tenant = moDir.lookupByDn('uni/tn-{0}'.format(tenantname[0]))
assert not tenant
class Test_rest_classquery(object):
def test_classquery_shorthand_filter(self, moDir):
"""
check that lookupByClass is able to lookup tenant common and only one
item is returned
"""
commonTn = moDir.lookupByClass(
'fvTenant', propFilter='eq(fvTenant.name, "common")')
assert len(commonTn) == 1
commonTn = commonTn[0]
assert str(commonTn.dn) == 'uni/tn-common'
def test_classquery_normal(self, moDir):
"""
check that a class query with no special properties succeeds
we should get at least three tenants (infra, mgmt, common)
"""
classQuery = cobra.mit.request.ClassQuery('fvTenant')
commonTn = moDir.query(classQuery)
def findtn(tnlist, tnname):
for tn in tnlist:
if tn.name == tnname:
return True
return False
assert findtn(commonTn, 'common')
assert findtn(commonTn, 'infra')
assert findtn(commonTn, 'mgmt')
def test_classquery_filter(self, moDir):
"""
check that a class query with a property filter works
"""
classQuery = cobra.mit.request.ClassQuery('fvTenant')
classQuery.propFilter = 'eq(fvTenant.name, "common")'
commonTn = moDir.query(classQuery)
commonTn = commonTn[0]
assert str(commonTn.dn) == 'uni/tn-common'
def test_classquery_subtree(self, moDir):
"""
        check that a class query with a subtree response works
"""
classQuery = cobra.mit.request.ClassQuery('fvTenant')
classQuery.subtree = 'full'
classQuery.propFilter = 'eq(fvTenant.name, "common")'
commonTn = moDir.query(classQuery)
commonTn = commonTn[0]
assert str(commonTn.dn) == 'uni/tn-common'
# expect at least 3 child objects
assert len(list(commonTn.children)) >= 3
assert str(commonTn.BD['default'].dn) == 'uni/tn-common/BD-default'
@pytest.mark.parametrize("cls,subtree", [
pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="no --apic")(('fvTenant', 'full')),
pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="no --apic")(('infraInfra', 'no')),
pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="no --apic")(('fvAEPg', 'full')),
pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="no --apic")(('infraFuncP', 'full')),
pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="no --apic")(('fabricNode', 'no')),
pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="no --apic")(('topSystem', 'full')),
])
def test_classquery_many(self, moDir, cls, subtree):
classQuery = cobra.mit.request.ClassQuery(cls)
classQuery.subtree = subtree
# classQuery.propFilter='eq(fvTenant.name, "common")'
mos = moDir.query(classQuery)
assert len(mos) > 0
def test_classquery_verifyxml(self, moDir):
"""
verify that the XML returned by lookupByClass is valid
"""
commonTn = moDir.lookupByClass(
'fvTenant', propFilter='eq(fvTenant.name, "common")')
commonTn = commonTn[0]
xml = ET.fromstring(toXMLStr(commonTn))
assert xml.tag == 'fvTenant'
def test_classquery_negative(self, moDir):
"""
        generate a random tenant name and ensure that we don't find a match for it
"""
tenantName = ''.join(random.choice(string.ascii_lowercase)
for i in range(64))
tenant = moDir.lookupByClass(
'fvTenant', propFilter='eq(fvTenant.name, "{0}")'.format(tenantName))
assert len(tenant) == 0
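# Hedged sketch, not one of the original tests: the same query options exercised above can
# be inspected without sending the request by asking the query object for its URL, using
# the same getUrl() helper that test_tracequery relies on further below.
def _sketch_classquery_url(moDir):
    classQuery = cobra.mit.request.ClassQuery('fvTenant')
    classQuery.propFilter = 'eq(fvTenant.name, "common")'
    classQuery.subtree = 'full'
    return classQuery.getUrl(moDir._session)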
class Test_rest_dnquery(object):
def test_dnquery_normal(self, moDir, dn):
dnQuery = cobra.mit.request.DnQuery(dn)
dnQuery.subtree = 'full'
commonTn = moDir.query(dnQuery)
assert len(commonTn) == 1
commonTn = commonTn[0]
assert str(commonTn.dn) == str(dn)
# expect at least 3 child objects
assert len(list(commonTn.children)) >= 3
assert str(commonTn.BD['default'].dn) == 'uni/tn-common/BD-default'
def test_dnquery_shorthand(self, moDir, dn):
commonTn = moDir.lookupByDn(dn)
assert str(commonTn.dn) == str(dn)
class Test_rest_getLoginDomains(object):
def test_getDomains(self, apic):
"""Verify that the getLoginDomains() method works.
"""
url, user, password, secure = apic
secure = False if secure == 'False' else True
session = cobra.mit.session.LoginSession(url, user, password,
secure=secure)
session.getLoginDomains()
assert session.domains != []
def test_loginDomains_setting(self, apic):
"""Verify that the loginDomain can be set."""
url, user, password, secure = apic
secure = False if secure == 'False' else True
session = cobra.mit.session.LoginSession(url, user, password,
secure=secure)
session.getLoginDomains()
session.loginDomain = session.domains[0]
assert session.loginDomain == session.domains[0]
class Test_rest_login(object):
def test_login_positive(self, apic):
"""
verify that the login function works
"""
url, user, password, secure = apic
secure = False if secure == 'False' else True
session = cobra.mit.session.LoginSession(url, user, password,
secure=secure)
moDir = cobra.mit.access.MoDirectory(session)
moDir.login()
assert moDir._session
def test_login_negative(self, apic):
"""
verify that the invalid logins throw an exception
"""
url, user, password, secure = apic
secure = False if secure == 'False' else True
session = cobra.mit.session.LoginSession(url, user, '<PASSWORD>',
secure=secure)
moDir = cobra.mit.access.MoDirectory(session)
with pytest.raises(cobra.mit.session.LoginError):
moDir.login()
@slow
def test_login_timeout(self, apic):
"""
verify that the session times out properly
"""
url, user, password, secure = apic
secure = False if secure == 'False' else True
session = cobra.mit.session.LoginSession(url, user, password,
secure=secure)
moDir = cobra.mit.access.MoDirectory(session)
moDir.login()
start = time.time()
pki = moDir.lookupByDn('uni/userext/pkiext/webtokendata')
refreshTime = pki.webtokenTimeoutSeconds
sleepTime = float(refreshTime) - (time.time() - start)
sleepTime += 1.0 # one second buffer, for good measure
time.sleep(sleepTime)
with pytest.raises(cobra.mit.request.QueryError):
moDir.lookupByClass('pkiWebTokenData')
def test_login_get_timeout(self, apic):
"""
        verify that the session refresh time attributes are populated after login
"""
url, user, password, secure = apic
secure = False if secure == 'False' else True
session = cobra.mit.session.LoginSession(url, user, password,
secure=secure)
moDir = cobra.mit.access.MoDirectory(session)
moDir.login()
assert moDir._session.refreshTime > int(time.time())
assert moDir._session.refreshTimeoutSeconds > 0
def test_rest_login_reauth(self, apic):
"""Verify that the reauth call returns a different session cookie."""
url, user, password, secure = apic
secure = False if secure == 'False' else True
session = cobra.mit.session.LoginSession(url, user, password,
secure=secure)
moDir = cobra.mit.access.MoDirectory(session)
moDir.login()
orig_cookie = session.cookie
# sleep for 5 seconds to ensure we get a different cookie.
time.sleep(5)
moDir.reauth()
assert orig_cookie != session.cookie
class Test_rest_tracequery(object):
@pytest.mark.parametrize("cls", [
pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="no --apic")('fvEpP'),
pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="no --apic")('vlanCktEp'),
pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="no --apic")('actrlRule'),
])
def test_tracequery(self, moDir, cls):
"""
Query every leaf in the fabric for some concrete objects and try to
find at least one response. If we don't get that, we fail
"""
traceResponse = 0
nodes = moDir.lookupByClass(
            'fabricNode', propFilter='eq(fabricNode.role,"leaf")')
assert len(nodes) > 0
for node in nodes:
a = cobra.mit.request.TraceQuery(node.dn, cls)
print(a.getUrl(moDir._session))
mos = moDir.query(a)
for mo in mos:
print(mo.dn)
traceResponse += len(mos)
assert traceResponse > 0
class Test_services_devicepackage(object):
fakePackage = os.path.join(os.path.dirname(os.path.realpath(__file__)),
fakeDevicePackageZip)
realPackage = os.path.join(os.path.dirname(os.path.realpath(__file__)),
realDevicePackageZip)
def test_packagevalidate(self):
"""
Make sure that invalid device packages throw an exception when validation
is enabled
"""
with pytest.raises(AttributeError):
cobra.services.UploadPackage(self.fakePackage, validate=True)
def test_packagedonotvalidate(self):
"""
Make sure that if validation is not enabled, no exception is thrown
"""
packageUpload = cobra.services.UploadPackage(self.fakePackage)
assert packageUpload.devicePackagePath == self.fakePackage
def test_uploadpackage(self, moDir):
"""
ensure that the device package upload returns a 200
"""
packageUpload = cobra.services.UploadPackage(self.realPackage,
validate=True)
r = moDir.commit(packageUpload)
assert r == []
def test_validateupload(self, moDir):
"""
make sure that the uploaded device package is found
"""
uni = cobra.model.pol.Uni('')
infra = cobra.model.infra.Infra(uni)
vnsQuery = cobra.mit.request.DnQuery(infra.dn)
vnsQuery.propFilter = 'eq(vnsMDev.vendor,"CISCO")'
vnsQuery.queryTarget = 'subtree'
vnsQuery.classFilter = 'vnsMDev'
packages = moDir.query(vnsQuery)
assert len(packages) > 0
package = packages[0]
assert package.vendor == 'CISCO'
assert package.model == 'ASA'
# for package in packages:
# print '\n'.join(['%s:\t%s' % (k,getattr(package,k)) for k in package.meta.props.names])
# print package.dn
| <filename>tests/rest/test_rest.py
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from builtins import object
import http.client
import os
import pytest
import random
import string
import time
import xml.etree.ElementTree as ET
import logging
from cobra.internal.codec.jsoncodec import toJSONStr, fromJSONStr
from cobra.internal.codec.xmlcodec import toXMLStr, fromXMLStr
import cobra.mit.access
import cobra.mit.request
import cobra.mit.session
cobra = pytest.importorskip("cobra")
cobra.model = pytest.importorskip("cobra.model")
cobra.model.fv = pytest.importorskip("cobra.model.fv")
import cobra.model.pol
import cobra.model.infra
import cobra.services
pytestmark = pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="You must specify at least one --apic " +
"option on the CLI")
slow = pytest.mark.slow
http.client.HTTPConnection.debuglevel = 1
logging.basicConfig(level=logging.DEBUG)
fakeDevicePackageZip = 'Archive.zip'
realDevicePackageZip = 'asa-device-pkg.zip'
@pytest.fixture(params=pytest.config.getvalue('apic'))
def moDir(request):
url, user, password, secure = request.param
secure = False if secure == 'False' else True
session = cobra.mit.session.LoginSession(url, user, password,
secure=secure)
md = cobra.mit.access.MoDirectory(session)
md.login()
return md
class Test_rest_configrequest(object):
def test_createtenant(self, moDir, tenantname):
"""
create a tenant and commit it
"""
dcid = str(time.time()).replace('.', '')
polUni = cobra.model.pol.Uni('')
tenant = cobra.model.fv.Tenant(polUni, tenantname[0])
configRequest = cobra.mit.request.ConfigRequest()
configRequest.addMo(tenant)
configRequest.subtree = 'full'
configRequest.id = dcid
mos = moDir.commit(configRequest)
assert mos
mo = mos[0]
assert len(mos) > 0
assert str(mo.dn) == str(tenant.dn)
assert len(list(mo.children)) >= 1
def test_lookupcreatedtenant(self, moDir, tenantname):
tenant = moDir.lookupByDn('uni/tn-{0}'.format(tenantname[0]))
assert tenant
def test_deletetenant(self, moDir, tenantname):
tenant = moDir.lookupByDn('uni/tn-{0}'.format(tenantname[0]))
tenant.delete()
configRequest = cobra.mit.request.ConfigRequest()
configRequest.addMo(tenant)
r = moDir.commit(configRequest)
assert r == []
tenant = moDir.lookupByDn('uni/tn-{0}'.format(tenantname[0]))
assert not tenant
class Test_rest_classquery(object):
def test_classquery_shorthand_filter(self, moDir):
"""
check that lookupByClass is able to lookup tenant common and only one
item is returned
"""
commonTn = moDir.lookupByClass(
'fvTenant', propFilter='eq(fvTenant.name, "common")')
assert len(commonTn) == 1
commonTn = commonTn[0]
assert str(commonTn.dn) == 'uni/tn-common'
def test_classquery_normal(self, moDir):
"""
check that a class query with no special properties succeeds
we should get at least three tenants (infra, mgmt, common)
"""
classQuery = cobra.mit.request.ClassQuery('fvTenant')
commonTn = moDir.query(classQuery)
def findtn(tnlist, tnname):
for tn in tnlist:
if tn.name == tnname:
return True
return False
assert findtn(commonTn, 'common')
assert findtn(commonTn, 'infra')
assert findtn(commonTn, 'mgmt')
def test_classquery_filter(self, moDir):
"""
check that a class query with a property filter works
"""
classQuery = cobra.mit.request.ClassQuery('fvTenant')
classQuery.propFilter = 'eq(fvTenant.name, "common")'
commonTn = moDir.query(classQuery)
commonTn = commonTn[0]
assert str(commonTn.dn) == 'uni/tn-common'
def test_classquery_subtree(self, moDir):
"""
        check that a class query with a subtree response works
"""
classQuery = cobra.mit.request.ClassQuery('fvTenant')
classQuery.subtree = 'full'
classQuery.propFilter = 'eq(fvTenant.name, "common")'
commonTn = moDir.query(classQuery)
commonTn = commonTn[0]
assert str(commonTn.dn) == 'uni/tn-common'
# expect at least 3 child objects
assert len(list(commonTn.children)) >= 3
assert str(commonTn.BD['default'].dn) == 'uni/tn-common/BD-default'
@pytest.mark.parametrize("cls,subtree", [
pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="no --apic")(('fvTenant', 'full')),
pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="no --apic")(('infraInfra', 'no')),
pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="no --apic")(('fvAEPg', 'full')),
pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="no --apic")(('infraFuncP', 'full')),
pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="no --apic")(('fabricNode', 'no')),
pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="no --apic")(('topSystem', 'full')),
])
def test_classquery_many(self, moDir, cls, subtree):
classQuery = cobra.mit.request.ClassQuery(cls)
classQuery.subtree = subtree
# classQuery.propFilter='eq(fvTenant.name, "common")'
mos = moDir.query(classQuery)
assert len(mos) > 0
def test_classquery_verifyxml(self, moDir):
"""
verify that the XML returned by lookupByClass is valid
"""
commonTn = moDir.lookupByClass(
'fvTenant', propFilter='eq(fvTenant.name, "common")')
commonTn = commonTn[0]
xml = ET.fromstring(toXMLStr(commonTn))
assert xml.tag == 'fvTenant'
def test_classquery_negative(self, moDir):
"""
        generate a random tenant name and ensure that we don't find a match for it
"""
tenantName = ''.join(random.choice(string.ascii_lowercase)
for i in range(64))
tenant = moDir.lookupByClass(
'fvTenant', propFilter='eq(fvTenant.name, "{0}")'.format(tenantName))
assert len(tenant) == 0
class Test_rest_dnquery(object):
def test_dnquery_normal(self, moDir, dn):
dnQuery = cobra.mit.request.DnQuery(dn)
dnQuery.subtree = 'full'
commonTn = moDir.query(dnQuery)
assert len(commonTn) == 1
commonTn = commonTn[0]
assert str(commonTn.dn) == str(dn)
# expect at least 3 child objects
assert len(list(commonTn.children)) >= 3
assert str(commonTn.BD['default'].dn) == 'uni/tn-common/BD-default'
def test_dnquery_shorthand(self, moDir, dn):
commonTn = moDir.lookupByDn(dn)
assert str(commonTn.dn) == str(dn)
class Test_rest_getLoginDomains(object):
def test_getDomains(self, apic):
"""Verify that the getLoginDomains() method works.
"""
url, user, password, secure = apic
secure = False if secure == 'False' else True
session = cobra.mit.session.LoginSession(url, user, password,
secure=secure)
session.getLoginDomains()
assert session.domains != []
def test_loginDomains_setting(self, apic):
"""Verify that the loginDomain can be set."""
url, user, password, secure = apic
secure = False if secure == 'False' else True
session = cobra.mit.session.LoginSession(url, user, password,
secure=secure)
session.getLoginDomains()
session.loginDomain = session.domains[0]
assert session.loginDomain == session.domains[0]
class Test_rest_login(object):
def test_login_positive(self, apic):
"""
verify that the login function works
"""
url, user, password, secure = apic
secure = False if secure == 'False' else True
session = cobra.mit.session.LoginSession(url, user, password,
secure=secure)
moDir = cobra.mit.access.MoDirectory(session)
moDir.login()
assert moDir._session
def test_login_negative(self, apic):
"""
verify that the invalid logins throw an exception
"""
url, user, password, secure = apic
secure = False if secure == 'False' else True
session = cobra.mit.session.LoginSession(url, user, '<PASSWORD>',
secure=secure)
moDir = cobra.mit.access.MoDirectory(session)
with pytest.raises(cobra.mit.session.LoginError):
moDir.login()
@slow
def test_login_timeout(self, apic):
"""
verify that the session times out properly
"""
url, user, password, secure = apic
secure = False if secure == 'False' else True
session = cobra.mit.session.LoginSession(url, user, password,
secure=secure)
moDir = cobra.mit.access.MoDirectory(session)
moDir.login()
start = time.time()
pki = moDir.lookupByDn('uni/userext/pkiext/webtokendata')
refreshTime = pki.webtokenTimeoutSeconds
sleepTime = float(refreshTime) - (time.time() - start)
sleepTime += 1.0 # one second buffer, for good measure
time.sleep(sleepTime)
with pytest.raises(cobra.mit.request.QueryError):
moDir.lookupByClass('pkiWebTokenData')
def test_login_get_timeout(self, apic):
"""
        verify that the session refresh time attributes are populated after login
"""
url, user, password, secure = apic
secure = False if secure == 'False' else True
session = cobra.mit.session.LoginSession(url, user, password,
secure=secure)
moDir = cobra.mit.access.MoDirectory(session)
moDir.login()
assert moDir._session.refreshTime > int(time.time())
assert moDir._session.refreshTimeoutSeconds > 0
def test_rest_login_reauth(self, apic):
"""Verify that the reauth call returns a different session cookie."""
url, user, password, secure = apic
secure = False if secure == 'False' else True
session = cobra.mit.session.LoginSession(url, user, password,
secure=secure)
moDir = cobra.mit.access.MoDirectory(session)
moDir.login()
orig_cookie = session.cookie
# sleep for 5 seconds to ensure we get a different cookie.
time.sleep(5)
moDir.reauth()
assert orig_cookie != session.cookie
class Test_rest_tracequery(object):
@pytest.mark.parametrize("cls", [
pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="no --apic")('fvEpP'),
pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="no --apic")('vlanCktEp'),
pytest.mark.skipif(pytest.config.getvalue('apic') == [],
reason="no --apic")('actrlRule'),
])
def test_tracequery(self, moDir, cls):
"""
Query every leaf in the fabric for some concrete objects and try to
find at least one response. If we don't get that, we fail
"""
traceResponse = 0
nodes = moDir.lookupByClass(
            'fabricNode', propFilter='eq(fabricNode.role,"leaf")')
assert len(nodes) > 0
for node in nodes:
a = cobra.mit.request.TraceQuery(node.dn, cls)
print(a.getUrl(moDir._session))
mos = moDir.query(a)
for mo in mos:
print(mo.dn)
traceResponse += len(mos)
assert traceResponse > 0
class Test_services_devicepackage(object):
fakePackage = os.path.join(os.path.dirname(os.path.realpath(__file__)),
fakeDevicePackageZip)
realPackage = os.path.join(os.path.dirname(os.path.realpath(__file__)),
realDevicePackageZip)
def test_packagevalidate(self):
"""
Make sure that invalid device packages throw an exception when validation
is enabled
"""
with pytest.raises(AttributeError):
cobra.services.UploadPackage(self.fakePackage, validate=True)
def test_packagedonotvalidate(self):
"""
Make sure that if validation is not enabled, no exception is thrown
"""
packageUpload = cobra.services.UploadPackage(self.fakePackage)
assert packageUpload.devicePackagePath == self.fakePackage
def test_uploadpackage(self, moDir):
"""
ensure that the device package upload returns a 200
"""
packageUpload = cobra.services.UploadPackage(self.realPackage,
validate=True)
r = moDir.commit(packageUpload)
assert r == []
def test_validateupload(self, moDir):
"""
make sure that the uploaded device package is found
"""
uni = cobra.model.pol.Uni('')
infra = cobra.model.infra.Infra(uni)
vnsQuery = cobra.mit.request.DnQuery(infra.dn)
vnsQuery.propFilter = 'eq(vnsMDev.vendor,"CISCO")'
vnsQuery.queryTarget = 'subtree'
vnsQuery.classFilter = 'vnsMDev'
packages = moDir.query(vnsQuery)
assert len(packages) > 0
package = packages[0]
assert package.vendor == 'CISCO'
assert package.model == 'ASA'
# for package in packages:
# print '\n'.join(['%s:\t%s' % (k,getattr(package,k)) for k in package.meta.props.names])
# print package.dn
| en | 0.831536 | # Copyright 2015 Cisco Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. create a tenant and commit it check that lookupByClass is able to lookup tenant common and only one item is returned check that a class query with no special properties succeeds we should get at least three tenants (infra, mgmt, common) check that a class query with a property filter works check that a class query with a subtree response # expect at least 3 child objects # classQuery.propFilter='eq(fvTenant.name, "common")' verify that the XML returned by lookupByClass is valid generate a random tenant name and ensure that we dont find a match for it # expect at least 3 child objects Verify that the getLoginDomains() method works. Verify that the loginDomain can be set. verify that the login function works verify that the invalid logins throw an exception verify that the session times out properly # one second buffer, for good measure verify that the session times out properly Verify that the reauth call returns a different session cookie. # sleep for 5 seconds to ensure we get a different cookie. Query every leaf in the fabric for some concrete objects and try to find at least one response. If we don't get that, we fail Make sure that invalid device packages throw an exception when validation is enabled Make sure that if validation is not enabled, no exception is thrown ensure that the device package upload returns a 200 make sure that the uploaded device package is found # for package in packages: # print '\n'.join(['%s:\t%s' % (k,getattr(package,k)) for k in package.meta.props.names]) # print package.dn | 1.850782 | 2 |
spanglish/tests/fixtures/models/language.py | omaraljazairy/FedalAPI | 0 | 9717 | <gh_stars>0
""" fixtures that return an sql statement with a list of values to be inserted."""
def load_language():
""" return the sql and values of the insert queuery."""
sql = """
INSERT INTO Spanglish_Test.Language
(
`name`, `iso-639-1`
)
VALUES (%s, %s)
"""
values = [
(
'English', 'EN'
),
(
'Spanish', 'ES'
),
(
'Dutch', 'NL'
)
]
return {
'sql': sql,
'values': values
}
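# Hedged usage sketch (not part of the original fixture file): the returned dict is shaped
# for a DB-API cursor.executemany() call; the cursor argument is an assumption supplied by
# the caller.
def _insert_languages_example(cursor):
    fixture = load_language()
    cursor.executemany(fixture['sql'], fixture['values'])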
| """ fixtures that return an sql statement with a list of values to be inserted."""
def load_language():
""" return the sql and values of the insert queuery."""
sql = """
INSERT INTO Spanglish_Test.Language
(
`name`, `iso-639-1`
)
VALUES (%s, %s)
"""
values = [
(
'English', 'EN'
),
(
'Spanish', 'ES'
),
(
'Dutch', 'NL'
)
]
return {
'sql': sql,
'values': values
} | en | 0.445565 | fixtures that return an sql statement with a list of values to be inserted. return the sql and values of the insert queuery. INSERT INTO Spanglish_Test.Language ( `name`, `iso-639-1` ) VALUES (%s, %s) | 2.761919 | 3 |
main-hs2.py | tradewartracker/phase-one-product-hs2 | 0 | 9718 | import datetime as dt
from os.path import dirname, join
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from bokeh.io import curdoc
from bokeh.layouts import column, gridplot, row
from bokeh.models import ColumnDataSource, DataRange1d, Select, HoverTool, Panel, Tabs, LinearColorMapper, Range1d
from bokeh.models import NumeralTickFormatter, Title, Label, Paragraph, Div, CustomJSHover, BoxAnnotation
from bokeh.models import ColorBar
from bokeh.palettes import brewer, Spectral6
from bokeh.plotting import figure
from bokeh.embed import server_document
from bokeh.transform import factor_cmap
#################################################################################
# This just loads in the data...
# A lot of this was built off of this "cross-fire demo"
# https://github.com/bokeh/bokeh/blob/branch-2.3/examples/app/crossfilter/main.py
start_date = dt.datetime(2017,7,1)
end_date = dt.datetime(2022,1,1)
background = "#ffffff"
file = "./data"+ "/data.parquet"
df = pq.read_table(file).to_pandas()
df.sort_index(inplace=True)
options = df.index.unique(0).to_list()
#print(options)
product = "HS CODE 72, IRON AND STEEL"
level = "US Dollars"
#################################################################################
#These are functions used in the plot...
def growth_trade(foo):
# what this function does is take a dataframe and create a relative
return 100*((foo["china_exports"]/foo["china_exports"].shift(12)) - 1)
def cum_trade(foo):
outdf = pd.DataFrame([])
outdf["cuml_trade_2017"] = foo["china_exports"].loc["2017"].cumsum()
outdf.index = pd.date_range(start="2020-01-01", end="2020-12-01", freq = "MS")
outdf["cuml_trade_2020"] = foo["china_exports"].loc["2020"].cumsum()
return outdf
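# Hedged toy check, not part of the original app: exercising the two helpers above on a
# synthetic monthly series; the column name mirrors the "china_exports" field used throughout.
def _sketch_transforms():
    idx = pd.date_range("2017-01-01", "2020-12-01", freq="MS")
    toy = pd.DataFrame({"china_exports": np.arange(1.0, len(idx) + 1)}, index=idx)
    yoy = growth_trade(toy)    # percent change versus the same month one year earlier
    cuml = cum_trade(toy)      # cumulative Jan-Dec purchases, 2017 vs 2020
    return yoy, cuml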
#################################################################################
# Then this makes the simple plots:
def make_plot():
height = int(1.15*533)
width = int(1.15*750)
foo = df.loc[product_select.value]
#foo = df.query("@a < a")
    # product_select (defined below) is a Select widget; its .value holds whichever
    # entry of the options list is currently selected.
x = foo.index
if level_select.value == 'US Dollars':
y = foo['china_exports']
if level_select.value == 'Year over Year % Change':
y = growth_trade(foo)
if level_select.value == "Cumulative Purchases 2020 vs 2017":
cuml = cum_trade(foo)
x = cuml.index
y2017 = cuml["cuml_trade_2017"]
y2020 = cuml["cuml_trade_2020"]
title = "US Exports to China of " + product_select.value.title().upper()
if level_select.value != "Cumulative Purchases 2020 vs 2017":
# This is standard bokeh stuff so far
plot = figure(x_axis_type="datetime", plot_height = height, plot_width=width, toolbar_location = 'below',
tools = "box_zoom, reset, pan, xwheel_zoom", title = title,
x_range = (start_date,end_date) )
plot.line(x = x,
y = y, line_width=3.5, line_alpha=0.75, line_color = "slategray")
if level_select.value == "Cumulative Purchases 2020 vs 2017":
plot = figure(x_axis_type="datetime", plot_height = height, plot_width=width, toolbar_location = 'below',
tools = "box_zoom, reset, pan", title = title,
x_range = (dt.datetime(2020,1,1),dt.datetime(2021,2,1)) )
plot.line(x = x,
y = y2017, line_width=3.5, line_alpha=0.5, line_color = "red", line_dash = "dashed"
, legend_label= "2017")
plot.line(x = x,
y = y2020, line_width=3.5, line_alpha=0.75, line_color = "darkblue"
, legend_label= "2020")
plot.legend.title = 'Cumulative Purchases'
plot.legend.location = "top_left"
plot.legend.title_text_font_style = "bold"
# fixed attributes
plot.xaxis.axis_label = None
plot.yaxis.axis_label = ""
plot.axis.axis_label_text_font_style = "bold"
plot.grid.grid_line_alpha = 0.3
TIMETOOLTIPS = """
<div style="background-color:#F5F5F5; opacity: 0.95; border: 15px 15px 15px 15px;">
<div style = "text-align:left;">"""
if level_select.value == 'Year over Year % Change':
TIMETOOLTIPS = TIMETOOLTIPS + """
<span style="font-size: 13px; font-weight: bold"> $data_x{%b %Y}: $data_y{0}%</span>
</div>
</div>
"""
plot.add_tools(HoverTool(tooltips = TIMETOOLTIPS, line_policy='nearest', formatters={'$data_x': 'datetime'}))
if level_select.value == 'US Dollars':
TIMETOOLTIPS = TIMETOOLTIPS + """
<span style="font-size: 13px; font-weight: bold"> $data_x{%b %Y}: $data_y{$0.0a}</span>
</div>
</div>
"""
plot.add_tools(HoverTool(tooltips = TIMETOOLTIPS, line_policy='nearest', formatters={'$data_x': 'datetime'}))
if level_select.value == "Cumulative Purchases 2020 vs 2017":
#################################################################################
singlesource2020 = ColumnDataSource({
'xs': x.values,
'ys': y2020.values,
"dates": np.array(x),
})
c2020 = plot.circle(x="xs", y="ys", size=35,
source = singlesource2020, color = "crimson",alpha=0.0)
singlesource2017 = ColumnDataSource({
'xs': x.values,
'ys': y2017.values,
"dates": np.array(pd.date_range(start="2017-01-01", end="2017-12-01", freq = "MS")),
})
c2017 = plot.circle(x="xs", y="ys", size=35,
source = singlesource2017, color = "darkblue",alpha=0.0)
TIMETOOLTIPS = TIMETOOLTIPS + """
<span style="font-size: 13px; font-weight: bold"> @dates{%b %Y}: $data_y{$0.0a}</span>
</div>
</div>
"""
plot.add_tools(HoverTool(tooltips = TIMETOOLTIPS, line_policy='nearest', formatters={'@dates': 'datetime'}, renderers = [c2017,c2020]))
if level_select.value == 'Year over Year % Change':
if y.max() > 1500:
plot.y_range.end = 1500
plot.title.text_font_size = '13pt'
plot.background_fill_color = background
plot.background_fill_alpha = 0.75
plot.border_fill_color = background
tradewar_box = BoxAnnotation(left=dt.datetime(2018,7,1), right=dt.datetime(2019,10,11), fill_color='red', fill_alpha=0.1)
plot.add_layout(tradewar_box)
tradewar_box = BoxAnnotation(left=dt.datetime(2020,1,1), right=dt.datetime(2021,12,31), fill_color='blue', fill_alpha=0.1)
plot.add_layout(tradewar_box)
#p.yaxis.axis_label =
plot.yaxis.axis_label_text_font_style = 'bold'
plot.yaxis.axis_label_text_font_size = "13px"
plot.sizing_mode= "scale_both"
if level_select.value != 'Year over Year % Change':
plot.yaxis.formatter = NumeralTickFormatter(format="($0. a)")
plot.yaxis.axis_label = "US Dollars"
if level_select.value == 'Year over Year % Change':
plot.yaxis.axis_label = level_select.value
plot.max_height = height
plot.max_width = width
plot.min_height = int(0.25*height)
plot.min_width = int(0.25*width)
return plot
def update_plot(attrname, old, new):
layout.children[0] = make_plot()
    # On any widget change, rebuild the figure and swap it in as the first child of
    # the layout row: index 0 is the plot, index 1 is the controls column, which is
    # left untouched.
level_select = Select(value=level, title='Transformations', options=['US Dollars', 'Year over Year % Change', "Cumulative Purchases 2020 vs 2017"])
level_select.on_change('value', update_plot)
#print(sorted(options))
product_select = Select(value=product, title='Product', options=sorted(options), width=400)
# This is the key thing that creates the selection object
product_select.on_change('value', update_plot)
# When the selected value changes, update_plot is called to rebuild the figure
div0 = Div(text = """Categories are at both the HS2 and HS4 level. Only Phase One covered products as defined in Annex 6-1 of The Agreement within that HS Code are shown. Red marks the period of Section 301 tariffs and retaliation. Blue is period of agreement.\n
\n
\n
""", width=400, background = background, style={"justify-content": "space-between", "display": "flex"} )
div1 = Div(text = """Transformations: US Dollars, year over year growth rate and cumulative purchases in 2017 vs 2020.\n The later transformation cumulates Chinese purchases over each month in 2017 and 2020 and compares each. Because 2017 is the benchmark year for The Agreement, this measure provides a sense, for each product category, China's progress towards meeting their purchase commitments.\n
""", width=400, background = background, style={"justify-content": "space-between", "display": "flex"} )
controls = column(product_select, div0, level_select, div1)
height = int(1.95*533)
width = int(1.95*675)
layout = row(make_plot(), controls, sizing_mode = "scale_height", max_height = height, max_width = width,
min_height = int(0.25*height), min_width = int(0.25*width))
curdoc().add_root(layout)
curdoc().title = "us-china-products"
| import datetime as dt
from os.path import dirname, join
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from bokeh.io import curdoc
from bokeh.layouts import column, gridplot, row
from bokeh.models import ColumnDataSource, DataRange1d, Select, HoverTool, Panel, Tabs, LinearColorMapper, Range1d
from bokeh.models import NumeralTickFormatter, Title, Label, Paragraph, Div, CustomJSHover, BoxAnnotation
from bokeh.models import ColorBar
from bokeh.palettes import brewer, Spectral6
from bokeh.plotting import figure
from bokeh.embed import server_document
from bokeh.transform import factor_cmap
#################################################################################
# This just loads in the data...
# A lot of this was built off of the "crossfilter" demo:
# https://github.com/bokeh/bokeh/blob/branch-2.3/examples/app/crossfilter/main.py
start_date = dt.datetime(2017,7,1)
end_date = dt.datetime(2022,1,1)
background = "#ffffff"
file = "./data"+ "/data.parquet"
df = pq.read_table(file).to_pandas()
df.sort_index(inplace=True)
options = df.index.unique(0).to_list()
#print(options)
product = "HS CODE 72, IRON AND STEEL"
level = "US Dollars"
#################################################################################
#These are functions used in the plot...
def growth_trade(foo):
    # Take a dataframe of monthly exports and return the year-over-year % change
return 100*((foo["china_exports"]/foo["china_exports"].shift(12)) - 1)
def cum_trade(foo):
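    # Build a frame with the 2017 and 2020 cumulative export totals on a shared
    # 2020 monthly index, so both years can be overlaid on a single x-axis.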
outdf = pd.DataFrame([])
outdf["cuml_trade_2017"] = foo["china_exports"].loc["2017"].cumsum()
outdf.index = pd.date_range(start="2020-01-01", end="2020-12-01", freq = "MS")
outdf["cuml_trade_2020"] = foo["china_exports"].loc["2020"].cumsum()
return outdf
#################################################################################
# Then this makes the simple plots:
def make_plot():
height = int(1.15*533)
width = int(1.15*750)
foo = df.loc[product_select.value]
#foo = df.query("@a < a")
    # product_select (defined below) holds one of the values from the options list,
    # so .value grabs the currently selected option.
x = foo.index
if level_select.value == 'US Dollars':
y = foo['china_exports']
if level_select.value == 'Year over Year % Change':
y = growth_trade(foo)
if level_select.value == "Cumulative Purchases 2020 vs 2017":
cuml = cum_trade(foo)
x = cuml.index
y2017 = cuml["cuml_trade_2017"]
y2020 = cuml["cuml_trade_2020"]
title = "US Exports to China of " + product_select.value.title().upper()
if level_select.value != "Cumulative Purchases 2020 vs 2017":
# This is standard bokeh stuff so far
plot = figure(x_axis_type="datetime", plot_height = height, plot_width=width, toolbar_location = 'below',
tools = "box_zoom, reset, pan, xwheel_zoom", title = title,
x_range = (start_date,end_date) )
plot.line(x = x,
y = y, line_width=3.5, line_alpha=0.75, line_color = "slategray")
if level_select.value == "Cumulative Purchases 2020 vs 2017":
plot = figure(x_axis_type="datetime", plot_height = height, plot_width=width, toolbar_location = 'below',
tools = "box_zoom, reset, pan", title = title,
x_range = (dt.datetime(2020,1,1),dt.datetime(2021,2,1)) )
plot.line(x = x,
y = y2017, line_width=3.5, line_alpha=0.5, line_color = "red", line_dash = "dashed"
, legend_label= "2017")
plot.line(x = x,
y = y2020, line_width=3.5, line_alpha=0.75, line_color = "darkblue"
, legend_label= "2020")
plot.legend.title = 'Cumulative Purchases'
plot.legend.location = "top_left"
plot.legend.title_text_font_style = "bold"
# fixed attributes
plot.xaxis.axis_label = None
plot.yaxis.axis_label = ""
plot.axis.axis_label_text_font_style = "bold"
plot.grid.grid_line_alpha = 0.3
TIMETOOLTIPS = """
<div style="background-color:#F5F5F5; opacity: 0.95; border: 15px 15px 15px 15px;">
<div style = "text-align:left;">"""
if level_select.value == 'Year over Year % Change':
TIMETOOLTIPS = TIMETOOLTIPS + """
<span style="font-size: 13px; font-weight: bold"> $data_x{%b %Y}: $data_y{0}%</span>
</div>
</div>
"""
plot.add_tools(HoverTool(tooltips = TIMETOOLTIPS, line_policy='nearest', formatters={'$data_x': 'datetime'}))
if level_select.value == 'US Dollars':
TIMETOOLTIPS = TIMETOOLTIPS + """
<span style="font-size: 13px; font-weight: bold"> $data_x{%b %Y}: $data_y{$0.0a}</span>
</div>
</div>
"""
plot.add_tools(HoverTool(tooltips = TIMETOOLTIPS, line_policy='nearest', formatters={'$data_x': 'datetime'}))
if level_select.value == "Cumulative Purchases 2020 vs 2017":
#################################################################################
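        # Invisible (alpha=0) circle glyphs serve as hover targets so the tooltip
        # can report each point's own month/year via @dates instead of the shared
        # 2020 x-axis position.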
singlesource2020 = ColumnDataSource({
'xs': x.values,
'ys': y2020.values,
"dates": np.array(x),
})
c2020 = plot.circle(x="xs", y="ys", size=35,
source = singlesource2020, color = "crimson",alpha=0.0)
singlesource2017 = ColumnDataSource({
'xs': x.values,
'ys': y2017.values,
"dates": np.array(pd.date_range(start="2017-01-01", end="2017-12-01", freq = "MS")),
})
c2017 = plot.circle(x="xs", y="ys", size=35,
source = singlesource2017, color = "darkblue",alpha=0.0)
TIMETOOLTIPS = TIMETOOLTIPS + """
<span style="font-size: 13px; font-weight: bold"> @dates{%b %Y}: $data_y{$0.0a}</span>
</div>
</div>
"""
plot.add_tools(HoverTool(tooltips = TIMETOOLTIPS, line_policy='nearest', formatters={'@dates': 'datetime'}, renderers = [c2017,c2020]))
if level_select.value == 'Year over Year % Change':
if y.max() > 1500:
plot.y_range.end = 1500
plot.title.text_font_size = '13pt'
plot.background_fill_color = background
plot.background_fill_alpha = 0.75
plot.border_fill_color = background
tradewar_box = BoxAnnotation(left=dt.datetime(2018,7,1), right=dt.datetime(2019,10,11), fill_color='red', fill_alpha=0.1)
plot.add_layout(tradewar_box)
tradewar_box = BoxAnnotation(left=dt.datetime(2020,1,1), right=dt.datetime(2021,12,31), fill_color='blue', fill_alpha=0.1)
plot.add_layout(tradewar_box)
#p.yaxis.axis_label =
plot.yaxis.axis_label_text_font_style = 'bold'
plot.yaxis.axis_label_text_font_size = "13px"
plot.sizing_mode= "scale_both"
if level_select.value != 'Year over Year % Change':
plot.yaxis.formatter = NumeralTickFormatter(format="($0. a)")
plot.yaxis.axis_label = "US Dollars"
if level_select.value == 'Year over Year % Change':
plot.yaxis.axis_label = level_select.value
plot.max_height = height
plot.max_width = width
plot.min_height = int(0.25*height)
plot.min_width = int(0.25*width)
return plot
def update_plot(attrname, old, new):
layout.children[0] = make_plot()
    # On any widget change, rebuild the figure and swap it in as the first child of
    # the layout row: index 0 is the plot, index 1 is the controls column, which is
    # left untouched.
level_select = Select(value=level, title='Transformations', options=['US Dollars', 'Year over Year % Change', "Cumulative Purchases 2020 vs 2017"])
level_select.on_change('value', update_plot)
#print(sorted(options))
product_select = Select(value=product, title='Product', options=sorted(options), width=400)
# This is the key thing that creates the selection object
product_select.on_change('value', update_plot)
# When the selected value changes, update_plot is called to rebuild the figure
div0 = Div(text = """Categories are at both the HS2 and HS4 level. Only Phase One covered products as defined in Annex 6-1 of The Agreement within that HS Code are shown. Red marks the period of Section 301 tariffs and retaliation. Blue is period of agreement.\n
\n
\n
""", width=400, background = background, style={"justify-content": "space-between", "display": "flex"} )
div1 = Div(text = """Transformations: US Dollars, year over year growth rate and cumulative purchases in 2017 vs 2020.\n The later transformation cumulates Chinese purchases over each month in 2017 and 2020 and compares each. Because 2017 is the benchmark year for The Agreement, this measure provides a sense, for each product category, China's progress towards meeting their purchase commitments.\n
""", width=400, background = background, style={"justify-content": "space-between", "display": "flex"} )
controls = column(product_select, div0, level_select, div1)
height = int(1.95*533)
width = int(1.95*675)
layout = row(make_plot(), controls, sizing_mode = "scale_height", max_height = height, max_width = width,
min_height = int(0.25*height), min_width = int(0.25*width))
curdoc().add_root(layout)
curdoc().title = "us-china-products"
| en | 0.622527 | ################################################################################# # This just loads in the data... # Alot of this was built of this "cross-fire demo" # https://github.com/bokeh/bokeh/blob/branch-2.3/examples/app/crossfilter/main.py #print(options) ################################################################################# #These are functions used in the plot... # what this function does is take a dataframe and create a relative ################################################################################# # Then this makes the simple plots: #foo = df.query("@a < a") # below there is an object of selections which will be one of the values in # the list of options. So the .value then grabs that particular option selected. # This is standard bokeh stuff so far # fixed attributes <div style="background-color:#F5F5F5; opacity: 0.95; border: 15px 15px 15px 15px;"> <div style = "text-align:left;"> <span style="font-size: 13px; font-weight: bold"> $data_x{%b %Y}: $data_y{0}%</span> </div> </div> <span style="font-size: 13px; font-weight: bold"> $data_x{%b %Y}: $data_y{$0.0a}</span> </div> </div> ################################################################################# <span style="font-size: 13px; font-weight: bold"> @dates{%b %Y}: $data_y{$0.0a}</span> </div> </div> #p.yaxis.axis_label = # This part is still not clear to me. but it tells it what to update and where to put it # so it updates the layout and [0] is the first option (see below there is a row with the # first entry the plot, then the controls) #print(sorted(options)) # This is the key thing that creates teh selection object # Change the value upone selection via the update plot Categories are at both the HS2 and HS4 level. Only Phase One covered products as defined in Annex 6-1 of The Agreement within that HS Code are shown. Red marks the period of Section 301 tariffs and retaliation. Blue is period of agreement.\n \n \n Transformations: US Dollars, year over year growth rate and cumulative purchases in 2017 vs 2020.\n The later transformation cumulates Chinese purchases over each month in 2017 and 2020 and compares each. Because 2017 is the benchmark year for The Agreement, this measure provides a sense, for each product category, China's progress towards meeting their purchase commitments.\n | 2.317365 | 2 |
aiohttp_middlewares/https.py | alxpy/aiohttp-middlewares | 34 | 9719 | <reponame>alxpy/aiohttp-middlewares<filename>aiohttp_middlewares/https.py
"""
================
HTTPS Middleware
================
Change scheme for current request when aiohttp application deployed behind
reverse proxy with HTTPS enabled.
Usage
=====
.. code-block:: python
from aiohttp import web
from aiohttp_middlewares import https_middleware
# Basic usage
app = web.Application(middlewares=[https_middleware()])
# Specify custom headers to match, not `X-Forwarded-Proto: https`
app = web.Application(
middlewares=https_middleware({"Forwarded": "https"})
)
"""
import logging
from aiohttp import web
from aiohttp.web_middlewares import _Handler, _Middleware
from .annotations import DictStrStr
DEFAULT_MATCH_HEADERS = {"X-Forwarded-Proto": "https"}
logger = logging.getLogger(__name__)
def https_middleware(match_headers: DictStrStr = None) -> _Middleware:
"""
Change scheme for current request when aiohttp application deployed behind
reverse proxy with HTTPS enabled.
    This middleware is required when your aiohttp app is deployed behind
    nginx with HTTPS enabled, since aiohttp discontinued the
    ``secure_proxy_ssl_header`` keyword argument in
https://github.com/aio-libs/aiohttp/pull/2299.
:param match_headers:
Dict of header(s) from reverse proxy to specify that aiohttp run behind
HTTPS. By default:
.. code-block:: python
{"X-Forwarded-Proto": "https"}
"""
@web.middleware
async def middleware(
request: web.Request, handler: _Handler
) -> web.StreamResponse:
"""Change scheme of current request when HTTPS headers matched."""
headers = DEFAULT_MATCH_HEADERS
if match_headers is not None:
headers = match_headers
matched = any(
request.headers.get(key) == value for key, value in headers.items()
)
if matched:
logger.debug(
"Substitute request URL scheme to https",
extra={
"headers": headers,
"request_headers": dict(request.headers),
},
)
request = request.clone(scheme="https")
return await handler(request)
return middleware
| """
================
HTTPS Middleware
================
Change scheme for current request when aiohttp application deployed behind
reverse proxy with HTTPS enabled.
Usage
=====
.. code-block:: python
from aiohttp import web
from aiohttp_middlewares import https_middleware
# Basic usage
app = web.Application(middlewares=[https_middleware()])
# Specify custom headers to match, not `X-Forwarded-Proto: https`
app = web.Application(
middlewares=https_middleware({"Forwarded": "https"})
)
"""
import logging
from aiohttp import web
from aiohttp.web_middlewares import _Handler, _Middleware
from .annotations import DictStrStr
DEFAULT_MATCH_HEADERS = {"X-Forwarded-Proto": "https"}
logger = logging.getLogger(__name__)
def https_middleware(match_headers: DictStrStr = None) -> _Middleware:
"""
Change scheme for current request when aiohttp application deployed behind
reverse proxy with HTTPS enabled.
    This middleware is required when your aiohttp app is deployed behind
    nginx with HTTPS enabled, since aiohttp discontinued the
    ``secure_proxy_ssl_header`` keyword argument in
https://github.com/aio-libs/aiohttp/pull/2299.
:param match_headers:
Dict of header(s) from reverse proxy to specify that aiohttp run behind
HTTPS. By default:
.. code-block:: python
{"X-Forwarded-Proto": "https"}
"""
@web.middleware
async def middleware(
request: web.Request, handler: _Handler
) -> web.StreamResponse:
"""Change scheme of current request when HTTPS headers matched."""
headers = DEFAULT_MATCH_HEADERS
if match_headers is not None:
headers = match_headers
matched = any(
request.headers.get(key) == value for key, value in headers.items()
)
if matched:
logger.debug(
"Substitute request URL scheme to https",
extra={
"headers": headers,
"request_headers": dict(request.headers),
},
)
request = request.clone(scheme="https")
return await handler(request)
return middleware | en | 0.616694 | ================ HTTPS Middleware ================ Change scheme for current request when aiohttp application deployed behind reverse proxy with HTTPS enabled. Usage ===== .. code-block:: python from aiohttp import web from aiohttp_middlewares import https_middleware # Basic usage app = web.Application(middlewares=[https_middleware()]) # Specify custom headers to match, not `X-Forwarded-Proto: https` app = web.Application( middlewares=https_middleware({"Forwarded": "https"}) ) Change scheme for current request when aiohttp application deployed behind reverse proxy with HTTPS enabled. This middleware is required to use, when your aiohttp app deployed behind nginx with HTTPS enabled, after aiohttp discounted ``secure_proxy_ssl_header`` keyword argument in https://github.com/aio-libs/aiohttp/pull/2299. :param match_headers: Dict of header(s) from reverse proxy to specify that aiohttp run behind HTTPS. By default: .. code-block:: python {"X-Forwarded-Proto": "https"} Change scheme of current request when HTTPS headers matched. | 2.383646 | 2 |
show/drawing.py | nohamanona/poke-auto-fuka | 5 | 9720 | <reponame>nohamanona/poke-auto-fuka<filename>show/drawing.py
import cv2
import numpy as np
class DrawingClass(object):
def __init__(self):
self.draw_command ='None'
self.frame_count = 0
def drawing(self, frame, fps, num_egg, htc_egg, state):
cv2.putText(frame, 'FPS: {:.2f}'.format(fps),
(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), thickness=2)
cv2.putText(frame, 'Possessed EGG: {}'.format(num_egg),
(10, 100), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 255), thickness=2)
cv2.putText(frame, 'Hatched EGG: {}'.format(htc_egg),
(10, 130), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 255), thickness=2)
cv2.putText(frame, 'State: {}'.format(state),
(250, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 255), thickness=2)
return frame
def draw_controler(self, frame, command):
#print('draw',command)
if command =='LX MIN':
self.draw_command = 'LX MIN'
elif command =='LX MAX':
self.draw_command = 'LX MAX'
elif command =='LY MIN':
self.draw_command = 'LY MIN'
elif command =='LY MAX':
self.draw_command = 'LY MAX'
elif command =='Button A':
self.draw_command = 'Button A'
elif command =='Button B':
self.draw_command = 'Button B'
elif command =='Button X':
self.draw_command = 'Button X'
elif command =='Button Y':
self.draw_command = 'Button Y'
elif command =='HAT TOP':
self.draw_command = 'HAT TOP'
elif command =='HAT RIGHT':
self.draw_command = 'HAT RIGHT'
elif command =='HAT BOTTOM':
self.draw_command = 'HAT BOTTOM'
elif command =='HAT LEFT':
self.draw_command = 'HAT LEFT'
elif command =='Button START':
self.draw_command = 'Button START'
elif command =='STOP':
self.draw_command = 'STOP'
#stick
if self.draw_command =='LX MIN' or self.draw_command =='HAT LEFT':
cv2.circle(frame, (970, 490), 20, (0, 0, 255), thickness=-1)
elif self.draw_command =='LX MAX' or self.draw_command =='HAT RIGHT':
cv2.circle(frame, (1030, 490), 20, (0, 0, 255), thickness=-1)
elif self.draw_command =='LY MIN' or self.draw_command =='HAT TOP':
cv2.circle(frame, (1000, 460), 20, (0, 0, 255), thickness=-1)
elif self.draw_command =='LY MAX' or self.draw_command =='HAT BOTTOM':
cv2.circle(frame, (1000, 520), 20, (0, 0, 255), thickness=-1)
else:
cv2.circle(frame, (1000, 490), 20, (0, 0, 255), thickness=-1)
cv2.circle(frame, (1000, 490), 50, (0, 0, 255), thickness=2)
#button
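        # A button press is latched into draw_command and drawn filled for 6
        # consecutive frames (counted by frame_count) before reverting to the
        # outline style below.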
if self.draw_command =='Button X':
cv2.circle(frame, (1180, 460), 15, (0, 0, 255), thickness=-1)
cv2.putText(frame, 'X',(1172, 468), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), thickness=2)
self.frame_count +=1
elif self.frame_count == 6:
cv2.circle(frame, (1180, 460), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'X',(1172, 468), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
self.frame_count =0
self.draw_command = 'None'
else:
cv2.circle(frame, (1180, 460), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'X',(1172, 468), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
if self.draw_command =='Button B':
cv2.circle(frame, (1180, 520), 15, (0, 0, 255), thickness=-1)
cv2.putText(frame, 'B',(1172, 528), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), thickness=2)
self.frame_count +=1
elif self.frame_count == 6:
cv2.circle(frame, (1180, 520), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'B',(1172, 528), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
self.frame_count =0
self.draw_command = 'None'
else:
cv2.circle(frame, (1180, 520), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'B',(1172, 528), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
if self.draw_command =='Button Y':
cv2.circle(frame, (1150, 490), 15, (0, 0, 255), thickness=-1)
cv2.putText(frame, 'Y',(1142, 498), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), thickness=2)
self.frame_count +=1
elif self.frame_count == 6:
cv2.circle(frame, (1150, 490), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'Y',(1142, 498), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
self.frame_count =0
self.draw_command = 'None'
else:
cv2.circle(frame, (1150, 490), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'Y',(1142, 498), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
if self.draw_command =='Button A':
cv2.circle(frame, (1210, 490), 15, (0, 0, 255), thickness=-1)
cv2.putText(frame, 'A',(1202, 498), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), thickness=2)
self.frame_count +=1
elif self.frame_count == 6:
cv2.circle(frame, (1210, 490), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'A',(1202, 498), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
self.frame_count =0
self.draw_command = 'None'
else:
cv2.circle(frame, (1210, 490), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'A',(1202, 498), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
if self.draw_command =='Button START':
cv2.circle(frame, (1130, 423), 10, (0, 0, 255), thickness=-1)
cv2.putText(frame, '+',(1120, 430), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), thickness=2)
self.frame_count +=1
elif self.frame_count == 6:
cv2.circle(frame, (1130, 423), 10, (0, 0, 255), thickness=1)
cv2.putText(frame, '+',(1120, 430), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
self.frame_count =0
self.draw_command = 'None'
else:
cv2.circle(frame, (1130, 423), 10, (0, 0, 255), thickness=1)
cv2.putText(frame, '+',(1120, 430), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
return frame | import cv2
import numpy as np
class DrawingClass(object):
def __init__(self):
self.draw_command ='None'
self.frame_count = 0
def drawing(self, frame, fps, num_egg, htc_egg, state):
cv2.putText(frame, 'FPS: {:.2f}'.format(fps),
(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), thickness=2)
cv2.putText(frame, 'Possessed EGG: {}'.format(num_egg),
(10, 100), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 255), thickness=2)
cv2.putText(frame, 'Hatched EGG: {}'.format(htc_egg),
(10, 130), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 255), thickness=2)
cv2.putText(frame, 'State: {}'.format(state),
(250, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 255), thickness=2)
return frame
def draw_controler(self, frame, command):
#print('draw',command)
if command =='LX MIN':
self.draw_command = 'LX MIN'
elif command =='LX MAX':
self.draw_command = 'LX MAX'
elif command =='LY MIN':
self.draw_command = 'LY MIN'
elif command =='LY MAX':
self.draw_command = 'LY MAX'
elif command =='Button A':
self.draw_command = 'Button A'
elif command =='Button B':
self.draw_command = 'Button B'
elif command =='Button X':
self.draw_command = 'Button X'
elif command =='Button Y':
self.draw_command = 'Button Y'
elif command =='HAT TOP':
self.draw_command = 'HAT TOP'
elif command =='HAT RIGHT':
self.draw_command = 'HAT RIGHT'
elif command =='HAT BOTTOM':
self.draw_command = 'HAT BOTTOM'
elif command =='HAT LEFT':
self.draw_command = 'HAT LEFT'
elif command =='Button START':
self.draw_command = 'Button START'
elif command =='STOP':
self.draw_command = 'STOP'
#stick
if self.draw_command =='LX MIN' or self.draw_command =='HAT LEFT':
cv2.circle(frame, (970, 490), 20, (0, 0, 255), thickness=-1)
elif self.draw_command =='LX MAX' or self.draw_command =='HAT RIGHT':
cv2.circle(frame, (1030, 490), 20, (0, 0, 255), thickness=-1)
elif self.draw_command =='LY MIN' or self.draw_command =='HAT TOP':
cv2.circle(frame, (1000, 460), 20, (0, 0, 255), thickness=-1)
elif self.draw_command =='LY MAX' or self.draw_command =='HAT BOTTOM':
cv2.circle(frame, (1000, 520), 20, (0, 0, 255), thickness=-1)
else:
cv2.circle(frame, (1000, 490), 20, (0, 0, 255), thickness=-1)
cv2.circle(frame, (1000, 490), 50, (0, 0, 255), thickness=2)
#button
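        # A button press is latched into draw_command and drawn filled for 6
        # consecutive frames (counted by frame_count) before reverting to the
        # outline style below.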
if self.draw_command =='Button X':
cv2.circle(frame, (1180, 460), 15, (0, 0, 255), thickness=-1)
cv2.putText(frame, 'X',(1172, 468), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), thickness=2)
self.frame_count +=1
elif self.frame_count == 6:
cv2.circle(frame, (1180, 460), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'X',(1172, 468), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
self.frame_count =0
self.draw_command = 'None'
else:
cv2.circle(frame, (1180, 460), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'X',(1172, 468), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
if self.draw_command =='Button B':
cv2.circle(frame, (1180, 520), 15, (0, 0, 255), thickness=-1)
cv2.putText(frame, 'B',(1172, 528), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), thickness=2)
self.frame_count +=1
elif self.frame_count == 6:
cv2.circle(frame, (1180, 520), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'B',(1172, 528), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
self.frame_count =0
self.draw_command = 'None'
else:
cv2.circle(frame, (1180, 520), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'B',(1172, 528), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
if self.draw_command =='Button Y':
cv2.circle(frame, (1150, 490), 15, (0, 0, 255), thickness=-1)
cv2.putText(frame, 'Y',(1142, 498), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), thickness=2)
self.frame_count +=1
elif self.frame_count == 6:
cv2.circle(frame, (1150, 490), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'Y',(1142, 498), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
self.frame_count =0
self.draw_command = 'None'
else:
cv2.circle(frame, (1150, 490), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'Y',(1142, 498), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
if self.draw_command =='Button A':
cv2.circle(frame, (1210, 490), 15, (0, 0, 255), thickness=-1)
cv2.putText(frame, 'A',(1202, 498), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), thickness=2)
self.frame_count +=1
elif self.frame_count == 6:
cv2.circle(frame, (1210, 490), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'A',(1202, 498), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
self.frame_count =0
self.draw_command = 'None'
else:
cv2.circle(frame, (1210, 490), 15, (0, 0, 255), thickness=2)
cv2.putText(frame, 'A',(1202, 498), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
if self.draw_command =='Button START':
cv2.circle(frame, (1130, 423), 10, (0, 0, 255), thickness=-1)
cv2.putText(frame, '+',(1120, 430), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), thickness=2)
self.frame_count +=1
elif self.frame_count == 6:
cv2.circle(frame, (1130, 423), 10, (0, 0, 255), thickness=1)
cv2.putText(frame, '+',(1120, 430), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
self.frame_count =0
self.draw_command = 'None'
else:
cv2.circle(frame, (1130, 423), 10, (0, 0, 255), thickness=1)
cv2.putText(frame, '+',(1120, 430), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), thickness=2)
return frame | en | 0.49165 | #print('draw',command) #stick #button | 2.828963 | 3 |
backtest.py | YangTaoCN/IntroNeuralNetworks | 0 | 9721 | import pandas_datareader.data as pdr
import yfinance as fix
import numpy as np
fix.pdr_override()
def back_test(strategy, seq_len, ticker, start_date, end_date, dim):
"""
A simple back test for a given date period
:param strategy: the chosen strategy. Note to have already formed the model, and fitted with training data.
:param seq_len: length of the days used for prediction
:param ticker: company ticker
:param start_date: starting date
:type start_date: "YYYY-mm-dd"
:param end_date: ending date
:type end_date: "YYYY-mm-dd"
:param dim: dimension required for strategy: 3dim for LSTM and 2dim for MLP
:type dim: tuple
:return: Percentage errors array that gives the errors for every test in the given date range
"""
data = pdr.get_data_yahoo(ticker, start_date, end_date)
stock_data = data["Adj Close"]
errors = []
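    # Slide a seq_len-long window across the series: predict the price one step
    # beyond each window (index i + seq_len + 1) and record the scaled error.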
for i in range((len(stock_data) // 10) * 10 - seq_len - 1):
        # stock_data is a single Adj Close series, so index it along one axis only
        x = np.array(stock_data.iloc[i: i + seq_len]).reshape(dim) / 200
        y = np.array(stock_data.iloc[i + seq_len + 1]) / 200
predict = strategy.predict(x)
while predict == 0:
predict = strategy.predict(x)
error = (predict - y) / 100
errors.append(error)
total_error = np.array(errors)
print(f"Average error = {total_error.mean()}")
# If you want to see the full error list then print the following statement
# print(errors)
| import pandas_datareader.data as pdr
import yfinance as fix
import numpy as np
fix.pdr_override()
def back_test(strategy, seq_len, ticker, start_date, end_date, dim):
"""
A simple back test for a given date period
:param strategy: the chosen strategy. Note to have already formed the model, and fitted with training data.
:param seq_len: length of the days used for prediction
:param ticker: company ticker
:param start_date: starting date
:type start_date: "YYYY-mm-dd"
:param end_date: ending date
:type end_date: "YYYY-mm-dd"
:param dim: dimension required for strategy: 3dim for LSTM and 2dim for MLP
:type dim: tuple
:return: Percentage errors array that gives the errors for every test in the given date range
"""
data = pdr.get_data_yahoo(ticker, start_date, end_date)
stock_data = data["Adj Close"]
errors = []
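    # Slide a seq_len-long window across the series: predict the price one step
    # beyond each window (index i + seq_len + 1) and record the scaled error.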
for i in range((len(stock_data) // 10) * 10 - seq_len - 1):
        # stock_data is a single Adj Close series, so index it along one axis only
        x = np.array(stock_data.iloc[i: i + seq_len]).reshape(dim) / 200
        y = np.array(stock_data.iloc[i + seq_len + 1]) / 200
predict = strategy.predict(x)
while predict == 0:
predict = strategy.predict(x)
error = (predict - y) / 100
errors.append(error)
total_error = np.array(errors)
print(f"Average error = {total_error.mean()}")
# If you want to see the full error list then print the following statement
# print(errors)
| en | 0.720146 | A simple back test for a given date period :param strategy: the chosen strategy. Note to have already formed the model, and fitted with training data. :param seq_len: length of the days used for prediction :param ticker: company ticker :param start_date: starting date :type start_date: "YYYY-mm-dd" :param end_date: ending date :type end_date: "YYYY-mm-dd" :param dim: dimension required for strategy: 3dim for LSTM and 2dim for MLP :type dim: tuple :return: Percentage errors array that gives the errors for every test in the given date range # If you want to see the full error list then print the following statement # print(errors) | 3.334843 | 3 |
src/tespy/components/subsystems.py | jbueck/tespy | 0 | 9722 | # -*- coding: utf-8
"""Module for custom component groups.
It is possible to create subsystems of component groups in tespy. The subsystem
class is the base class for custom subsystems.
This file is part of project TESPy (github.com/oemof/tespy). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location tespy/components/subsystems.py
SPDX-License-Identifier: MIT
"""
import logging
# %%
class subsystem:
r"""
Class subsystem is the base class of all TESPy subsystems.
Parameters
----------
label : str
The label of the subsystem.
Example
-------
Basic example for a setting up a tespy.components.subsystems.subsystem
object. This example does not run a tespy calculation!
>>> from tespy.components import subsystem
>>> mysub = subsystem('mySubsystem')
>>> type(mysub)
<class 'tespy.components.subsystems.subsystem'>
>>> mysub.get_attr('label')
'mySubsystem'
"""
def __init__(self, label):
if not isinstance(label, str):
msg = 'Subsystem label must be of type str!'
logging.error(msg)
raise ValueError(msg)
elif len([x for x in [';', ', ', '.'] if x in label]) > 0:
msg = 'Can\'t use ' + str([';', ', ', '.']) + ' in label.'
logging.error(msg)
raise ValueError(msg)
else:
self.label = label
self.comps = {}
self.conns = {}
self.create_comps()
self.create_conns()
def get_attr(self, key):
r"""
Get the value of a subsystem's attribute.
Parameters
----------
key : str
The attribute you want to retrieve.
Returns
-------
out :
Value of specified attribute.
"""
if key in self.__dict__:
return self.__dict__[key]
else:
msg = 'Subsystem ' + self.label + ' has no attribute ' + key + '.'
logging.error(msg)
raise KeyError(msg)
def create_comps(self):
"""Create the subsystem's components."""
return
def create_conns(self):
"""Create the subsystem's connections."""
return
| # -*- coding: utf-8
"""Module for custom component groups.
It is possible to create subsystems of component groups in tespy. The subsystem
class is the base class for custom subsystems.
This file is part of project TESPy (github.com/oemof/tespy). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location tespy/components/subsystems.py
SPDX-License-Identifier: MIT
"""
import logging
# %%
class subsystem:
r"""
Class subsystem is the base class of all TESPy subsystems.
Parameters
----------
label : str
The label of the subsystem.
Example
-------
Basic example for a setting up a tespy.components.subsystems.subsystem
object. This example does not run a tespy calculation!
>>> from tespy.components import subsystem
>>> mysub = subsystem('mySubsystem')
>>> type(mysub)
<class 'tespy.components.subsystems.subsystem'>
>>> mysub.get_attr('label')
'mySubsystem'
"""
def __init__(self, label):
if not isinstance(label, str):
msg = 'Subsystem label must be of type str!'
logging.error(msg)
raise ValueError(msg)
elif len([x for x in [';', ', ', '.'] if x in label]) > 0:
msg = 'Can\'t use ' + str([';', ', ', '.']) + ' in label.'
logging.error(msg)
raise ValueError(msg)
else:
self.label = label
self.comps = {}
self.conns = {}
self.create_comps()
self.create_conns()
def get_attr(self, key):
r"""
Get the value of a subsystem's attribute.
Parameters
----------
key : str
The attribute you want to retrieve.
Returns
-------
out :
Value of specified attribute.
"""
if key in self.__dict__:
return self.__dict__[key]
else:
msg = 'Subsystem ' + self.label + ' has no attribute ' + key + '.'
logging.error(msg)
raise KeyError(msg)
def create_comps(self):
"""Create the subsystem's components."""
return
def create_conns(self):
"""Create the subsystem's connections."""
return
| en | 0.600084 | # -*- coding: utf-8 Module for custom component groups. It is possible to create subsystems of component groups in tespy. The subsystem class is the base class for custom subsystems. This file is part of project TESPy (github.com/oemof/tespy). It's copyrighted by the contributors recorded in the version control history of the file, available from its original location tespy/components/subsystems.py SPDX-License-Identifier: MIT # %% Class subsystem is the base class of all TESPy subsystems. Parameters ---------- label : str The label of the subsystem. Example ------- Basic example for a setting up a tespy.components.subsystems.subsystem object. This example does not run a tespy calculation! >>> from tespy.components import subsystem >>> mysub = subsystem('mySubsystem') >>> type(mysub) <class 'tespy.components.subsystems.subsystem'> >>> mysub.get_attr('label') 'mySubsystem' Get the value of a subsystem's attribute. Parameters ---------- key : str The attribute you want to retrieve. Returns ------- out : Value of specified attribute. Create the subsystem's components. Create the subsystem's connections. | 2.928988 | 3 |
fairscale/optim/oss.py | blefaudeux/fairscale | 1 | 9723 | <filename>fairscale/optim/oss.py
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
from typing import TYPE_CHECKING, Any, Callable, List, Optional, Type
import torch
import torch.distributed as dist
from torch.optim import SGD, Optimizer
from .utils import broadcast_object, recursive_copy_to_device
if TYPE_CHECKING:
from torch.optim.optimizer import _params_t
else:
_params_t = Any
class OSS(Optimizer):
"""Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>`
optimizer and shards its state as described by ZeRO_.
::
opt = OSS(params, optim=torch.optim.Adam, lr=0.01)
.. _ZeRO: https://arxiv.org/abs/1910.02054
    The wrapped optimizer only keeps and updates the shard of parameters assigned
    to this rank, so optimizer state is partitioned across the ranks of the
    process group. After every step, each rank broadcasts the parameters it owns
    to the other ranks, keeping all replicas consistent.
Args:
params (list of tensors):
parameters to be optimized
Keyword Args:
optim (torch.nn.Optimizer):
optimizer to shard (default: SGD)
group (group):
torch.distributed group (default: group.WORLD)
"""
optim: Optimizer
in_super_constructor: bool
def __init__(
self,
params: _params_t,
optim: Type[Optimizer] = SGD,
group: Any = dist.group.WORLD,
**defaults: Any
):
self.in_super_constructor = True
super().__init__(params, defaults)
self.in_super_constructor = False
self.group = group
self.rank = dist.get_rank(group)
param_groups = self.partition_parameters()
self.optim = optim(param_groups[self.rank], **defaults)
# Optional consolidated optimizer state
self._global_state_dict = []
def partition_parameters(self) -> List[List[dict]]:
"""Partitions parameters across distributed ranks.
Returns a list of param_groups (which is a list of dict) where each
element of the list contains the param_groups for a rank. Element 0
corresponds to rank 0, etc. We need all the ranks for the broadcast
inside step().
"""
world_size = dist.get_world_size(self.group)
param_groups: List[List] = [list() for _ in range(world_size)]
sizes = [0] * world_size
for param_group in self.param_groups:
param_lists: List[List] = [list() for _ in range(world_size)]
for param in param_group["params"]:
# Add this param to rank with smallest size.
rank = sizes.index(min(sizes))
param_lists[rank].append(param)
sizes[rank] += param.numel()
for rank, params in enumerate(param_lists):
if len(params) > 0:
pg = copy.copy(param_group)
pg["params"] = params
param_groups[rank].append(pg)
return param_groups
def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
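        # Step the wrapped optimizer on this rank's shard, then broadcast each
        # rank's updated parameters so every replica ends up with the full,
        # consistent set of weights.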
loss = self.optim.step(closure=closure)
for rank, param_groups in enumerate(self.partition_parameters()):
for param_group in param_groups:
for param in param_group["params"]:
dist.broadcast(param, rank, group=self.group)
return loss
def state_dict(self) -> dict:
""" Gets this rank's state_dict. """
return self.optim.state_dict()
def _collect_state_dict(self) -> List[dict]:
"""
Collect all the state shards
"""
empty_buffer = torch.empty([1], dtype=torch.uint8)
global_optim_state = []
local_state = self.state_dict()
if len(local_state["state"]) == 0:
return []
for rank in range(dist.get_world_size(group=self.group)):
if rank == self.rank:
logging.info("Saving self state")
global_optim_state.append(
recursive_copy_to_device(
local_state, non_blocking=True, device=torch.device("cpu")
)
)
# Sync with other replicas
broadcast_object(empty_buffer, src_rank=rank)
else:
# Reuse the param_groups from this rank, these are shared across replicas
logging.info("Receiving state from rank %s ", rank)
replica_state = {
"state": broadcast_object(empty_buffer, src_rank=rank),
"param_groups": local_state["param_groups"],
}
# Fetch from the other replicas
global_optim_state.append(
recursive_copy_to_device(
replica_state, non_blocking=True, device=torch.device("cpu")
)
)
logging.info("State from rank %s received", rank)
return global_optim_state
def _broadcast_state_dict(self) -> None:
"""
Broadcast this rank's state shard, discard others
"""
empty_buffer = torch.empty([1], dtype=torch.uint8)
local_state = self.state_dict()
if len(local_state["state"]) == 0:
return
for rank in range(dist.get_world_size(group=self.group)):
if rank == self.rank:
# Send the state to the reference replica
logging.info(
"Sending the sharded SGD state to the reference replica from rank %s",
rank,
)
broadcast_object(local_state["state"], src_rank=rank)
else:
# Discard this tensor/rank, broadcast necessary for syncing
logging.info("Discarding broadcast from rank %s", rank)
broadcast_object(empty_buffer, src_rank=rank)
    def consolidate_state_dict(self, recipient_rank: int = 0) -> None:
""" Update the consolidated state_dict list, one per rank.
This needs to be called on all replicas """
if self.rank == recipient_rank:
# Pull the sharded state from all the other replicas
# Store all the states in order, rank by rank
logging.info("Pulling the sharded SGD state from all replicas")
self._global_state_dict = self._collect_state_dict()
else:
# Acknowledge broadcasts, and send this rank's shard when needed
self._broadcast_state_dict()
@property
def global_state_dict(self):
"""
        Return the last known global optimizer state, which consists of a list of the shards.
NOTE: This is limited to the replica which was responsible for the consolidation.
The state may also not be up to date, depending on when `consolidate_state_dict` was last called
"""
assert (
len(self._global_state_dict) > 0
), "The optimizer state is not materialized, please call consolidate_state_dict on every replica beforehand"
return self._global_state_dict
def load_state_dict(self, state_dict: dict) -> None:
""" Loads this rank's state_dict. """
self.optim.load_state_dict(state_dict)
def add_param_group(self, param_group: dict) -> None:
super().add_param_group(param_group)
if not self.in_super_constructor:
param_groups = self.partition_parameters()[self.rank]
if len(param_groups) == len(self.optim.param_groups) + 1:
self.optim.add_param_group(param_groups[-1])
| <filename>fairscale/optim/oss.py
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
from typing import TYPE_CHECKING, Any, Callable, List, Optional, Type
import torch
import torch.distributed as dist
from torch.optim import SGD, Optimizer
from .utils import broadcast_object, recursive_copy_to_device
if TYPE_CHECKING:
from torch.optim.optimizer import _params_t
else:
_params_t = Any
class OSS(Optimizer):
"""Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>`
optimizer and shards its state as described by ZeRO_.
::
opt = OSS(params, optim=torch.optim.Adam, lr=0.01)
.. _ZeRO: https://arxiv.org/abs/1910.02054
    The wrapped optimizer only keeps and updates the shard of parameters assigned
    to this rank, so optimizer state is partitioned across the ranks of the
    process group. After every step, each rank broadcasts the parameters it owns
    to the other ranks, keeping all replicas consistent.
Args:
params (list of tensors):
parameters to be optimized
Keyword Args:
optim (torch.nn.Optimizer):
optimizer to shard (default: SGD)
group (group):
torch.distributed group (default: group.WORLD)
"""
optim: Optimizer
in_super_constructor: bool
def __init__(
self,
params: _params_t,
optim: Type[Optimizer] = SGD,
group: Any = dist.group.WORLD,
**defaults: Any
):
self.in_super_constructor = True
super().__init__(params, defaults)
self.in_super_constructor = False
self.group = group
self.rank = dist.get_rank(group)
param_groups = self.partition_parameters()
self.optim = optim(param_groups[self.rank], **defaults)
# Optional consolidated optimizer state
self._global_state_dict = []
def partition_parameters(self) -> List[List[dict]]:
"""Partitions parameters across distributed ranks.
Returns a list of param_groups (which is a list of dict) where each
element of the list contains the param_groups for a rank. Element 0
corresponds to rank 0, etc. We need all the ranks for the broadcast
inside step().
"""
world_size = dist.get_world_size(self.group)
param_groups: List[List] = [list() for _ in range(world_size)]
sizes = [0] * world_size
for param_group in self.param_groups:
param_lists: List[List] = [list() for _ in range(world_size)]
for param in param_group["params"]:
# Add this param to rank with smallest size.
rank = sizes.index(min(sizes))
param_lists[rank].append(param)
sizes[rank] += param.numel()
for rank, params in enumerate(param_lists):
if len(params) > 0:
pg = copy.copy(param_group)
pg["params"] = params
param_groups[rank].append(pg)
return param_groups
def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
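        # Step the wrapped optimizer on this rank's shard, then broadcast each
        # rank's updated parameters so every replica ends up with the full,
        # consistent set of weights.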
loss = self.optim.step(closure=closure)
for rank, param_groups in enumerate(self.partition_parameters()):
for param_group in param_groups:
for param in param_group["params"]:
dist.broadcast(param, rank, group=self.group)
return loss
def state_dict(self) -> dict:
""" Gets this rank's state_dict. """
return self.optim.state_dict()
def _collect_state_dict(self) -> List[dict]:
"""
Collect all the state shards
"""
empty_buffer = torch.empty([1], dtype=torch.uint8)
global_optim_state = []
local_state = self.state_dict()
if len(local_state["state"]) == 0:
return []
for rank in range(dist.get_world_size(group=self.group)):
if rank == self.rank:
logging.info("Saving self state")
global_optim_state.append(
recursive_copy_to_device(
local_state, non_blocking=True, device=torch.device("cpu")
)
)
# Sync with other replicas
broadcast_object(empty_buffer, src_rank=rank)
else:
# Reuse the param_groups from this rank, these are shared across replicas
logging.info("Receiving state from rank %s ", rank)
replica_state = {
"state": broadcast_object(empty_buffer, src_rank=rank),
"param_groups": local_state["param_groups"],
}
# Fetch from the other replicas
global_optim_state.append(
recursive_copy_to_device(
replica_state, non_blocking=True, device=torch.device("cpu")
)
)
logging.info("State from rank %s received", rank)
return global_optim_state
def _broadcast_state_dict(self) -> None:
"""
Broadcast this rank's state shard, discard others
"""
empty_buffer = torch.empty([1], dtype=torch.uint8)
local_state = self.state_dict()
if len(local_state["state"]) == 0:
return
for rank in range(dist.get_world_size(group=self.group)):
if rank == self.rank:
# Send the state to the reference replica
logging.info(
"Sending the sharded SGD state to the reference replica from rank %s",
rank,
)
broadcast_object(local_state["state"], src_rank=rank)
else:
# Discard this tensor/rank, broadcast necessary for syncing
logging.info("Discarding broadcast from rank %s", rank)
broadcast_object(empty_buffer, src_rank=rank)
    def consolidate_state_dict(self, recipient_rank: int = 0) -> None:
""" Update the consolidated state_dict list, one per rank.
This needs to be called on all replicas """
if self.rank == recipient_rank:
# Pull the sharded state from all the other replicas
# Store all the states in order, rank by rank
logging.info("Pulling the sharded SGD state from all replicas")
self._global_state_dict = self._collect_state_dict()
else:
# Acknowledge broadcasts, and send this rank's shard when needed
self._broadcast_state_dict()
@property
def global_state_dict(self):
"""
        Return the last known global optimizer state, which consists of a list of the shards.
NOTE: This is limited to the replica which was responsible for the consolidation.
The state may also not be up to date, depending on when `consolidate_state_dict` was last called
"""
assert (
len(self._global_state_dict) > 0
), "The optimizer state is not materialized, please call consolidate_state_dict on every replica beforehand"
return self._global_state_dict
def load_state_dict(self, state_dict: dict) -> None:
""" Loads this rank's state_dict. """
self.optim.load_state_dict(state_dict)
def add_param_group(self, param_group: dict) -> None:
super().add_param_group(param_group)
if not self.in_super_constructor:
param_groups = self.partition_parameters()[self.rank]
if len(param_groups) == len(self.optim.param_groups) + 1:
self.optim.add_param_group(param_groups[-1])
| en | 0.861915 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. # # This source code is licensed under the BSD license found in the # LICENSE file in the root directory of this source tree. Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>` optimizer and shards its state as described by ZeRO_. :: opt = OSS(params, optim=torch.optim.Adam, lr=0.01) .. _ZeRO: https://arxiv.org/abs/1910.02054 Pipe combines pipeline parallelism with checkpointing to reduce peak memory required to train while minimizing device under-utilization. You should determine the balance when defining a :class:`Pipe` module, as balancing will not be done automatically. The module will be partitioned into multiple devices according to the given balance. You may rely on heuristics to find your own optimal configuration. Args: params (list of tensors): parameters to be optimized Keyword Args: optim (torch.nn.Optimizer): optimizer to shard (default: SGD) group (group): torch.distributed group (default: group.WORLD) # Optional consolidated optimizer state Partitions parameters across distributed ranks. Returns a list of param_groups (which is a list of dict) where each element of the list contains the param_groups for a rank. Element 0 corresponds to rank 0, etc. We need all the ranks for the broadcast inside step(). # Add this param to rank with smallest size. Gets this rank's state_dict. Collect all the state shards # Sync with other replicas # Reuse the param_groups from this rank, these are shared across replicas # Fetch from the other replicas Broadcast this rank's state shard, discard others # Send the state to the reference replica # Discard this tensor/rank, broadcast necessary for syncing Update the consolidated state_dict list, one per rank. This needs to be called on all replicas # Pull the sharded state from all the other replicas # Store all the states in order, rank by rank # Acknowledge broadcasts, and send this rank's shard when needed Return the last known global optimizer state, which consist of a list of the shards. NOTE: This is limited to the replica which was responsible for the consolidation. The state may also not be up to date, depending on when `consolidate_state_dict` was last called Loads this rank's state_dict. | 2.09564 | 2 |
setup.py | ninezerozeronine/raytracing-one-weekend | 0 | 9724 | from setuptools import setup, find_packages
setup(
name="raytracing-one-weekend",
version="0.0.0",
author="<NAME>",
author_email="<EMAIL>",
description="A raytracer achievable in a weekend.",
url="https://github.com/ninezerozeronine/raytracing-one-weekend",
install_requires=[
"Pillow",
"numpy",
],
packages=find_packages('src'),
package_dir={'': 'src'},
)
| from setuptools import setup, find_packages
setup(
name="raytracing-one-weekend",
version="0.0.0",
author="<NAME>",
author_email="<EMAIL>",
description="A raytracer achievable in a weekend.",
url="https://github.com/ninezerozeronine/raytracing-one-weekend",
install_requires=[
"Pillow",
"numpy",
],
packages=find_packages('src'),
package_dir={'': 'src'},
)
| none | 1 | 1.288188 | 1 |
|
homepage/urls.py | r0kym/SNI-backend | 1 | 9725 | <gh_stars>1-10
"""
URLconf of the homepage
"""
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('auth', views.auth, name='auth'),
path('auth/public', views.auth_public, name='auth-public'),
path('auth/full', views.auth_full, name='auth-full'),
path('auth/invite', views.auth_invite, name='auth-invite'),
path('callback/sni', views.sni_callback, name='sni_callback'),
path('logout', views.logout, name='logout'),
path('403', views.no_perm, name='no-permission'),
path('404', views.not_found, name='not-found'),
]
| """
URLconf of the homepage
"""
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('auth', views.auth, name='auth'),
path('auth/public', views.auth_public, name='auth-public'),
path('auth/full', views.auth_full, name='auth-full'),
path('auth/invite', views.auth_invite, name='auth-invite'),
path('callback/sni', views.sni_callback, name='sni_callback'),
path('logout', views.logout, name='logout'),
path('403', views.no_perm, name='no-permission'),
path('404', views.not_found, name='not-found'),
] | en | 0.797408 | URLconf of the homepage | 2.052678 | 2 |
srcflib/email/__init__.py | mas90/srcf-python | 0 | 9726 | """
Notification email machinery, for tasks to send credentials and instructions to users.
Email templates placed inside the `templates` directory of this module should:
- extend from `layout`
- provide `subject` and `body` blocks
"""
from enum import Enum
import os.path
from jinja2 import Environment, FileSystemLoader
from sqlalchemy.orm import Session as SQLASession
from srcf.database import Member, Society
from srcf.mail import send_mail
from ..plumbing import Owner, owner_desc, owner_name, owner_website
ENV = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), "templates")),
trim_blocks=True, lstrip_blocks=True)
ENV.filters.update({"is_member": lambda mem: isinstance(mem, Member),
"is_society": lambda soc: isinstance(soc, Society),
"owner_name": owner_name,
"owner_desc": owner_desc,
"owner_website": owner_website})
CURRENT_WRAPPER = None
class Layout(Enum):
"""
Base layout template to be inherited by an email-specific template.
"""
SUBJECT = "/common/subject.j2"
"""
Subject line of the email.
"""
BODY = "/common/body.j2"
"""
Main content of the email.
"""
class EmailWrapper:
"""
Context manager for email sending, used to augment emails with additional metadata.
"""
def __init__(self, subject: str = None, body: str = None, context: dict = None):
self._layouts = {Layout.SUBJECT: subject,
Layout.BODY: body}
self._context = context
def render(self, template: str, layout: Layout, target: Owner, context: dict = None):
"""
Render an email template with Jinja using the provided context.
"""
context = dict(context or (), layout=layout.value, target=target)
out = ENV.get_template(template).render(context)
custom = self._layouts.get(layout)
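        # A wrapper-supplied layout string (e.g. the default "[SRCF] {}" subject)
        # wraps the rendered output; any extra wrapper context is merged in before
        # formatting.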
if custom:
if self._context:
context.update(self._context)
out = custom.format(out, **context)
if layout == Layout.SUBJECT:
out = " ".join(out.split())
return out
def __enter__(self):
global CURRENT_WRAPPER
if CURRENT_WRAPPER:
raise RuntimeError("Another context is already active")
CURRENT_WRAPPER = self
def __exit__(self, exception_type, exception_value, traceback):
global CURRENT_WRAPPER
CURRENT_WRAPPER = None
DEFAULT_WRAPPER = EmailWrapper(subject="[SRCF] {}")
def send(target: Owner, template: str, context: dict = None, session: SQLASession = None):
"""
Render and send an email to the target member or society.
"""
wrapper = CURRENT_WRAPPER or DEFAULT_WRAPPER
subject = wrapper.render(template, Layout.SUBJECT, target, context)
body = wrapper.render(template, Layout.BODY, target, context)
recipient = (owner_desc(target, True), target.email)
send_mail(recipient, subject, body, copy_sysadmins=False, session=session)
| """
Notification email machinery, for tasks to send credentials and instructions to users.
Email templates placed inside the `templates` directory of this module should:
- extend from `layout`
- provide `subject` and `body` blocks
"""
from enum import Enum
import os.path
from jinja2 import Environment, FileSystemLoader
from sqlalchemy.orm import Session as SQLASession
from srcf.database import Member, Society
from srcf.mail import send_mail
from ..plumbing import Owner, owner_desc, owner_name, owner_website
ENV = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), "templates")),
trim_blocks=True, lstrip_blocks=True)
ENV.filters.update({"is_member": lambda mem: isinstance(mem, Member),
"is_society": lambda soc: isinstance(soc, Society),
"owner_name": owner_name,
"owner_desc": owner_desc,
"owner_website": owner_website})
CURRENT_WRAPPER = None
class Layout(Enum):
"""
Base layout template to be inherited by an email-specific template.
"""
SUBJECT = "/common/subject.j2"
"""
Subject line of the email.
"""
BODY = "/common/body.j2"
"""
Main content of the email.
"""
class EmailWrapper:
"""
Context manager for email sending, used to augment emails with additional metadata.
"""
def __init__(self, subject: str = None, body: str = None, context: dict = None):
self._layouts = {Layout.SUBJECT: subject,
Layout.BODY: body}
self._context = context
def render(self, template: str, layout: Layout, target: Owner, context: dict = None):
"""
Render an email template with Jinja using the provided context.
"""
context = dict(context or (), layout=layout.value, target=target)
out = ENV.get_template(template).render(context)
custom = self._layouts.get(layout)
if custom:
if self._context:
context.update(self._context)
out = custom.format(out, **context)
if layout == Layout.SUBJECT:
out = " ".join(out.split())
return out
def __enter__(self):
global CURRENT_WRAPPER
if CURRENT_WRAPPER:
raise RuntimeError("Another context is already active")
CURRENT_WRAPPER = self
def __exit__(self, exception_type, exception_value, traceback):
global CURRENT_WRAPPER
CURRENT_WRAPPER = None
DEFAULT_WRAPPER = EmailWrapper(subject="[SRCF] {}")
def send(target: Owner, template: str, context: dict = None, session: SQLASession = None):
"""
Render and send an email to the target member or society.
"""
wrapper = CURRENT_WRAPPER or DEFAULT_WRAPPER
subject = wrapper.render(template, Layout.SUBJECT, target, context)
body = wrapper.render(template, Layout.BODY, target, context)
recipient = (owner_desc(target, True), target.email)
send_mail(recipient, subject, body, copy_sysadmins=False, session=session)
| en | 0.774941 | Notification email machinery, for tasks to send credentials and instructions to users. Email templates placed inside the `templates` directory of this module should: - extend from `layout` - provide `subject` and `body` blocks Base layout template to be inherited by an email-specific template. Subject line of the email. Main content of the email. Context manager for email sending, used to augment emails with additional metadata. Render an email template with Jinja using the provided context. Render and send an email to the target member or society. | 2.538509 | 3 |
nose2_example/my_package/myapp.py | dolfandringa/PythonProjectStructureDemo | 2 | 9727 | from .operations import Multiply, Add, Substract
class MyApp(object):
def __init__(self):
self.operations={'multiply': Multiply,
'add': Add,
'substract': Substract}
def do(self, operation, number1, number2):
return self.operations[operation.lower()].do(number1, number2)
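# Usage sketch (assumes each operation class exposes a callable `do(number1, number2)`,
# as the dispatch above implies):
# app = MyApp()
# app.do("Add", 2, 3)       # key is lower-cased, so case does not matter
# app.do("multiply", 4, 5)  # -> Multiply.do(4, 5)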
| from .operations import Multiply, Add, Substract
class MyApp(object):
def __init__(self):
self.operations={'multiply': Multiply,
'add': Add,
'substract': Substract}
def do(self, operation, number1, number2):
return self.operations[operation.lower()].do(number1, number2)
| none | 1 | 3.253654 | 3 |
|
src/train_nn.py | anirudhbhashyam/911-Calls-Seattle-Predictions | 0 | 9728 | <gh_stars>0
import os
from typing import Union
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, KFold
import utility as ut
from variables import *
# Read the data.
train_data = pd.read_csv(os.path.join(DATA_PATH, ".".join([DATA_TRAIN, DATA_EXT])), header = 0)
# Get the labels.
Y = train_data.pop(LABEL)
sample_weights = np.ones(Y.shape[0])
for i in range(10, 24):
sample_weights[train_data["_".join(("hour", str(i)))] == 1] = 1.5
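# Rows in hours 10-23 are weighted 1.5x; the one-hot "hour_<i>" columns are
# assumed to come from the preprocessing step, and these presumably mark the
# busier call periods the model should fit more closely.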
# -- For classification -- #
# CLASSES = np.unique(Y)
# N_CLASSES = len(CLASSES)
# Y = Y.replace(dict(zip(CLASSES, range(0, len(CLASSES)))))
# Data shape parameters.
N_FEATURES = train_data.shape[1]
N_SAMPLES = train_data.shape[0]
# Split the training data.
X_train, X_val, Y_train, Y_val = train_test_split(train_data, Y, shuffle = True, random_state = 7919)
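# With no test_size given, train_test_split keeps scikit-learn's default 25% hold-out as the validation set.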
def build_and_compile(input_: tuple = (WB_SIZE, N_FEATURES),
loss_func: str = "mae") -> tf.keras.Model:
"""
    Build and compile a TensorFlow LSTM network.
Parameters
----------
input_ :
        Shape of the training data. Should specify
        `(batch_size or window_size, n_features)`.
loss_func :
Loss function to use for training.
Returns
-------
`tf.keras.Model` :
A compiled TensorFlow model.
"""
    # Sequential Keras model.
model = tf.keras.models.Sequential([
tf.keras.layers.LSTM(50, input_shape = input_, return_sequences = True),
tf.keras.layers.LSTM(50, return_sequences = False),
tf.keras.layers.GaussianNoise(1.0),
tf.keras.layers.Dense(1024, activation = "relu"),
tf.keras.layers.Dropout(0.7),
tf.keras.layers.Dense(128, activation = "relu"),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(64, activation = "relu"),
tf.keras.layers.GaussianNoise(0.2),
# tf.keras.layers.Dense(32, activation = "relu"),
# tf.keras.layers.GaussianNoise(0.7),
tf.keras.layers.Dense(1, activation = "relu")
])
# Compile the model.
model.compile(
loss = loss_func,
optimizer = "adam"
)
return model
def train(model: tf.keras.Model,
train_data: np.ndarray,
train_labels: np.ndarray,
val_data: np.ndarray,
val_labels: np.ndarray,
epochs: int = 200,
          sample_weights: np.ndarray = None,
cross_val = False) -> pd.DataFrame:
"""
Trains the TensorFlow `model`.
Parameters
----------
model :
A TensorFlow compiled model.
train_data :
The data to be trained. Shape must be consistent with what is passed during model compilation.
train_labels :
The ground truth predictions.
val_data :
The data to be used as validation.
val_labels :
The ground truth validation predictions.
epochs :
Total number of epochs to train.
sample_weights :
Weights for `train_data` to use during training.
Returns
-------
pd.DataFrame:
Training information.
"""
    # Stop early once validation loss stops improving (guards against overfitting).
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor = "val_loss",
min_delta = 0.001,
patience = 100,
restore_best_weights = False)
history = model.fit(
train_data.reshape(-1, WB_SIZE, N_FEATURES),
train_labels,
sample_weight = sample_weights,
validation_data = (val_data.reshape(-1, WB_SIZE, N_FEATURES), val_labels),
verbose = 1,
epochs = epochs,
callbacks = early_stopping)
return pd.DataFrame(history.history)
# def cross_validate(train_data: pd.DataFrame,
# train_labels: pd.DataFrame,
# epochs: int = 50,
# sample_weights: np.array = None,
# folds: int = 2) -> pd.DataFrame:
# splits = KFold(n_splits = folds, shuffle = True)
# print("Starting cross validation.")
# accuracy = list()
# val_loss = list()
# models = list()
# for i, (train_index, test_index) in enumerate(splits.split(train_data, train_labels)):
# print(f"Iteration {i}\n")
# X_train, X_val, Y_train, Y_val = train_data[train_index], train_data[test_index], train_data[train_index], train_labels[test_index]
# model = build_and_compile((WB_SIZE, N_FEATURES), "mae")
# history_df = train(model, X_train, Y_train, epochs)
# # train_stats(history_df, i)
# scores = model.evaluate(X_val.reshape(-1, WB_SIZE, N_FEATURES), Y_val)
# print(f"Validation loss: {scores}\n")
# #of {scores[0]} {model.metrics_names[1]} of {scores[1] * 100:.2f}%")
# # accuracy.append(scores[1] * 100)
# val_loss.append(scores)
# models.append(model)
# return models[np.argmin(val_loss)]
def train_stats(history_df: pd.DataFrame, it: int = None) -> None:
"""
Produces training statistics once training has run its course.
Parameters
----------
history_df :
The history as returned by Keras `fit` method.
it :
        To be used with cross validation. Specifies the name of the learning curve based on the cross validation iteration `it`.
Returns
-------
`None`
"""
# Learning curve.
plt.rcParams["figure.dpi"] = 160
history_df.loc[:, ["loss", "val_loss"]].plot()
plt.title("Model Loss")
plt.ylabel("Loss")
plt.xlabel("Epoch")
name = TRAIN_FIG_SAVE_NAME
if it is not None:
name = "_".join([name, str(it)])
plt.savefig(os.path.join(TRAIN_FIG_SAVE_PATH, ".".join([name, FIG_EXT])))
# Stats
print(f"Minimum validation loss: {history_df['val_loss'].min()}")
# plt.plot(f"Accuracy: {history_df['train_accuracy']}")
# plt.plot(f"Validation Accuracy: {history_df['val_accuracy']}")
return None
def main():
model = build_and_compile((WB_SIZE, N_FEATURES))
# model = cross_validate(np.array(train_data), np.array(Y))
history_df = train(model, np.array(X_train), np.array(Y_train), np.array(X_val), np.array(Y_val))
# train_stats(history_df)
# Save trained model (better to use checkpoints).
model.save(os.path.join(NN_MODEL_SAVE_PATH, NN_MODEL_SAVE_NAME))
if __name__ == "__main__":
main()
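# Later-inference sketch (assumption -- not part of this script): the saved model
# can be reloaded and fed windows shaped exactly like the training reshape above.
# loaded = tf.keras.models.load_model(os.path.join(NN_MODEL_SAVE_PATH, NN_MODEL_SAVE_NAME))
# preds = loaded.predict(np.array(X_val).reshape(-1, WB_SIZE, N_FEATURES))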
| import os
from typing import Union
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, KFold
import utility as ut
from variables import *
# Read the data.
train_data = pd.read_csv(os.path.join(DATA_PATH, ".".join([DATA_TRAIN, DATA_EXT])), header = 0)
# Get the labels.
Y = train_data.pop(LABEL)
sample_weights = np.ones(Y.shape[0])
for i in range(10, 24):
sample_weights[train_data["_".join(("hour", str(i)))] == 1] = 1.5
# -- For classification -- #
# CLASSES = np.unique(Y)
# N_CLASSES = len(CLASSES)
# Y = Y.replace(dict(zip(CLASSES, range(0, len(CLASSES)))))
# Data shape parameters.
N_FEATURES = train_data.shape[1]
N_SAMPLES = train_data.shape[0]
# Split the training data.
X_train, X_val, Y_train, Y_val = train_test_split(train_data, Y, shuffle = True, random_state = 7919)
def build_and_compile(input_: tuple = (WB_SIZE, N_FEATURES),
loss_func: str = "mae") -> tf.keras.Model:
"""
    Build and compile a TensorFlow LSTM network.
Parameters
----------
input_ :
        Shape of the training data. Should specify
        `(batch_size or window_size, n_features)`.
loss_func :
Loss function to use for training.
Returns
-------
`tf.keras.Model` :
A compiled TensorFlow model.
"""
    # Sequential Keras model.
model = tf.keras.models.Sequential([
tf.keras.layers.LSTM(50, input_shape = input_, return_sequences = True),
tf.keras.layers.LSTM(50, return_sequences = False),
tf.keras.layers.GaussianNoise(1.0),
tf.keras.layers.Dense(1024, activation = "relu"),
tf.keras.layers.Dropout(0.7),
tf.keras.layers.Dense(128, activation = "relu"),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(64, activation = "relu"),
tf.keras.layers.GaussianNoise(0.2),
# tf.keras.layers.Dense(32, activation = "relu"),
# tf.keras.layers.GaussianNoise(0.7),
tf.keras.layers.Dense(1, activation = "relu")
])
# Compile the model.
model.compile(
loss = loss_func,
optimizer = "adam"
)
return model
def train(model: tf.keras.Model,
train_data: np.ndarray,
train_labels: np.ndarray,
val_data: np.ndarray,
val_labels: np.ndarray,
epochs: int = 200,
          sample_weights: np.ndarray = None,
cross_val = False) -> pd.DataFrame:
"""
Trains the TensorFlow `model`.
Parameters
----------
model :
A TensorFlow compiled model.
train_data :
The data to be trained. Shape must be consistent with what is passed during model compilation.
train_labels :
The ground truth predictions.
val_data :
The data to be used as validation.
val_labels :
The ground truth validation predictions.
epochs :
Total number of epochs to train.
sample_weights :
Weights for `train_data` to use during training.
Returns
-------
pd.DataFrame:
Training information.
"""
# Check for overfitting.
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor = "val_loss",
min_delta = 0.001,
patience = 100,
restore_best_weights = False)
history = model.fit(
train_data.reshape(-1, WB_SIZE, N_FEATURES),
train_labels,
sample_weight = sample_weights,
validation_data = (val_data.reshape(-1, WB_SIZE, N_FEATURES), val_labels),
verbose = 1,
epochs = epochs,
callbacks = early_stopping)
return pd.DataFrame(history.history)
# def cross_validate(train_data: pd.DataFrame,
# train_labels: pd.DataFrame,
# epochs: int = 50,
# sample_weights: np.array = None,
# folds: int = 2) -> pd.DataFrame:
# splits = KFold(n_splits = folds, shuffle = True)
# print("Starting cross validation.")
# accuracy = list()
# val_loss = list()
# models = list()
# for i, (train_index, test_index) in enumerate(splits.split(train_data, train_labels)):
# print(f"Iteration {i}\n")
# X_train, X_val, Y_train, Y_val = train_data[train_index], train_data[test_index], train_data[train_index], train_labels[test_index]
# model = build_and_compile((WB_SIZE, N_FEATURES), "mae")
# history_df = train(model, X_train, Y_train, epochs)
# # train_stats(history_df, i)
# scores = model.evaluate(X_val.reshape(-1, WB_SIZE, N_FEATURES), Y_val)
# print(f"Validation loss: {scores}\n")
# #of {scores[0]} {model.metrics_names[1]} of {scores[1] * 100:.2f}%")
# # accuracy.append(scores[1] * 100)
# val_loss.append(scores)
# models.append(model)
# return models[np.argmin(val_loss)]
def train_stats(history_df: pd.DataFrame, it: int = None) -> None:
"""
Produces training statistics once training has run its course.
Parameters
----------
history_df :
The history as returned by Keras `fit` method.
it :
        To be used with cross validation. Specifies the name of the learning curve based on the cross validation iteration `it`.
Returns
-------
`None`
"""
# Learning curve.
plt.rcParams["figure.dpi"] = 160
history_df.loc[:, ["loss", "val_loss"]].plot()
plt.title("Model Loss")
plt.ylabel("Loss")
plt.xlabel("Epoch")
name = TRAIN_FIG_SAVE_NAME
if it is not None:
name = "_".join([name, str(it)])
plt.savefig(os.path.join(TRAIN_FIG_SAVE_PATH, ".".join([name, FIG_EXT])))
# Stats
print(f"Minimum validation loss: {history_df['val_loss'].min()}")
# plt.plot(f"Accuracy: {history_df['train_accuracy']}")
# plt.plot(f"Validation Accuracy: {history_df['val_accuracy']}")
return None
def main():
model = build_and_compile((WB_SIZE, N_FEATURES))
# model = cross_validate(np.array(train_data), np.array(Y))
history_df = train(model, np.array(X_train), np.array(Y_train), np.array(X_val), np.array(Y_val))
# train_stats(history_df)
# Save trained model (better to use checkpoints).
model.save(os.path.join(NN_MODEL_SAVE_PATH, NN_MODEL_SAVE_NAME))
if __name__ == "__main__":
main() | en | 0.640894 | # Read the data. # Get the labels. # -- For classification -- # # CLASSES = np.unique(Y) # N_CLASSES = len(CLASSES) # Y = Y.replace(dict(zip(CLASSES, range(0, len(CLASSES))))) # Data shape parameters. # Split the training data. Build and compile a TensorFLow LSTM network. Parameters ---------- input_ : Shape of the trainining data. Should specify `(batch_size` or `window_size, n_features)` loss_func : Loss function to use for training. Returns ------- `tf.keras.Model` : A compiled TensorFlow model. # Seqential keras model. # tf.keras.layers.Dense(32, activation = "relu"), # tf.keras.layers.GaussianNoise(0.7), # Compile the model. Trains the TensorFlow `model`. Parameters ---------- model : A TensorFlow compiled model. train_data : The data to be trained. Shape must be consistent with what is passed during model compilation. train_labels : The ground truth predictions. val_data : The data to be used as validation. val_labels : The ground truth validation predictions. epochs : Total number of epochs to train. sample_weights : Weights for `train_data` to use during training. Returns ------- pd.DataFrame: Training information. # Check for overfitting. # def cross_validate(train_data: pd.DataFrame, # train_labels: pd.DataFrame, # epochs: int = 50, # sample_weights: np.array = None, # folds: int = 2) -> pd.DataFrame: # splits = KFold(n_splits = folds, shuffle = True) # print("Starting cross validation.") # accuracy = list() # val_loss = list() # models = list() # for i, (train_index, test_index) in enumerate(splits.split(train_data, train_labels)): # print(f"Iteration {i}\n") # X_train, X_val, Y_train, Y_val = train_data[train_index], train_data[test_index], train_data[train_index], train_labels[test_index] # model = build_and_compile((WB_SIZE, N_FEATURES), "mae") # history_df = train(model, X_train, Y_train, epochs) # # train_stats(history_df, i) # scores = model.evaluate(X_val.reshape(-1, WB_SIZE, N_FEATURES), Y_val) # print(f"Validation loss: {scores}\n") # #of {scores[0]} {model.metrics_names[1]} of {scores[1] * 100:.2f}%") # # accuracy.append(scores[1] * 100) # val_loss.append(scores) # models.append(model) # return models[np.argmin(val_loss)] Produces training statistics once training has run its course. Parameters ---------- history_df : The history as returned by Keras `fit` method. it : To be used with cross validation. Specifies the name of the learning curve based on the cross validation itertation `it`. Returns ------- `None` # Learning curve. # Stats # plt.plot(f"Accuracy: {history_df['train_accuracy']}") # plt.plot(f"Validation Accuracy: {history_df['val_accuracy']}") # model = cross_validate(np.array(train_data), np.array(Y)) # train_stats(history_df) # Save trained model (better to use checkpoints). | 3.119279 | 3 |
pdserver/objects.py | Gustavo6046/polydung | 0 | 9729 | <gh_stars>0
import base64
import random
import string
import netbyte
import numpy as np
try:
import simplejson as json
except ImportError:
import json
kinds = {}
class PDObject(object):
def __init__(self, game, kind, id, pos, properties):
self.game = game
self.kind = kind
self.id = id or ''.join([random.choice(string.ascii_letters + string.digits + "#$%*") for _ in range(100)])
self.pos = np.array(pos)
self.properties = properties
self.game.handle_object_creation(self)
def __getitem__(self, key): # a shortcut for Netbyte
return self.properties[key]
def __setitem__(self, key, value): # not only a shortcut for Netbyte
self.properties[key] = value
self.game.update_object(self)
def __call__(self, key, **kwargs):
nbe = netbyte.Netbyte()
nbe['self'] = self
nbe['game'] = self.game
for k, v in kwargs.items():
nbe[k] = v
nbe.execute_instructions(*self.kind.functions[key])
def tick(self, timedelta):
self('tick', timedelta=timedelta)
def serialize(self):
return json.dumps({
"kind": self.kind.name,
'id': self.id,
'pos': self.pos.tolist(),
"properties": self.properties
})
@classmethod
def deserialize(cls, game, js):
data = json.loads(js)
return cls(game, kinds[data['kind']], data['id'], data['pos'], data['properties'])
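# Round-trip note: PDObject.deserialize(game, obj.serialize()) rebuilds an
# equivalent object -- the kind is looked up by name in the module-level `kinds`
# registry, and __init__ fires game.handle_object_creation again for the copy.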
class PDClass(object):
def __init__(self, game, name, functions=()):
self.functions = dict(functions)
self.name = name
kinds[name] = self
nbe = netbyte.Netbyte()
def serializable(self):
return {
'name': self.name,
'functions': {k: nbe.dump(v, name="{}.{}".format(self.name, k)) for k, v in self.functions.items()}
} | import base64
import random
import string
import netbyte
import numpy as np
try:
import simplejson as json
except ImportError:
import json
kinds = {}
class PDObject(object):
def __init__(self, game, kind, id, pos, properties):
self.game = game
self.kind = kind
self.id = id or ''.join([random.choice(string.ascii_letters + string.digits + "#$%*") for _ in range(100)])
self.pos = np.array(pos)
self.properties = properties
self.game.handle_object_creation(self)
def __getitem__(self, key): # a shortcut for Netbyte
return self.properties[key]
def __setitem__(self, key, value): # not only a shortcut for Netbyte
self.properties[key] = value
self.game.update_object(self)
def __call__(self, key, **kwargs):
nbe = netbyte.Netbyte()
nbe['self'] = self
nbe['game'] = self.game
for k, v in kwargs.items():
nbe[k] = v
nbe.execute_instructions(*self.kind.functions[key])
def tick(self, timedelta):
self('tick', timedelta=timedelta)
def serialize(self):
return json.dumps({
"kind": self.kind.name,
'id': self.id,
'pos': self.pos.tolist(),
"properties": self.properties
})
@classmethod
def deserialize(cls, game, js):
data = json.loads(js)
return cls(game, kinds[data['kind']], data['id'], data['pos'], data['properties'])
class PDClass(object):
def __init__(self, game, name, functions=()):
self.functions = dict(functions)
self.name = name
kinds[name] = self
nbe = netbyte.Netbyte()
def serializable(self):
return {
'name': self.name,
'functions': {k: nbe.dump(v, name="{}.{}".format(self.name, k)) for k, v in self.functions.items()}
} | en | 0.422646 | # a shortcut for Netbyte # not only a shortcut for Netbyte | 2.516005 | 3 |
football/football_test.py | EEdwardsA/DS-OOP-Review | 0 | 9730 | import unittest
from players import Player, Quarterback
from possible_values import *
from game import Game
from random import randint, uniform, sample
from season import *
# TODO - some things you can add...
class FootballGameTest(unittest.TestCase):
'''test the class'''
def test_field_goal_made(self):
teams = sample(team_names, k=2)
game = Game(teams=teams)
team_prev_points = game.score[teams[0]]
game.field_goal(teams[0])
team_post_points = game.score[teams[0]]
self.assertEqual(team_post_points, team_prev_points + 3)
def test_get_winner(self):
teams = sample(team_names, k=2)
game = Game(teams=teams)
game.field_goal(teams[0])
t1_points = game.score[teams[0]]
t2_points = game.score[teams[1]]
if t1_points >= t2_points:
win, lose = teams
else:
lose, win = teams
self.assertEqual((win,lose), game.get_winning_team())
class FootballPlayerTest(unittest.TestCase):
'''Check the default values for Player and Quarterback
yards=120, touchdowns=5, safety=1,
interceptions=0
'''
def test_default_player_yards(self):
player = Player(name='Dude')
self.assertEqual(player.yards, 120)
def test_player_yards_set_to(self):
player = Player(name='OtherDude', yards=150)
self.assertEqual(player.yards, 150)
def test_default_qb_interceptions(self):
qb = Quarterback(name='FancyDude')
self.assertEqual(qb.interceptions, 4)
def test_default_qb_completed_passes(self):
qb = Quarterback()
self.assertEqual(qb.completed_passes, 20)
def test_passing_score(self):
qb = Quarterback()
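        # Expected score = completed passes minus 2 per interception: 20 - 2 * 4 = 12
        # (assuming passing_score() weights interceptions this way, which is what
        # the assertion below encodes).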
self.assertEqual((20 - (2 * 4)), qb.passing_score())
if __name__ == '__main__':
unittest.main()
| import unittest
from players import Player, Quarterback
from possible_values import *
from game import Game
from random import randint, uniform, sample
from season import *
# TODO - some things you can add...
class FootballGameTest(unittest.TestCase):
'''test the class'''
def test_field_goal_made(self):
teams = sample(team_names, k=2)
game = Game(teams=teams)
team_prev_points = game.score[teams[0]]
game.field_goal(teams[0])
team_post_points = game.score[teams[0]]
self.assertEqual(team_post_points, team_prev_points + 3)
def test_get_winner(self):
teams = sample(team_names, k=2)
game = Game(teams=teams)
game.field_goal(teams[0])
t1_points = game.score[teams[0]]
t2_points = game.score[teams[1]]
if t1_points >= t2_points:
win, lose = teams
else:
lose, win = teams
self.assertEqual((win,lose), game.get_winning_team())
class FootballPlayerTest(unittest.TestCase):
'''Check the default values for Player and Quarterback
yards=120, touchdowns=5, safety=1,
interceptions=0
'''
def test_default_player_yards(self):
player = Player(name='Dude')
self.assertEqual(player.yards, 120)
def test_player_yards_set_to(self):
player = Player(name='OtherDude', yards=150)
self.assertEqual(player.yards, 150)
def test_default_qb_interceptions(self):
qb = Quarterback(name='FancyDude')
self.assertEqual(qb.interceptions, 4)
def test_default_qb_completed_passes(self):
qb = Quarterback()
self.assertEqual(qb.completed_passes, 20)
def test_passing_score(self):
qb = Quarterback()
self.assertEqual((20 - (2 * 4)), qb.passing_score())
if __name__ == '__main__':
unittest.main()
| en | 0.638937 | # TODO - some things you can add... test the class Check the default values for Player and Quarterback yards=120, touchdowns=5, safety=1, interceptions=0 | 3.603117 | 4 |
preprocessor/base.py | shayanthrn/AGAIN-VC | 3 | 9731 | <reponame>shayanthrn/AGAIN-VC
import os
import logging
import numpy as np
from tqdm import tqdm
from functools import partial
from multiprocessing.pool import ThreadPool
import pyworld as pw
import resemblyzer  # needed by the 'resemblyzer' dtype branch in preprocess_one
from util.dsp import Dsp
logger = logging.getLogger(__name__)
def preprocess_one(input_items, module, output_path=''):
input_path, basename = input_items
y = module.load_wav(input_path)
if module.config.dtype == 'wav':
ret = y
elif module.config.dtype == 'melspectrogram':
ret = module.wav2mel(y)
elif module.config.dtype == 'f0':
f0, sp, ap = pw.wav2world(y.astype(np.float64), module.config.sample_rate)
ret = f0
if (f0 == 0).all():
logger.warn(f'f0 returns all zeros: {input_path}')
elif module.config.dtype == 's3prl_spec':
ret = module.wav2s3prl_spec(y)
if ret is None:
logger.warn(f'S3PRL spectrogram returns NoneType: {input_path}')
elif module.config.dtype == 'resemblyzer':
y = resemblyzer.preprocess_wav(input_path)
ret = module.wav2resemblyzer(y)
else:
logger.warn(f'Not implement feature type {module.config.dtype}')
if output_path == '':
return ret
else:
if type(ret) is np.ndarray:
np.save(os.path.join(output_path, f'{basename}.npy'), ret)
else:
logger.warn(f'Feature {module.config.dtype} is not saved: {input_path}.')
return 1
class BasePreproceccor():
def __init__(self, config):
self.dsp_modules = {}
for feat in config.feat_to_preprocess:
self.dsp_modules[feat] = Dsp(config.feat[feat])
def preprocess(self, input_path, output_path, feat, njobs):
file_dict = self.gen_file_dict(input_path)
logger.info(f'Starting to preprocess from {input_path}.')
self.preprocess_from_file_dict(file_dict=file_dict, output_path=output_path, feat=feat, njobs=njobs)
logger.info(f'Saving processed file to {output_path}.')
return
def preprocess_from_file_dict(self, file_dict, output_path, feat, njobs):
os.makedirs(os.path.join(output_path, feat), exist_ok=True)
module = self.dsp_modules[feat]
task = partial(preprocess_one, module=module, output_path=os.path.join(output_path, feat))
with ThreadPool(njobs) as pool:
_ = list(tqdm(pool.imap(task, file_dict.items()), total=len(file_dict), desc=f'Preprocessing '))
def gen_file_dict(self, input_path):
raise NotImplementedError
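# Subclass sketch (assumption -- concrete preprocessors elsewhere in the repo
# provide gen_file_dict); it must map each input path to the basename used for
# the saved .npy file:
# class WavFolderPreprocessor(BasePreproceccor):
#     def gen_file_dict(self, input_path):
#         return {os.path.join(r, f): os.path.splitext(f)[0]
#                 for r, _, fs in os.walk(input_path) for f in fs if f.endswith(".wav")}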
| import os
import logging
import numpy as np
from tqdm import tqdm
from functools import partial
from multiprocessing.pool import ThreadPool
import pyworld as pw
import resemblyzer  # needed by the 'resemblyzer' dtype branch in preprocess_one
from util.dsp import Dsp
logger = logging.getLogger(__name__)
def preprocess_one(input_items, module, output_path=''):
input_path, basename = input_items
y = module.load_wav(input_path)
if module.config.dtype == 'wav':
ret = y
elif module.config.dtype == 'melspectrogram':
ret = module.wav2mel(y)
elif module.config.dtype == 'f0':
f0, sp, ap = pw.wav2world(y.astype(np.float64), module.config.sample_rate)
ret = f0
if (f0 == 0).all():
logger.warn(f'f0 returns all zeros: {input_path}')
elif module.config.dtype == 's3prl_spec':
ret = module.wav2s3prl_spec(y)
if ret is None:
logger.warn(f'S3PRL spectrogram returns NoneType: {input_path}')
elif module.config.dtype == 'resemblyzer':
y = resemblyzer.preprocess_wav(input_path)
ret = module.wav2resemblyzer(y)
else:
logger.warn(f'Not implement feature type {module.config.dtype}')
if output_path == '':
return ret
else:
if type(ret) is np.ndarray:
np.save(os.path.join(output_path, f'{basename}.npy'), ret)
else:
logger.warn(f'Feature {module.config.dtype} is not saved: {input_path}.')
return 1
class BasePreproceccor():
def __init__(self, config):
self.dsp_modules = {}
for feat in config.feat_to_preprocess:
self.dsp_modules[feat] = Dsp(config.feat[feat])
def preprocess(self, input_path, output_path, feat, njobs):
file_dict = self.gen_file_dict(input_path)
logger.info(f'Starting to preprocess from {input_path}.')
self.preprocess_from_file_dict(file_dict=file_dict, output_path=output_path, feat=feat, njobs=njobs)
logger.info(f'Saving processed file to {output_path}.')
return
def preprocess_from_file_dict(self, file_dict, output_path, feat, njobs):
os.makedirs(os.path.join(output_path, feat), exist_ok=True)
module = self.dsp_modules[feat]
task = partial(preprocess_one, module=module, output_path=os.path.join(output_path, feat))
with ThreadPool(njobs) as pool:
_ = list(tqdm(pool.imap(task, file_dict.items()), total=len(file_dict), desc=f'Preprocessing '))
def gen_file_dict(self, input_path):
raise NotImplementedError | none | 1 | 2.217593 | 2 |
|
divsum_stats.py | fjruizruano/SatIntExt | 0 | 9732 | <filename>divsum_stats.py<gh_stars>0
#!/usr/bin/python
import sys
from subprocess import call
print "divsum_count.py ListOfDivsumFiles\n"
try:
files = sys.argv[1]
except:
files = raw_input("Introduce RepeatMasker's list of Divsum files with library size (tab separated): ")
files = open(files).readlines()
to_join = []
header = "Coverage for each repeat class and divergence (Kimura)\n"
results = {}
for line in files:
line = line.split("\t")
file = line[0]
size = int(line[1])
data = open(file).readlines()
matrix_start = data.index(header)
matrix = data[matrix_start+1:]
    li = []
names_line = matrix[0]
info = names_line.split()
for fam in info:
li.append([fam])
info_len = len(li)
for line in matrix[1:]:
info = line.split()
for i in range(0,info_len):
li[i].append(info[i])
out = open(file+".counts","w")
out.write("Sequence\tAbundance\n")
stats = open(file+".stats","w")
stats.write("Sequence\tDivergence\tTotalAbundance\tMaxAbundance\tMaxPeak\tRPS\tDIVPEAK\n")
for el in li[1:]:
numbers = el[1:]
numbers = [int(x) for x in numbers]
numbers_prop = [1.0*x/size for x in numbers]
prop_dict = {}
prop_li = []
for prop in range(0,len(numbers_prop)):
prop_dict[prop] = numbers_prop[prop]
prop_li.append(numbers_prop[prop])
prop_dict_sorted = sorted(prop_dict.items(), key=lambda x: x[1], reverse=True)
total = sum(numbers_prop)
top = prop_dict_sorted[0]
top_div = top[0]
top_ab = top[1]
peak = []
if top_div >= 2:
for div in range(top_div-2,top_div+3):
peak.append(prop_dict[div])
else:
for div in range(0,5):
peak.append(prop_dict[div])
sum_peak = sum(peak)
rps = sum_peak/total
divpeak = top_div
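        # RPS = fraction of this repeat's total abundance falling inside a
        # 5-divergence-class window centred on its most abundant class (DIVPEAK);
        # values near 1 mean the landscape is concentrated in one sharp peak.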
out.write(el[0]+"\t"+str(sum(numbers))+"\n")
all_divs = []
for d in li[0][1:]:
all_divs.append(int(d)+0.5)
div_sumproduct = 0
for x,y in zip(all_divs,prop_li):
div_sumproduct += x * y
divergence = div_sumproduct/total
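        # Abundance-weighted mean Kimura divergence; each integer bin is taken at
        # its midpoint (the +0.5 above) before weighting by the per-bin proportions.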
data = "%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (el[0],str(divergence),str(total),str(top_ab),str(sum_peak),str(rps),str(divpeak))
stats.write(data)
data2 = "%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (file, str(divergence),str(total),str(top_ab),str(sum_peak),str(rps),str(divpeak))
if el[0] in results:
results[el[0]].append(data2)
else:
results[el[0]] = [data2]
out.close()
stats.close()
to_join.append(file+".counts")
out = open("results.txt", "w")
for el in sorted(results):
info = results[el]
out.write("%s\tDivergence\tTotalAbundance\tMaxAbundance\tMaxPeak\tRPS\tDIVPEAK\n" % (el))
for i in info:
out.write(i)
out.write("\n\n\n")
out.close()
call("join_multiple_lists.py %s" % (" ".join(to_join)), shell=True)
| <filename>divsum_stats.py<gh_stars>0
#!/usr/bin/python
import sys
from subprocess import call
print "divsum_count.py ListOfDivsumFiles\n"
try:
files = sys.argv[1]
except:
files = raw_input("Introduce RepeatMasker's list of Divsum files with library size (tab separated): ")
files = open(files).readlines()
to_join = []
header = "Coverage for each repeat class and divergence (Kimura)\n"
results = {}
for line in files:
line = line.split("\t")
file = line[0]
size = int(line[1])
data = open(file).readlines()
matrix_start = data.index(header)
matrix = data[matrix_start+1:]
li= []
names_line = matrix[0]
info = names_line.split()
for fam in info:
li.append([fam])
info_len = len(li)
for line in matrix[1:]:
info = line.split()
for i in range(0,info_len):
li[i].append(info[i])
out = open(file+".counts","w")
out.write("Sequence\tAbundance\n")
stats = open(file+".stats","w")
stats.write("Sequence\tDivergence\tTotalAbundance\tMaxAbundance\tMaxPeak\tRPS\tDIVPEAK\n")
for el in li[1:]:
numbers = el[1:]
numbers = [int(x) for x in numbers]
numbers_prop = [1.0*x/size for x in numbers]
prop_dict = {}
prop_li = []
for prop in range(0,len(numbers_prop)):
prop_dict[prop] = numbers_prop[prop]
prop_li.append(numbers_prop[prop])
prop_dict_sorted = sorted(prop_dict.items(), key=lambda x: x[1], reverse=True)
total = sum(numbers_prop)
top = prop_dict_sorted[0]
top_div = top[0]
top_ab = top[1]
peak = []
if top_div >= 2:
for div in range(top_div-2,top_div+3):
peak.append(prop_dict[div])
else:
for div in range(0,5):
peak.append(prop_dict[div])
sum_peak = sum(peak)
rps = sum_peak/total
divpeak = top_div
out.write(el[0]+"\t"+str(sum(numbers))+"\n")
all_divs = []
for d in li[0][1:]:
all_divs.append(int(d)+0.5)
div_sumproduct = 0
for x,y in zip(all_divs,prop_li):
div_sumproduct += x * y
divergence = div_sumproduct/total
data = "%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (el[0],str(divergence),str(total),str(top_ab),str(sum_peak),str(rps),str(divpeak))
stats.write(data)
data2 = "%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (file, str(divergence),str(total),str(top_ab),str(sum_peak),str(rps),str(divpeak))
if el[0] in results:
results[el[0]].append(data2)
else:
results[el[0]] = [data2]
out.close()
stats.close()
to_join.append(file+".counts")
out = open("results.txt", "w")
for el in sorted(results):
info = results[el]
out.write("%s\tDivergence\tTotalAbundance\tMaxAbundance\tMaxPeak\tRPS\tDIVPEAK\n" % (el))
for i in info:
out.write(i)
out.write("\n\n\n")
out.close()
call("join_multiple_lists.py %s" % (" ".join(to_join)), shell=True)
| ru | 0.258958 | #!/usr/bin/python | 2.885818 | 3 |
agatecharts/charts/__init__.py | onyxfish/fever | 4 | 9733 | <gh_stars>1-10
#!/usr/bin/env python
from agatecharts.charts.bars import Bars
from agatecharts.charts.columns import Columns
from agatecharts.charts.lines import Lines
from agatecharts.charts.scatter import Scatter
| #!/usr/bin/env python
from agatecharts.charts.bars import Bars
from agatecharts.charts.columns import Columns
from agatecharts.charts.lines import Lines
from agatecharts.charts.scatter import Scatter | ru | 0.26433 | #!/usr/bin/env python | 1.308807 | 1 |
users/views.py | rossm6/accounts | 11 | 9734 | from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.contrib.auth.views import (LoginView, PasswordResetConfirmView,
PasswordResetView)
from django.http import HttpResponse, HttpResponseNotAllowed
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import CreateView, DeleteView, UpdateView
from users.forms import (SignInForm, SignUpForm, UserPasswordResetForm,
UserProfileForm, UserSetPasswordForm)
from users.mixins import LockDuringEditMixin
from users.models import Lock, UserSession
class SignUp(CreateView):
model = User
form_class = SignUpForm
template_name = "registration/signup.html"
success_url = reverse_lazy("dashboard:dashboard")
class SignIn(LoginView):
form_class = SignInForm
class Profile(LoginRequiredMixin, LockDuringEditMixin, UpdateView):
model = User
form_class = UserProfileForm
template_name = "registration/profile.html"
success_url = reverse_lazy("users:profile")
def get_object(self):
return self.request.user
def form_valid(self, form):
response = super().form_valid(form)
update_session_auth_hash(self.request, self.object) # this will delete the current user session
# and create anew
UserSession.objects.create(user=self.object, session_id=self.request.session.session_key)
return response
class UserPasswordResetView(PasswordResetView):
form_class = UserPasswordResetForm
class UserPasswordResetConfirmView(PasswordResetConfirmView):
form_class = UserSetPasswordForm
def unlock(request, pk):
if request.method == "POST":
        Lock.objects.filter(pk=pk).delete()
return HttpResponse('')
return HttpResponseNotAllowed(["POST"])
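# unlock() is intended to be POSTed (e.g. via AJAX from the edit page) with the
# Lock's pk so an abandoned edit lock from LockDuringEditMixin can be released;
# the locking details themselves are assumed to live in users/mixins.py.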
| from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from django.contrib.auth.views import (LoginView, PasswordResetConfirmView,
PasswordResetView)
from django.http import HttpResponse, HttpResponseNotAllowed
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import CreateView, DeleteView, UpdateView
from users.forms import (SignInForm, SignUpForm, UserPasswordResetForm,
UserProfileForm, UserSetPasswordForm)
from users.mixins import LockDuringEditMixin
from users.models import Lock, UserSession
class SignUp(CreateView):
model = User
form_class = SignUpForm
template_name = "registration/signup.html"
success_url = reverse_lazy("dashboard:dashboard")
class SignIn(LoginView):
form_class = SignInForm
class Profile(LoginRequiredMixin, LockDuringEditMixin, UpdateView):
model = User
form_class = UserProfileForm
template_name = "registration/profile.html"
success_url = reverse_lazy("users:profile")
def get_object(self):
return self.request.user
def form_valid(self, form):
response = super().form_valid(form)
update_session_auth_hash(self.request, self.object) # this will delete the current user session
# and create anew
UserSession.objects.create(user=self.object, session_id=self.request.session.session_key)
return response
class UserPasswordResetView(PasswordResetView):
form_class = UserPasswordResetForm
class UserPasswordResetConfirmView(PasswordResetConfirmView):
form_class = UserSetPasswordForm
def unlock(request, pk):
if request.method == "POST":
lock = Lock.objects.filter(pk=pk).delete()
return HttpResponse('')
return HttpResponseNotAllowed(["POST"])
| en | 0.93328 | # this will delete the current user session # and create anew | 2.131227 | 2 |
test/core/s3_table_test_base.py | adidas/m3d-api | 24 | 9735 | <reponame>adidas/m3d-api<filename>test/core/s3_table_test_base.py
import os
from test.core.emr_system_unit_test_base import EMRSystemUnitTestBase
from test.core.tconx_helper import TconxHelper
class S3TableTestBase(EMRSystemUnitTestBase):
default_tconx = \
"test/resources/s3_table_test_base/tconx-bdp-emr_test-dev-bi_test101.json"
multi_partition_tconx = \
"test/resources/s3_table_test_base/tconx-bdp-emr_test-dev-bi_test102.json"
single_partition_tconx = \
"test/resources/s3_table_test_base/tconx-bdp-emr_test-dev-bi_test103.json"
def env_setup(
self,
tmpdir,
destination_system,
destination_database,
destination_environment,
destination_table
):
"""
This function builds on top of EMRSystemUnitTestBase.env_setup() and adds test-specific tconx file.
:param tmpdir: test case specific temporary directory where configuration files will be created.
:param destination_system: destination system code
:param destination_database: destination database code
:param destination_environment: destination environment code
:param destination_table: destination table code
:return: Function will return several parameters:
m3d_config_path: paths of test-specific config.json. Should be passed to M3D API calls.
scon_emr_path: paths of test-specific scon_emr
tconx_path: paths of test-specific tconx
m3d_config_dict: contents of test-specific config.json as dict
scon_emr_dict: contents of test-specific scon_emr as dict
"""
m3d_config_file, scon_emr_file, m3d_config_dict, scon_emr_dict = \
super(S3TableTestBase, self).env_setup(
tmpdir,
destination_system,
destination_database,
destination_environment
)
# tconx specific part
tconx_file = TconxHelper.setup_tconx_from_file(
m3d_config_dict["tags"]["config"],
destination_system,
destination_database,
destination_environment,
destination_table,
S3TableTestBase.default_tconx
)
return m3d_config_file, scon_emr_file, tconx_file, \
m3d_config_dict, scon_emr_dict
@staticmethod
def assert_one_hql_sent(dump_dir, expected_hql):
generated_files = map(lambda f: os.path.join(dump_dir, f), os.listdir(dump_dir))
hql_files = list(filter(lambda f: os.path.isfile(f) and f.endswith(".hql"), generated_files))
assert len(hql_files) == 1
hql_file = hql_files[0]
with open(hql_file, 'r') as hql_f:
generated_hql = hql_f.read()
generated_hql_processed = generated_hql.strip().lower()
expected_hql_processed = expected_hql.strip().lower()
assert generated_hql_processed == expected_hql_processed
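# Typical call from a concrete test case (sketch -- the argument values are read
# off the default tconx filename above and are otherwise assumptions):
# m3d_config, scon_emr, tconx, m3d_dict, scon_dict = self.env_setup(
#     tmpdir, "bdp", "emr_test", "dev", "bi_test101")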
| import os
from test.core.emr_system_unit_test_base import EMRSystemUnitTestBase
from test.core.tconx_helper import TconxHelper
class S3TableTestBase(EMRSystemUnitTestBase):
default_tconx = \
"test/resources/s3_table_test_base/tconx-bdp-emr_test-dev-bi_test101.json"
multi_partition_tconx = \
"test/resources/s3_table_test_base/tconx-bdp-emr_test-dev-bi_test102.json"
single_partition_tconx = \
"test/resources/s3_table_test_base/tconx-bdp-emr_test-dev-bi_test103.json"
def env_setup(
self,
tmpdir,
destination_system,
destination_database,
destination_environment,
destination_table
):
"""
This function builds on top of EMRSystemUnitTestBase.env_setup() and adds test-specific tconx file.
:param tmpdir: test case specific temporary directory where configuration files will be created.
:param destination_system: destination system code
:param destination_database: destination database code
:param destination_environment: destination environment code
:param destination_table: destination table code
:return: Function will return several parameters:
m3d_config_path: paths of test-specific config.json. Should be passed to M3D API calls.
scon_emr_path: paths of test-specific scon_emr
tconx_path: paths of test-specific tconx
m3d_config_dict: contents of test-specific config.json as dict
scon_emr_dict: contents of test-specific scon_emr as dict
"""
m3d_config_file, scon_emr_file, m3d_config_dict, scon_emr_dict = \
super(S3TableTestBase, self).env_setup(
tmpdir,
destination_system,
destination_database,
destination_environment
)
# tconx specific part
tconx_file = TconxHelper.setup_tconx_from_file(
m3d_config_dict["tags"]["config"],
destination_system,
destination_database,
destination_environment,
destination_table,
S3TableTestBase.default_tconx
)
return m3d_config_file, scon_emr_file, tconx_file, \
m3d_config_dict, scon_emr_dict
@staticmethod
def assert_one_hql_sent(dump_dir, expected_hql):
generated_files = map(lambda f: os.path.join(dump_dir, f), os.listdir(dump_dir))
hql_files = list(filter(lambda f: os.path.isfile(f) and f.endswith(".hql"), generated_files))
assert len(hql_files) == 1
hql_file = hql_files[0]
with open(hql_file, 'r') as hql_f:
generated_hql = hql_f.read()
generated_hql_processed = generated_hql.strip().lower()
expected_hql_processed = expected_hql.strip().lower()
assert generated_hql_processed == expected_hql_processed | en | 0.624647 | This function builds on top of EMRSystemUnitTestBase.env_setup() and adds test-specific tconx file. :param tmpdir: test case specific temporary directory where configuration files will be created. :param destination_system: destination system code :param destination_database: destination database code :param destination_environment: destination environment code :param destination_table: destination table code :return: Function will return several parameters: m3d_config_path: paths of test-specific config.json. Should be passed to M3D API calls. scon_emr_path: paths of test-specific scon_emr tconx_path: paths of test-specific tconx m3d_config_dict: contents of test-specific config.json as dict scon_emr_dict: contents of test-specific scon_emr as dict # tconx specific part | 2.163591 | 2 |
metrics/serializers.py | BrianWaganerSTL/RocketDBaaS | 1 | 9736 | from rest_framework import serializers
from metrics.models import Metrics_Cpu, Metrics_PingServer, Metrics_MountPoint, \
Metrics_CpuLoad, Metrics_PingDb
class Metrics_CpuSerializer(serializers.ModelSerializer):
class Meta:
model = Metrics_Cpu
fields = '__all__'
depth = 0
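# For all serializers in this module: fields = '__all__' exposes every model
# field, and depth = 0 (the DRF default) keeps related objects as primary keys
# rather than nested representations.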
class Metrics_MountPointSerializer(serializers.ModelSerializer):
class Meta:
model = Metrics_MountPoint
fields = '__all__'
depth = 0
class Metrics_CpuLoadSerializer(serializers.ModelSerializer):
class Meta:
model = Metrics_CpuLoad
fields = '__all__'
depth = 0
class Metrics_PingServerSerializer(serializers.ModelSerializer):
class Meta:
model = Metrics_PingServer
fields = '__all__'
depth = 0
class Metrics_PingDbSerializer(serializers.ModelSerializer):
class Meta:
model = Metrics_PingDb
fields = '__all__'
depth = 0 | from rest_framework import serializers
from metrics.models import Metrics_Cpu, Metrics_PingServer, Metrics_MountPoint, \
Metrics_CpuLoad, Metrics_PingDb
class Metrics_CpuSerializer(serializers.ModelSerializer):
class Meta:
model = Metrics_Cpu
fields = '__all__'
depth = 0
class Metrics_MountPointSerializer(serializers.ModelSerializer):
class Meta:
model = Metrics_MountPoint
fields = '__all__'
depth = 0
class Metrics_CpuLoadSerializer(serializers.ModelSerializer):
class Meta:
model = Metrics_CpuLoad
fields = '__all__'
depth = 0
class Metrics_PingServerSerializer(serializers.ModelSerializer):
class Meta:
model = Metrics_PingServer
fields = '__all__'
depth = 0
class Metrics_PingDbSerializer(serializers.ModelSerializer):
class Meta:
model = Metrics_PingDb
fields = '__all__'
depth = 0 | none | 1 | 1.981495 | 2 |
|
sqlc/private/sqlc_toolchain.bzl | dmayle/rules_sqlc | 2 | 9737 | # Copyright 2020 Plezentek, Inc. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"//sqlc/private:providers.bzl",
"SQLCRelease",
)
load(
"//sqlc/private/rules_go/lib:platforms.bzl",
"PLATFORMS",
)
def _sqlc_toolchain_impl(ctx):
release = ctx.attr.release[SQLCRelease]
cross_compile = ctx.attr.goos != release.goos or ctx.attr.goarch != release.goarch
return [platform_common.ToolchainInfo(
name = ctx.label.name,
cross_compile = cross_compile,
default_goos = ctx.attr.goos,
default_goarch = ctx.attr.goarch,
actions = struct(),
flags = struct(),
release = release,
)]
sqlc_toolchain = rule(
_sqlc_toolchain_impl,
attrs = {
"goos": attr.string(
mandatory = True,
doc = "Default target OS",
),
"goarch": attr.string(
mandatory = True,
doc = "Default target architecture",
),
"release": attr.label(
mandatory = True,
providers = [SQLCRelease],
cfg = "exec",
doc = "The SQLC release this toolchain is based on",
),
},
doc = "Defines a SQLC toolchain based on a release",
provides = [platform_common.ToolchainInfo],
)
def declare_toolchains(host, release):
host_goos, _, host_goarch = host.partition("_")
for p in PLATFORMS:
toolchain_name = "sqlc_" + p.name
impl_name = toolchain_name + "-impl"
cgo_constraints = (
"@com_plezentek_rules_sqlc//sqlc/toolchain:cgo_off",
"@com_plezentek_rules_sqlc//sqlc/toolchain:cgo_on",
)
constraints = [c for c in p.constraints if c not in cgo_constraints]
sqlc_toolchain(
name = impl_name,
goos = p.goos,
goarch = p.goarch,
release = release,
tags = ["manual"],
visibility = ["//visibility:public"],
)
native.toolchain(
name = toolchain_name,
toolchain_type = "@com_plezentek_rules_sqlc//sqlc:toolchain",
exec_compatible_with = [
"@com_plezentek_rules_sqlc//sqlc/toolchain:" + host_goos,
"@com_plezentek_rules_sqlc//sqlc/toolchain:" + host_goarch,
],
target_compatible_with = constraints,
toolchain = ":" + impl_name,
)
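# Example (sketch): a generated BUILD file would typically call something like
#   declare_toolchains(host = "linux_amd64", release = "@sqlc_linux_amd64//:release")
# The host string and release label here are assumptions -- they come from the
# repository rule that downloads the SQLC release, not from this file.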
| # Copyright 2020 Plezentek, Inc. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"//sqlc/private:providers.bzl",
"SQLCRelease",
)
load(
"//sqlc/private/rules_go/lib:platforms.bzl",
"PLATFORMS",
)
def _sqlc_toolchain_impl(ctx):
release = ctx.attr.release[SQLCRelease]
cross_compile = ctx.attr.goos != release.goos or ctx.attr.goarch != release.goarch
return [platform_common.ToolchainInfo(
name = ctx.label.name,
cross_compile = cross_compile,
default_goos = ctx.attr.goos,
default_goarch = ctx.attr.goarch,
actions = struct(),
flags = struct(),
release = release,
)]
sqlc_toolchain = rule(
_sqlc_toolchain_impl,
attrs = {
"goos": attr.string(
mandatory = True,
doc = "Default target OS",
),
"goarch": attr.string(
mandatory = True,
doc = "Default target architecture",
),
"release": attr.label(
mandatory = True,
providers = [SQLCRelease],
cfg = "exec",
doc = "The SQLC release this toolchain is based on",
),
},
doc = "Defines a SQLC toolchain based on a release",
provides = [platform_common.ToolchainInfo],
)
def declare_toolchains(host, release):
host_goos, _, host_goarch = host.partition("_")
for p in PLATFORMS:
toolchain_name = "sqlc_" + p.name
impl_name = toolchain_name + "-impl"
cgo_constraints = (
"@com_plezentek_rules_sqlc//sqlc/toolchain:cgo_off",
"@com_plezentek_rules_sqlc//sqlc/toolchain:cgo_on",
)
constraints = [c for c in p.constraints if c not in cgo_constraints]
sqlc_toolchain(
name = impl_name,
goos = p.goos,
goarch = p.goarch,
release = release,
tags = ["manual"],
visibility = ["//visibility:public"],
)
native.toolchain(
name = toolchain_name,
toolchain_type = "@com_plezentek_rules_sqlc//sqlc:toolchain",
exec_compatible_with = [
"@com_plezentek_rules_sqlc//sqlc/toolchain:" + host_goos,
"@com_plezentek_rules_sqlc//sqlc/toolchain:" + host_goarch,
],
target_compatible_with = constraints,
toolchain = ":" + impl_name,
)
| en | 0.846447 | # Copyright 2020 Plezentek, Inc. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 1.637326 | 2 |
configs/tracker_configs/new_test_20e_cam_1_new_short.py | nolanzzz/mtmct | 17 | 9738 | <reponame>nolanzzz/mtmct
root = {
"general" : {
"display_viewer" : False,
        #The visible GPUs will be restricted to the numbers listed here. The PyTorch (cuda:0) numbering will start at 0.
        #This is a trick to get everything onto the wanted GPUs, because just setting cuda:4 in the function calls will
        #not work for mmdetection. There will still be things on GPU cuda:0.
"cuda_visible_devices" : "1",
"save_track_results" : True
},
"data" : {
        # To increase the speed while developing, a specific interval of all frames can be set.
"selection_interval" : [0,10000],
"source" : {
"base_folder" : "/u40/zhanr110/MTA_ext_short/test",
# "base_folder" : "/Users/nolanzhang/Projects/mtmct/data/MTA_ext_short/test",
"cam_ids" : [1]
}
},
"detector" : {
# "mmdetection_config" : "detectors/mmdetection/configs/faster_rcnn_r50_fpn_1x_gta.py",
"mmdetection_config" : "detectors/mmdetection/configs/mta/faster_rcnn_r50_mta.py",
# "mmdetection_checkpoint_file" : "work_dirs/detector/faster_rcnn_gta22.07_epoch_5.pth",
"mmdetection_checkpoint_file" : "detectors/mmdetection/work_dirs/GtaDataset_30e/epoch_20.pth",
"device" : "cuda:0",
#Remove all detections with a confidence less than min_confidence
"min_confidence" : 0.8,
},
"feature_extractor" : {
"feature_extractor_name" : "abd_net_extractor"
,"reid_strong_extractor": {
"reid_strong_baseline_config": "feature_extractors/reid_strong_baseline/configs/softmax_triplet.yml",
"checkpoint_file": "work_dirs/feature_extractor/strong_reid_baseline/resnet50_model_reid_GTA_softmax_triplet.pth",
"device": "cuda:0,1"
,"visible_device" : "0,1"}
,"abd_net_extractor" : dict(abd_dan=['cam', 'pam'], abd_dan_no_head=False, abd_dim=1024, abd_np=2, adam_beta1=0.9,
adam_beta2=0.999, arch='resnet50', branches=['global', 'abd'], compatibility=False, criterion='htri',
cuhk03_classic_split=False, cuhk03_labeled=False, dan_dan=[], dan_dan_no_head=False, dan_dim=1024,
data_augment=['crop,random-erase'], day_only=False, dropout=0.5, eval_freq=5, evaluate=False,
fixbase=False, fixbase_epoch=10, flip_eval=False, gamma=0.1, global_dim=1024,
global_max_pooling=False, gpu_devices='1', height=384, htri_only=False, label_smooth=True,
lambda_htri=0.1, lambda_xent=1, lr=0.0003, margin=1.2, max_epoch=80, min_height=-1,
momentum=0.9, night_only=False, np_dim=1024, np_max_pooling=False, np_np=2, np_with_global=False,
num_instances=4, of_beta=1e-06, of_position=['before', 'after', 'cam', 'pam', 'intermediate'],
of_start_epoch=23, open_layers=['classifier'], optim='adam', ow_beta=0.001,
pool_tracklet_features='avg', print_freq=10, resume='', rmsprop_alpha=0.99
, load_weights='work_dirs/feature_extractor/abd-net/checkpoint_ep30_non_clean.pth.tar'
# , load_weights='work_dirs/feature_extractor/abd-net/resnet50-19c8e357.pth'
, root='work_dirs/datasets'
, sample_method='evenly'
, save_dir='work_dirs/feature_extractor/abd-net/log/eval-resnet50'
, seed=1, seq_len=15,
sgd_dampening=0, sgd_nesterov=False, shallow_cam=True, source_names=['mta_ext'], split_id=0,
start_epoch=0, start_eval=0, stepsize=[20, 40], target_names=['market1501'],
test_batch_size=100, train_batch_size=64, train_sampler='', use_avai_gpus=False, use_cpu=False,
use_metric_cuhk03=False, use_of=True, use_ow=True, visualize_ranks=False, weight_decay=0.0005,
width=128, workers=4)
},
"tracker" : {
"type" : "DeepSort",
"nn_budget" : 100
}
}
| root = {
"general" : {
"display_viewer" : False,
#The visible GPUS will be restricted to the numbers listed here. The pytorch (cuda:0) numeration will start at 0
#This is a trick to get everything onto the wanted gpus because just setting cuda:4 in the function calls will
#not work for mmdetection. There will still be things on gpu cuda:0.
"cuda_visible_devices" : "1",
"save_track_results" : True
},
"data" : {
# To increase the speed while developing an specific interval of all frames can be set.
"selection_interval" : [0,10000],
"source" : {
"base_folder" : "/u40/zhanr110/MTA_ext_short/test",
# "base_folder" : "/Users/nolanzhang/Projects/mtmct/data/MTA_ext_short/test",
"cam_ids" : [1]
}
},
"detector" : {
# "mmdetection_config" : "detectors/mmdetection/configs/faster_rcnn_r50_fpn_1x_gta.py",
"mmdetection_config" : "detectors/mmdetection/configs/mta/faster_rcnn_r50_mta.py",
# "mmdetection_checkpoint_file" : "work_dirs/detector/faster_rcnn_gta22.07_epoch_5.pth",
"mmdetection_checkpoint_file" : "detectors/mmdetection/work_dirs/GtaDataset_30e/epoch_20.pth",
"device" : "cuda:0",
#Remove all detections with a confidence less than min_confidence
"min_confidence" : 0.8,
},
"feature_extractor" : {
"feature_extractor_name" : "abd_net_extractor"
,"reid_strong_extractor": {
"reid_strong_baseline_config": "feature_extractors/reid_strong_baseline/configs/softmax_triplet.yml",
"checkpoint_file": "work_dirs/feature_extractor/strong_reid_baseline/resnet50_model_reid_GTA_softmax_triplet.pth",
"device": "cuda:0,1"
,"visible_device" : "0,1"}
,"abd_net_extractor" : dict(abd_dan=['cam', 'pam'], abd_dan_no_head=False, abd_dim=1024, abd_np=2, adam_beta1=0.9,
adam_beta2=0.999, arch='resnet50', branches=['global', 'abd'], compatibility=False, criterion='htri',
cuhk03_classic_split=False, cuhk03_labeled=False, dan_dan=[], dan_dan_no_head=False, dan_dim=1024,
data_augment=['crop,random-erase'], day_only=False, dropout=0.5, eval_freq=5, evaluate=False,
fixbase=False, fixbase_epoch=10, flip_eval=False, gamma=0.1, global_dim=1024,
global_max_pooling=False, gpu_devices='1', height=384, htri_only=False, label_smooth=True,
lambda_htri=0.1, lambda_xent=1, lr=0.0003, margin=1.2, max_epoch=80, min_height=-1,
momentum=0.9, night_only=False, np_dim=1024, np_max_pooling=False, np_np=2, np_with_global=False,
num_instances=4, of_beta=1e-06, of_position=['before', 'after', 'cam', 'pam', 'intermediate'],
of_start_epoch=23, open_layers=['classifier'], optim='adam', ow_beta=0.001,
pool_tracklet_features='avg', print_freq=10, resume='', rmsprop_alpha=0.99
, load_weights='work_dirs/feature_extractor/abd-net/checkpoint_ep30_non_clean.pth.tar'
# , load_weights='work_dirs/feature_extractor/abd-net/resnet50-19c8e357.pth'
, root='work_dirs/datasets'
, sample_method='evenly'
, save_dir='work_dirs/feature_extractor/abd-net/log/eval-resnet50'
, seed=1, seq_len=15,
sgd_dampening=0, sgd_nesterov=False, shallow_cam=True, source_names=['mta_ext'], split_id=0,
start_epoch=0, start_eval=0, stepsize=[20, 40], target_names=['market1501'],
test_batch_size=100, train_batch_size=64, train_sampler='', use_avai_gpus=False, use_cpu=False,
use_metric_cuhk03=False, use_of=True, use_ow=True, visualize_ranks=False, weight_decay=0.0005,
width=128, workers=4)
},
"tracker" : {
"type" : "DeepSort",
"nn_budget" : 100
}
} | en | 0.714727 | #The visible GPUS will be restricted to the numbers listed here. The pytorch (cuda:0) numeration will start at 0 #This is a trick to get everything onto the wanted gpus because just setting cuda:4 in the function calls will #not work for mmdetection. There will still be things on gpu cuda:0. # To increase the speed while developing an specific interval of all frames can be set. # "base_folder" : "/Users/nolanzhang/Projects/mtmct/data/MTA_ext_short/test", # "mmdetection_config" : "detectors/mmdetection/configs/faster_rcnn_r50_fpn_1x_gta.py", # "mmdetection_checkpoint_file" : "work_dirs/detector/faster_rcnn_gta22.07_epoch_5.pth", #Remove all detections with a confidence less than min_confidence # , load_weights='work_dirs/feature_extractor/abd-net/resnet50-19c8e357.pth' | 1.892192 | 2 |
tests/structures/test_generator.py | cherub96/voc | 1 | 9739 | <reponame>cherub96/voc
from ..utils import TranspileTestCase
class GeneratorTests(TranspileTestCase):
def test_simple_generator(self):
self.assertCodeExecution("""
def multiplier(first, second):
y = first * second
yield y
y *= second
yield y
y *= second
yield y
y *= second
yield y
print(list(multiplier(1, 20)))
""")
def test_loop_generator(self):
self.assertCodeExecution("""
def fizz_buzz(start, stop):
for i in range(start, stop):
found = False
if i % 2 == 0:
yield 'fizz'
found = True
if i % 3 == 0:
yield 'buzz'
found = True
if not found:
yield i
print(list(fizz_buzz(1, 20)))
""")
| from ..utils import TranspileTestCase
class GeneratorTests(TranspileTestCase):
def test_simple_generator(self):
self.assertCodeExecution("""
def multiplier(first, second):
y = first * second
yield y
y *= second
yield y
y *= second
yield y
y *= second
yield y
print(list(multiplier(1, 20)))
""")
def test_loop_generator(self):
self.assertCodeExecution("""
def fizz_buzz(start, stop):
for i in range(start, stop):
found = False
if i % 2 == 0:
yield 'fizz'
found = True
if i % 3 == 0:
yield 'buzz'
found = True
if not found:
yield i
print(list(fizz_buzz(1, 20)))
""") | en | 0.49293 | def multiplier(first, second): y = first * second yield y y *= second yield y y *= second yield y y *= second yield y print(list(multiplier(1, 20))) def fizz_buzz(start, stop): for i in range(start, stop): found = False if i % 2 == 0: yield 'fizz' found = True if i % 3 == 0: yield 'buzz' found = True if not found: yield i print(list(fizz_buzz(1, 20))) | 3.192285 | 3 |
ogusa/tax.py | hdoupe/OG-USA | 0 | 9740 | <filename>ogusa/tax.py
'''
------------------------------------------------------------------------
Functions for taxes in the steady state and along the transition path.
------------------------------------------------------------------------
'''
# Packages
import numpy as np
from ogusa import utils
'''
------------------------------------------------------------------------
Functions
------------------------------------------------------------------------
'''
def replacement_rate_vals(nssmat, wss, factor_ss, j, p):
'''
Calculates replacement rate values for the social security system.
Args:
nssmat (Numpy array): initial guess at labor supply, size = SxJ
new_w (scalar): steady state real wage rate
factor_ss (scalar): scaling factor converting model units to
dollars
j (int): index of lifetime income group
p (OG-USA Specifications object): model parameters
Returns:
theta (Numpy array): social security replacement rate value for
lifetime income group j
'''
if j is not None:
e = p.e[:, j]
else:
e = p.e
# adjust number of calendar years AIME computed from int model periods
equiv_periods = int(round((p.S / 80.0) * p.AIME_num_years)) - 1
if e.ndim == 2:
dim2 = e.shape[1]
else:
dim2 = 1
earnings = (e * (wss * nssmat * factor_ss)).reshape(p.S, dim2)
# get highest earning years for number of years AIME computed from
highest_earn =\
(-1.0 * np.sort(-1.0 * earnings[:p.retire[-1], :],
axis=0))[:equiv_periods]
AIME = highest_earn.sum(0) / ((12.0 * (p.S / 80.0)) * equiv_periods)
PIA = np.zeros(dim2)
# Compute level of replacement using AIME brackets and PIA rates
for j in range(dim2):
if AIME[j] < p.AIME_bkt_1:
PIA[j] = p.PIA_rate_bkt_1 * AIME[j]
elif AIME[j] < p.AIME_bkt_2:
PIA[j] = (p.PIA_rate_bkt_1 * p.AIME_bkt_1 +
p.PIA_rate_bkt_2 * (AIME[j] - p.AIME_bkt_1))
else:
PIA[j] = (p.PIA_rate_bkt_1 * p.AIME_bkt_1 +
p.PIA_rate_bkt_2 * (p.AIME_bkt_2 - p.AIME_bkt_1) +
p.PIA_rate_bkt_3 * (AIME[j] - p.AIME_bkt_2))
# Set the maximum monthly replacment rate from SS benefits tables
PIA[PIA > p.PIA_maxpayment] = p.PIA_maxpayment
if p.PIA_minpayment != 0.0:
PIA[PIA < p.PIA_minpayment] = p.PIA_minpayment
theta = (PIA * (12.0 * p.S / 80.0)) / (factor_ss * wss)
return theta
def ETR_wealth(b, h_wealth, m_wealth, p_wealth):
r'''
Calculates the effective tax rate on wealth.
.. math::
T_{j,s,t}^{w} = \frac{h^{w}p_{w}b_{j,s,t}}{h^{w}b_{j,s,t} + m^{w}}
Args:
b (Numpy array): savings
h_wealth (scalar): parameter of wealth tax function
p_wealth (scalar): parameter of wealth tax function
m_wealth (scalar): parameter of wealth tax function
Returns:
tau_w (Numpy array): effective tax rate on wealth, size = SxJ
'''
tau_w = (p_wealth * h_wealth * b) / (h_wealth * b + m_wealth)
return tau_w
def MTR_wealth(b, h_wealth, m_wealth, p_wealth):
r'''
Calculates the marginal tax rate on wealth from the wealth tax.
.. math::
\frac{\partial T_{j,s,t}^{w}}{\partial b_{j,s,t}} = \frac{h^{w}m^{w}p_{w}}{(b_{j,s,t}h^{w}m^{w})^{2}}
Args:
b (Numpy array): savings
h_wealth (scalar): parameter of wealth tax function
p_wealth (scalar): parameter of wealth tax function
m_wealth (scalar): parameter of wealth tax function
Returns:
tau_prime (Numpy array): marginal tax rate on wealth, size = SxJ
'''
tau_prime = ((b * h_wealth * m_wealth * p_wealth) /
((b * h_wealth + m_wealth) ** 2) +
ETR_wealth(b, h_wealth, m_wealth, p_wealth))
return tau_prime
def ETR_income(r, w, b, n, factor, e, etr_params, p):
'''
Calculates effective personal income tax rate.
Args:
r (array_like): real interest rate
w (array_like): real wage rate
b (Numpy array): savings
n (Numpy array): labor supply
factor (scalar): scaling factor converting model units to
dollars
e (Numpy array): effective labor units
etr_params (Numpy array): effective tax rate function parameters
p (OG-USA Specifications object): model parameters
Returns:
tau (Numpy array): effective tax rate on total income
'''
X = (w * e * n) * factor
Y = (r * b) * factor
X2 = X ** 2
Y2 = Y ** 2
income = X + Y
income2 = income ** 2
if p.tax_func_type == 'GS':
phi0 = np.squeeze(etr_params[..., 0])
phi1 = np.squeeze(etr_params[..., 1])
phi2 = np.squeeze(etr_params[..., 2])
tau = ((phi0 * (income - ((income ** -phi1) + phi2) **
(-1 / phi1))) / income)
elif p.tax_func_type == 'DEP_totalinc':
A = np.squeeze(etr_params[..., 0])
B = np.squeeze(etr_params[..., 1])
max_income = np.squeeze(etr_params[..., 4])
min_income = np.squeeze(etr_params[..., 5])
shift_income = np.squeeze(etr_params[..., 8])
shift = np.squeeze(etr_params[..., 10])
tau_income = (((max_income - min_income) *
(A * income2 + B * income) /
(A * income2 + B * income + 1)) + min_income)
tau = tau_income + shift_income + shift
else: # DEP or linear
A = np.squeeze(etr_params[..., 0])
B = np.squeeze(etr_params[..., 1])
C = np.squeeze(etr_params[..., 2])
D = np.squeeze(etr_params[..., 3])
max_x = np.squeeze(etr_params[..., 4])
min_x = np.squeeze(etr_params[..., 5])
max_y = np.squeeze(etr_params[..., 6])
min_y = np.squeeze(etr_params[..., 7])
shift_x = np.squeeze(etr_params[..., 8])
shift_y = np.squeeze(etr_params[..., 9])
shift = np.squeeze(etr_params[..., 10])
share = np.squeeze(etr_params[..., 11])
tau_x = ((max_x - min_x) * (A * X2 + B * X) /
(A * X2 + B * X + 1) + min_x)
tau_y = ((max_y - min_y) * (C * Y2 + D * Y) /
(C * Y2 + D * Y + 1) + min_y)
tau = (((tau_x + shift_x) ** share) *
((tau_y + shift_y) ** (1 - share))) + shift
return tau
def MTR_income(r, w, b, n, factor, mtr_capital, e, etr_params,
mtr_params, p):
r'''
Generates the marginal tax rate on labor income for households.
Args:
r (array_like): real interest rate
w (array_like): real wage rate
b (Numpy array): savings
n (Numpy array): labor supply
factor (scalar): scaling factor converting model units to
dollars
mtr_capital (bool): whether to compute the marginal tax rate on
capital income or labor income
e (Numpy array): effective labor units
etr_params (Numpy array): effective tax rate function parameters
p (OG-USA Specifications object): model parameters
Returns:
tau (Numpy array): marginal tax rate on income source
'''
X = (w * e * n) * factor
Y = (r * b) * factor
X2 = X ** 2
Y2 = Y ** 2
income = X + Y
income2 = income ** 2
if p.tax_func_type == 'GS':
if p.analytical_mtrs:
phi0 = np.squeeze(etr_params[..., 0])
phi1 = np.squeeze(etr_params[..., 1])
phi2 = np.squeeze(etr_params[..., 2])
else:
phi0 = np.squeeze(mtr_params[..., 0])
phi1 = np.squeeze(mtr_params[..., 1])
phi2 = np.squeeze(mtr_params[..., 2])
tau = (phi0*(1 - (income ** (-phi1 - 1) *
((income ** -phi1) + phi2) **
((-1 - phi1) / phi1))))
elif p.tax_func_type == 'DEP_totalinc':
if p.analytical_mtrs:
A = np.squeeze(etr_params[..., 0])
B = np.squeeze(etr_params[..., 1])
max_income = np.squeeze(etr_params[..., 4])
min_income = np.squeeze(etr_params[..., 5])
shift_income = np.squeeze(etr_params[..., 8])
shift = np.squeeze(etr_params[..., 10])
d_etr = ((max_income - min_income) * ((2 * A * income + B) /
((A * income2 + B * income + 1) ** 2)))
etr = (((max_income - min_income) *
((A * income2 + B * income) /
(A * income2 + B * income + 1)) + min_income) +
shift_income + shift)
tau = (d_etr * income) + (etr)
else:
A = np.squeeze(mtr_params[..., 0])
B = np.squeeze(mtr_params[..., 1])
max_income = np.squeeze(mtr_params[..., 4])
min_income = np.squeeze(mtr_params[..., 5])
shift_income = np.squeeze(mtr_params[..., 8])
shift = np.squeeze(mtr_params[..., 10])
tau_income = (((max_income - min_income) *
(A * income2 + B * income) /
(A * income2 + B * income + 1)) + min_income)
tau = tau_income + shift_income + shift
else: # DEP or linear
if p.analytical_mtrs:
A = np.squeeze(etr_params[..., 0])
B = np.squeeze(etr_params[..., 1])
C = np.squeeze(etr_params[..., 2])
D = np.squeeze(etr_params[..., 3])
max_x = np.squeeze(etr_params[..., 4])
min_x = np.squeeze(etr_params[..., 5])
max_y = np.squeeze(etr_params[..., 6])
min_y = np.squeeze(etr_params[..., 7])
shift_x = np.squeeze(etr_params[..., 8])
shift_y = np.squeeze(etr_params[..., 9])
shift = np.squeeze(etr_params[..., 10])
share = np.squeeze(etr_params[..., 11])
tau_x = ((max_x - min_x) * (A * X2 + B * X) /
(A * X2 + B * X + 1) + min_x)
tau_y = ((max_y - min_y) * (C * Y2 + D * Y) /
(C * Y2 + D * Y + 1) + min_y)
etr = (((tau_x + shift_x) ** share) *
((tau_y + shift_y) ** (1 - share))) + shift
if mtr_capital:
d_etr = ((1-share) * ((tau_y + shift_y) ** (-share)) *
(max_y - min_y) * ((2 * C * Y + D) /
((C * Y2 + D * Y + 1)
** 2)) *
((tau_x + shift_x) ** share))
tau = d_etr * income + etr
else:
d_etr = (share * ((tau_x + shift_x) ** (share - 1)) *
(max_x - min_x) * ((2 * A * X + B) /
((A * X2 + B * X + 1)
** 2)) *
((tau_y + shift_y) ** (1 - share)))
tau = d_etr * income + etr
else:
A = np.squeeze(mtr_params[..., 0])
B = np.squeeze(mtr_params[..., 1])
C = np.squeeze(mtr_params[..., 2])
D = np.squeeze(mtr_params[..., 3])
max_x = np.squeeze(mtr_params[..., 4])
min_x = np.squeeze(mtr_params[..., 5])
max_y = np.squeeze(mtr_params[..., 6])
min_y = np.squeeze(mtr_params[..., 7])
shift_x = np.squeeze(mtr_params[..., 8])
shift_y = np.squeeze(mtr_params[..., 9])
shift = np.squeeze(mtr_params[..., 10])
share = np.squeeze(mtr_params[..., 11])
tau_x = ((max_x - min_x) * (A * X2 + B * X) /
(A * X2 + B * X + 1) + min_x)
tau_y = ((max_y - min_y) * (C * Y2 + D * Y) /
(C * Y2 + D * Y + 1) + min_y)
tau = (((tau_x + shift_x) ** share) *
((tau_y + shift_y) ** (1 - share))) + shift
return tau
def get_biz_tax(w, Y, L, K, p, method):
r'''
Finds total business income tax revenue.
.. math::
R_{t}^{b} = \tau_{t}^{b}(Y_{t} - w_{t}L_{t}) - \tau_{t}^{b}\delta_{t}^{\tau}K_{t}^{\tau}
Args:
r (array_like): real interest rate
Y (array_like): aggregate output
L (array_like): aggregate labor demand
K (array_like): aggregate capital demand
Returns:
business_revenue (array_like): aggregate business tax revenue
'''
if method == 'SS':
delta_tau = p.delta_tau[-1]
tau_b = p.tau_b[-1]
else:
delta_tau = p.delta_tau[:p.T]
tau_b = p.tau_b[:p.T]
business_revenue = tau_b * (Y - w * L) - tau_b * delta_tau * K
return business_revenue
def net_taxes(r, w, b, n, bq, factor, tr, theta, t, j, shift, method,
e, etr_params, p):
'''
Calculate net taxes paid for each household.
Args:
r (array_like): real interest rate
w (array_like): real wage rate
b (Numpy array): savings
n (Numpy array): labor supply
bq (Numpy array): bequests received
factor (scalar): scaling factor converting model units to
dollars
tr (Numpy array): government transfers to the household
theta (Numpy array): social security replacement rate value for
lifetime income group j
t (int): time period
j (int): index of lifetime income group
shift (bool): whether computing for periods 0--s or 1--(s+1),
=True for 1--(s+1)
method (str): adjusts calculation dimensions based on 'SS' or
'TPI'
e (Numpy array): effective labor units
etr_params (Numpy array): effective tax rate function parameters
p (OG-USA Specifications object): model parameters
Returns:
net_tax (Numpy array): net taxes paid for each household
'''
T_I = income_tax_liab(r, w, b, n, factor, t, j, method, e, etr_params, p)
pension = pension_amount(w, n, theta, t, j, shift, method, e, p)
T_BQ = bequest_tax_liab(r, b, bq, t, j, method, p)
T_W = wealth_tax_liab(r, b, t, j, method, p)
net_tax = T_I - pension + T_BQ + T_W - tr
return net_tax
def income_tax_liab(r, w, b, n, factor, t, j, method, e, etr_params, p):
'''
Calculate income and payroll tax liability for each household
Args:
r (array_like): real interest rate
w (array_like): real wage rate
b (Numpy array): savings
n (Numpy array): labor supply
factor (scalar): scaling factor converting model units to
dollars
t (int): time period
j (int): index of lifetime income group
method (str): adjusts calculation dimensions based on 'SS' or
'TPI'
e (Numpy array): effective labor units
etr_params (Numpy array): effective tax rate function parameters
p (OG-USA Specifications object): model parameters
Returns:
T_I (Numpy array): total income and payroll taxes paid for each
household
'''
if j is not None:
if method == 'TPI':
if b.ndim == 2:
r = r.reshape(r.shape[0], 1)
w = w.reshape(w.shape[0], 1)
else:
if method == 'TPI':
r = utils.to_timepath_shape(r)
w = utils.to_timepath_shape(w)
income = r * b + w * e * n
labor_income = w * e * n
T_I = ETR_income(r, w, b, n, factor, e, etr_params, p) * income
if method == 'SS':
T_P = p.tau_payroll[-1] * labor_income
elif method == 'TPI':
length = w.shape[0]
if len(b.shape) == 1:
T_P = p.tau_payroll[t: t + length] * labor_income
elif len(b.shape) == 2:
T_P = (p.tau_payroll[t: t + length].reshape(length, 1) *
labor_income)
else:
T_P = (p.tau_payroll[t:t + length].reshape(length, 1, 1) *
labor_income)
elif method == 'TPI_scalar':
T_P = p.tau_payroll[0] * labor_income
income_payroll_tax_liab = T_I + T_P
return income_payroll_tax_liab
def pension_amount(w, n, theta, t, j, shift, method, e, p):
'''
Calculate public pension benefit amounts for each household.
Args:
w (array_like): real wage rate
n (Numpy array): labor supply
theta (Numpy array): social security replacement rate value for
lifetime income group j
t (int): time period
j (int): index of lifetime income group
shift (bool): whether computing for periods 0--s or 1--(s+1),
=True for 1--(s+1)
method (str): adjusts calculation dimensions based on 'SS' or
'TPI'
e (Numpy array): effective labor units
p (OG-USA Specifications object): model parameters
Returns:
pension (Numpy array): pension amount for each household
'''
if j is not None:
if method == 'TPI':
if n.ndim == 2:
w = w.reshape(w.shape[0], 1)
else:
if method == 'TPI':
w = utils.to_timepath_shape(w)
pension = np.zeros_like(n)
if method == 'SS':
# Depending on if we are looking at b_s or b_s+1, the
# entry for retirement will change (it shifts back one).
# The shift boolean makes sure we start replacement rates
# at the correct age.
if shift is False:
pension[p.retire[-1]:] = theta * w
else:
pension[p.retire[-1] - 1:] = theta * w
elif method == 'TPI':
length = w.shape[0]
if not shift:
# retireTPI is different from retire, because in TP income
# we are counting backwards with different length lists.
# This will always be the correct location of retirement,
# depending on the shape of the lists.
retireTPI = (p.retire[t: t + length] - p.S)
else:
retireTPI = (p.retire[t: t + length] - 1 - p.S)
if len(n.shape) == 1:
if not shift:
retireTPI = p.retire[t] - p.S
else:
retireTPI = p.retire[t] - 1 - p.S
pension[retireTPI:] = (
theta[j] * p.replacement_rate_adjust[t] * w[retireTPI:])
elif len(n.shape) == 2:
for tt in range(pension.shape[0]):
pension[tt, retireTPI[tt]:] = (
theta * p.replacement_rate_adjust[t + tt] * w[tt])
else:
for tt in range(pension.shape[0]):
pension[tt, retireTPI[tt]:, :] = (
theta.reshape(1, p.J) *
p.replacement_rate_adjust[t + tt] * w[tt])
elif method == 'TPI_scalar':
# The above methods won't work if scalars are used. This option
# is only called by the SS_TPI_firstdoughnutring function in TPI.
pension = theta * p.replacement_rate_adjust[0] * w
return pension
def wealth_tax_liab(r, b, t, j, method, p):
'''
Calculate wealth tax liability for each household.
Args:
r (array_like): real interest rate
b (Numpy array): savings
t (int): time period
j (int): index of lifetime income group
method (str): adjusts calculation dimensions based on 'SS' or
'TPI'
p (OG-USA Specifications object): model parameters
Returns:
T_W (Numpy array): wealth tax liability for each household
'''
if j is not None:
if method == 'TPI':
if b.ndim == 2:
r = r.reshape(r.shape[0], 1)
else:
if method == 'TPI':
r = utils.to_timepath_shape(r)
if method == 'SS':
T_W = (ETR_wealth(b, p.h_wealth[-1], p.m_wealth[-1],
p.p_wealth[-1]) * b)
elif method == 'TPI':
length = r.shape[0]
if len(b.shape) == 1:
T_W = (ETR_wealth(b, p.h_wealth[t:t + length],
p.m_wealth[t:t + length],
p.p_wealth[t:t + length]) * b)
elif len(b.shape) == 2:
T_W = (ETR_wealth(b, p.h_wealth[t:t + length],
p.m_wealth[t:t + length],
p.p_wealth[t:t + length]) * b)
else:
T_W = (ETR_wealth(
b, p.h_wealth[t:t + length].reshape(length, 1, 1),
p.m_wealth[t:t + length].reshape(length, 1, 1),
p.p_wealth[t:t + length].reshape(length, 1, 1)) * b)
elif method == 'TPI_scalar':
T_W = (ETR_wealth(b, p.h_wealth[0], p.m_wealth[0],
p.p_wealth[0]) * b)
return T_W
def bequest_tax_liab(r, b, bq, t, j, method, p):
'''
Calculate liability due from taxes on bequests for each household.
Args:
r (array_like): real interest rate
b (Numpy array): savings
bq (Numpy array): bequests received
t (int): time period
j (int): index of lifetime income group
method (str): adjusts calculation dimensions based on 'SS' or
'TPI'
p (OG-USA Specifications object): model parameters
Returns:
T_BQ (Numpy array): bequest tax liability for each household
'''
if j is not None:
lambdas = p.lambdas[j]
if method == 'TPI':
if b.ndim == 2:
r = r.reshape(r.shape[0], 1)
else:
lambdas = np.transpose(p.lambdas)
if method == 'TPI':
r = utils.to_timepath_shape(r)
if method == 'SS':
T_BQ = p.tau_bq[-1] * bq
elif method == 'TPI':
length = r.shape[0]
if len(b.shape) == 1:
T_BQ = p.tau_bq[t:t + length] * bq
elif len(b.shape) == 2:
T_BQ = p.tau_bq[t:t + length].reshape(length, 1) * bq / lambdas
else:
T_BQ = p.tau_bq[t:t + length].reshape(length, 1, 1) * bq
elif method == 'TPI_scalar':
# The above methods won't work if scalars are used. This option
# is only called by the SS_TPI_firstdoughnutring function in TPI.
T_BQ = p.tau_bq[0] * bq
return T_BQ
| <filename>ogusa/tax.py
'''
------------------------------------------------------------------------
Functions for taxes in the steady state and along the transition path.
------------------------------------------------------------------------
'''
# Packages
import numpy as np
from ogusa import utils
'''
------------------------------------------------------------------------
Functions
------------------------------------------------------------------------
'''
def replacement_rate_vals(nssmat, wss, factor_ss, j, p):
'''
Calculates replacement rate values for the social security system.
Args:
nssmat (Numpy array): initial guess at labor supply, size = SxJ
new_w (scalar): steady state real wage rate
factor_ss (scalar): scaling factor converting model units to
dollars
j (int): index of lifetime income group
p (OG-USA Specifications object): model parameters
Returns:
theta (Numpy array): social security replacement rate value for
lifetime income group j
'''
if j is not None:
e = p.e[:, j]
else:
e = p.e
# adjust number of calendar years AIME computed from int model periods
equiv_periods = int(round((p.S / 80.0) * p.AIME_num_years)) - 1
if e.ndim == 2:
dim2 = e.shape[1]
else:
dim2 = 1
earnings = (e * (wss * nssmat * factor_ss)).reshape(p.S, dim2)
# get highest earning years for number of years AIME computed from
highest_earn =\
(-1.0 * np.sort(-1.0 * earnings[:p.retire[-1], :],
axis=0))[:equiv_periods]
AIME = highest_earn.sum(0) / ((12.0 * (p.S / 80.0)) * equiv_periods)
PIA = np.zeros(dim2)
# Compute level of replacement using AIME brackets and PIA rates
for j in range(dim2):
if AIME[j] < p.AIME_bkt_1:
PIA[j] = p.PIA_rate_bkt_1 * AIME[j]
elif AIME[j] < p.AIME_bkt_2:
PIA[j] = (p.PIA_rate_bkt_1 * p.AIME_bkt_1 +
p.PIA_rate_bkt_2 * (AIME[j] - p.AIME_bkt_1))
else:
PIA[j] = (p.PIA_rate_bkt_1 * p.AIME_bkt_1 +
p.PIA_rate_bkt_2 * (p.AIME_bkt_2 - p.AIME_bkt_1) +
p.PIA_rate_bkt_3 * (AIME[j] - p.AIME_bkt_2))
# Set the maximum monthly replacment rate from SS benefits tables
PIA[PIA > p.PIA_maxpayment] = p.PIA_maxpayment
if p.PIA_minpayment != 0.0:
PIA[PIA < p.PIA_minpayment] = p.PIA_minpayment
theta = (PIA * (12.0 * p.S / 80.0)) / (factor_ss * wss)
return theta
def ETR_wealth(b, h_wealth, m_wealth, p_wealth):
r'''
Calculates the effective tax rate on wealth.
.. math::
T_{j,s,t}^{w} = \frac{h^{w}p_{w}b_{j,s,t}}{h^{w}b_{j,s,t} + m^{w}}
Args:
b (Numpy array): savings
h_wealth (scalar): parameter of wealth tax function
p_wealth (scalar): parameter of wealth tax function
m_wealth (scalar): parameter of wealth tax function
Returns:
tau_w (Numpy array): effective tax rate on wealth, size = SxJ
'''
tau_w = (p_wealth * h_wealth * b) / (h_wealth * b + m_wealth)
return tau_w
def MTR_wealth(b, h_wealth, m_wealth, p_wealth):
r'''
Calculates the marginal tax rate on wealth from the wealth tax.
.. math::
\frac{\partial T_{j,s,t}^{w}}{\partial b_{j,s,t}} = \frac{h^{w}m^{w}p_{w}}{(b_{j,s,t}h^{w}m^{w})^{2}}
Args:
b (Numpy array): savings
h_wealth (scalar): parameter of wealth tax function
p_wealth (scalar): parameter of wealth tax function
m_wealth (scalar): parameter of wealth tax function
Returns:
tau_prime (Numpy array): marginal tax rate on wealth, size = SxJ
'''
tau_prime = ((b * h_wealth * m_wealth * p_wealth) /
((b * h_wealth + m_wealth) ** 2) +
ETR_wealth(b, h_wealth, m_wealth, p_wealth))
return tau_prime
def ETR_income(r, w, b, n, factor, e, etr_params, p):
'''
Calculates effective personal income tax rate.
Args:
r (array_like): real interest rate
w (array_like): real wage rate
b (Numpy array): savings
n (Numpy array): labor supply
factor (scalar): scaling factor converting model units to
dollars
e (Numpy array): effective labor units
etr_params (Numpy array): effective tax rate function parameters
p (OG-USA Specifications object): model parameters
Returns:
tau (Numpy array): effective tax rate on total income
'''
X = (w * e * n) * factor
Y = (r * b) * factor
X2 = X ** 2
Y2 = Y ** 2
income = X + Y
income2 = income ** 2
if p.tax_func_type == 'GS':
phi0 = np.squeeze(etr_params[..., 0])
phi1 = np.squeeze(etr_params[..., 1])
phi2 = np.squeeze(etr_params[..., 2])
tau = ((phi0 * (income - ((income ** -phi1) + phi2) **
(-1 / phi1))) / income)
elif p.tax_func_type == 'DEP_totalinc':
A = np.squeeze(etr_params[..., 0])
B = np.squeeze(etr_params[..., 1])
max_income = np.squeeze(etr_params[..., 4])
min_income = np.squeeze(etr_params[..., 5])
shift_income = np.squeeze(etr_params[..., 8])
shift = np.squeeze(etr_params[..., 10])
tau_income = (((max_income - min_income) *
(A * income2 + B * income) /
(A * income2 + B * income + 1)) + min_income)
tau = tau_income + shift_income + shift
else: # DEP or linear
A = np.squeeze(etr_params[..., 0])
B = np.squeeze(etr_params[..., 1])
C = np.squeeze(etr_params[..., 2])
D = np.squeeze(etr_params[..., 3])
max_x = np.squeeze(etr_params[..., 4])
min_x = np.squeeze(etr_params[..., 5])
max_y = np.squeeze(etr_params[..., 6])
min_y = np.squeeze(etr_params[..., 7])
shift_x = np.squeeze(etr_params[..., 8])
shift_y = np.squeeze(etr_params[..., 9])
shift = np.squeeze(etr_params[..., 10])
share = np.squeeze(etr_params[..., 11])
tau_x = ((max_x - min_x) * (A * X2 + B * X) /
(A * X2 + B * X + 1) + min_x)
tau_y = ((max_y - min_y) * (C * Y2 + D * Y) /
(C * Y2 + D * Y + 1) + min_y)
tau = (((tau_x + shift_x) ** share) *
((tau_y + shift_y) ** (1 - share))) + shift
return tau
def MTR_income(r, w, b, n, factor, mtr_capital, e, etr_params,
mtr_params, p):
r'''
Generates the marginal tax rate on labor income for households.
Args:
r (array_like): real interest rate
w (array_like): real wage rate
b (Numpy array): savings
n (Numpy array): labor supply
factor (scalar): scaling factor converting model units to
dollars
mtr_capital (bool): whether to compute the marginal tax rate on
capital income or labor income
e (Numpy array): effective labor units
etr_params (Numpy array): effective tax rate function parameters
p (OG-USA Specifications object): model parameters
Returns:
tau (Numpy array): marginal tax rate on income source
'''
X = (w * e * n) * factor
Y = (r * b) * factor
X2 = X ** 2
Y2 = Y ** 2
income = X + Y
income2 = income ** 2
if p.tax_func_type == 'GS':
if p.analytical_mtrs:
phi0 = np.squeeze(etr_params[..., 0])
phi1 = np.squeeze(etr_params[..., 1])
phi2 = np.squeeze(etr_params[..., 2])
else:
phi0 = np.squeeze(mtr_params[..., 0])
phi1 = np.squeeze(mtr_params[..., 1])
phi2 = np.squeeze(mtr_params[..., 2])
tau = (phi0*(1 - (income ** (-phi1 - 1) *
((income ** -phi1) + phi2) **
((-1 - phi1) / phi1))))
elif p.tax_func_type == 'DEP_totalinc':
if p.analytical_mtrs:
A = np.squeeze(etr_params[..., 0])
B = np.squeeze(etr_params[..., 1])
max_income = np.squeeze(etr_params[..., 4])
min_income = np.squeeze(etr_params[..., 5])
shift_income = np.squeeze(etr_params[..., 8])
shift = np.squeeze(etr_params[..., 10])
d_etr = ((max_income - min_income) * ((2 * A * income + B) /
((A * income2 + B * income + 1) ** 2)))
etr = (((max_income - min_income) *
((A * income2 + B * income) /
(A * income2 + B * income + 1)) + min_income) +
shift_income + shift)
tau = (d_etr * income) + (etr)
else:
A = np.squeeze(mtr_params[..., 0])
B = np.squeeze(mtr_params[..., 1])
max_income = np.squeeze(mtr_params[..., 4])
min_income = np.squeeze(mtr_params[..., 5])
shift_income = np.squeeze(mtr_params[..., 8])
shift = np.squeeze(mtr_params[..., 10])
tau_income = (((max_income - min_income) *
(A * income2 + B * income) /
(A * income2 + B * income + 1)) + min_income)
tau = tau_income + shift_income + shift
else: # DEP or linear
if p.analytical_mtrs:
A = np.squeeze(etr_params[..., 0])
B = np.squeeze(etr_params[..., 1])
C = np.squeeze(etr_params[..., 2])
D = np.squeeze(etr_params[..., 3])
max_x = np.squeeze(etr_params[..., 4])
min_x = np.squeeze(etr_params[..., 5])
max_y = np.squeeze(etr_params[..., 6])
min_y = np.squeeze(etr_params[..., 7])
shift_x = np.squeeze(etr_params[..., 8])
shift_y = np.squeeze(etr_params[..., 9])
shift = np.squeeze(etr_params[..., 10])
share = np.squeeze(etr_params[..., 11])
tau_x = ((max_x - min_x) * (A * X2 + B * X) /
(A * X2 + B * X + 1) + min_x)
tau_y = ((max_y - min_y) * (C * Y2 + D * Y) /
(C * Y2 + D * Y + 1) + min_y)
etr = (((tau_x + shift_x) ** share) *
((tau_y + shift_y) ** (1 - share))) + shift
if mtr_capital:
d_etr = ((1-share) * ((tau_y + shift_y) ** (-share)) *
(max_y - min_y) * ((2 * C * Y + D) /
((C * Y2 + D * Y + 1)
** 2)) *
((tau_x + shift_x) ** share))
tau = d_etr * income + etr
else:
d_etr = (share * ((tau_x + shift_x) ** (share - 1)) *
(max_x - min_x) * ((2 * A * X + B) /
((A * X2 + B * X + 1)
** 2)) *
((tau_y + shift_y) ** (1 - share)))
tau = d_etr * income + etr
else:
A = np.squeeze(mtr_params[..., 0])
B = np.squeeze(mtr_params[..., 1])
C = np.squeeze(mtr_params[..., 2])
D = np.squeeze(mtr_params[..., 3])
max_x = np.squeeze(mtr_params[..., 4])
min_x = np.squeeze(mtr_params[..., 5])
max_y = np.squeeze(mtr_params[..., 6])
min_y = np.squeeze(mtr_params[..., 7])
shift_x = np.squeeze(mtr_params[..., 8])
shift_y = np.squeeze(mtr_params[..., 9])
shift = np.squeeze(mtr_params[..., 10])
share = np.squeeze(mtr_params[..., 11])
tau_x = ((max_x - min_x) * (A * X2 + B * X) /
(A * X2 + B * X + 1) + min_x)
tau_y = ((max_y - min_y) * (C * Y2 + D * Y) /
(C * Y2 + D * Y + 1) + min_y)
tau = (((tau_x + shift_x) ** share) *
((tau_y + shift_y) ** (1 - share))) + shift
return tau
def get_biz_tax(w, Y, L, K, p, method):
r'''
Finds total business income tax revenue.
.. math::
R_{t}^{b} = \tau_{t}^{b}(Y_{t} - w_{t}L_{t}) - \tau_{t}^{b}\delta_{t}^{\tau}K_{t}^{\tau}
Args:
r (array_like): real interest rate
Y (array_like): aggregate output
L (array_like): aggregate labor demand
K (array_like): aggregate capital demand
Returns:
business_revenue (array_like): aggregate business tax revenue
'''
if method == 'SS':
delta_tau = p.delta_tau[-1]
tau_b = p.tau_b[-1]
else:
delta_tau = p.delta_tau[:p.T]
tau_b = p.tau_b[:p.T]
business_revenue = tau_b * (Y - w * L) - tau_b * delta_tau * K
return business_revenue
def net_taxes(r, w, b, n, bq, factor, tr, theta, t, j, shift, method,
e, etr_params, p):
'''
Calculate net taxes paid for each household.
Args:
r (array_like): real interest rate
w (array_like): real wage rate
b (Numpy array): savings
n (Numpy array): labor supply
bq (Numpy array): bequests received
factor (scalar): scaling factor converting model units to
dollars
tr (Numpy array): government transfers to the household
theta (Numpy array): social security replacement rate value for
lifetime income group j
t (int): time period
j (int): index of lifetime income group
shift (bool): whether computing for periods 0--s or 1--(s+1),
=True for 1--(s+1)
method (str): adjusts calculation dimensions based on 'SS' or
'TPI'
e (Numpy array): effective labor units
etr_params (Numpy array): effective tax rate function parameters
p (OG-USA Specifications object): model parameters
Returns:
net_tax (Numpy array): net taxes paid for each household
'''
T_I = income_tax_liab(r, w, b, n, factor, t, j, method, e, etr_params, p)
pension = pension_amount(w, n, theta, t, j, shift, method, e, p)
T_BQ = bequest_tax_liab(r, b, bq, t, j, method, p)
T_W = wealth_tax_liab(r, b, t, j, method, p)
net_tax = T_I - pension + T_BQ + T_W - tr
return net_tax
def income_tax_liab(r, w, b, n, factor, t, j, method, e, etr_params, p):
'''
Calculate income and payroll tax liability for each household
Args:
r (array_like): real interest rate
w (array_like): real wage rate
b (Numpy array): savings
n (Numpy array): labor supply
factor (scalar): scaling factor converting model units to
dollars
t (int): time period
j (int): index of lifetime income group
method (str): adjusts calculation dimensions based on 'SS' or
'TPI'
e (Numpy array): effective labor units
etr_params (Numpy array): effective tax rate function parameters
p (OG-USA Specifications object): model parameters
Returns:
T_I (Numpy array): total income and payroll taxes paid for each
household
'''
if j is not None:
if method == 'TPI':
if b.ndim == 2:
r = r.reshape(r.shape[0], 1)
w = w.reshape(w.shape[0], 1)
else:
if method == 'TPI':
r = utils.to_timepath_shape(r)
w = utils.to_timepath_shape(w)
income = r * b + w * e * n
labor_income = w * e * n
T_I = ETR_income(r, w, b, n, factor, e, etr_params, p) * income
if method == 'SS':
T_P = p.tau_payroll[-1] * labor_income
elif method == 'TPI':
length = w.shape[0]
if len(b.shape) == 1:
T_P = p.tau_payroll[t: t + length] * labor_income
elif len(b.shape) == 2:
T_P = (p.tau_payroll[t: t + length].reshape(length, 1) *
labor_income)
else:
T_P = (p.tau_payroll[t:t + length].reshape(length, 1, 1) *
labor_income)
elif method == 'TPI_scalar':
T_P = p.tau_payroll[0] * labor_income
income_payroll_tax_liab = T_I + T_P
return income_payroll_tax_liab
def pension_amount(w, n, theta, t, j, shift, method, e, p):
'''
Calculate public pension benefit amounts for each household.
Args:
w (array_like): real wage rate
n (Numpy array): labor supply
theta (Numpy array): social security replacement rate value for
lifetime income group j
t (int): time period
j (int): index of lifetime income group
shift (bool): whether computing for periods 0--s or 1--(s+1),
=True for 1--(s+1)
method (str): adjusts calculation dimensions based on 'SS' or
'TPI'
e (Numpy array): effective labor units
p (OG-USA Specifications object): model parameters
Returns:
pension (Numpy array): pension amount for each household
'''
if j is not None:
if method == 'TPI':
if n.ndim == 2:
w = w.reshape(w.shape[0], 1)
else:
if method == 'TPI':
w = utils.to_timepath_shape(w)
pension = np.zeros_like(n)
if method == 'SS':
# Depending on if we are looking at b_s or b_s+1, the
# entry for retirement will change (it shifts back one).
# The shift boolean makes sure we start replacement rates
# at the correct age.
if shift is False:
pension[p.retire[-1]:] = theta * w
else:
pension[p.retire[-1] - 1:] = theta * w
elif method == 'TPI':
length = w.shape[0]
if not shift:
# retireTPI is different from retire, because in TP income
# we are counting backwards with different length lists.
# This will always be the correct location of retirement,
# depending on the shape of the lists.
retireTPI = (p.retire[t: t + length] - p.S)
else:
retireTPI = (p.retire[t: t + length] - 1 - p.S)
if len(n.shape) == 1:
if not shift:
retireTPI = p.retire[t] - p.S
else:
retireTPI = p.retire[t] - 1 - p.S
pension[retireTPI:] = (
theta[j] * p.replacement_rate_adjust[t] * w[retireTPI:])
elif len(n.shape) == 2:
for tt in range(pension.shape[0]):
pension[tt, retireTPI[tt]:] = (
theta * p.replacement_rate_adjust[t + tt] * w[tt])
else:
for tt in range(pension.shape[0]):
pension[tt, retireTPI[tt]:, :] = (
theta.reshape(1, p.J) *
p.replacement_rate_adjust[t + tt] * w[tt])
elif method == 'TPI_scalar':
# The above methods won't work if scalars are used. This option
# is only called by the SS_TPI_firstdoughnutring function in TPI.
pension = theta * p.replacement_rate_adjust[0] * w
return pension
def wealth_tax_liab(r, b, t, j, method, p):
'''
Calculate wealth tax liability for each household.
Args:
r (array_like): real interest rate
b (Numpy array): savings
t (int): time period
j (int): index of lifetime income group
method (str): adjusts calculation dimensions based on 'SS' or
'TPI'
p (OG-USA Specifications object): model parameters
Returns:
T_W (Numpy array): wealth tax liability for each household
'''
if j is not None:
if method == 'TPI':
if b.ndim == 2:
r = r.reshape(r.shape[0], 1)
else:
if method == 'TPI':
r = utils.to_timepath_shape(r)
if method == 'SS':
T_W = (ETR_wealth(b, p.h_wealth[-1], p.m_wealth[-1],
p.p_wealth[-1]) * b)
elif method == 'TPI':
length = r.shape[0]
if len(b.shape) == 1:
T_W = (ETR_wealth(b, p.h_wealth[t:t + length],
p.m_wealth[t:t + length],
p.p_wealth[t:t + length]) * b)
elif len(b.shape) == 2:
T_W = (ETR_wealth(b, p.h_wealth[t:t + length],
p.m_wealth[t:t + length],
p.p_wealth[t:t + length]) * b)
else:
T_W = (ETR_wealth(
b, p.h_wealth[t:t + length].reshape(length, 1, 1),
p.m_wealth[t:t + length].reshape(length, 1, 1),
p.p_wealth[t:t + length].reshape(length, 1, 1)) * b)
elif method == 'TPI_scalar':
T_W = (ETR_wealth(b, p.h_wealth[0], p.m_wealth[0],
p.p_wealth[0]) * b)
return T_W
def bequest_tax_liab(r, b, bq, t, j, method, p):
'''
Calculate liability due from taxes on bequests for each household.
Args:
r (array_like): real interest rate
b (Numpy array): savings
bq (Numpy array): bequests received
t (int): time period
j (int): index of lifetime income group
method (str): adjusts calculation dimensions based on 'SS' or
'TPI'
p (OG-USA Specifications object): model parameters
Returns:
T_BQ (Numpy array): bequest tax liability for each household
'''
if j is not None:
lambdas = p.lambdas[j]
if method == 'TPI':
if b.ndim == 2:
r = r.reshape(r.shape[0], 1)
else:
lambdas = np.transpose(p.lambdas)
if method == 'TPI':
r = utils.to_timepath_shape(r)
if method == 'SS':
T_BQ = p.tau_bq[-1] * bq
elif method == 'TPI':
length = r.shape[0]
if len(b.shape) == 1:
T_BQ = p.tau_bq[t:t + length] * bq
elif len(b.shape) == 2:
T_BQ = p.tau_bq[t:t + length].reshape(length, 1) * bq / lambdas
else:
T_BQ = p.tau_bq[t:t + length].reshape(length, 1, 1) * bq
elif method == 'TPI_scalar':
# The above methods won't work if scalars are used. This option
# is only called by the SS_TPI_firstdoughnutring function in TPI.
T_BQ = p.tau_bq[0] * bq
return T_BQ
| en | 0.70665 | ------------------------------------------------------------------------ Functions for taxes in the steady state and along the transition path. ------------------------------------------------------------------------ # Packages ------------------------------------------------------------------------ Functions ------------------------------------------------------------------------ Calculates replacement rate values for the social security system. Args: nssmat (Numpy array): initial guess at labor supply, size = SxJ new_w (scalar): steady state real wage rate factor_ss (scalar): scaling factor converting model units to dollars j (int): index of lifetime income group p (OG-USA Specifications object): model parameters Returns: theta (Numpy array): social security replacement rate value for lifetime income group j # adjust number of calendar years AIME computed from int model periods # get highest earning years for number of years AIME computed from # Compute level of replacement using AIME brackets and PIA rates # Set the maximum monthly replacment rate from SS benefits tables Calculates the effective tax rate on wealth. .. math:: T_{j,s,t}^{w} = \frac{h^{w}p_{w}b_{j,s,t}}{h^{w}b_{j,s,t} + m^{w}} Args: b (Numpy array): savings h_wealth (scalar): parameter of wealth tax function p_wealth (scalar): parameter of wealth tax function m_wealth (scalar): parameter of wealth tax function Returns: tau_w (Numpy array): effective tax rate on wealth, size = SxJ Calculates the marginal tax rate on wealth from the wealth tax. .. math:: \frac{\partial T_{j,s,t}^{w}}{\partial b_{j,s,t}} = \frac{h^{w}m^{w}p_{w}}{(b_{j,s,t}h^{w}m^{w})^{2}} Args: b (Numpy array): savings h_wealth (scalar): parameter of wealth tax function p_wealth (scalar): parameter of wealth tax function m_wealth (scalar): parameter of wealth tax function Returns: tau_prime (Numpy array): marginal tax rate on wealth, size = SxJ Calculates effective personal income tax rate. Args: r (array_like): real interest rate w (array_like): real wage rate b (Numpy array): savings n (Numpy array): labor supply factor (scalar): scaling factor converting model units to dollars e (Numpy array): effective labor units etr_params (Numpy array): effective tax rate function parameters p (OG-USA Specifications object): model parameters Returns: tau (Numpy array): effective tax rate on total income # DEP or linear Generates the marginal tax rate on labor income for households. Args: r (array_like): real interest rate w (array_like): real wage rate b (Numpy array): savings n (Numpy array): labor supply factor (scalar): scaling factor converting model units to dollars mtr_capital (bool): whether to compute the marginal tax rate on capital income or labor income e (Numpy array): effective labor units etr_params (Numpy array): effective tax rate function parameters p (OG-USA Specifications object): model parameters Returns: tau (Numpy array): marginal tax rate on income source # DEP or linear Finds total business income tax revenue. .. math:: R_{t}^{b} = \tau_{t}^{b}(Y_{t} - w_{t}L_{t}) - \tau_{t}^{b}\delta_{t}^{\tau}K_{t}^{\tau} Args: r (array_like): real interest rate Y (array_like): aggregate output L (array_like): aggregate labor demand K (array_like): aggregate capital demand Returns: business_revenue (array_like): aggregate business tax revenue Calculate net taxes paid for each household. 
Args: r (array_like): real interest rate w (array_like): real wage rate b (Numpy array): savings n (Numpy array): labor supply bq (Numpy array): bequests received factor (scalar): scaling factor converting model units to dollars tr (Numpy array): government transfers to the household theta (Numpy array): social security replacement rate value for lifetime income group j t (int): time period j (int): index of lifetime income group shift (bool): whether computing for periods 0--s or 1--(s+1), =True for 1--(s+1) method (str): adjusts calculation dimensions based on 'SS' or 'TPI' e (Numpy array): effective labor units etr_params (Numpy array): effective tax rate function parameters p (OG-USA Specifications object): model parameters Returns: net_tax (Numpy array): net taxes paid for each household Calculate income and payroll tax liability for each household Args: r (array_like): real interest rate w (array_like): real wage rate b (Numpy array): savings n (Numpy array): labor supply factor (scalar): scaling factor converting model units to dollars t (int): time period j (int): index of lifetime income group method (str): adjusts calculation dimensions based on 'SS' or 'TPI' e (Numpy array): effective labor units etr_params (Numpy array): effective tax rate function parameters p (OG-USA Specifications object): model parameters Returns: T_I (Numpy array): total income and payroll taxes paid for each household Calculate public pension benefit amounts for each household. Args: w (array_like): real wage rate n (Numpy array): labor supply theta (Numpy array): social security replacement rate value for lifetime income group j t (int): time period j (int): index of lifetime income group shift (bool): whether computing for periods 0--s or 1--(s+1), =True for 1--(s+1) method (str): adjusts calculation dimensions based on 'SS' or 'TPI' e (Numpy array): effective labor units p (OG-USA Specifications object): model parameters Returns: pension (Numpy array): pension amount for each household # Depending on if we are looking at b_s or b_s+1, the # entry for retirement will change (it shifts back one). # The shift boolean makes sure we start replacement rates # at the correct age. # retireTPI is different from retire, because in TP income # we are counting backwards with different length lists. # This will always be the correct location of retirement, # depending on the shape of the lists. # The above methods won't work if scalars are used. This option # is only called by the SS_TPI_firstdoughnutring function in TPI. Calculate wealth tax liability for each household. Args: r (array_like): real interest rate b (Numpy array): savings t (int): time period j (int): index of lifetime income group method (str): adjusts calculation dimensions based on 'SS' or 'TPI' p (OG-USA Specifications object): model parameters Returns: T_W (Numpy array): wealth tax liability for each household Calculate liability due from taxes on bequests for each household. Args: r (array_like): real interest rate b (Numpy array): savings bq (Numpy array): bequests received t (int): time period j (int): index of lifetime income group method (str): adjusts calculation dimensions based on 'SS' or 'TPI' p (OG-USA Specifications object): model parameters Returns: T_BQ (Numpy array): bequest tax liability for each household # The above methods won't work if scalars are used. This option # is only called by the SS_TPI_firstdoughnutring function in TPI. | 2.587121 | 3 |
muse_for_anything/api/v1_api/taxonomy_items.py | baireutherjonas/muse-for-anything | 0 | 9741 | <filename>muse_for_anything/api/v1_api/taxonomy_items.py
"""Module containing the taxonomy items API endpoints of the v1 API."""
from datetime import datetime
from sqlalchemy.sql.schema import Sequence
from muse_for_anything.db.models.taxonomies import (
Taxonomy,
TaxonomyItem,
TaxonomyItemRelation,
TaxonomyItemVersion,
)
from marshmallow.utils import INCLUDE
from flask_babel import gettext
from muse_for_anything.api.util import template_url_for
from typing import Any, Callable, Dict, List, Optional, Union, cast
from flask.helpers import url_for
from flask.views import MethodView
from sqlalchemy.sql.expression import asc, desc, literal
from sqlalchemy.orm.query import Query
from sqlalchemy.orm import selectinload
from flask_smorest import abort
from http import HTTPStatus
from .root import API_V1
from ..base_models import (
ApiLink,
ApiResponse,
ChangedApiObject,
ChangedApiObjectSchema,
CursorPage,
CursorPageArgumentsSchema,
CursorPageSchema,
DynamicApiResponseSchema,
NewApiObject,
NewApiObjectSchema,
)
from ...db.db import DB
from ...db.pagination import get_page_info
from ...db.models.namespace import Namespace
from ...db.models.ontology_objects import OntologyObjectType, OntologyObjectTypeVersion
from .models.ontology import (
TaxonomyItemRelationPostSchema,
TaxonomyItemRelationSchema,
TaxonomyItemSchema,
TaxonomySchema,
)
from .namespace_helpers import (
query_params_to_api_key,
)
from .taxonomy_helpers import (
action_links_for_taxonomy_item,
action_links_for_taxonomy_item_relation,
create_action_link_for_taxonomy_item_relation_page,
nav_links_for_taxonomy_item,
nav_links_for_taxonomy_item_relation,
taxonomy_item_relation_to_api_link,
taxonomy_item_relation_to_api_response,
taxonomy_item_relation_to_taxonomy_item_relation_data,
taxonomy_item_to_api_link,
taxonomy_item_to_api_response,
taxonomy_item_to_taxonomy_item_data,
taxonomy_to_api_response,
taxonomy_to_items_links,
taxonomy_to_taxonomy_data,
)
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/"
)
class TaxonomyItemView(MethodView):
"""Endpoint for a single taxonomy item."""
def _check_path_params(self, namespace: str, taxonomy: str, taxonomy_item: str):
if not namespace or not namespace.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested namespace id has the wrong format!"),
)
if not taxonomy or not taxonomy.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy id has the wrong format!"),
)
if not taxonomy_item or not taxonomy_item.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy item id has the wrong format!"),
)
def _get_taxonomy_item(
self, namespace: str, taxonomy: str, taxonomy_item: str
) -> TaxonomyItem:
namespace_id = int(namespace)
taxonomy_id = int(taxonomy)
taxonomy_item_id = int(taxonomy_item)
found_taxonomy_item: Optional[TaxonomyItem] = (
TaxonomyItem.query.options(selectinload(TaxonomyItem.current_ancestors))
.filter(
TaxonomyItem.id == taxonomy_item_id,
TaxonomyItem.taxonomy_id == taxonomy_id,
)
.first()
)
if (
found_taxonomy_item is None
or found_taxonomy_item.taxonomy.namespace_id != namespace_id
):
abort(HTTPStatus.NOT_FOUND, message=gettext("Taxonomy item not found."))
return found_taxonomy_item # is not None because abort raises exception
def _check_if_taxonomy_modifiable(self, taxonomy: Taxonomy):
if taxonomy.namespace.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Namespace is marked as deleted and cannot be modified further."
),
)
if taxonomy.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy is marked as deleted and cannot be modified further."
),
)
def _check_if_modifiable(self, taxonomy_item: TaxonomyItem):
self._check_if_taxonomy_modifiable(taxonomy=taxonomy_item.taxonomy)
if taxonomy_item.deleted_on is not None:
# cannot modify deleted taxonomy!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy item is marked as deleted and cannot be modified further."
),
)
@API_V1.response(DynamicApiResponseSchema(TaxonomyItemSchema()))
def get(self, namespace: str, taxonomy: str, taxonomy_item: str, **kwargs: Any):
"""Get a single taxonomy item."""
self._check_path_params(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
found_taxonomy_item: TaxonomyItem = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
embedded: List[ApiResponse] = []
for relation in found_taxonomy_item.current_ancestors:
embedded.append(taxonomy_item_to_api_response(relation.taxonomy_item_source))
for relation in found_taxonomy_item.current_related:
embedded.append(taxonomy_item_relation_to_api_response(relation))
embedded.append(taxonomy_item_to_api_response(relation.taxonomy_item_target))
return ApiResponse(
links=[
ApiLink(
href=url_for(
"api-v1.NamespacesView",
_external=True,
**{"item-count": 50},
sort="name",
),
rel=("first", "page", "collection", "nav"),
resource_type="ont-namespace",
schema=url_for(
"api-v1.ApiSchemaView", schema_id="Namespace", _external=True
),
),
*nav_links_for_taxonomy_item(found_taxonomy_item),
*action_links_for_taxonomy_item(found_taxonomy_item),
],
embedded=embedded,
data=taxonomy_item_to_taxonomy_item_data(found_taxonomy_item),
)
@API_V1.arguments(TaxonomyItemSchema())
@API_V1.response(DynamicApiResponseSchema(NewApiObjectSchema()))
def put(self, data, namespace: str, taxonomy: str, taxonomy_item: str):
"""Update a taxonomy item."""
self._check_path_params(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
found_taxonomy_item: TaxonomyItem = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
self._check_if_modifiable(found_taxonomy_item)
taxonomy_item_version = TaxonomyItemVersion(
taxonomy_item=found_taxonomy_item,
version=found_taxonomy_item.current_version.version + 1,
name=data["name"],
description=data.get("description", ""),
sort_key=data.get("sort_key", 10),
)
found_taxonomy_item.current_version = taxonomy_item_version
DB.session.add(found_taxonomy_item)
DB.session.add(taxonomy_item_version)
DB.session.commit()
taxonomy_item_link = taxonomy_item_to_taxonomy_item_data(found_taxonomy_item).self
taxonomy_item_data = taxonomy_item_to_api_response(found_taxonomy_item)
return ApiResponse(
links=[taxonomy_item_link],
embedded=[taxonomy_item_data],
data=ChangedApiObject(
self=ApiLink(
href=url_for(
"api-v1.TaxonomyItemView",
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
_external=True,
),
rel=(
"update",
"put",
"ont-taxonomy-item",
),
resource_type="changed",
),
changed=taxonomy_item_link,
),
)
@API_V1.response(DynamicApiResponseSchema(ChangedApiObjectSchema()))
def post(self, namespace: str, taxonomy: str, taxonomy_item: str): # restore action
"""Restore a deleted taxonomy item."""
self._check_path_params(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
found_taxonomy_item: TaxonomyItem = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
self._check_if_taxonomy_modifiable(found_taxonomy_item.taxonomy)
changed_links: List[ApiLink] = []
embedded: List[ApiResponse] = []
# only actually restore when not already restored
if found_taxonomy_item.deleted_on is not None:
# restore taxonomy item
deleted_timestamp = found_taxonomy_item.deleted_on
found_taxonomy_item.deleted_on = None
# also restore relations
ancestors: Sequence[TaxonomyItemRelation] = TaxonomyItemRelation.query.filter(
TaxonomyItemRelation.taxonomy_item_target_id == found_taxonomy_item.id,
TaxonomyItemRelation.deleted_on == deleted_timestamp,
).all()
ancestor_ids = set()
relation: TaxonomyItemRelation
for relation in ancestors:
if relation.taxonomy_item_source.deleted_on is not None:
continue # do not restore relations to deleted items
ancestor_ids.add(relation.taxonomy_item_source_id)
relation.deleted_on = None
DB.session.add(relation)
def produces_circle(relation: TaxonomyItemRelation) -> bool:
if relation.taxonomy_item_target_id in ancestor_ids:
return True
for rel in relation.taxonomy_item_target.current_related:
if produces_circle(rel):
return True
return False
children: Sequence[TaxonomyItemRelation] = TaxonomyItemRelation.query.filter(
TaxonomyItemRelation.taxonomy_item_source_id == found_taxonomy_item.id,
TaxonomyItemRelation.deleted_on == deleted_timestamp,
).all()
for relation in children:
if relation.taxonomy_item_target.deleted_on is not None:
continue # do not restore relations to deleted items
if produces_circle(relation):
continue
relation.deleted_on = None
DB.session.add(relation)
DB.session.add(found_taxonomy_item)
DB.session.commit()
# add changed items to be embedded into the response
for relation in found_taxonomy_item.current_ancestors:
changed_links.append(taxonomy_item_relation_to_api_link(relation))
embedded.append(taxonomy_item_relation_to_api_response(relation))
changed_links.append(
taxonomy_item_to_api_link(relation.taxonomy_item_source)
)
embedded.append(
taxonomy_item_to_api_response(relation.taxonomy_item_source)
)
for relation in found_taxonomy_item.current_related:
changed_links.append(taxonomy_item_relation_to_api_link(relation))
embedded.append(taxonomy_item_relation_to_api_response(relation))
changed_links.append(
taxonomy_item_to_api_link(relation.taxonomy_item_target)
)
embedded.append(
taxonomy_item_to_api_response(relation.taxonomy_item_target)
)
taxonomy_item_link = taxonomy_item_to_taxonomy_item_data(found_taxonomy_item).self
taxonomy_item_data = taxonomy_item_to_api_response(found_taxonomy_item)
taxonomy_link = taxonomy_to_taxonomy_data(found_taxonomy_item.taxonomy).self
taxonomy_data = taxonomy_to_api_response(found_taxonomy_item.taxonomy)
return ApiResponse(
links=[taxonomy_item_link, taxonomy_link, *changed_links],
embedded=[taxonomy_item_data, taxonomy_data, *embedded],
data=ChangedApiObject(
self=ApiLink(
href=url_for(
"api-v1.TaxonomyItemView",
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
_external=True,
),
rel=(
"restore",
"post",
"ont-taxonomy-item",
),
resource_type="changed",
),
changed=taxonomy_item_link,
),
)
@API_V1.response(DynamicApiResponseSchema(ChangedApiObjectSchema()))
def delete(self, namespace: str, taxonomy: str, taxonomy_item: str): # restore action
"""Delete a taxonomy item."""
self._check_path_params(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
found_taxonomy_item: TaxonomyItem = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
self._check_if_taxonomy_modifiable(found_taxonomy_item.taxonomy)
changed_links: List[ApiLink] = []
embedded: List[ApiResponse] = []
# only actually delete when not already deleted
if found_taxonomy_item.deleted_on is None:
# delete taxonomy item
deleted_timestamp = datetime.utcnow()
found_taxonomy_item.deleted_on = deleted_timestamp
# also delete incoming and outgoing relations to remove them
# from relations of existing items
ancestors = found_taxonomy_item.current_ancestors
for relation in found_taxonomy_item.current_ancestors:
relation.deleted_on = deleted_timestamp
DB.session.add(relation)
related = found_taxonomy_item.current_related
for relation in found_taxonomy_item.current_related:
relation.deleted_on = deleted_timestamp
DB.session.add(relation)
DB.session.add(found_taxonomy_item)
DB.session.commit()
# add changed items to be embedded into the response
for relation in ancestors:
changed_links.append(taxonomy_item_relation_to_api_link(relation))
embedded.append(taxonomy_item_relation_to_api_response(relation))
changed_links.append(
taxonomy_item_to_api_link(relation.taxonomy_item_source)
)
embedded.append(
taxonomy_item_to_api_response(relation.taxonomy_item_source)
)
for relation in related:
changed_links.append(taxonomy_item_relation_to_api_link(relation))
embedded.append(taxonomy_item_relation_to_api_response(relation))
changed_links.append(
taxonomy_item_to_api_link(relation.taxonomy_item_target)
)
embedded.append(
taxonomy_item_to_api_response(relation.taxonomy_item_target)
)
taxonomy_item_link = taxonomy_item_to_taxonomy_item_data(found_taxonomy_item).self
taxonomy_item_data = taxonomy_item_to_api_response(found_taxonomy_item)
taxonomy_link = taxonomy_to_taxonomy_data(found_taxonomy_item.taxonomy).self
taxonomy_data = taxonomy_to_api_response(found_taxonomy_item.taxonomy)
return ApiResponse(
links=[taxonomy_item_link, taxonomy_link, *changed_links],
embedded=[taxonomy_item_data, taxonomy_data, *embedded],
data=ChangedApiObject(
self=ApiLink(
href=url_for(
"api-v1.TaxonomyItemView",
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
_external=True,
),
rel=(
"delete",
"ont-taxonomy-item",
),
resource_type="changed",
),
changed=taxonomy_item_link,
),
)
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/relations/"
)
class TaxonomyItemRelationsView(MethodView):
"""Endpoint for manipulating taxonomy item relations."""
def _check_path_params(self, namespace: str, taxonomy: str, taxonomy_item: str):
if not namespace or not namespace.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested namespace id has the wrong format!"),
)
if not taxonomy or not taxonomy.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy id has the wrong format!"),
)
if not taxonomy_item or not taxonomy_item.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy item id has the wrong format!"),
)
def _get_taxonomy_item(
self, namespace: str, taxonomy: str, taxonomy_item: str
) -> TaxonomyItem:
namespace_id = int(namespace)
taxonomy_id = int(taxonomy)
taxonomy_item_id = int(taxonomy_item)
found_taxonomy_item: Optional[TaxonomyItem] = TaxonomyItem.query.filter(
TaxonomyItem.id == taxonomy_item_id,
TaxonomyItem.taxonomy_id == taxonomy_id,
).first()
if (
found_taxonomy_item is None
or found_taxonomy_item.taxonomy.namespace_id != namespace_id
):
abort(HTTPStatus.NOT_FOUND, message=gettext("Taxonomy item not found."))
return found_taxonomy_item # is not None because abort raises exception
def _check_if_modifiable(self, taxonomy_item: TaxonomyItem):
taxonomy = taxonomy_item.taxonomy
if taxonomy.namespace.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Namespace is marked as deleted and cannot be modified further."
),
)
if taxonomy.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy is marked as deleted and cannot be modified further."
),
)
if taxonomy_item.deleted_on is not None:
# cannot modify deleted taxonomy!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy item is marked as deleted and cannot be modified further."
),
)
def _check_item_circle(
self,
item_target: TaxonomyItem,
item_source: TaxonomyItem,
original_target: Optional[TaxonomyItem] = None,
):
"""Check for a path from target to source which would form a circular dependency. Abort if such a path is found!"""
if original_target is None:
original_target = item_target
relation: TaxonomyItemRelation
for relation in item_target.current_related:
if relation.taxonomy_item_target.deleted_on is not None:
continue # exclude deleted items as targets
if relation.taxonomy_item_target_id == item_source.id:
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Cannot add a relation from %(target)s to %(source)s as it would create a circle!",
target=original_target.name,
source=item_source.name,
),
)
else:
self._check_item_circle(
item_target=relation.taxonomy_item_target,
item_source=item_source,
original_target=original_target,
)
@API_V1.arguments(TaxonomyItemRelationPostSchema())
@API_V1.response(DynamicApiResponseSchema(NewApiObjectSchema()))
def post(
self,
data: Dict[str, str],
namespace: str,
taxonomy: str,
taxonomy_item: str,
):
"""Create a new relation to a taxonomy item."""
self._check_path_params(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
if namespace != data["namespace_id"] or taxonomy != data["taxonomy_id"]:
abort(
HTTPStatus.BAD_REQUEST,
message=gettext(
"Cannot create a relation to a taxonomy item of a different taxonomy!"
),
)
found_taxonomy_item = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
self._check_if_modifiable(found_taxonomy_item)
found_taxonomy_item_target = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=data["taxonomy_item_id"]
)
self._check_item_circle(found_taxonomy_item_target, found_taxonomy_item)
relation = TaxonomyItemRelation(
taxonomy_item_source=found_taxonomy_item,
taxonomy_item_target=found_taxonomy_item_target,
)
DB.session.add(relation)
DB.session.commit()
taxonomy_item_relation_link = (
taxonomy_item_relation_to_taxonomy_item_relation_data(relation).self
)
taxonomy_item_relation_data = taxonomy_item_relation_to_api_response(relation)
taxonomy_item_source_link = taxonomy_item_to_api_link(found_taxonomy_item)
taxonomy_item_source_data = taxonomy_item_to_api_response(found_taxonomy_item)
taxonomy_item_target_link = taxonomy_item_to_api_link(found_taxonomy_item_target)
taxonomy_item_target_data = taxonomy_item_to_api_response(
found_taxonomy_item_target
)
self_link = create_action_link_for_taxonomy_item_relation_page(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
self_link.rel = (*self_link.rel, "ont-taxonomy-item-relation")
self_link.resource_type = "new"
return ApiResponse(
links=[
taxonomy_item_relation_link,
taxonomy_item_source_link,
taxonomy_item_target_link,
],
embedded=[
taxonomy_item_relation_data,
taxonomy_item_source_data,
taxonomy_item_target_data,
],
data=NewApiObject(
self=self_link,
new=taxonomy_item_relation_link,
),
)
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/relations/<string:relation>/"
)
class TaxonomyItemRelationView(MethodView):
"""Endpoint for removing taxonomy item relations."""
def _check_path_params(
self, namespace: str, taxonomy: str, taxonomy_item: str, relation: str
):
if not namespace or not namespace.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested namespace id has the wrong format!"),
)
if not taxonomy or not taxonomy.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy id has the wrong format!"),
)
if not taxonomy_item or not taxonomy_item.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy item id has the wrong format!"),
)
if not relation or not relation.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext(
"The requested taxonomy item relation id has the wrong format!"
),
)
def _get_taxonomy_item_relation(
self, namespace: str, taxonomy: str, taxonomy_item: str, relation: str
) -> TaxonomyItemRelation:
namespace_id = int(namespace)
taxonomy_id = int(taxonomy)
taxonomy_item_id = int(taxonomy_item)
relation_id = int(relation)
found_taxonomy_item_relation: Optional[
TaxonomyItemRelation
] = TaxonomyItemRelation.query.filter(
TaxonomyItemRelation.id == relation_id,
TaxonomyItemRelation.taxonomy_item_source_id == taxonomy_item_id,
).first()
if (
found_taxonomy_item_relation is None
or found_taxonomy_item_relation.taxonomy_item_source.taxonomy_id
!= taxonomy_id
or found_taxonomy_item_relation.taxonomy_item_source.taxonomy.namespace_id
!= namespace_id
):
abort(
HTTPStatus.NOT_FOUND, message=gettext("Taxonomy item relation not found.")
)
return found_taxonomy_item_relation # is not None because abort raises exception
def _check_if_modifiable(self, relation: TaxonomyItemRelation):
taxonomy_item = relation.taxonomy_item_source
taxonomy = taxonomy_item.taxonomy
if taxonomy.namespace.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Namespace is marked as deleted and cannot be modified further."
),
)
if taxonomy.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy is marked as deleted and cannot be modified further."
),
)
if taxonomy_item.deleted_on is not None:
# cannot modify deleted taxonomy item!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy item is marked as deleted and cannot be modified further."
),
)
if relation.deleted_on is not None:
# cannot modify deleted item relation!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy item relation is marked as deleted and cannot be modified further."
),
)
@API_V1.response(DynamicApiResponseSchema(TaxonomyItemRelationSchema()))
def get(
self,
namespace: str,
taxonomy: str,
taxonomy_item: str,
relation: str,
**kwargs: Any
):
"""Get a single relation."""
self._check_path_params(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
)
found_relation = self._get_taxonomy_item_relation(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
)
return ApiResponse(
links=(
*nav_links_for_taxonomy_item_relation(found_relation),
*action_links_for_taxonomy_item_relation(found_relation),
),
data=taxonomy_item_relation_to_taxonomy_item_relation_data(found_relation),
)
@API_V1.response(DynamicApiResponseSchema(ChangedApiObjectSchema()))
def delete(
self,
namespace: str,
taxonomy: str,
taxonomy_item: str,
relation: str,
**kwargs: Any
):
"""Delete an existing relation."""
self._check_path_params(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
)
found_relation = self._get_taxonomy_item_relation(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
)
self._check_if_modifiable(found_relation)
# only actually delete when not already deleted
if found_relation.deleted_on is None:
# delete taxonomy item relation
found_relation.deleted_on = datetime.utcnow()
DB.session.add(found_relation)
DB.session.commit()
relation_link = taxonomy_item_relation_to_taxonomy_item_relation_data(
found_relation
).self
relation_data = taxonomy_item_relation_to_api_response(found_relation)
source_item_link = taxonomy_item_to_api_link(found_relation.taxonomy_item_source)
source_item_data = taxonomy_item_to_api_response(
found_relation.taxonomy_item_source
)
target_item_link = taxonomy_item_to_api_link(found_relation.taxonomy_item_target)
target_item_data = taxonomy_item_to_api_response(
found_relation.taxonomy_item_target
)
return ApiResponse(
links=[relation_link, source_item_link, target_item_link],
embedded=[relation_data, source_item_data, target_item_data],
data=ChangedApiObject(
self=ApiLink(
href=url_for(
"api-v1.TaxonomyItemRelationView",
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
_external=True,
),
rel=(
"delete",
"ont-taxonomy-item-relation",
),
resource_type="changed",
),
changed=relation_link,
),
)
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/versions/"
)
class TaxonomyItemVersionsView(MethodView):
"""Endpoint for all versions of a taxonomy item."""
def get(self, namespace: str, taxonomy: str, taxonomy_item: str, **kwargs: Any):
"""TODO."""
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/versions/<string:version>/"
)
class TaxonomyItemVersionView(MethodView):
"""Endpoint for a single version of a taxonomy item."""
def _check_path_params(
self, namespace: str, taxonomy: str, taxonomy_item: str, version: str
):
if not namespace or not namespace.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested namespace id has the wrong format!"),
)
if not taxonomy or not taxonomy.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy id has the wrong format!"),
)
if not taxonomy_item or not taxonomy_item.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy item id has the wrong format!"),
)
if not version or not version.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext(
"The requested taxonomy item version has the wrong format!"
),
)
def _get_taxonomy_item_version(
self, namespace: str, taxonomy: str, taxonomy_item: str, version: str
) -> TaxonomyItemVersion:
namespace_id = int(namespace)
taxonomy_id = int(taxonomy)
taxonomy_item_id = int(taxonomy_item)
version_nr = int(version)
found_taxonomy_item_version: Optional[
TaxonomyItemVersion
] = TaxonomyItemVersion.query.filter(
TaxonomyItemVersion.version == version_nr,
TaxonomyItemVersion.taxonomy_item_id == taxonomy_item_id,
).first()
if (
found_taxonomy_item_version is None
or found_taxonomy_item_version.taxonomy_item.taxonomy_id != taxonomy_id
or found_taxonomy_item_version.taxonomy_item.taxonomy.namespace_id
!= namespace_id
):
abort(
HTTPStatus.NOT_FOUND, message=gettext("Taxonomy item version not found.")
)
return found_taxonomy_item_version # is not None because abort raises exception
@API_V1.response(DynamicApiResponseSchema(TaxonomyItemSchema()))
def get(
self,
namespace: str,
taxonomy: str,
taxonomy_item: str,
version: str,
**kwargs: Any
):
"""Get a single taxonomy item version."""
self._check_path_params(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
version=version,
)
found_taxonomy_item_version = self._get_taxonomy_item_version(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
version=version,
)
return ApiResponse(
links=[
ApiLink(
href=url_for(
"api-v1.NamespacesView",
_external=True,
**{"item-count": 50},
sort="name",
),
rel=("first", "page", "collection", "nav"),
resource_type="ont-namespace",
schema=url_for(
"api-v1.ApiSchemaView", schema_id="Namespace", _external=True
),
),
*nav_links_for_taxonomy_item_version(found_taxonomy_item_version),
*action_links_for_taxonomy_item_version(found_taxonomy_item_version),
],
data=taxonomy_item_to_taxonomy_item_data(found_taxonomy_item_version),
)
| <filename>muse_for_anything/api/v1_api/taxonomy_items.py
"""Module containing the taxonomy items API endpoints of the v1 API."""
from datetime import datetime
from sqlalchemy.sql.schema import Sequence
from muse_for_anything.db.models.taxonomies import (
Taxonomy,
TaxonomyItem,
TaxonomyItemRelation,
TaxonomyItemVersion,
)
from marshmallow.utils import INCLUDE
from flask_babel import gettext
from muse_for_anything.api.util import template_url_for
from typing import Any, Callable, Dict, List, Optional, Union, cast
from flask.helpers import url_for
from flask.views import MethodView
from sqlalchemy.sql.expression import asc, desc, literal
from sqlalchemy.orm.query import Query
from sqlalchemy.orm import selectinload
from flask_smorest import abort
from http import HTTPStatus
from .root import API_V1
from ..base_models import (
ApiLink,
ApiResponse,
ChangedApiObject,
ChangedApiObjectSchema,
CursorPage,
CursorPageArgumentsSchema,
CursorPageSchema,
DynamicApiResponseSchema,
NewApiObject,
NewApiObjectSchema,
)
from ...db.db import DB
from ...db.pagination import get_page_info
from ...db.models.namespace import Namespace
from ...db.models.ontology_objects import OntologyObjectType, OntologyObjectTypeVersion
from .models.ontology import (
TaxonomyItemRelationPostSchema,
TaxonomyItemRelationSchema,
TaxonomyItemSchema,
TaxonomySchema,
)
from .namespace_helpers import (
query_params_to_api_key,
)
from .taxonomy_helpers import (
action_links_for_taxonomy_item,
action_links_for_taxonomy_item_relation,
create_action_link_for_taxonomy_item_relation_page,
nav_links_for_taxonomy_item,
nav_links_for_taxonomy_item_relation,
taxonomy_item_relation_to_api_link,
taxonomy_item_relation_to_api_response,
taxonomy_item_relation_to_taxonomy_item_relation_data,
taxonomy_item_to_api_link,
taxonomy_item_to_api_response,
taxonomy_item_to_taxonomy_item_data,
taxonomy_to_api_response,
taxonomy_to_items_links,
taxonomy_to_taxonomy_data,
)
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/"
)
class TaxonomyItemView(MethodView):
"""Endpoint for a single taxonomy item."""
def _check_path_params(self, namespace: str, taxonomy: str, taxonomy_item: str):
if not namespace or not namespace.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested namespace id has the wrong format!"),
)
if not taxonomy or not taxonomy.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy id has the wrong format!"),
)
if not taxonomy_item or not taxonomy_item.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy item id has the wrong format!"),
)
def _get_taxonomy_item(
self, namespace: str, taxonomy: str, taxonomy_item: str
) -> TaxonomyItem:
namespace_id = int(namespace)
taxonomy_id = int(taxonomy)
taxonomy_item_id = int(taxonomy_item)
found_taxonomy_item: Optional[TaxonomyItem] = (
TaxonomyItem.query.options(selectinload(TaxonomyItem.current_ancestors))
.filter(
TaxonomyItem.id == taxonomy_item_id,
TaxonomyItem.taxonomy_id == taxonomy_id,
)
.first()
)
if (
found_taxonomy_item is None
or found_taxonomy_item.taxonomy.namespace_id != namespace_id
):
abort(HTTPStatus.NOT_FOUND, message=gettext("Taxonomy item not found."))
return found_taxonomy_item # is not None because abort raises exception
def _check_if_taxonomy_modifiable(self, taxonomy: Taxonomy):
if taxonomy.namespace.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Namespace is marked as deleted and cannot be modified further."
),
)
if taxonomy.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy is marked as deleted and cannot be modified further."
),
)
def _check_if_modifiable(self, taxonomy_item: TaxonomyItem):
self._check_if_taxonomy_modifiable(taxonomy=taxonomy_item.taxonomy)
if taxonomy_item.deleted_on is not None:
# cannot modify deleted taxonomy!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy item is marked as deleted and cannot be modified further."
),
)
@API_V1.response(DynamicApiResponseSchema(TaxonomyItemSchema()))
def get(self, namespace: str, taxonomy: str, taxonomy_item: str, **kwargs: Any):
"""Get a single taxonomy item."""
self._check_path_params(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
found_taxonomy_item: TaxonomyItem = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
embedded: List[ApiResponse] = []
for relation in found_taxonomy_item.current_ancestors:
embedded.append(taxonomy_item_to_api_response(relation.taxonomy_item_source))
for relation in found_taxonomy_item.current_related:
embedded.append(taxonomy_item_relation_to_api_response(relation))
embedded.append(taxonomy_item_to_api_response(relation.taxonomy_item_target))
return ApiResponse(
links=[
ApiLink(
href=url_for(
"api-v1.NamespacesView",
_external=True,
**{"item-count": 50},
sort="name",
),
rel=("first", "page", "collection", "nav"),
resource_type="ont-namespace",
schema=url_for(
"api-v1.ApiSchemaView", schema_id="Namespace", _external=True
),
),
*nav_links_for_taxonomy_item(found_taxonomy_item),
*action_links_for_taxonomy_item(found_taxonomy_item),
],
embedded=embedded,
data=taxonomy_item_to_taxonomy_item_data(found_taxonomy_item),
)
@API_V1.arguments(TaxonomyItemSchema())
@API_V1.response(DynamicApiResponseSchema(NewApiObjectSchema()))
def put(self, data, namespace: str, taxonomy: str, taxonomy_item: str):
"""Update a taxonomy item."""
self._check_path_params(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
found_taxonomy_item: TaxonomyItem = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
self._check_if_modifiable(found_taxonomy_item)
taxonomy_item_version = TaxonomyItemVersion(
taxonomy_item=found_taxonomy_item,
version=found_taxonomy_item.current_version.version + 1,
name=data["name"],
description=data.get("description", ""),
sort_key=data.get("sort_key", 10),
)
found_taxonomy_item.current_version = taxonomy_item_version
DB.session.add(found_taxonomy_item)
DB.session.add(taxonomy_item_version)
DB.session.commit()
taxonomy_item_link = taxonomy_item_to_taxonomy_item_data(found_taxonomy_item).self
taxonomy_item_data = taxonomy_item_to_api_response(found_taxonomy_item)
return ApiResponse(
links=[taxonomy_item_link],
embedded=[taxonomy_item_data],
data=ChangedApiObject(
self=ApiLink(
href=url_for(
"api-v1.TaxonomyItemView",
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
_external=True,
),
rel=(
"update",
"put",
"ont-taxonomy-item",
),
resource_type="changed",
),
changed=taxonomy_item_link,
),
)
@API_V1.response(DynamicApiResponseSchema(ChangedApiObjectSchema()))
def post(self, namespace: str, taxonomy: str, taxonomy_item: str): # restore action
"""Restore a deleted taxonomy item."""
self._check_path_params(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
found_taxonomy_item: TaxonomyItem = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
self._check_if_taxonomy_modifiable(found_taxonomy_item.taxonomy)
changed_links: List[ApiLink] = []
embedded: List[ApiResponse] = []
# only actually restore when not already restored
if found_taxonomy_item.deleted_on is not None:
# restore taxonomy item
deleted_timestamp = found_taxonomy_item.deleted_on
found_taxonomy_item.deleted_on = None
# also restore relations
ancestors: Sequence[TaxonomyItemRelation] = TaxonomyItemRelation.query.filter(
TaxonomyItemRelation.taxonomy_item_target_id == found_taxonomy_item.id,
TaxonomyItemRelation.deleted_on == deleted_timestamp,
).all()
ancestor_ids = set()
relation: TaxonomyItemRelation
for relation in ancestors:
if relation.taxonomy_item_source.deleted_on is not None:
continue # do not restore relations to deleted items
ancestor_ids.add(relation.taxonomy_item_source_id)
relation.deleted_on = None
DB.session.add(relation)
def produces_circle(relation: TaxonomyItemRelation) -> bool:
if relation.taxonomy_item_target_id in ancestor_ids:
return True
for rel in relation.taxonomy_item_target.current_related:
if produces_circle(rel):
return True
return False
children: Sequence[TaxonomyItemRelation] = TaxonomyItemRelation.query.filter(
TaxonomyItemRelation.taxonomy_item_source_id == found_taxonomy_item.id,
TaxonomyItemRelation.deleted_on == deleted_timestamp,
).all()
for relation in children:
if relation.taxonomy_item_target.deleted_on is not None:
continue # do not restore relations to deleted items
if produces_circle(relation):
continue
relation.deleted_on = None
DB.session.add(relation)
DB.session.add(found_taxonomy_item)
DB.session.commit()
# add changed items to be embedded into the response
for relation in found_taxonomy_item.current_ancestors:
changed_links.append(taxonomy_item_relation_to_api_link(relation))
embedded.append(taxonomy_item_relation_to_api_response(relation))
changed_links.append(
taxonomy_item_to_api_link(relation.taxonomy_item_source)
)
embedded.append(
taxonomy_item_to_api_response(relation.taxonomy_item_source)
)
for relation in found_taxonomy_item.current_related:
changed_links.append(taxonomy_item_relation_to_api_link(relation))
embedded.append(taxonomy_item_relation_to_api_response(relation))
changed_links.append(
taxonomy_item_to_api_link(relation.taxonomy_item_target)
)
embedded.append(
taxonomy_item_to_api_response(relation.taxonomy_item_target)
)
taxonomy_item_link = taxonomy_item_to_taxonomy_item_data(found_taxonomy_item).self
taxonomy_item_data = taxonomy_item_to_api_response(found_taxonomy_item)
taxonomy_link = taxonomy_to_taxonomy_data(found_taxonomy_item.taxonomy).self
taxonomy_data = taxonomy_to_api_response(found_taxonomy_item.taxonomy)
return ApiResponse(
links=[taxonomy_item_link, taxonomy_link, *changed_links],
embedded=[taxonomy_item_data, taxonomy_data, *embedded],
data=ChangedApiObject(
self=ApiLink(
href=url_for(
"api-v1.TaxonomyItemView",
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
_external=True,
),
rel=(
"restore",
"post",
"ont-taxonomy-item",
),
resource_type="changed",
),
changed=taxonomy_item_link,
),
)
@API_V1.response(DynamicApiResponseSchema(ChangedApiObjectSchema()))
def delete(self, namespace: str, taxonomy: str, taxonomy_item: str): # restore action
"""Delete a taxonomy item."""
self._check_path_params(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
found_taxonomy_item: TaxonomyItem = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
self._check_if_taxonomy_modifiable(found_taxonomy_item.taxonomy)
changed_links: List[ApiLink] = []
embedded: List[ApiResponse] = []
# only actually delete when not already deleted
if found_taxonomy_item.deleted_on is None:
# delete taxonomy item
deleted_timestamp = datetime.utcnow()
found_taxonomy_item.deleted_on = deleted_timestamp
# also delete incoming and outgoing relations to remove them
# from relations of existing items
ancestors = found_taxonomy_item.current_ancestors
for relation in found_taxonomy_item.current_ancestors:
relation.deleted_on = deleted_timestamp
DB.session.add(relation)
related = found_taxonomy_item.current_related
for relation in found_taxonomy_item.current_related:
relation.deleted_on = deleted_timestamp
DB.session.add(relation)
DB.session.add(found_taxonomy_item)
DB.session.commit()
# add changed items to be embedded into the response
for relation in ancestors:
changed_links.append(taxonomy_item_relation_to_api_link(relation))
embedded.append(taxonomy_item_relation_to_api_response(relation))
changed_links.append(
taxonomy_item_to_api_link(relation.taxonomy_item_source)
)
embedded.append(
taxonomy_item_to_api_response(relation.taxonomy_item_source)
)
for relation in related:
changed_links.append(taxonomy_item_relation_to_api_link(relation))
embedded.append(taxonomy_item_relation_to_api_response(relation))
changed_links.append(
taxonomy_item_to_api_link(relation.taxonomy_item_target)
)
embedded.append(
taxonomy_item_to_api_response(relation.taxonomy_item_target)
)
taxonomy_item_link = taxonomy_item_to_taxonomy_item_data(found_taxonomy_item).self
taxonomy_item_data = taxonomy_item_to_api_response(found_taxonomy_item)
taxonomy_link = taxonomy_to_taxonomy_data(found_taxonomy_item.taxonomy).self
taxonomy_data = taxonomy_to_api_response(found_taxonomy_item.taxonomy)
return ApiResponse(
links=[taxonomy_item_link, taxonomy_link, *changed_links],
embedded=[taxonomy_item_data, taxonomy_data, *embedded],
data=ChangedApiObject(
self=ApiLink(
href=url_for(
"api-v1.TaxonomyItemView",
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
_external=True,
),
rel=(
"delete",
"ont-taxonomy-item",
),
resource_type="changed",
),
changed=taxonomy_item_link,
),
)
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/relations/"
)
class TaxonomyItemRelationsView(MethodView):
"""Endpoint for manipulating taxonomy item relations."""
def _check_path_params(self, namespace: str, taxonomy: str, taxonomy_item: str):
if not namespace or not namespace.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested namespace id has the wrong format!"),
)
if not taxonomy or not taxonomy.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy id has the wrong format!"),
)
if not taxonomy_item or not taxonomy_item.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy item id has the wrong format!"),
)
def _get_taxonomy_item(
self, namespace: str, taxonomy: str, taxonomy_item: str
) -> TaxonomyItem:
namespace_id = int(namespace)
taxonomy_id = int(taxonomy)
taxonomy_item_id = int(taxonomy_item)
found_taxonomy_item: Optional[TaxonomyItem] = TaxonomyItem.query.filter(
TaxonomyItem.id == taxonomy_item_id,
TaxonomyItem.taxonomy_id == taxonomy_id,
).first()
if (
found_taxonomy_item is None
or found_taxonomy_item.taxonomy.namespace_id != namespace_id
):
abort(HTTPStatus.NOT_FOUND, message=gettext("Taxonomy item not found."))
return found_taxonomy_item # is not None because abort raises exception
def _check_if_modifiable(self, taxonomy_item: TaxonomyItem):
taxonomy = taxonomy_item.taxonomy
if taxonomy.namespace.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Namespace is marked as deleted and cannot be modified further."
),
)
if taxonomy.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy is marked as deleted and cannot be modified further."
),
)
if taxonomy_item.deleted_on is not None:
# cannot modify deleted taxonomy!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy item is marked as deleted and cannot be modified further."
),
)
def _check_item_circle(
self,
item_target: TaxonomyItem,
item_source: TaxonomyItem,
original_target: Optional[TaxonomyItem] = None,
):
"""Check for a path from target to source which would form a circular dependency. Abort if such a path is found!"""
if original_target is None:
original_target = item_target
relation: TaxonomyItemRelation
for relation in item_target.current_related:
if relation.taxonomy_item_target.deleted_on is not None:
continue # exclude deleted items as targets
if relation.taxonomy_item_target_id == item_source.id:
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Cannot add a relation from %(target)s to %(source)s as it would create a circle!",
target=original_target.name,
source=item_source.name,
),
)
else:
self._check_item_circle(
item_target=relation.taxonomy_item_target,
item_source=item_source,
original_target=original_target,
)
@API_V1.arguments(TaxonomyItemRelationPostSchema())
@API_V1.response(DynamicApiResponseSchema(NewApiObjectSchema()))
def post(
self,
data: Dict[str, str],
namespace: str,
taxonomy: str,
taxonomy_item: str,
):
"""Create a new relation to a taxonomy item."""
self._check_path_params(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
if namespace != data["namespace_id"] or taxonomy != data["taxonomy_id"]:
abort(
HTTPStatus.BAD_REQUEST,
message=gettext(
"Cannot create a relation to a taxonomy item of a different taxonomy!"
),
)
found_taxonomy_item = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
self._check_if_modifiable(found_taxonomy_item)
found_taxonomy_item_target = self._get_taxonomy_item(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=data["taxonomy_item_id"]
)
self._check_item_circle(found_taxonomy_item_target, found_taxonomy_item)
relation = TaxonomyItemRelation(
taxonomy_item_source=found_taxonomy_item,
taxonomy_item_target=found_taxonomy_item_target,
)
DB.session.add(relation)
DB.session.commit()
taxonomy_item_relation_link = (
taxonomy_item_relation_to_taxonomy_item_relation_data(relation).self
)
taxonomy_item_relation_data = taxonomy_item_relation_to_api_response(relation)
taxonomy_item_source_link = taxonomy_item_to_api_link(found_taxonomy_item)
taxonomy_item_source_data = taxonomy_item_to_api_response(found_taxonomy_item)
taxonomy_item_target_link = taxonomy_item_to_api_link(found_taxonomy_item_target)
taxonomy_item_target_data = taxonomy_item_to_api_response(
found_taxonomy_item_target
)
self_link = create_action_link_for_taxonomy_item_relation_page(
namespace=namespace, taxonomy=taxonomy, taxonomy_item=taxonomy_item
)
self_link.rel = (*self_link.rel, "ont-taxonomy-item-relation")
self_link.resource_type = "new"
return ApiResponse(
links=[
taxonomy_item_relation_link,
taxonomy_item_source_link,
taxonomy_item_target_link,
],
embedded=[
taxonomy_item_relation_data,
taxonomy_item_source_data,
taxonomy_item_target_data,
],
data=NewApiObject(
self=self_link,
new=taxonomy_item_relation_link,
),
)
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/relations/<string:relation>/"
)
class TaxonomyItemRelationView(MethodView):
"""Endpoint for removing taxonomy item relations."""
def _check_path_params(
self, namespace: str, taxonomy: str, taxonomy_item: str, relation: str
):
if not namespace or not namespace.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested namespace id has the wrong format!"),
)
if not taxonomy or not taxonomy.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy id has the wrong format!"),
)
if not taxonomy_item or not taxonomy_item.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy item id has the wrong format!"),
)
if not relation or not relation.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext(
"The requested taxonomy item relation id has the wrong format!"
),
)
def _get_taxonomy_item_relation(
self, namespace: str, taxonomy: str, taxonomy_item: str, relation: str
) -> TaxonomyItemRelation:
namespace_id = int(namespace)
taxonomy_id = int(taxonomy)
taxonomy_item_id = int(taxonomy_item)
relation_id = int(relation)
found_taxonomy_item_relation: Optional[
TaxonomyItemRelation
] = TaxonomyItemRelation.query.filter(
TaxonomyItemRelation.id == relation_id,
TaxonomyItemRelation.taxonomy_item_source_id == taxonomy_item_id,
).first()
if (
found_taxonomy_item_relation is None
or found_taxonomy_item_relation.taxonomy_item_source.taxonomy_id
!= taxonomy_id
or found_taxonomy_item_relation.taxonomy_item_source.taxonomy.namespace_id
!= namespace_id
):
abort(
HTTPStatus.NOT_FOUND, message=gettext("Taxonomy item relation not found.")
)
return found_taxonomy_item_relation # is not None because abort raises exception
def _check_if_modifiable(self, relation: TaxonomyItemRelation):
taxonomy_item = relation.taxonomy_item_source
taxonomy = taxonomy_item.taxonomy
if taxonomy.namespace.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Namespace is marked as deleted and cannot be modified further."
),
)
if taxonomy.deleted_on is not None:
# cannot modify deleted namespace!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy is marked as deleted and cannot be modified further."
),
)
if taxonomy_item.deleted_on is not None:
# cannot modify deleted taxonomy item!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy item is marked as deleted and cannot be modified further."
),
)
if relation.deleted_on is not None:
# cannot modify deleted item relation!
abort(
HTTPStatus.CONFLICT,
message=gettext(
"Taxonomy item relation is marked as deleted and cannot be modified further."
),
)
@API_V1.response(DynamicApiResponseSchema(TaxonomyItemRelationSchema()))
def get(
self,
namespace: str,
taxonomy: str,
taxonomy_item: str,
relation: str,
**kwargs: Any
):
"""Get a single relation."""
self._check_path_params(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
)
found_relation = self._get_taxonomy_item_relation(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
)
return ApiResponse(
links=(
*nav_links_for_taxonomy_item_relation(found_relation),
*action_links_for_taxonomy_item_relation(found_relation),
),
data=taxonomy_item_relation_to_taxonomy_item_relation_data(found_relation),
)
@API_V1.response(DynamicApiResponseSchema(ChangedApiObjectSchema()))
def delete(
self,
namespace: str,
taxonomy: str,
taxonomy_item: str,
relation: str,
**kwargs: Any
):
"""Delete an existing relation."""
self._check_path_params(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
)
found_relation = self._get_taxonomy_item_relation(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
)
self._check_if_modifiable(found_relation)
# only actually delete when not already deleted
if found_relation.deleted_on is None:
# delete taxonomy item relation
found_relation.deleted_on = datetime.utcnow()
DB.session.add(found_relation)
DB.session.commit()
relation_link = taxonomy_item_relation_to_taxonomy_item_relation_data(
found_relation
).self
relation_data = taxonomy_item_relation_to_api_response(found_relation)
source_item_link = taxonomy_item_to_api_link(found_relation.taxonomy_item_source)
source_item_data = taxonomy_item_to_api_response(
found_relation.taxonomy_item_source
)
target_item_link = taxonomy_item_to_api_link(found_relation.taxonomy_item_target)
target_item_data = taxonomy_item_to_api_response(
found_relation.taxonomy_item_target
)
return ApiResponse(
links=[relation_link, source_item_link, target_item_link],
embedded=[relation_data, source_item_data, target_item_data],
data=ChangedApiObject(
self=ApiLink(
href=url_for(
"api-v1.TaxonomyItemRelationView",
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
relation=relation,
_external=True,
),
rel=(
"delete",
"ont-taxonomy-item-relation",
),
resource_type="changed",
),
changed=relation_link,
),
)
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/versions/"
)
class TaxonomyItemVersionsView(MethodView):
"""Endpoint for all versions of a taxonomy item."""
def get(self, namespace: str, taxonomy: str, taxonomy_item: str, **kwargs: Any):
"""TODO."""
@API_V1.route(
"/namespaces/<string:namespace>/taxonomies/<string:taxonomy>/items/<string:taxonomy_item>/versions/<string:version>/"
)
class TaxonomyItemVersionView(MethodView):
"""Endpoint for a single version of a taxonomy item."""
def _check_path_params(
self, namespace: str, taxonomy: str, taxonomy_item: str, version: str
):
if not namespace or not namespace.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested namespace id has the wrong format!"),
)
if not taxonomy or not taxonomy.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy id has the wrong format!"),
)
if not taxonomy_item or not taxonomy_item.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext("The requested taxonomy item id has the wrong format!"),
)
if not version or not version.isdigit():
abort(
HTTPStatus.BAD_REQUEST,
message=gettext(
"The requested taxonomy item version has the wrong format!"
),
)
def _get_taxonomy_item_version(
self, namespace: str, taxonomy: str, taxonomy_item: str, version: str
) -> TaxonomyItemVersion:
namespace_id = int(namespace)
taxonomy_id = int(taxonomy)
taxonomy_item_id = int(taxonomy_item)
version_nr = int(version)
found_taxonomy_item_version: Optional[
TaxonomyItemVersion
] = TaxonomyItemVersion.query.filter(
TaxonomyItemVersion.version == version_nr,
TaxonomyItemVersion.taxonomy_item_id == taxonomy_item_id,
).first()
if (
found_taxonomy_item_version is None
or found_taxonomy_item_version.taxonomy_item.taxonomy_id != taxonomy_id
or found_taxonomy_item_version.taxonomy_item.taxonomy.namespace_id
!= namespace_id
):
abort(
HTTPStatus.NOT_FOUND, message=gettext("Taxonomy item version not found.")
)
return found_taxonomy_item_version # is not None because abort raises exception
@API_V1.response(DynamicApiResponseSchema(TaxonomyItemSchema()))
def get(
self,
namespace: str,
taxonomy: str,
taxonomy_item: str,
version: str,
**kwargs: Any
):
"""Get a single taxonomy item version."""
self._check_path_params(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
version=version,
)
found_taxonomy_item_version = self._get_taxonomy_item_version(
namespace=namespace,
taxonomy=taxonomy,
taxonomy_item=taxonomy_item,
version=version,
)
return ApiResponse(
links=[
ApiLink(
href=url_for(
"api-v1.NamespacesView",
_external=True,
**{"item-count": 50},
sort="name",
),
rel=("first", "page", "collection", "nav"),
resource_type="ont-namespace",
schema=url_for(
"api-v1.ApiSchemaView", schema_id="Namespace", _external=True
),
),
*nav_links_for_taxonomy_item_version(found_taxonomy_item_version),
*action_links_for_taxonomy_item_version(found_taxonomy_item_version),
],
data=taxonomy_item_to_taxonomy_item_data(found_taxonomy_item_version),
)
| en | 0.911726 | Module containing the taxonomy items API endpoints of the v1 API. Endpoint for a single taxonomy item. # is not None because abort raises exception # cannot modify deleted namespace! # cannot modify deleted namespace! # cannot modify deleted taxonomy! Get a single taxonomy item. Update a taxonomy item. # restore action Restore a deleted taxonomy item. # only actually restore when not already restored # restore taxonomy item # also restore relations # do not restore relations to deleted items # do not restore relations to deleted items # add changed items to be embedded into the response # restore action Delete a taxonomy item. # only actually delete when not already deleted # delete taxonomy item # also delete incoming and outgoing relations to remove them # from relations of existing items # add changed items to be embedded into the response Endpoint for manipulating taxonomy item relations. # is not None because abort raises exception # cannot modify deleted namespace! # cannot modify deleted namespace! # cannot modify deleted taxonomy! Check for a path from target to source which would form a circular dependency. Abort if such a path is found! # exclude deleted items as targets Create a new relation to a taxonomy item. Endpoint for removing taxonomy item relations. # is not None because abort raises exception # cannot modify deleted namespace! # cannot modify deleted namespace! # cannot modify deleted taxonomy item! # cannot modify deleted item relation! Get a single relation. Delete an existing relation. # only actually delete when not already deleted # delete taxonomy item relation Endpoint for all versions of a taxonomy item. TODO. Endpoint for a single version of a taxonomy item. # is not None because abort raises exception Get a single taxonomy item version. | 1.841306 | 2 |
PythonDAdata/3358OS_06_Code/code6/pd_plotting.py | shijiale0609/Python_Data_Analysis | 1 | 9742 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('transcount.csv')
df = df.groupby('year').aggregate(np.mean)
gpu = pd.read_csv('gpu_transcount.csv')
gpu = gpu.groupby('year').aggregate(np.mean)
df = pd.merge(df, gpu, how='outer', left_index=True, right_index=True)
df = df.replace(np.nan, 0)
df.plot()
df.plot(logy=True)
df[df['gpu_trans_count'] > 0].plot(kind='scatter', x='trans_count', y='gpu_trans_count', loglog=True)
plt.show()
| import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('transcount.csv')
df = df.groupby('year').aggregate(np.mean)
gpu = pd.read_csv('gpu_transcount.csv')
gpu = gpu.groupby('year').aggregate(np.mean)
df = pd.merge(df, gpu, how='outer', left_index=True, right_index=True)
df = df.replace(np.nan, 0)
df.plot()
df.plot(logy=True)
df[df['gpu_trans_count'] > 0].plot(kind='scatter', x='trans_count', y='gpu_trans_count', loglog=True)
plt.show()
| none | 1 | 3.157782 | 3 |
|
source/blog/migrations/0004_postcomments.py | JakubGutowski/PersonalBlog | 0 | 9743 | <filename>source/blog/migrations/0004_postcomments.py
# Generated by Django 2.0.5 on 2018-07-02 19:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_blogpost_author'),
]
operations = [
migrations.CreateModel(
name='PostComments',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nick', models.CharField(max_length=20)),
('comment', models.CharField(max_length=140)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.BlogPost')),
],
),
]
| <filename>source/blog/migrations/0004_postcomments.py
# Generated by Django 2.0.5 on 2018-07-02 19:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_blogpost_author'),
]
operations = [
migrations.CreateModel(
name='PostComments',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nick', models.CharField(max_length=20)),
('comment', models.CharField(max_length=140)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.BlogPost')),
],
),
]
| en | 0.666511 | # Generated by Django 2.0.5 on 2018-07-02 19:46 | 1.517525 | 2 |
submissions/aising2019/a.py | m-star18/atcoder | 1 | 9744 | import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
n = int(readline())
h = int(readline())
w = int(readline())
print((n - h + 1) * (n - w + 1))
| import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
n = int(readline())
h = int(readline())
w = int(readline())
print((n - h + 1) * (n - w + 1))
| none | 1 | 2.719723 | 3 |
|
CreateHalo.py | yoyoberenguer/MultiplayerGameEngine | 4 | 9745 | <filename>CreateHalo.py<gh_stars>1-10
import pygame
from NetworkBroadcast import Broadcast, AnimatedSprite, DeleteSpriteCommand
from Textures import HALO_SPRITE12, HALO_SPRITE14, HALO_SPRITE13
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class PlayerHalo(pygame.sprite.Sprite):
images = []
containers = None
def __init__(self, texture_name_, object_, timing_, layer_=0):
self.layer = layer_
pygame.sprite.Sprite.__init__(self, self.containers)
if isinstance(object_.gl.All, pygame.sprite.LayeredUpdates):
object_.gl.All.change_layer(self, object_.layer)
self.object = object_
if isinstance(self.images, pygame.Surface):
self.images = [self.images] * 30
self.images_copy = self.images.copy()
self.image = self.images_copy[0]
self.rect = self.image.get_rect(center=object_.rect.center)
self.dt = 0
self.index = 0
self.gl = object_.gl
self.length = len(self.images) - 1
self.blend = 0
self.timing = timing_
self.texture_name = texture_name_
self.id_ = id(self)
self.player_halo_object = Broadcast(self.make_object())
def make_object(self) -> AnimatedSprite:
return AnimatedSprite(frame_=self.gl.FRAME, id_=self.id_, surface_=self.texture_name,
layer_=self.layer, blend_=self.blend, rect_=self.rect,
index_=self.index)
def update(self):
if self.dt > self.timing:
if self.object.rect.colliderect(self.gl.SCREENRECT):
self.image = self.images_copy[self.index]
self.rect = self.image.get_rect(center=self.object.rect.center)
self.index += 1
if self.index > self.length:
self.kill()
return
self.dt = 0
self.player_halo_object.update({'frame': self.gl.FRAME,
'rect': self.rect, 'index': self.index})
else:
self.kill()
return
else:
self.dt += self.gl.TIME_PASSED_SECONDS
self.player_halo_object.queue()
class AsteroidHalo(pygame.sprite.Sprite):
images = []
containers = None
def __init__(self, texture_name_, object_, timing_, layer_=0):
self.layer = layer_
pygame.sprite.Sprite.__init__(self, self.containers)
if isinstance(object_.gl.All, pygame.sprite.LayeredUpdates):
object_.gl.All.change_layer(self, object_.layer)
self.object = object_
if isinstance(self.images, pygame.Surface):
self.images = [self.images] * 30
self.images_copy = self.images.copy()
self.image = self.images_copy[0]
if not id(AsteroidHalo.images) == id(eval(texture_name_)):
raise ValueError("Asteroid image does not match with its surface name.")
self.rect = self.image.get_rect(center=object_.rect.center)
self.dt = 0
self.index = 0
self.gl = object_.gl
self.length = len(self.images) - 1
self.blend = 0
self.timing = timing_
self.texture_name = texture_name_
self.id_ = id(self)
self.asteroidHalo_object = Broadcast(self.make_object())
Broadcast.add_object_id(self.id_)
def delete_object(self) -> DeleteSpriteCommand:
"""
Send a command to kill an object on client side.
:return: DetectCollisionSprite object
"""
return DeleteSpriteCommand(frame_=self.gl.FRAME, to_delete_={self.id_: self.texture_name})
def make_object(self) -> AnimatedSprite:
return AnimatedSprite(frame_=self.gl.FRAME, id_=self.id_, surface_=self.texture_name,
layer_=self.layer, blend_=self.blend, rect_=self.rect,
index_=self.index)
def quit(self) -> None:
Broadcast.remove_object_id(self.id_)
obj = Broadcast(self.delete_object())
obj.queue()
self.kill()
def update(self) -> None:
if self.dt > self.timing:
if self.object.rect.colliderect(self.gl.SCREENRECT):
self.image = self.images_copy[self.index]
self.rect = self.image.get_rect(center=self.object.rect.center)
self.index += 1
if self.index > self.length:
self.quit()
return
self.asteroidHalo_object.update(
{'frame': self.gl.FRAME, 'rect': self.rect, 'index': self.index})
self.asteroidHalo_object.queue()
self.dt = 0
else:
self.quit()
return
else:
self.dt += self.gl.TIME_PASSED_SECONDS
| <filename>CreateHalo.py<gh_stars>1-10
import pygame
from NetworkBroadcast import Broadcast, AnimatedSprite, DeleteSpriteCommand
from Textures import HALO_SPRITE12, HALO_SPRITE14, HALO_SPRITE13
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class PlayerHalo(pygame.sprite.Sprite):
images = []
containers = None
def __init__(self, texture_name_, object_, timing_, layer_=0):
self.layer = layer_
pygame.sprite.Sprite.__init__(self, self.containers)
if isinstance(object_.gl.All, pygame.sprite.LayeredUpdates):
object_.gl.All.change_layer(self, object_.layer)
self.object = object_
if isinstance(self.images, pygame.Surface):
self.images = [self.images] * 30
self.images_copy = self.images.copy()
self.image = self.images_copy[0]
self.rect = self.image.get_rect(center=object_.rect.center)
self.dt = 0
self.index = 0
self.gl = object_.gl
self.length = len(self.images) - 1
self.blend = 0
self.timing = timing_
self.texture_name = texture_name_
self.id_ = id(self)
self.player_halo_object = Broadcast(self.make_object())
def make_object(self) -> AnimatedSprite:
return AnimatedSprite(frame_=self.gl.FRAME, id_=self.id_, surface_=self.texture_name,
layer_=self.layer, blend_=self.blend, rect_=self.rect,
index_=self.index)
def update(self):
if self.dt > self.timing:
if self.object.rect.colliderect(self.gl.SCREENRECT):
self.image = self.images_copy[self.index]
self.rect = self.image.get_rect(center=self.object.rect.center)
self.index += 1
if self.index > self.length:
self.kill()
return
self.dt = 0
self.player_halo_object.update({'frame': self.gl.FRAME,
'rect': self.rect, 'index': self.index})
else:
self.kill()
return
else:
self.dt += self.gl.TIME_PASSED_SECONDS
self.player_halo_object.queue()
class AsteroidHalo(pygame.sprite.Sprite):
images = []
containers = None
def __init__(self, texture_name_, object_, timing_, layer_=0):
self.layer = layer_
pygame.sprite.Sprite.__init__(self, self.containers)
if isinstance(object_.gl.All, pygame.sprite.LayeredUpdates):
object_.gl.All.change_layer(self, object_.layer)
self.object = object_
if isinstance(self.images, pygame.Surface):
self.images = [self.images] * 30
self.images_copy = self.images.copy()
self.image = self.images_copy[0]
if not id(AsteroidHalo.images) == id(eval(texture_name_)):
raise ValueError("Asteroid image does not match with its surface name.")
self.rect = self.image.get_rect(center=object_.rect.center)
self.dt = 0
self.index = 0
self.gl = object_.gl
self.length = len(self.images) - 1
self.blend = 0
self.timing = timing_
self.texture_name = texture_name_
self.id_ = id(self)
self.asteroidHalo_object = Broadcast(self.make_object())
Broadcast.add_object_id(self.id_)
def delete_object(self) -> DeleteSpriteCommand:
"""
Send a command to kill an object on client side.
:return: DetectCollisionSprite object
"""
return DeleteSpriteCommand(frame_=self.gl.FRAME, to_delete_={self.id_: self.texture_name})
def make_object(self) -> AnimatedSprite:
return AnimatedSprite(frame_=self.gl.FRAME, id_=self.id_, surface_=self.texture_name,
layer_=self.layer, blend_=self.blend, rect_=self.rect,
index_=self.index)
def quit(self) -> None:
Broadcast.remove_object_id(self.id_)
obj = Broadcast(self.delete_object())
obj.queue()
self.kill()
def update(self) -> None:
if self.dt > self.timing:
if self.object.rect.colliderect(self.gl.SCREENRECT):
self.image = self.images_copy[self.index]
self.rect = self.image.get_rect(center=self.object.rect.center)
self.index += 1
if self.index > self.length:
self.quit()
return
self.asteroidHalo_object.update(
{'frame': self.gl.FRAME, 'rect': self.rect, 'index': self.index})
self.asteroidHalo_object.queue()
self.dt = 0
else:
self.quit()
return
else:
self.dt += self.gl.TIME_PASSED_SECONDS
| en | 0.502474 | Send a command to kill an object on client side.
:return: DetectCollisionSprite object | 2.362422 | 2 |
src/dataops/pandas_db.py | ShizhuZhang/ontask_b | 0 | 9746 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import logging
import os.path
import subprocess
from collections import OrderedDict
from itertools import izip
import numpy as np
import pandas as pd
from django.conf import settings
from django.core.cache import cache
from django.db import connection
from sqlalchemy import create_engine
from dataops.formula_evaluation import evaluate_node_sql
from ontask import fix_pctg_in_name
SITE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
table_prefix = '__ONTASK_WORKFLOW_TABLE_'
df_table_prefix = table_prefix + '{0}'
upload_table_prefix = table_prefix + 'UPLOAD_{0}'
# Query to count the number of rows in a table
query_count_rows = 'SELECT count(*) from "{0}"'
logger = logging.getLogger(__name__)
# Translation between pandas data type names, and those handled in OnTask
pandas_datatype_names = {
'object': 'string',
'int64': 'integer',
'float64': 'double',
'bool': 'boolean',
'datetime64[ns]': 'datetime'
}
# Translation between SQL data type names, and those handled in OnTask
sql_datatype_names = {
'text': 'string',
'bigint': 'integer',
'double precision': 'double',
'boolean': 'boolean',
'timestamp without time zone': 'datetime'
}
# DB Engine to use with Pandas (required by to_sql, from_sql
engine = None
def create_db_connection(dialect, driver, username, password, host, dbname):
"""
Function that creates the engine object to connect to the database. The
object is required by the pandas functions to_sql and from_sql
:param dialect: Dialect for the engine (oracle, mysql, postgresql, etc)
:param driver: DBAPI driver (psycopg2, ...)
:param username: Username to connect with the database
:param password: Password to connect with the database
:param host: Host to connect with the database
:param dbname: database name
:return: the engine
"""
# DB engine
database_url = \
'{dialect}{driver}://{user}:{password}@{host}/{database_name}'.format(
dialect=dialect,
driver=driver,
user=username,
password=password,
host=host,
database_name=dbname,
)
return create_engine(database_url, echo=False, paramstyle='format')
def create_db_engine(dialect, driver, username, password, host, dbname):
"""
Function that creates the engine object to connect to the database. The
object is required by the pandas functions to_sql and from_sql
:param dialect: Dialect for the engine (oracle, mysql, postgresql, etc)
:param driver: DBAPI driver (psycopg2, ...)
:param username: Username to connect with the database
:param password: <PASSWORD>
:param host: Host to connect with the database
:param dbname: database name
:return: the engine
"""
# DB engine
database_url = \
'{dialect}{driver}://{user}:{password}@{host}/{database_name}'.format(
dialect=dialect,
driver=driver,
user=username,
password=password,
host=host,
database_name=dbname,
)
engine = create_db_connection(dialect, driver, username, password, host,
dbname)
if settings.DEBUG:
print('Creating engine with ', database_url)
return engine
def destroy_db_engine(db_engine):
"""
Method that disposes of the given engine (to guarantee there are no
connections available
:param db_engine: Engine to destroy
:return: Nothing
"""
db_engine.dispose()
def pg_restore_table(filename):
"""
Function that given a file produced with a pg_dump, it uploads its
content to the existing database
:param filename: File in pg_dump format to restore
:return:
"""
process = subprocess.Popen(['psql',
'-d',
settings.DATABASES['default']['NAME'],
'-q',
'-f',
filename])
process.wait()
def delete_all_tables():
"""
Delete all tables related to existing workflows
:return:
"""
cursor = connection.cursor()
table_list = connection.introspection.get_table_list(cursor)
for tinfo in table_list:
if not tinfo.name.startswith(table_prefix):
continue
cursor.execute('DROP TABLE "{0}";'.format(tinfo.name))
# To make sure the table is dropped.
connection.commit()
return
def is_table_in_db(table_name):
cursor = connection.cursor()
return next(
(True for x in connection.introspection.get_table_list(cursor)
if x.name == table_name),
False
)
def is_wf_table_in_db(workflow):
return is_table_in_db(create_table_name(workflow.id))
def create_table_name(pk):
"""
:param pk: Primary Key of a workflow
:return: The unique table name to use to store a workflow data frame
"""
return df_table_prefix.format(pk)
def create_upload_table_name(pk):
"""
:param pk: Primary key of a workflow
:return: The unique table to use to upload a new data frame
"""
return upload_table_prefix.format(pk)
def load_from_db(pk, columns=None, filter_exp=None):
"""
Load the data frame stored for the workflow with the pk
:param pk: Primary key of the workflow
:param columns: Optional list of columns to load (all if NOne is given)
:param filter_exp: JSON expression to filter a subset of rows
:return: data frame
"""
return load_table(create_table_name(pk),
columns=columns,
filter_exp=filter_exp)
def load_table(table_name, columns=None, filter_exp=None):
"""
Load a data frame from the SQL DB.
FUTURE WORK:
Consider to store the dataframes in Redis to reduce load/store time.
The trick is to use a compressed format:
SET: redisConn.set("key", df.to_msgpack(compress='zlib'))
GET: pd.read_msgpack(redisConn.get("key"))
Need to agree on a sensible item name that does not collide with anything
else and a policy to detect a cached dataframe and remove it when the data
changes (difficult to detect? Perhaps df_new.equals(df_current))
If feasible, a write-through system could be easily implemented.
    :param table_name: Table name to read from the db into a data frame
    :param columns: Optional list of columns to load (all if None is given)
    :param filter_exp: Optional JSON expression to filter a subset of rows
    :return: data_frame or None if it does not exist.
"""
if table_name not in connection.introspection.table_names():
return None
if settings.DEBUG:
print('Loading table ', table_name)
if columns or filter_exp:
# A list of columns or a filter exp is given
query, params = get_filter_query(table_name, columns, filter_exp)
result = pd.read_sql_query(query, engine, params=params)
else:
# No view given, so simply get the whole table
result = pd.read_sql(table_name, engine)
# After reading from the DB, turn all None into NaN
result.fillna(value=np.nan, inplace=True)
return result
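# Illustrative usage sketch (not part of the original module): loading only a
# couple of columns of the data frame stored for a workflow. The workflow id
# and column names are made up for the example.
def _example_load_from_db():
    return load_from_db(13, columns=['email', 'grade'])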
def load_query(query):
"""
Load a data frame from the SQL DB running the given query.
:param query: Query to run in the DB
:return: data_frame or None if it does not exist.
"""
if settings.DEBUG:
print('Loading query ', query)
result = pd.read_sql_query(query, engine)
# After reading from the DB, turn all None into NaN
result.fillna(value=np.nan, inplace=True)
return result
def load_df_from_csvfile(file, skiprows=0, skipfooter=0):
"""
Given a file object, try to read the content as a CSV file and transform
into a data frame. The skiprows and skipfooter are number of lines to skip
from the top and bottom of the file (see read_csv in pandas).
It also tries to convert as many columns as possible to date/time format
(testing the conversion on every string column).
    :param file: File object to read the CSV content
:param skiprows: Number of lines to skip at the top of the document
:param skipfooter: Number of lines to skip at the bottom of the document
:return: Resulting data frame, or an Exception.
"""
data_frame = pd.read_csv(
file,
index_col=False,
infer_datetime_format=True,
quotechar='"',
skiprows=skiprows,
skipfooter=skipfooter
)
# Strip white space from all string columns and try to convert to
# datetime just in case
for x in list(data_frame.columns):
if data_frame[x].dtype.name == 'object':
# Column is a string! Remove the leading and trailing white
# space
data_frame[x] = data_frame[x].str.strip().fillna(data_frame[x])
# Try the datetime conversion
try:
series = pd.to_datetime(data_frame[x],
infer_datetime_format=True)
# Datetime conversion worked! Update the data_frame
data_frame[x] = series
except (ValueError, TypeError):
pass
return data_frame
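# Illustrative usage sketch (not part of the original module): reading a CSV
# file from disk while skipping a one-line banner at the top. The file name
# is an assumption for the example.
def _example_load_df_from_csvfile():
    with open('students.csv') as csv_file:
        return load_df_from_csvfile(csv_file, skiprows=1, skipfooter=0)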
def load_df_from_sqlconnection(conn_item, pwd=None):
"""
Load a DF from a SQL connection open with the parameters given in conn_item.
:param conn_item: SQLConnection object with the connection parameters.
:return: Data frame or raise an exception.
"""
# Get the connection
db_connection = create_db_connection(conn_item.conn_type,
conn_item.conn_driver,
conn_item.db_user,
pwd,
conn_item.db_host,
conn_item.db_name)
# Try to fetch the data
result = pd.read_sql(conn_item.db_table, db_connection)
# After reading from the DB, turn all None into NaN
result.fillna(value=np.nan, inplace=True)
return result
def store_table(data_frame, table_name):
"""
Store a data frame in the DB
:param data_frame: The data frame to store
:param table_name: The name of the table in the DB
:return: Nothing. Side effect in the DB
"""
with cache.lock(table_name):
        # We overwrite the content and do not create an index
data_frame.to_sql(table_name,
engine,
if_exists='replace',
index=False)
return
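# Illustrative usage sketch (not part of the original module): storing a data
# frame as the table of workflow 13 (id made up) by combining
# create_table_name and store_table. Any previous content is overwritten.
def _example_store_table(data_frame):
    store_table(data_frame, create_table_name(13))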
def delete_table(pk):
"""Delete the table representing the workflow with the given PK. Due to
the dual use of the database, the command has to be executed directly on
the DB.
"""
try:
cursor = connection.cursor()
cursor.execute('DROP TABLE "{0}";'.format(create_table_name(pk)))
connection.commit()
except Exception:
logger.error(
'Error while dropping table {0}'.format(create_table_name(pk))
)
def delete_upload_table(pk):
"""Delete the table used to merge data into the workflow with the given
PK. Due to the dual use of the database, the command has to be executed
directly on the DB.
"""
cursor = connection.cursor()
cursor.execute('DROP TABLE "{0}"'.format(create_upload_table_name(pk)))
connection.commit()
def get_table_column_types(table_name):
"""
:param table_name: Table name
:return: List of pairs (column name, SQL type)
"""
cursor = connection.cursor()
cursor.execute("""select column_name, data_type from
INFORMATION_SCHEMA.COLUMNS where table_name = '{0}'""".format(table_name))
return cursor.fetchall()
def df_column_types_rename(table_name):
"""
    :param table_name: Name of the DB table storing the data frame
:return: List of data type strings translated to the proper values
"""
    # Fetch the SQL types and translate them to the OnTask data type names
    return [sql_datatype_names[x] for __, x in
            get_table_column_types(table_name)]
def df_drop_column(pk, column_name):
"""
Drop a column from the DB table storing a data frame
:param pk: Workflow primary key to obtain table name
:param column_name: Column name
:return: Drops the column from the corresponding DB table
"""
query = 'ALTER TABLE "{0}" DROP COLUMN "{1}"'.format(
create_table_name(pk),
column_name
)
cursor = connection.cursor()
cursor.execute(query)
def get_subframe(pk, cond_filter, column_names=None):
"""
Execute a select query to extract a subset of the dataframe and turn the
resulting query set into a data frame.
:param pk: Workflow primary key
:param cond_filter: Condition object to filter the data (or None)
    :param column_names: Optional list of column names to select
    :return: Pandas data frame with the selected rows and columns
"""
# Get the cursor
cursor = get_table_cursor(pk, cond_filter, column_names)
# Create the DataFrame and set the column names
result = pd.DataFrame.from_records(cursor.fetchall(), coerce_float=True)
result.columns = [c.name for c in cursor.description]
return result
def get_table_cursor(pk, cond_filter, column_names=None):
"""
Execute a select query in the database with an optional filter obtained
from the jquery QueryBuilder.
:param pk: Primary key of the workflow storing the data
:param cond_filter: Condition object to filter the data (or None)
:param column_names: optional list of columns to select
:return: ([list of column names], QuerySet with the data rows)
"""
# Create the query
if column_names:
safe_column_names = [fix_pctg_in_name(x) for x in column_names]
query = 'SELECT "{0}" from "{1}"'.format(
'", "'.join(safe_column_names),
create_table_name(pk)
)
else:
query = 'SELECT * from "{0}"'.format(create_table_name(pk))
# See if the action has a filter or not
fields = []
if cond_filter is not None:
cond_filter, fields = evaluate_node_sql(cond_filter.formula)
if cond_filter:
# The condition may be empty, in which case, nothing is needed.
query += ' WHERE ' + cond_filter
# Execute the query
cursor = connection.cursor()
cursor.execute(query, fields)
return cursor
def get_table_data(pk, cond_filter, column_names=None):
# Get first the cursor
cursor = get_table_cursor(pk, cond_filter, column_names)
# Return the data
return cursor.fetchall()
def execute_select_on_table(pk, fields, values, column_names=None):
"""
Execute a select query in the database with an optional filter obtained
from the jquery QueryBuilder.
:param pk: Primary key of the workflow storing the data
:param fields: List of fields to add to the WHERE clause
:param values: parameters to match the previous fields
:param column_names: optional list of columns to select
:return: QuerySet with the data rows
"""
# Create the query
if column_names:
safe_column_names = ['"' + fix_pctg_in_name(x) + '"'
for x in column_names]
query = 'SELECT {0}'.format(','.join(safe_column_names))
else:
query = 'SELECT *'
# Add the table
query += ' FROM "{0}"'.format(create_table_name(pk))
# See if the action has a filter or not
cursor = connection.cursor()
if fields:
query += ' WHERE ' + \
' AND '.join(['"{0}" = %s'.format(fix_pctg_in_name(x))
for x in fields])
cursor.execute(query, values)
else:
# Execute the query
cursor.execute(query)
# Get the data
return cursor.fetchall()
def get_table_queryset(tablename):
query = 'SELECT * from "{0}";'.format(tablename)
try:
cursor = connection.cursor()
cursor.execute(query)
except Exception:
return None
return cursor.fetchall()
def query_to_dicts(query_string, *query_args):
"""
Run a simple query and produce a generator that returns the results as
a bunch of dictionaries with keys for the column values selected.
"""
cursor = connection.cursor()
cursor.execute(query_string, query_args)
col_names = [desc[0] for desc in cursor.description]
while True:
row = cursor.fetchone()
if row is None:
break
row_dict = OrderedDict(izip(col_names, row))
yield row_dict
return
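# Illustrative usage sketch (not part of the original module): consuming the
# generator returned by query_to_dicts for the table behind workflow 13 (the
# workflow id is made up for the example).
def _example_query_to_dicts():
    query = 'SELECT * FROM "{0}"'.format(create_table_name(13))
    return [row_dict for row_dict in query_to_dicts(query)]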
def update_row(pk, set_fields, set_values, where_fields, where_values):
"""
Given a primary key, pairs (set_field, set_value), and pairs (where_field,
where_value), it updates the row in the table selected with the
list of (where field = where value) with the values in the assignments in
the list of (set_fields, set_values)
:param pk: Primary key to detect workflow
:param set_fields: List of field names to be updated
:param set_values: List of values to update the fields of the previous list
:param where_fields: List of fields used to filter the row in the table
:param where_values: List of values of the previous fields to filter the row
:return: The table in the workflow pointed by PK is modified.
"""
# First part of the query with the table name
query = 'UPDATE "{0}"'.format(create_table_name(pk))
# Add the SET field = value clauses
query += ' SET ' + ', '.join(['"{0}" = %s'.format(fix_pctg_in_name(x))
for x in set_fields])
# And finally add the WHERE clause
query += ' WHERE ' + ' AND '.join(['"{0}" = %s'.format(fix_pctg_in_name(x))
for x in where_fields])
# Concatenate the values as parameters to the query
parameters = set_values + where_values
# Execute the query
cursor = connection.cursor()
cursor.execute(query, parameters)
connection.commit()
def increase_row_integer(pk, set_field, where_field, where_value):
"""
Given a primary key, a field set_field, and a pair (where_field,
where_value), it increases the field in the appropriate row
:param pk: Primary key to detect workflow
:param set_field: name of the field to be increased
:param where_field: Field used to filter the row in the table
:param where_value: Value of the previous field to filter the row
:return: The table in the workflow pointed by PK is modified.
"""
# First part of the query with the table name
query = 'UPDATE "{0}" SET "{1}" = "{1}" + 1 WHERE "{2}" = %s'.format(
create_table_name(pk),
set_field,
where_field
)
# Execute the query
cursor = connection.cursor()
cursor.execute(query, [where_value])
connection.commit()
def get_table_row_by_key(workflow, cond_filter, kv_pair, column_names=None):
"""
Select the set of elements after filtering and with the key=value pair
:param workflow: workflow object to get to the table
:param cond_filter: Condition object to filter the data (or None)
    :param kv_pair: A key=value pair to identify the row. Key is supposed to
be unique.
:param column_names: Optional list of column names to select
:return: A dictionary with the (column_name, value) data or None if the
row has not been found
"""
# Create the query
if column_names:
safe_column_names = [fix_pctg_in_name(x) for x in column_names]
query = 'SELECT "{0}"'.format('", "'.join(safe_column_names))
else:
query = 'SELECT *'
# Add the table
query += ' FROM "{0}"'.format(create_table_name(workflow.id))
# Create the second part of the query setting key=value
query += ' WHERE ("{0}" = %s)'.format(fix_pctg_in_name(kv_pair[0]))
fields = [kv_pair[1]]
# See if the action has a filter or not
if cond_filter is not None:
cond_filter, filter_fields = \
evaluate_node_sql(cond_filter.formula)
query += ' AND (' + cond_filter + ')'
fields = fields + filter_fields
# Execute the query
cursor = connection.cursor()
cursor.execute(query, fields)
# Get the data
qs = cursor.fetchall()
# If there is anything different than one element, return None
if len(qs) != 1:
return None
# Get the only element
qs = qs[0]
# ZIP the values to create a dictionary
return OrderedDict(zip(workflow.get_column_names(), qs))
def get_column_stats_from_df(df_column):
"""
Given a data frame with a single column, return a set of statistics
depending on its type.
:param df_column: data frame with a single column
:return: A dictionary with keys depending on the type of column
    {'min': minimum value (integer, double and datetime),
     'q1': Q1 value (0.25) (integer, double),
     'mean': mean value (integer, double),
     'median': median value (integer, double),
     'q3': Q3 value (0.75) (integer, double),
     'max': maximum value (integer, double and datetime),
     'std': standard deviation (integer, double),
     'counts': value counts (integer, double, string, datetime, Boolean),
     'mode': mode value (integer, double, string, datetime, Boolean)},
    or None if the column has all its values set to NaN
"""
if len(df_column.loc[df_column.notnull()]) == 0:
# The column has no data
return None
# Dictionary to return
result = {
'min': 0,
'q1': 0,
'mean': 0,
'median': 0,
'q3': 0,
'max': 0,
'std': 0,
'mode': None,
'counts': {},
}
data_type = pandas_datatype_names[df_column.dtype.name]
if data_type == 'integer' or data_type == 'double':
quantiles = df_column.quantile([0, .25, .5, .75, 1])
result['min'] = '{0:g}'.format(quantiles[0])
result['q1'] = '{0:g}'.format(quantiles[.25])
result['mean'] = '{0:g}'.format(df_column.mean())
result['median'] = '{0:g}'.format(quantiles[.5])
result['q3'] = '{0:g}'.format(quantiles[.75])
result['max'] = '{0:g}'.format(quantiles[1])
result['std'] = '{0:g}'.format(df_column.std())
result['counts'] = df_column.value_counts().to_dict()
    mode = df_column.mode()
    if len(mode) == 0:
        # No mode available: report a placeholder string instead
        result['mode'] = '--'
    else:
        result['mode'] = mode[0]
return result
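# Illustrative usage sketch (not part of the original module): computing the
# statistics of a single column after loading the workflow data frame. The
# workflow id and column name are assumptions for the example.
def _example_get_column_stats():
    df = load_from_db(13)
    if df is None or 'grade' not in df.columns:
        return None
    return get_column_stats_from_df(df['grade'])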
def get_filter_query(table_name, column_names, filter_exp):
"""
Given a set of columns and a filter expression, return a pair of SQL query
and params to be executed
:param table_name: Table to query
:param column_names: list of columns to consider or None to consider all
:param filter_exp: Text filter expression
:return: (sql query, sql params)
"""
# Create the query
if column_names:
safe_column_names = [fix_pctg_in_name(x) for x in column_names]
query = 'SELECT "{0}"'.format('", "'.join(safe_column_names))
else:
query = 'SELECT *'
# Add the table
query += ' FROM "{0}"'.format(table_name)
# Calculate the first suffix to add to the query
filter_txt = ''
filter_fields = []
if filter_exp:
filter_txt, filter_fields = evaluate_node_sql(filter_exp)
# Build the query so far appending the filter and/or the cv_tuples
if filter_txt:
query += ' WHERE '
fields = []
# If there has been a suffix from the filter, add it.
if filter_txt:
query += filter_txt
if filter_fields:
fields.extend(filter_fields)
return (query, fields)
def search_table_rows(workflow_id,
cv_tuples=None,
any_join=True,
order_col_name=None,
order_asc=True,
column_names=None,
pre_filter=None):
"""
    Select rows where, for every (column, value) pair, the column contains
    the value (as in LIKE %value%). The pairs are combined with OR if
    any_join is True, or with AND otherwise, and the result is ordered by
    the given column and direction (if given)
:param workflow_id: workflow object to get to the table
:param cv_tuples: A column, value, type tuple to search the value in the
column
:param any_join: Boolean encoding if values should be combined with OR (or
AND)
:param order_col_name: Order results by this column
:param order_asc: Order results in ascending values (or descending)
:param column_names: Optional list of column names to select
:param pre_filter: Optional filter condition to pre filter the query set.
the query is built with these terms as requirement AND the cv_tuples.
:return: The resulting query set
"""
# Create the query
if column_names:
safe_column_names = [fix_pctg_in_name(x) for x in column_names]
query = 'SELECT "{0}"'.format('", "'.join(safe_column_names))
else:
query = 'SELECT *'
# Add the table
query += ' FROM "{0}"'.format(create_table_name(workflow_id))
# Calculate the first suffix to add to the query
filter_txt = ''
filter_fields = []
if pre_filter:
filter_txt, filter_fields = evaluate_node_sql(pre_filter)
if cv_tuples:
likes = []
tuple_fields = []
for name, value, data_type in cv_tuples:
# Make sure we escape the name and search as text
name = fix_pctg_in_name(name)
mod_name = '(CAST("{0}" AS TEXT) LIKE %s)'.format(name)
# Create the second part of the query setting column LIKE '%value%'
likes.append(mod_name)
tuple_fields.append('%' + value + '%')
# Combine the search subqueries
if any_join:
tuple_txt = '(' + ' OR '.join(likes) + ')'
else:
tuple_txt = '(' + ' AND '.join(likes) + ')'
# Build the query so far appending the filter and/or the cv_tuples
if filter_txt or cv_tuples:
query += ' WHERE '
fields = []
# If there has been a suffix from the filter, add it.
if filter_txt:
query += filter_txt
fields.extend(filter_fields)
# If there is a pre-filter, the suffix needs to be "AND" with the ones
# just calculated
if filter_txt and cv_tuples:
query += ' AND '
if cv_tuples:
query += tuple_txt
fields.extend(tuple_fields)
# Add the order if needed
if order_col_name:
query += ' ORDER BY "{0}"'.format(fix_pctg_in_name(order_col_name))
if not order_asc:
query += ' DESC'
# Execute the query
cursor = connection.cursor()
cursor.execute(query, fields)
# Get the data
return cursor.fetchall()
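# Illustrative usage sketch (not part of the original module): searching the
# rows of workflow 13 where either of two text columns contains 'smith'.
# Workflow id, column names and search value are made up for the example.
def _example_search_table_rows():
    return search_table_rows(13,
                             cv_tuples=[('email', 'smith', 'string'),
                                        ('surname', 'smith', 'string')],
                             any_join=True,
                             order_col_name='email')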
def delete_table_row_by_key(workflow_id, kv_pair):
"""
Delete the row in the table attached to a workflow with the given key,
value pairs
:param workflow_id: workflow object to get to the table
    :param kv_pair: A key=value pair to identify the row. Key is supposed to
be unique.
:return: Drops that row from the table in the DB
"""
# Create the query
query = 'DELETE FROM "{0}"'.format(create_table_name(workflow_id))
# Create the second part of the query setting key=value
query += ' WHERE ("{0}" = %s)'.format(fix_pctg_in_name(kv_pair[0]))
fields = [kv_pair[1]]
# Execute the query
cursor = connection.cursor()
cursor.execute(query, fields)
def num_rows(pk, cond_filter=None):
"""
Obtain the number of rows of the table storing workflow with given pk
:param pk: Primary key of the table storing the data frame
:param cond_filter: Condition element to filter the query
:return:
"""
return num_rows_by_name(create_table_name(pk), cond_filter)
def num_rows_by_name(table_name, cond_filter=None):
"""
Given a table name, get its number of rows
:param table_name: Table name
:param cond_filter: Condition element used to filter the query
:return: integer
"""
# Initial query with the table name
query = query_count_rows.format(table_name)
fields = []
if cond_filter is not None:
cond_filter, fields = evaluate_node_sql(cond_filter)
query += ' WHERE ' + cond_filter
cursor = connection.cursor()
cursor.execute(query, fields)
return cursor.fetchone()[0]
def check_wf_df(workflow):
"""
Check the consistency between the information stored in the workflow
and the structure of the underlying dataframe
:param workflow: Workflow object
:return: Boolean stating the result of the check. True: Correct.
"""
# Get the df
df = load_from_db(workflow.id)
# Set values in case there is no df
if df is not None:
dfnrows = df.shape[0]
dfncols = df.shape[1]
df_col_names = list(df.columns)
else:
dfnrows = 0
dfncols = 0
df_col_names = []
# Check 1: Number of rows and columns
if workflow.nrows != dfnrows:
return False
if workflow.ncols != dfncols:
return False
# Identical sets of columns
wf_cols = workflow.columns.all()
if [x.name for x in wf_cols] != df_col_names:
return False
# Identical data types
for n1, n2 in zip(wf_cols, df_col_names):
df_dt = pandas_datatype_names[df[n2].dtype.name]
if n1.data_type == 'boolean' and df_dt == 'string':
# This is the case of a column with Boolean and Nulls
continue
if n1.data_type != df_dt:
return False
return True
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import logging
import os.path
import subprocess
from collections import OrderedDict
from itertools import izip
import numpy as np
import pandas as pd
from django.conf import settings
from django.core.cache import cache
from django.db import connection
from sqlalchemy import create_engine
from dataops.formula_evaluation import evaluate_node_sql
from ontask import fix_pctg_in_name
SITE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
table_prefix = '__ONTASK_WORKFLOW_TABLE_'
df_table_prefix = table_prefix + '{0}'
upload_table_prefix = table_prefix + 'UPLOAD_{0}'
# Query to count the number of rows in a table
query_count_rows = 'SELECT count(*) from "{0}"'
logger = logging.getLogger(__name__)
# Translation between pandas data type names, and those handled in OnTask
pandas_datatype_names = {
'object': 'string',
'int64': 'integer',
'float64': 'double',
'bool': 'boolean',
'datetime64[ns]': 'datetime'
}
# Translation between SQL data type names, and those handled in OnTask
sql_datatype_names = {
'text': 'string',
'bigint': 'integer',
'double precision': 'double',
'boolean': 'boolean',
'timestamp without time zone': 'datetime'
}
# DB Engine to use with Pandas (required by to_sql, from_sql
engine = None
def create_db_connection(dialect, driver, username, password, host, dbname):
"""
Function that creates the engine object to connect to the database. The
object is required by the pandas functions to_sql and from_sql
:param dialect: Dialect for the engine (oracle, mysql, postgresql, etc)
:param driver: DBAPI driver (psycopg2, ...)
:param username: Username to connect with the database
:param password: Password to connect with the database
:param host: Host to connect with the database
:param dbname: database name
:return: the engine
"""
# DB engine
database_url = \
'{dialect}{driver}://{user}:{password}@{host}/{database_name}'.format(
dialect=dialect,
driver=driver,
user=username,
password=password,
host=host,
database_name=dbname,
)
return create_engine(database_url, echo=False, paramstyle='format')
def create_db_engine(dialect, driver, username, password, host, dbname):
"""
Function that creates the engine object to connect to the database. The
object is required by the pandas functions to_sql and from_sql
:param dialect: Dialect for the engine (oracle, mysql, postgresql, etc)
:param driver: DBAPI driver (psycopg2, ...)
:param username: Username to connect with the database
    :param password: Password to connect with the database
:param host: Host to connect with the database
:param dbname: database name
:return: the engine
"""
# DB engine
database_url = \
'{dialect}{driver}://{user}:{password}@{host}/{database_name}'.format(
dialect=dialect,
driver=driver,
user=username,
password=password,
host=host,
database_name=dbname,
)
engine = create_db_connection(dialect, driver, username, password, host,
dbname)
if settings.DEBUG:
print('Creating engine with ', database_url)
return engine
def destroy_db_engine(db_engine):
"""
Method that disposes of the given engine (to guarantee there are no
    connections available)
:param db_engine: Engine to destroy
:return: Nothing
"""
db_engine.dispose()
def pg_restore_table(filename):
"""
    Given a file produced with pg_dump, upload its content to the existing
    database
:param filename: File in pg_dump format to restore
:return:
"""
process = subprocess.Popen(['psql',
'-d',
settings.DATABASES['default']['NAME'],
'-q',
'-f',
filename])
process.wait()
def delete_all_tables():
"""
Delete all tables related to existing workflows
:return:
"""
cursor = connection.cursor()
table_list = connection.introspection.get_table_list(cursor)
for tinfo in table_list:
if not tinfo.name.startswith(table_prefix):
continue
cursor.execute('DROP TABLE "{0}";'.format(tinfo.name))
# To make sure the table is dropped.
connection.commit()
return
def is_table_in_db(table_name):
cursor = connection.cursor()
return next(
(True for x in connection.introspection.get_table_list(cursor)
if x.name == table_name),
False
)
def is_wf_table_in_db(workflow):
return is_table_in_db(create_table_name(workflow.id))
def create_table_name(pk):
"""
:param pk: Primary Key of a workflow
:return: The unique table name to use to store a workflow data frame
"""
return df_table_prefix.format(pk)
def create_upload_table_name(pk):
"""
:param pk: Primary key of a workflow
:return: The unique table to use to upload a new data frame
"""
return upload_table_prefix.format(pk)
def load_from_db(pk, columns=None, filter_exp=None):
"""
Load the data frame stored for the workflow with the pk
:param pk: Primary key of the workflow
    :param columns: Optional list of columns to load (all if None is given)
:param filter_exp: JSON expression to filter a subset of rows
:return: data frame
"""
return load_table(create_table_name(pk),
columns=columns,
filter_exp=filter_exp)
def load_table(table_name, columns=None, filter_exp=None):
"""
Load a data frame from the SQL DB.
FUTURE WORK:
    Consider storing the dataframes in Redis to reduce load/store time.
The trick is to use a compressed format:
SET: redisConn.set("key", df.to_msgpack(compress='zlib'))
GET: pd.read_msgpack(redisConn.get("key"))
Need to agree on a sensible item name that does not collide with anything
else and a policy to detect a cached dataframe and remove it when the data
changes (difficult to detect? Perhaps df_new.equals(df_current))
If feasible, a write-through system could be easily implemented.
    :param table_name: Table name to read from the db into a data frame
    :param columns: Optional list of columns to load (all if None is given)
    :param filter_exp: Optional JSON expression to filter a subset of rows
    :return: data_frame or None if it does not exist.
"""
if table_name not in connection.introspection.table_names():
return None
if settings.DEBUG:
print('Loading table ', table_name)
if columns or filter_exp:
# A list of columns or a filter exp is given
query, params = get_filter_query(table_name, columns, filter_exp)
result = pd.read_sql_query(query, engine, params=params)
else:
# No view given, so simply get the whole table
result = pd.read_sql(table_name, engine)
# After reading from the DB, turn all None into NaN
result.fillna(value=np.nan, inplace=True)
return result
def load_query(query):
"""
Load a data frame from the SQL DB running the given query.
:param query: Query to run in the DB
:return: data_frame or None if it does not exist.
"""
if settings.DEBUG:
print('Loading query ', query)
result = pd.read_sql_query(query, engine)
# After reading from the DB, turn all None into NaN
result.fillna(value=np.nan, inplace=True)
return result
def load_df_from_csvfile(file, skiprows=0, skipfooter=0):
"""
Given a file object, try to read the content as a CSV file and transform
into a data frame. The skiprows and skipfooter are number of lines to skip
from the top and bottom of the file (see read_csv in pandas).
It also tries to convert as many columns as possible to date/time format
(testing the conversion on every string column).
    :param file: File object to read the CSV content
:param skiprows: Number of lines to skip at the top of the document
:param skipfooter: Number of lines to skip at the bottom of the document
:return: Resulting data frame, or an Exception.
"""
data_frame = pd.read_csv(
file,
index_col=False,
infer_datetime_format=True,
quotechar='"',
skiprows=skiprows,
skipfooter=skipfooter
)
# Strip white space from all string columns and try to convert to
# datetime just in case
for x in list(data_frame.columns):
if data_frame[x].dtype.name == 'object':
# Column is a string! Remove the leading and trailing white
# space
data_frame[x] = data_frame[x].str.strip().fillna(data_frame[x])
# Try the datetime conversion
try:
series = pd.to_datetime(data_frame[x],
infer_datetime_format=True)
# Datetime conversion worked! Update the data_frame
data_frame[x] = series
except (ValueError, TypeError):
pass
return data_frame
def load_df_from_sqlconnection(conn_item, pwd=None):
"""
Load a DF from a SQL connection open with the parameters given in conn_item.
:param conn_item: SQLConnection object with the connection parameters.
:return: Data frame or raise an exception.
"""
# Get the connection
db_connection = create_db_connection(conn_item.conn_type,
conn_item.conn_driver,
conn_item.db_user,
pwd,
conn_item.db_host,
conn_item.db_name)
# Try to fetch the data
result = pd.read_sql(conn_item.db_table, db_connection)
# After reading from the DB, turn all None into NaN
result.fillna(value=np.nan, inplace=True)
return result
def store_table(data_frame, table_name):
"""
Store a data frame in the DB
:param data_frame: The data frame to store
:param table_name: The name of the table in the DB
:return: Nothing. Side effect in the DB
"""
with cache.lock(table_name):
        # We overwrite the content and do not create an index
data_frame.to_sql(table_name,
engine,
if_exists='replace',
index=False)
return
def delete_table(pk):
"""Delete the table representing the workflow with the given PK. Due to
the dual use of the database, the command has to be executed directly on
the DB.
"""
try:
cursor = connection.cursor()
cursor.execute('DROP TABLE "{0}";'.format(create_table_name(pk)))
connection.commit()
except Exception:
logger.error(
'Error while dropping table {0}'.format(create_table_name(pk))
)
def delete_upload_table(pk):
"""Delete the table used to merge data into the workflow with the given
PK. Due to the dual use of the database, the command has to be executed
directly on the DB.
"""
cursor = connection.cursor()
cursor.execute('DROP TABLE "{0}"'.format(create_upload_table_name(pk)))
connection.commit()
def get_table_column_types(table_name):
"""
:param table_name: Table name
:return: List of pairs (column name, SQL type)
"""
cursor = connection.cursor()
cursor.execute("""select column_name, data_type from
INFORMATION_SCHEMA.COLUMNS where table_name = '{0}'""".format(table_name))
return cursor.fetchall()
def df_column_types_rename(table_name):
"""
    :param table_name: Name of the DB table storing the data frame
:return: List of data type strings translated to the proper values
"""
    # Fetch the SQL types and translate them to the OnTask data type names
    return [sql_datatype_names[x] for __, x in
            get_table_column_types(table_name)]
def df_drop_column(pk, column_name):
"""
Drop a column from the DB table storing a data frame
:param pk: Workflow primary key to obtain table name
:param column_name: Column name
:return: Drops the column from the corresponding DB table
"""
query = 'ALTER TABLE "{0}" DROP COLUMN "{1}"'.format(
create_table_name(pk),
column_name
)
cursor = connection.cursor()
cursor.execute(query)
def get_subframe(pk, cond_filter, column_names=None):
"""
Execute a select query to extract a subset of the dataframe and turn the
resulting query set into a data frame.
:param pk: Workflow primary key
:param cond_filter: Condition object to filter the data (or None)
    :param column_names: Optional list of column names to select
    :return: Pandas data frame with the selected rows and columns
"""
# Get the cursor
cursor = get_table_cursor(pk, cond_filter, column_names)
# Create the DataFrame and set the column names
result = pd.DataFrame.from_records(cursor.fetchall(), coerce_float=True)
result.columns = [c.name for c in cursor.description]
return result
def get_table_cursor(pk, cond_filter, column_names=None):
"""
Execute a select query in the database with an optional filter obtained
from the jquery QueryBuilder.
:param pk: Primary key of the workflow storing the data
:param cond_filter: Condition object to filter the data (or None)
:param column_names: optional list of columns to select
:return: ([list of column names], QuerySet with the data rows)
"""
# Create the query
if column_names:
safe_column_names = [fix_pctg_in_name(x) for x in column_names]
query = 'SELECT "{0}" from "{1}"'.format(
'", "'.join(safe_column_names),
create_table_name(pk)
)
else:
query = 'SELECT * from "{0}"'.format(create_table_name(pk))
# See if the action has a filter or not
fields = []
if cond_filter is not None:
cond_filter, fields = evaluate_node_sql(cond_filter.formula)
if cond_filter:
# The condition may be empty, in which case, nothing is needed.
query += ' WHERE ' + cond_filter
# Execute the query
cursor = connection.cursor()
cursor.execute(query, fields)
return cursor
def get_table_data(pk, cond_filter, column_names=None):
# Get first the cursor
cursor = get_table_cursor(pk, cond_filter, column_names)
# Return the data
return cursor.fetchall()
def execute_select_on_table(pk, fields, values, column_names=None):
"""
Execute a select query in the database with an optional filter obtained
from the jquery QueryBuilder.
:param pk: Primary key of the workflow storing the data
:param fields: List of fields to add to the WHERE clause
:param values: parameters to match the previous fields
:param column_names: optional list of columns to select
:return: QuerySet with the data rows
"""
# Create the query
if column_names:
safe_column_names = ['"' + fix_pctg_in_name(x) + '"'
for x in column_names]
query = 'SELECT {0}'.format(','.join(safe_column_names))
else:
query = 'SELECT *'
# Add the table
query += ' FROM "{0}"'.format(create_table_name(pk))
# See if the action has a filter or not
cursor = connection.cursor()
if fields:
query += ' WHERE ' + \
' AND '.join(['"{0}" = %s'.format(fix_pctg_in_name(x))
for x in fields])
cursor.execute(query, values)
else:
# Execute the query
cursor.execute(query)
# Get the data
return cursor.fetchall()
def get_table_queryset(tablename):
query = 'SELECT * from "{0}";'.format(tablename)
try:
cursor = connection.cursor()
cursor.execute(query)
except Exception:
return None
return cursor.fetchall()
def query_to_dicts(query_string, *query_args):
"""
Run a simple query and produce a generator that returns the results as
a bunch of dictionaries with keys for the column values selected.
"""
cursor = connection.cursor()
cursor.execute(query_string, query_args)
col_names = [desc[0] for desc in cursor.description]
while True:
row = cursor.fetchone()
if row is None:
break
row_dict = OrderedDict(izip(col_names, row))
yield row_dict
return
def update_row(pk, set_fields, set_values, where_fields, where_values):
"""
Given a primary key, pairs (set_field, set_value), and pairs (where_field,
where_value), it updates the row in the table selected with the
list of (where field = where value) with the values in the assignments in
the list of (set_fields, set_values)
:param pk: Primary key to detect workflow
:param set_fields: List of field names to be updated
:param set_values: List of values to update the fields of the previous list
:param where_fields: List of fields used to filter the row in the table
:param where_values: List of values of the previous fields to filter the row
:return: The table in the workflow pointed by PK is modified.
"""
# First part of the query with the table name
query = 'UPDATE "{0}"'.format(create_table_name(pk))
# Add the SET field = value clauses
query += ' SET ' + ', '.join(['"{0}" = %s'.format(fix_pctg_in_name(x))
for x in set_fields])
# And finally add the WHERE clause
query += ' WHERE ' + ' AND '.join(['"{0}" = %s'.format(fix_pctg_in_name(x))
for x in where_fields])
# Concatenate the values as parameters to the query
parameters = set_values + where_values
# Execute the query
cursor = connection.cursor()
cursor.execute(query, parameters)
connection.commit()
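# Illustrative usage sketch (not part of the original module): updating one
# cell of the row identified by its key column. Workflow id, field names and
# values are placeholders.
def _example_update_row():
    update_row(13,
               set_fields=['grade'],
               set_values=[10],
               where_fields=['email'],
               where_values=['student@example.com'])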
def increase_row_integer(pk, set_field, where_field, where_value):
"""
Given a primary key, a field set_field, and a pair (where_field,
where_value), it increases the field in the appropriate row
:param pk: Primary key to detect workflow
:param set_field: name of the field to be increased
:param where_field: Field used to filter the row in the table
:param where_value: Value of the previous field to filter the row
:return: The table in the workflow pointed by PK is modified.
"""
# First part of the query with the table name
query = 'UPDATE "{0}" SET "{1}" = "{1}" + 1 WHERE "{2}" = %s'.format(
create_table_name(pk),
set_field,
where_field
)
# Execute the query
cursor = connection.cursor()
cursor.execute(query, [where_value])
connection.commit()
def get_table_row_by_key(workflow, cond_filter, kv_pair, column_names=None):
"""
Select the set of elements after filtering and with the key=value pair
:param workflow: workflow object to get to the table
:param cond_filter: Condition object to filter the data (or None)
    :param kv_pair: A key=value pair to identify the row. Key is supposed to
be unique.
:param column_names: Optional list of column names to select
:return: A dictionary with the (column_name, value) data or None if the
row has not been found
"""
# Create the query
if column_names:
safe_column_names = [fix_pctg_in_name(x) for x in column_names]
query = 'SELECT "{0}"'.format('", "'.join(safe_column_names))
else:
query = 'SELECT *'
# Add the table
query += ' FROM "{0}"'.format(create_table_name(workflow.id))
# Create the second part of the query setting key=value
query += ' WHERE ("{0}" = %s)'.format(fix_pctg_in_name(kv_pair[0]))
fields = [kv_pair[1]]
# See if the action has a filter or not
if cond_filter is not None:
cond_filter, filter_fields = \
evaluate_node_sql(cond_filter.formula)
query += ' AND (' + cond_filter + ')'
fields = fields + filter_fields
# Execute the query
cursor = connection.cursor()
cursor.execute(query, fields)
# Get the data
qs = cursor.fetchall()
# If there is anything different than one element, return None
if len(qs) != 1:
return None
# Get the only element
qs = qs[0]
# ZIP the values to create a dictionary
return OrderedDict(zip(workflow.get_column_names(), qs))
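# Illustrative usage sketch (not part of the original module): fetching a
# single row of a workflow table through its key column. The column name and
# key value are assumptions for the example.
def _example_get_table_row_by_key(workflow):
    return get_table_row_by_key(workflow,
                                None,
                                ('email', 'student@example.com'))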
def get_column_stats_from_df(df_column):
"""
Given a data frame with a single column, return a set of statistics
depending on its type.
:param df_column: data frame with a single column
:return: A dictionary with keys depending on the type of column
    {'min': minimum value (integer, double and datetime),
     'q1': Q1 value (0.25) (integer, double),
     'mean': mean value (integer, double),
     'median': median value (integer, double),
     'q3': Q3 value (0.75) (integer, double),
     'max': maximum value (integer, double and datetime),
     'std': standard deviation (integer, double),
     'counts': value counts (integer, double, string, datetime, Boolean),
     'mode': mode value (integer, double, string, datetime, Boolean)},
    or None if the column has all its values set to NaN
"""
if len(df_column.loc[df_column.notnull()]) == 0:
# The column has no data
return None
# Dictionary to return
result = {
'min': 0,
'q1': 0,
'mean': 0,
'median': 0,
'q3': 0,
'max': 0,
'std': 0,
'mode': None,
'counts': {},
}
data_type = pandas_datatype_names[df_column.dtype.name]
if data_type == 'integer' or data_type == 'double':
quantiles = df_column.quantile([0, .25, .5, .75, 1])
result['min'] = '{0:g}'.format(quantiles[0])
result['q1'] = '{0:g}'.format(quantiles[.25])
result['mean'] = '{0:g}'.format(df_column.mean())
result['median'] = '{0:g}'.format(quantiles[.5])
result['q3'] = '{0:g}'.format(quantiles[.75])
result['max'] = '{0:g}'.format(quantiles[1])
result['std'] = '{0:g}'.format(df_column.std())
result['counts'] = df_column.value_counts().to_dict()
    mode = df_column.mode()
    if len(mode) == 0:
        # No mode available: report a placeholder string instead
        result['mode'] = '--'
    else:
        result['mode'] = mode[0]
return result
def get_filter_query(table_name, column_names, filter_exp):
"""
Given a set of columns and a filter expression, return a pair of SQL query
and params to be executed
:param table_name: Table to query
:param column_names: list of columns to consider or None to consider all
:param filter_exp: Text filter expression
:return: (sql query, sql params)
"""
# Create the query
if column_names:
safe_column_names = [fix_pctg_in_name(x) for x in column_names]
query = 'SELECT "{0}"'.format('", "'.join(safe_column_names))
else:
query = 'SELECT *'
# Add the table
query += ' FROM "{0}"'.format(table_name)
# Calculate the first suffix to add to the query
filter_txt = ''
filter_fields = []
if filter_exp:
filter_txt, filter_fields = evaluate_node_sql(filter_exp)
# Build the query so far appending the filter and/or the cv_tuples
if filter_txt:
query += ' WHERE '
fields = []
# If there has been a suffix from the filter, add it.
if filter_txt:
query += filter_txt
if filter_fields:
fields.extend(filter_fields)
return (query, fields)
def search_table_rows(workflow_id,
cv_tuples=None,
any_join=True,
order_col_name=None,
order_asc=True,
column_names=None,
pre_filter=None):
"""
    Select rows where, for every (column, value) pair, the column contains
    the value (as in LIKE %value%). The pairs are combined with OR if
    any_join is True, or with AND otherwise, and the result is ordered by
    the given column and direction (if given)
:param workflow_id: workflow object to get to the table
:param cv_tuples: A column, value, type tuple to search the value in the
column
:param any_join: Boolean encoding if values should be combined with OR (or
AND)
:param order_col_name: Order results by this column
:param order_asc: Order results in ascending values (or descending)
:param column_names: Optional list of column names to select
:param pre_filter: Optional filter condition to pre filter the query set.
the query is built with these terms as requirement AND the cv_tuples.
:return: The resulting query set
"""
# Create the query
if column_names:
safe_column_names = [fix_pctg_in_name(x) for x in column_names]
query = 'SELECT "{0}"'.format('", "'.join(safe_column_names))
else:
query = 'SELECT *'
# Add the table
query += ' FROM "{0}"'.format(create_table_name(workflow_id))
# Calculate the first suffix to add to the query
filter_txt = ''
filter_fields = []
if pre_filter:
filter_txt, filter_fields = evaluate_node_sql(pre_filter)
if cv_tuples:
likes = []
tuple_fields = []
for name, value, data_type in cv_tuples:
# Make sure we escape the name and search as text
name = fix_pctg_in_name(name)
mod_name = '(CAST("{0}" AS TEXT) LIKE %s)'.format(name)
# Create the second part of the query setting column LIKE '%value%'
likes.append(mod_name)
tuple_fields.append('%' + value + '%')
# Combine the search subqueries
if any_join:
tuple_txt = '(' + ' OR '.join(likes) + ')'
else:
tuple_txt = '(' + ' AND '.join(likes) + ')'
# Build the query so far appending the filter and/or the cv_tuples
if filter_txt or cv_tuples:
query += ' WHERE '
fields = []
# If there has been a suffix from the filter, add it.
if filter_txt:
query += filter_txt
fields.extend(filter_fields)
# If there is a pre-filter, the suffix needs to be "AND" with the ones
# just calculated
if filter_txt and cv_tuples:
query += ' AND '
if cv_tuples:
query += tuple_txt
fields.extend(tuple_fields)
# Add the order if needed
if order_col_name:
query += ' ORDER BY "{0}"'.format(fix_pctg_in_name(order_col_name))
if not order_asc:
query += ' DESC'
# Execute the query
cursor = connection.cursor()
cursor.execute(query, fields)
# Get the data
return cursor.fetchall()
def delete_table_row_by_key(workflow_id, kv_pair):
"""
Delete the row in the table attached to a workflow with the given key,
value pairs
:param workflow_id: workflow object to get to the table
    :param kv_pair: A key=value pair to identify the row. Key is supposed to
be unique.
:return: Drops that row from the table in the DB
"""
# Create the query
query = 'DELETE FROM "{0}"'.format(create_table_name(workflow_id))
# Create the second part of the query setting key=value
query += ' WHERE ("{0}" = %s)'.format(fix_pctg_in_name(kv_pair[0]))
fields = [kv_pair[1]]
# Execute the query
cursor = connection.cursor()
cursor.execute(query, fields)
def num_rows(pk, cond_filter=None):
"""
Obtain the number of rows of the table storing workflow with given pk
:param pk: Primary key of the table storing the data frame
:param cond_filter: Condition element to filter the query
:return:
"""
return num_rows_by_name(create_table_name(pk), cond_filter)
def num_rows_by_name(table_name, cond_filter=None):
"""
Given a table name, get its number of rows
:param table_name: Table name
:param cond_filter: Condition element used to filter the query
:return: integer
"""
# Initial query with the table name
query = query_count_rows.format(table_name)
fields = []
if cond_filter is not None:
cond_filter, fields = evaluate_node_sql(cond_filter)
query += ' WHERE ' + cond_filter
cursor = connection.cursor()
cursor.execute(query, fields)
return cursor.fetchone()[0]
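# Illustrative usage sketch (not part of the original module): counting the
# rows of the table behind workflow 13 (id made up) without any filter.
def _example_num_rows():
    return num_rows(13)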
def check_wf_df(workflow):
"""
Check the consistency between the information stored in the workflow
and the structure of the underlying dataframe
:param workflow: Workflow object
:return: Boolean stating the result of the check. True: Correct.
"""
# Get the df
df = load_from_db(workflow.id)
# Set values in case there is no df
if df is not None:
dfnrows = df.shape[0]
dfncols = df.shape[1]
df_col_names = list(df.columns)
else:
dfnrows = 0
dfncols = 0
df_col_names = []
# Check 1: Number of rows and columns
if workflow.nrows != dfnrows:
return False
if workflow.ncols != dfncols:
return False
# Identical sets of columns
wf_cols = workflow.columns.all()
if [x.name for x in wf_cols] != df_col_names:
return False
# Identical data types
for n1, n2 in zip(wf_cols, df_col_names):
df_dt = pandas_datatype_names[df[n2].dtype.name]
if n1.data_type == 'boolean' and df_dt == 'string':
# This is the case of a column with Boolean and Nulls
continue
if n1.data_type != df_dt:
return False
return True
| en | 0.783379 | # -*- coding: utf-8 -*- # Query to count the number of rows in a table # Translation between pandas data type names, and those handled in OnTask # Translation between SQL data type names, and those handled in OnTask # DB Engine to use with Pandas (required by to_sql, from_sql Function that creates the engine object to connect to the database. The object is required by the pandas functions to_sql and from_sql :param dialect: Dialect for the engine (oracle, mysql, postgresql, etc) :param driver: DBAPI driver (psycopg2, ...) :param username: Username to connect with the database :param password: Password to connect with the database :param host: Host to connect with the database :param dbname: database name :return: the engine # DB engine Function that creates the engine object to connect to the database. The object is required by the pandas functions to_sql and from_sql :param dialect: Dialect for the engine (oracle, mysql, postgresql, etc) :param driver: DBAPI driver (psycopg2, ...) :param username: Username to connect with the database :param password: <PASSWORD> :param host: Host to connect with the database :param dbname: database name :return: the engine # DB engine Method that disposes of the given engine (to guarantee there are no connections available :param db_engine: Engine to destroy :return: Nothing Function that given a file produced with a pg_dump, it uploads its content to the existing database :param filename: File in pg_dump format to restore :return: Delete all tables related to existing workflows :return: # To make sure the table is dropped. :param pk: Primary Key of a workflow :return: The unique table name to use to store a workflow data frame :param pk: Primary key of a workflow :return: The unique table to use to upload a new data frame Load the data frame stored for the workflow with the pk :param pk: Primary key of the workflow :param columns: Optional list of columns to load (all if NOne is given) :param filter_exp: JSON expression to filter a subset of rows :return: data frame Load a data frame from the SQL DB. FUTURE WORK: Consider to store the dataframes in Redis to reduce load/store time. The trick is to use a compressed format: SET: redisConn.set("key", df.to_msgpack(compress='zlib')) GET: pd.read_msgpack(redisConn.get("key")) Need to agree on a sensible item name that does not collide with anything else and a policy to detect a cached dataframe and remove it when the data changes (difficult to detect? Perhaps df_new.equals(df_current)) If feasible, a write-through system could be easily implemented. :param table_name: Table name to read from the db in to data frame :param view: Optional view object to restrict access to the DB :return: data_frame or None if it does not exist. # A list of columns or a filter exp is given # No view given, so simply get the whole table # After reading from the DB, turn all None into NaN Load a data frame from the SQL DB running the given query. :param query: Query to run in the DB :return: data_frame or None if it does not exist. # After reading from the DB, turn all None into NaN Given a file object, try to read the content as a CSV file and transform into a data frame. The skiprows and skipfooter are number of lines to skip from the top and bottom of the file (see read_csv in pandas). It also tries to convert as many columns as possible to date/time format (testing the conversion on every string column). 
:param filename: File object to read the CSV content :param skiprows: Number of lines to skip at the top of the document :param skipfooter: Number of lines to skip at the bottom of the document :return: Resulting data frame, or an Exception. # Strip white space from all string columns and try to convert to # datetime just in case # Column is a string! Remove the leading and trailing white # space # Try the datetime conversion # Datetime conversion worked! Update the data_frame Load a DF from a SQL connection open with the parameters given in conn_item. :param conn_item: SQLConnection object with the connection parameters. :return: Data frame or raise an exception. # Get the connection # Try to fetch the data # After reading from the DB, turn all None into NaN Store a data frame in the DB :param data_frame: The data frame to store :param table_name: The name of the table in the DB :return: Nothing. Side effect in the DB # We ovewrite the content and do not create an index Delete the table representing the workflow with the given PK. Due to the dual use of the database, the command has to be executed directly on the DB. Delete the table used to merge data into the workflow with the given PK. Due to the dual use of the database, the command has to be executed directly on the DB. :param table_name: Table name :return: List of pairs (column name, SQL type) select column_name, data_type from INFORMATION_SCHEMA.COLUMNS where table_name = '{0}' :param table_name: Primary key of the workflow containing this data frame (table) :return: List of data type strings translated to the proper values # result = [table_name[x].dtype.name for x in list(table_name.columns)] # for tname, ntname in pandas_datatype_names.items(): # result[:] = [x if x != tname else ntname for x in result] Drop a column from the DB table storing a data frame :param pk: Workflow primary key to obtain table name :param column_name: Column name :return: Drops the column from the corresponding DB table Execute a select query to extract a subset of the dataframe and turn the resulting query set into a data frame. :param pk: Workflow primary key :param cond_filter: Condition object to filter the data (or None) :param column_names: [list of column names], QuerySet with the data rows :return: # Get the cursor # Create the DataFrame and set the column names Execute a select query in the database with an optional filter obtained from the jquery QueryBuilder. :param pk: Primary key of the workflow storing the data :param cond_filter: Condition object to filter the data (or None) :param column_names: optional list of columns to select :return: ([list of column names], QuerySet with the data rows) # Create the query # See if the action has a filter or not # The condition may be empty, in which case, nothing is needed. # Execute the query # Get first the cursor # Return the data Execute a select query in the database with an optional filter obtained from the jquery QueryBuilder. :param pk: Primary key of the workflow storing the data :param fields: List of fields to add to the WHERE clause :param values: parameters to match the previous fields :param column_names: optional list of columns to select :return: QuerySet with the data rows # Create the query # Add the table # See if the action has a filter or not # Execute the query # Get the data Run a simple query and produce a generator that returns the results as a bunch of dictionaries with keys for the column values selected. 
Given a primary key, pairs (set_field, set_value), and pairs (where_field, where_value), it updates the row in the table selected with the list of (where field = where value) with the values in the assignments in the list of (set_fields, set_values) :param pk: Primary key to detect workflow :param set_fields: List of field names to be updated :param set_values: List of values to update the fields of the previous list :param where_fields: List of fields used to filter the row in the table :param where_values: List of values of the previous fields to filter the row :return: The table in the workflow pointed by PK is modified. # First part of the query with the table name # Add the SET field = value clauses # And finally add the WHERE clause # Concatenate the values as parameters to the query # Execute the query Given a primary key, a field set_field, and a pair (where_field, where_value), it increases the field in the appropriate row :param pk: Primary key to detect workflow :param set_field: name of the field to be increased :param where_field: Field used to filter the row in the table :param where_value: Value of the previous field to filter the row :return: The table in the workflow pointed by PK is modified. # First part of the query with the table name # Execute the query Select the set of elements after filtering and with the key=value pair :param workflow: workflow object to get to the table :param cond_filter: Condition object to filter the data (or None) :param kv_pair: A key=value pair to identify the row. Key is suppose to be unique. :param column_names: Optional list of column names to select :return: A dictionary with the (column_name, value) data or None if the row has not been found # Create the query # Add the table # Create the second part of the query setting key=value # See if the action has a filter or not # Execute the query # Get the data # If there is anything different than one element, return None # Get the only element # ZIP the values to create a dictionary Given a data frame with a single column, return a set of statistics depending on its type. :param df_column: data frame with a single column :return: A dictionary with keys depending on the type of column {'min': minimum value (integer, double an datetime), 'q1': Q1 value (0.25) (integer, double), 'mean': mean value (integer, double), 'median': median value (integer, double), 'mean': mean value (integer, double), 'q3': Q3 value (0.75) (integer, double), 'max': maximum value (integer, double an datetime), 'std': standard deviation (integer, double), 'counts': (integer, double, string, datetime, Boolean', 'mode': (integer, double, string, datetime, Boolean, or None if the column has all its values to NaN # The column has no data # Dictionary to return Given a set of columns and a filter expression, return a pair of SQL query and params to be executed :param table_name: Table to query :param column_names: list of columns to consider or None to consider all :param filter_exp: Text filter expression :return: (sql query, sql params) # Create the query # Add the table # Calculate the first suffix to add to the query # Build the query so far appending the filter and/or the cv_tuples # If there has been a suffix from the filter, add it. 
Select rows where for every (column, value) pair, column contains value ( as in LIKE %value%, these are combined with OR if any is TRUE, or AND if any is false, and the result is ordered by the given column and type (if given) :param workflow_id: workflow object to get to the table :param cv_tuples: A column, value, type tuple to search the value in the column :param any_join: Boolean encoding if values should be combined with OR (or AND) :param order_col_name: Order results by this column :param order_asc: Order results in ascending values (or descending) :param column_names: Optional list of column names to select :param pre_filter: Optional filter condition to pre filter the query set. the query is built with these terms as requirement AND the cv_tuples. :return: The resulting query set # Create the query # Add the table # Calculate the first suffix to add to the query # Make sure we escape the name and search as text # Create the second part of the query setting column LIKE '%value%' # Combine the search subqueries # Build the query so far appending the filter and/or the cv_tuples # If there has been a suffix from the filter, add it. # If there is a pre-filter, the suffix needs to be "AND" with the ones # just calculated # Add the order if needed # Execute the query # Get the data Delete the row in the table attached to a workflow with the given key, value pairs :param workflow_id: workflow object to get to the table :param kv_pair: A key=value pair to identify the row. Key is suppose to be unique. :return: Drops that row from the table in the DB # Create the query # Create the second part of the query setting key=value # Execute the query Obtain the number of rows of the table storing workflow with given pk :param pk: Primary key of the table storing the data frame :param cond_filter: Condition element to filter the query :return: Given a table name, get its number of rows :param table_name: Table name :param cond_filter: Condition element used to filter the query :return: integer # Initial query with the table name Check the consistency between the information stored in the workflow and the structure of the underlying dataframe :param workflow: Workflow object :return: Boolean stating the result of the check. True: Correct. # Get the df # Set values in case there is no df # Check 1: Number of rows and columns # Identical sets of columns # Identical data types # This is the case of a column with Boolean and Nulls | 2.315767 | 2 |
config/cf.py | rbsdev/config-client | 0 | 9747 | <gh_stars>0
from typing import Any, Dict, KeysView
import attr
from config.auth import OAuth2
from config.cfenv import CFenv
from config.spring import ConfigClient
@attr.s(slots=True)
class CF:
cfenv = attr.ib(
type=CFenv, factory=CFenv, validator=attr.validators.instance_of(CFenv),
)
oauth2 = attr.ib(type=OAuth2, default=None)
client = attr.ib(type=ConfigClient, default=None)
def __attrs_post_init__(self) -> None:
if not self.oauth2:
self.oauth2 = OAuth2(
access_token_uri=self.cfenv.configserver_access_token_uri(),
client_id=self.cfenv.configserver_client_id(),
client_secret=self.cfenv.configserver_client_secret(),
)
if not self.client:
self.client = ConfigClient(
address=self.cfenv.configserver_uri(),
app_name=self.cfenv.application_name,
profile=self.cfenv.space_name.lower(),
)
self.oauth2.configure()
@property
def vcap_services(self):
return self.cfenv.vcap_services
@property
def vcap_application(self):
return self.cfenv.vcap_application
def get_config(self) -> None:
header = {"Authorization": f"Bearer {self.oauth2.token}"}
self.client.get_config(headers=header)
@property
def config(self) -> Dict:
return self.client.config
def get_attribute(self, value: str) -> Any:
return self.client.get_attribute(value)
def get_keys(self) -> KeysView:
return self.client.get_keys()
| from typing import Any, Dict, KeysView
import attr
from config.auth import OAuth2
from config.cfenv import CFenv
from config.spring import ConfigClient
@attr.s(slots=True)
class CF:
cfenv = attr.ib(
type=CFenv, factory=CFenv, validator=attr.validators.instance_of(CFenv),
)
oauth2 = attr.ib(type=OAuth2, default=None)
client = attr.ib(type=ConfigClient, default=None)
def __attrs_post_init__(self) -> None:
if not self.oauth2:
self.oauth2 = OAuth2(
access_token_uri=self.cfenv.configserver_access_token_uri(),
client_id=self.cfenv.configserver_client_id(),
client_secret=self.cfenv.configserver_client_secret(),
)
if not self.client:
self.client = ConfigClient(
address=self.cfenv.configserver_uri(),
app_name=self.cfenv.application_name,
profile=self.cfenv.space_name.lower(),
)
self.oauth2.configure()
@property
def vcap_services(self):
return self.cfenv.vcap_services
@property
def vcap_application(self):
return self.cfenv.vcap_application
def get_config(self) -> None:
header = {"Authorization": f"Bearer {self.oauth2.token}"}
self.client.get_config(headers=header)
@property
def config(self) -> Dict:
return self.client.config
def get_attribute(self, value: str) -> Any:
return self.client.get_attribute(value)
def get_keys(self) -> KeysView:
return self.client.get_keys() | none | 1 | 2.264096 | 2 |
|
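A minimal usage sketch for the CF helper in record 9747 above. It assumes the surrounding config-client package is importable as `config` and that the process runs on Cloud Foundry with VCAP_SERVICES/VCAP_APPLICATION set, so CFenv can discover the config-server binding; the attribute key queried at the end is purely illustrative.
# Hypothetical usage sketch (assumes the Cloud Foundry VCAP environment variables are present).
from config.cf import CF

cf = CF()                                # builds CFenv, OAuth2 and ConfigClient from the environment
cf.get_config()                          # fetches the configuration with a Bearer token header
print(cf.config)                         # complete configuration dictionary
print(cf.get_attribute('example.key'))   # single-attribute lookup; key name is illustrative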
ducktape/template.py | rancp/ducktape-docs | 0 | 9748 | <reponame>rancp/ducktape-docs
# Copyright 2015 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.utils.util import package_is_installed
from jinja2 import Template, FileSystemLoader, PackageLoader, ChoiceLoader, Environment
import os.path
import inspect
class TemplateRenderer(object):
def render_template(self, template, **kwargs):
"""
Render a template using the context of the current object, optionally with overrides.
:param template: the template to render, a Template or a str
:param kwargs: optional override parameters
:return: the rendered template
"""
if not hasattr(template, 'render'): template = Template(template)
ctx = dict(self.__class__.__dict__)
ctx.update(self.__dict__)
return template.render(ctx, **kwargs)
@staticmethod
def _package_search_path(module_name):
"""
:param module_name: Name of a module
:return: (package, package_search_path) where package is the package containing the module,
and package_search_path is a path relative to the package in which to search for templates.
"""
module_parts = module_name.split(".")
package = module_parts[0]
# Construct path relative to package under which "templates" would be found
directory = ""
for d in module_parts[1: -1]:
directory = os.path.join(directory, d)
return package, os.path.join(directory, "templates")
def render(self, path, **kwargs):
"""
Render a template loaded from a file.
template files referenced in file f should be in a sibling directory of f called "templates".
:param path: path, relative to the search paths, to the template file
:param kwargs: optional override parameters
:return: the rendered template
"""
if not hasattr(self, 'template_loader'):
class_dir = os.path.dirname(inspect.getfile(self.__class__))
module_name = self.__class__.__module__
package, package_search_path = self._package_search_path(module_name)
loaders = []
msg = ""
if os.path.isdir(class_dir):
# FileSystemLoader overrides PackageLoader if the path containing this directory
# is a valid directory. FileSystemLoader throws an error from which ChoiceLoader
# doesn't recover if the directory is invalid
loaders.append(FileSystemLoader(os.path.join(class_dir, 'templates')))
else:
msg += "Will not search in %s for template files since it is not a valid directory. " % class_dir
if package_is_installed(package):
loaders.append(PackageLoader(package, package_search_path))
else:
msg += "Will not search in package %s for template files because it cannot be imported."
if len(loaders) == 0:
# Expect at least one of FileSystemLoader and PackageLoader to be present
raise EnvironmentError(msg)
self.template_loader = ChoiceLoader(loaders)
self.template_env = Environment(loader=self.template_loader, trim_blocks=True, lstrip_blocks=True)
template = self.template_env.get_template(path)
return self.render_template(template, **kwargs)
| # Copyright 2015 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.utils.util import package_is_installed
from jinja2 import Template, FileSystemLoader, PackageLoader, ChoiceLoader, Environment
import os.path
import inspect
class TemplateRenderer(object):
def render_template(self, template, **kwargs):
"""
Render a template using the context of the current object, optionally with overrides.
:param template: the template to render, a Template or a str
:param kwargs: optional override parameters
:return: the rendered template
"""
if not hasattr(template, 'render'): template = Template(template)
ctx = dict(self.__class__.__dict__)
ctx.update(self.__dict__)
return template.render(ctx, **kwargs)
@staticmethod
def _package_search_path(module_name):
"""
:param module_name: Name of a module
:return: (package, package_search_path) where package is the package containing the module,
and package_search_path is a path relative to the package in which to search for templates.
"""
module_parts = module_name.split(".")
package = module_parts[0]
# Construct path relative to package under which "templates" would be found
directory = ""
for d in module_parts[1: -1]:
directory = os.path.join(directory, d)
return package, os.path.join(directory, "templates")
def render(self, path, **kwargs):
"""
Render a template loaded from a file.
template files referenced in file f should be in a sibling directory of f called "templates".
:param path: path, relative to the search paths, to the template file
:param kwargs: optional override parameters
:return: the rendered template
"""
if not hasattr(self, 'template_loader'):
class_dir = os.path.dirname(inspect.getfile(self.__class__))
module_name = self.__class__.__module__
package, package_search_path = self._package_search_path(module_name)
loaders = []
msg = ""
if os.path.isdir(class_dir):
# FileSystemLoader overrides PackageLoader if the path containing this directory
# is a valid directory. FileSystemLoader throws an error from which ChoiceLoader
# doesn't recover if the directory is invalid
loaders.append(FileSystemLoader(os.path.join(class_dir, 'templates')))
else:
msg += "Will not search in %s for template files since it is not a valid directory. " % class_dir
if package_is_installed(package):
loaders.append(PackageLoader(package, package_search_path))
else:
msg += "Will not search in package %s for template files because it cannot be imported."
if len(loaders) == 0:
# Expect at least one of FileSystemLoader and PackageLoader to be present
raise EnvironmentError(msg)
self.template_loader = ChoiceLoader(loaders)
self.template_env = Environment(loader=self.template_loader, trim_blocks=True, lstrip_blocks=True)
template = self.template_env.get_template(path)
return self.render_template(template, **kwargs) | en | 0.742518 | # Copyright 2015 Confluent Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Render a template using the context of the current object, optionally with overrides. :param template: the template to render, a Template or a str :param kwargs: optional override parameters :return: the rendered template :param module_name: Name of a module :return: (package, package_search_path) where package is the package containing the module, and package_search_path is a path relative to the package in which to search for templates. # Construct path relative to package under which "templates" would be found Render a template loaded from a file. template files referenced in file f should be in a sibling directory of f called "templates". :param path: path, relative to the search paths, to the template file :param kwargs: optional override parameters :return: the rendered template # FileSystemLoader overrides PackageLoader if the path containing this directory # is a valid directory. FileSystemLoader throws an error from which ChoiceLoader # doesn't recover if the directory is invalid # Expect at least one of FileSystemLoader and PackageLoader to be present | 2.244931 | 2 |
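A minimal sketch of how the TemplateRenderer in record 9748 is typically used: render_template() pulls its context from the subclass' class-level and instance attributes, so anything set in __init__ becomes visible to the template. The KafkaConfig name and its fields below are invented for illustration.
from ducktape.template import TemplateRenderer

class KafkaConfig(TemplateRenderer):          # hypothetical subclass
    def __init__(self, broker_id):
        self.broker_id = broker_id
        self.log_dirs = "/mnt/kafka-logs"

cfg = KafkaConfig(broker_id=1)
print(cfg.render_template("broker.id={{ broker_id }}\nlog.dirs={{ log_dirs }}"))
# broker.id=1
# log.dirs=/mnt/kafka-logs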
day4/homework/q7.py | AkshayManchanda/Python_Training | 0 | 9749 | <reponame>AkshayManchanda/Python_Training<gh_stars>0
i=input("Enter a string: ")
list = i.split()
list.sort()
for i in list:
print(i,end=' ')
| i=input("Enter a string: ")
list = i.split()
list.sort()
for i in list:
print(i,end=' ') | none | 1 | 3.976479 | 4 |
|
src/git_portfolio/use_cases/config_repos.py | staticdev/github-portfolio | 0 | 9750 | <filename>src/git_portfolio/use_cases/config_repos.py
"""Config repositories use case."""
from __future__ import annotations
import git_portfolio.config_manager as cm
import git_portfolio.domain.gh_connection_settings as cs
import git_portfolio.responses as res
class ConfigReposUseCase:
"""Gitp config repositories use case."""
def __init__(self, config_manager: cm.ConfigManager) -> None:
"""Initializer."""
self.config_manager = config_manager
def execute(
self, github_config: cs.GhConnectionSettings, selected_repos: list[str]
) -> res.Response:
"""Configuration of git repositories."""
self.config_manager.config.github_access_token = github_config.access_token
self.config_manager.config.github_hostname = github_config.hostname
self.config_manager.config.github_selected_repos = selected_repos
self.config_manager.save_config()
return res.ResponseSuccess("gitp repositories successfully configured.")
| <filename>src/git_portfolio/use_cases/config_repos.py
"""Config repositories use case."""
from __future__ import annotations
import git_portfolio.config_manager as cm
import git_portfolio.domain.gh_connection_settings as cs
import git_portfolio.responses as res
class ConfigReposUseCase:
"""Gitp config repositories use case."""
def __init__(self, config_manager: cm.ConfigManager) -> None:
"""Initializer."""
self.config_manager = config_manager
def execute(
self, github_config: cs.GhConnectionSettings, selected_repos: list[str]
) -> res.Response:
"""Configuration of git repositories."""
self.config_manager.config.github_access_token = github_config.access_token
self.config_manager.config.github_hostname = github_config.hostname
self.config_manager.config.github_selected_repos = selected_repos
self.config_manager.save_config()
return res.ResponseSuccess("gitp repositories successfully configured.")
| en | 0.343925 | Config repositories use case. Gitp config repositories use case. Initializer. Configuration of git repositories. | 2.058089 | 2 |
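A minimal wiring sketch for the use case in record 9750. ConfigManager and GhConnectionSettings come from the same git_portfolio package; their constructor signatures below are assumptions, not taken from this record.
import git_portfolio.config_manager as cm
import git_portfolio.domain.gh_connection_settings as cs
from git_portfolio.use_cases.config_repos import ConfigReposUseCase

manager = cm.ConfigManager()                                  # assumed no-arg construction
settings = cs.GhConnectionSettings("<access-token>", "")      # assumed (access_token, hostname) order
response = ConfigReposUseCase(manager).execute(settings, ["owner/repo-one", "owner/repo-two"])
print(bool(response))                                         # ResponseSuccess is truthy in this pattern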
test/test_logic.py | mateuszkowalke/sudoku_game | 0 | 9751 | import pytest
from ..logic import Board, empty_board, example_board, solved_board
class TestBoard:
def test_create_board(self):
board = Board(example_board)
assert board.tiles == example_board
def test_solve_board(self):
board = Board(example_board)
board.solve()
assert board.tiles == solved_board
def test_check_if_possible(self):
board = Board(example_board)
assert board.check_if_possible(0, 0, 4) == False
assert board.check_if_possible(0, 0, 9) == True
def test_check_solution(self):
board = Board(solved_board)
assert board.check_solution()
def test_new_board(self):
board = Board(empty_board)
board.new_board(example_board)
assert board.tiles == example_board
def test_lock_tiles(self):
board = Board(example_board)
board.lock_tiles()
assert board.check_if_tile_locked(0, 1)
| import pytest
from ..logic import Board, empty_board, example_board, solved_board
class TestBoard:
def test_create_board(self):
board = Board(example_board)
assert board.tiles == example_board
def test_solve_board(self):
board = Board(example_board)
board.solve()
assert board.tiles == solved_board
def test_check_if_possible(self):
board = Board(example_board)
assert board.check_if_possible(0, 0, 4) == False
assert board.check_if_possible(0, 0, 9) == True
def test_check_solution(self):
board = Board(solved_board)
assert board.check_solution()
def test_new_board(self):
board = Board(empty_board)
board.new_board(example_board)
assert board.tiles == example_board
def test_lock_tiles(self):
board = Board(example_board)
board.lock_tiles()
assert board.check_if_tile_locked(0, 1)
| none | 1 | 3.072967 | 3 |
|
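A usage sketch inferred purely from the pytest suite in record 9751: only the Board methods exercised by the tests are shown, and the top-level import path is an assumption (the tests import `..logic` relatively).
from logic import Board, example_board        # import path assumed

board = Board(example_board)
board.lock_tiles()                            # freeze the pre-filled clues
if board.check_if_possible(0, 0, 9):          # legal-move check for row 0, column 0
    board.solve()
print(board.check_solution())                 # True for a valid completed grid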
src/compas_rhino/objects/_select.py | jf---/compas | 2 | 9752 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import ast
import rhinoscriptsyntax as rs
__all__ = [
'mesh_select_vertex',
'mesh_select_vertices',
'mesh_select_face',
'mesh_select_faces',
'mesh_select_edge',
'mesh_select_edges',
'network_select_node',
'network_select_nodes',
'network_select_edge',
'network_select_edges',
]
def mesh_select_vertex(mesh, message="Select a vertex."):
"""Select a single vertex of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
int or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'vertex' in name:
if not prefix or prefix in name:
key = name[-1]
return ast.literal_eval(key)
return None
def mesh_select_vertices(mesh, message="Select vertices."):
"""Select multiple vertices of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'vertex' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
key = ast.literal_eval(key)
keys.append(key)
return keys
def mesh_select_face(mesh, message="Select a face."):
"""Select a single face of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
int or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.mesh | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'face' in name:
if not prefix or prefix in name:
key = name[-1]
key = ast.literal_eval(key)
return key
return None
def mesh_select_faces(mesh, message="Select faces."):
"""Select multiple faces of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.mesh | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'face' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
key = ast.literal_eval(key)
keys.append(key)
return keys
def mesh_select_edge(mesh, message="Select an edge."):
"""Select a single edge of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
tuple of int, or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
return u, v
return None
def mesh_select_edges(mesh, message="Select edges."):
"""Select multiple edges of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of tuple of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
keys.append((u, v))
return keys
def network_select_node(network, message="Select a node."):
"""Select a single node of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
hashable or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guid:
prefix = network.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'node' in name:
if not prefix or prefix in name:
key = name[-1]
return ast.literal_eval(key)
return None
def network_select_nodes(network, message="Select nodes."):
"""Select multiple nodes of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
list of hashable
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guids:
prefix = network.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'node' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
key = ast.literal_eval(key)
keys.append(key)
return keys
def network_select_edge(network, message="Select an edge."):
"""Select a single edge of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
tuple of hashable, or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guid:
prefix = network.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
return u, v
return None
def network_select_edges(network, message="Select edges."):
"""Select multiple edges of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
list of tuple of hashable
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guids:
prefix = network.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
keys.append((u, v))
return keys
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
pass
| from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import ast
import rhinoscriptsyntax as rs
__all__ = [
'mesh_select_vertex',
'mesh_select_vertices',
'mesh_select_face',
'mesh_select_faces',
'mesh_select_edge',
'mesh_select_edges',
'network_select_node',
'network_select_nodes',
'network_select_edge',
'network_select_edges',
]
def mesh_select_vertex(mesh, message="Select a vertex."):
"""Select a single vertex of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
int or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'vertex' in name:
if not prefix or prefix in name:
key = name[-1]
return ast.literal_eval(key)
return None
def mesh_select_vertices(mesh, message="Select vertices."):
"""Select multiple vertices of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'vertex' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
key = ast.literal_eval(key)
keys.append(key)
return keys
def mesh_select_face(mesh, message="Select a face."):
"""Select a single face of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
int or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.mesh | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'face' in name:
if not prefix or prefix in name:
key = name[-1]
key = ast.literal_eval(key)
return key
return None
def mesh_select_faces(mesh, message="Select faces."):
"""Select multiple faces of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.mesh | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'face' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
key = ast.literal_eval(key)
keys.append(key)
return keys
def mesh_select_edge(mesh, message="Select an edge."):
"""Select a single edge of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
tuple of int, or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guid:
prefix = mesh.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
return u, v
return None
def mesh_select_edges(mesh, message="Select edges."):
"""Select multiple edges of a mesh.
Parameters
----------
mesh: :class:`compas.datastructures.Mesh`
message: str, optional
Returns
-------
list of tuple of int
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guids:
prefix = mesh.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
keys.append((u, v))
return keys
def network_select_node(network, message="Select a node."):
"""Select a single node of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
hashable or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guid:
prefix = network.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'node' in name:
if not prefix or prefix in name:
key = name[-1]
return ast.literal_eval(key)
return None
def network_select_nodes(network, message="Select nodes."):
"""Select multiple nodes of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
list of hashable
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.point | rs.filter.textdot)
if guids:
prefix = network.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'node' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
key = ast.literal_eval(key)
keys.append(key)
return keys
def network_select_edge(network, message="Select an edge."):
"""Select a single edge of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
tuple of hashable, or None
"""
guid = rs.GetObject(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guid:
prefix = network.attributes['name']
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
return u, v
return None
def network_select_edges(network, message="Select edges."):
"""Select multiple edges of a network.
Parameters
----------
network: :class:`compas.datastructures.Network`
message: str, optional
Returns
-------
list of tuple of hashable
"""
keys = []
guids = rs.GetObjects(message, preselect=True, filter=rs.filter.curve | rs.filter.textdot)
if guids:
prefix = network.attributes['name']
seen = set()
for guid in guids:
name = rs.ObjectName(guid).split('.')
if 'edge' in name:
if not prefix or prefix in name:
key = name[-1]
if not seen.add(key):
u, v = key.split('-')
u = ast.literal_eval(u)
v = ast.literal_eval(v)
keys.append((u, v))
return keys
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
pass
| en | 0.300779 | Select a single vertex of a mesh. Parameters ---------- mesh: :class:`compas.datastructures.Mesh` message: str, optional Returns ------- int or None Select multiple vertices of a mesh. Parameters ---------- mesh: :class:`compas.datastructures.Mesh` message: str, optional Returns ------- list of int Select a single face of a mesh. Parameters ---------- mesh: :class:`compas.datastructures.Mesh` message: str, optional Returns ------- int or None Select multiple faces of a mesh. Parameters ---------- mesh: :class:`compas.datastructures.Mesh` message: str, optional Returns ------- list of int Select a single edge of a mesh. Parameters ---------- mesh: :class:`compas.datastructures.Mesh` message: str, optional Returns ------- tuple of int, or None Select multiple edges of a mesh. Parameters ---------- mesh: :class:`compas.datastructures.Mesh` message: str, optional Returns ------- list of tuple of int Select a single node of a network. Parameters ---------- network: :class:`compas.datastructures.Network` message: str, optional Returns ------- hashable or None Select multiple nodes of a network. Parameters ---------- network: :class:`compas.datastructures.Network` message: str, optional Returns ------- list of hashable Select a single edge of a network. Parameters ---------- network: :class:`compas.datastructures.Network` message: str, optional Returns ------- tuple of hashable, or None Select multiple edges of a network. Parameters ---------- network: :class:`compas.datastructures.Network` message: str, optional Returns ------- list of tuple of hashable # ============================================================================== # Main # ============================================================================== | 2.3027 | 2 |
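A sketch of how the selection helpers in record 9752 are used. It only works inside Rhino, where rhinoscriptsyntax is importable and the mesh has already been drawn so its Rhino objects carry names like "<mesh-name>.vertex.<key>". One small observation: the `if not seen.add(key)` guards are always true (set.add() returns None), so they do not actually deduplicate — harmless in practice because GUIDs are unique.
import compas
from compas.datastructures import Mesh
from compas_rhino.objects._select import mesh_select_vertices, mesh_select_edges

mesh = Mesh.from_obj(compas.get('faces.obj'))   # sample file shipped with COMPAS (assumed available)
vertices = mesh_select_vertices(mesh)           # prompts for point/textdot objects in the Rhino viewport
edges = mesh_select_edges(mesh)                 # prompts for curve/textdot objects
print(vertices, edges)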
handlers/product_add.py | MuchkoM/CalorieMatchBot | 0 | 9753 | from telegram import Update
from telegram.ext import Updater, CallbackContext, ConversationHandler, CommandHandler, MessageHandler, Filters
from db import DBConnector
import re
str_matcher = r"\"(?P<name>.+)\"\s*(?P<fat>\d+)\s*/\s*(?P<protein>\d+)\s*/\s*(?P<carbohydrates>\d+)\s*(?P<kcal>\d+)"
ADD_1 = 0
def add_0(update: Update, _: CallbackContext):
update.message.reply_text('Enter new product in format\n'
'"name" fat/protein/carbohydrates kcal')
return ADD_1
def add_1(update: Update, context: CallbackContext):
db_connect: DBConnector = context.bot_data['db_connect']
result = re.match(str_matcher, update.message.text)
if result:
db_connect.products.insert(result.groupdict())
update.message.reply_text('Product was added')
else:
        update.message.reply_text('Message has the wrong format')
return ConversationHandler.END
def add_handler(updater: Updater):
"""/product_add - Add product to list known products"""
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('product_add', add_0)],
states={
ADD_1: [MessageHandler(Filters.text & ~Filters.command, add_1)]
},
fallbacks=[]
))
| from telegram import Update
from telegram.ext import Updater, CallbackContext, ConversationHandler, CommandHandler, MessageHandler, Filters
from db import DBConnector
import re
str_matcher = r"\"(?P<name>.+)\"\s*(?P<fat>\d+)\s*/\s*(?P<protein>\d+)\s*/\s*(?P<carbohydrates>\d+)\s*(?P<kcal>\d+)"
ADD_1 = 0
def add_0(update: Update, _: CallbackContext):
update.message.reply_text('Enter new product in format\n'
'"name" fat/protein/carbohydrates kcal')
return ADD_1
def add_1(update: Update, context: CallbackContext):
db_connect: DBConnector = context.bot_data['db_connect']
result = re.match(str_matcher, update.message.text)
if result:
db_connect.products.insert(result.groupdict())
update.message.reply_text('Product was added')
else:
        update.message.reply_text('Message has the wrong format')
return ConversationHandler.END
def add_handler(updater: Updater):
"""/product_add - Add product to list known products"""
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('product_add', add_0)],
states={
ADD_1: [MessageHandler(Filters.text & ~Filters.command, add_1)]
},
fallbacks=[]
))
| en | 0.938881 | /product_add - Add product to list known products | 2.368037 | 2 |
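A wiring sketch for the /product_add conversation in record 9753, assuming python-telegram-bot 13.x (the Updater/Filters API used above) and the project's own DBConnector; the token string and the no-argument DBConnector construction are placeholders.
from telegram.ext import Updater
from db import DBConnector
from handlers.product_add import add_handler

updater = Updater("<bot-token>")                              # placeholder token
updater.dispatcher.bot_data['db_connect'] = DBConnector()     # assumed no-arg construction
add_handler(updater)                                          # registers the ConversationHandler
updater.start_polling()
updater.idle()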
python-packages/nolearn-0.5/build/lib.linux-x86_64-2.7/nolearn/tests/test_dataset.py | rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | 2 | 9754 | <gh_stars>1-10
from mock import patch
import numpy as np
def test_dataset_simple():
from ..dataset import Dataset
data = object()
target = object()
dataset = Dataset(data, target)
assert dataset.data is data
assert dataset.target is target
@patch('nolearn.dataset.np.load')
def test_dataset_with_filenames(load):
from ..dataset import Dataset
data = 'datafile'
target = 'targetfile'
dataset = Dataset(data, target)
assert load.call_count == 2
assert dataset.target is load.return_value
def test_dataset_train_test_split():
from ..dataset import Dataset
data = np.arange(100)
target = np.array([0] * 50 + [1] * 50)
dataset = Dataset(data, target)
assert dataset.split_indices.classes.tolist() == [0, 1]
assert dataset.split_indices.n_train == 75
assert dataset.split_indices.n_test == 25
X_train, X_test, y_train, y_test = dataset.train_test_split()
assert len(X_train) == len(y_train)
assert len(X_test) == len(y_test)
def test_dataset_scale():
from ..dataset import Dataset
data = np.arange(100).astype('float')
target = np.array([0] * 100)
dataset = Dataset(data, target)
dataset.scale()
assert dataset.data[0] == -1.7148160424389376
assert dataset.data[-1] == 1.7148160424389376
| from mock import patch
import numpy as np
def test_dataset_simple():
from ..dataset import Dataset
data = object()
target = object()
dataset = Dataset(data, target)
assert dataset.data is data
assert dataset.target is target
@patch('nolearn.dataset.np.load')
def test_dataset_with_filenames(load):
from ..dataset import Dataset
data = 'datafile'
target = 'targetfile'
dataset = Dataset(data, target)
assert load.call_count == 2
assert dataset.target is load.return_value
def test_dataset_train_test_split():
from ..dataset import Dataset
data = np.arange(100)
target = np.array([0] * 50 + [1] * 50)
dataset = Dataset(data, target)
assert dataset.split_indices.classes.tolist() == [0, 1]
assert dataset.split_indices.n_train == 75
assert dataset.split_indices.n_test == 25
X_train, X_test, y_train, y_test = dataset.train_test_split()
assert len(X_train) == len(y_train)
assert len(X_test) == len(y_test)
def test_dataset_scale():
from ..dataset import Dataset
data = np.arange(100).astype('float')
target = np.array([0] * 100)
dataset = Dataset(data, target)
dataset.scale()
assert dataset.data[0] == -1.7148160424389376
assert dataset.data[-1] == 1.7148160424389376 | none | 1 | 2.369039 | 2 |
|
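A usage sketch distilled from the tests in record 9754: nolearn 0.5's Dataset wraps (data, target), can standardise the features in place and produces a stratified 75/25 train/test split by default. The toy arrays mirror the test fixtures.
import numpy as np
from nolearn.dataset import Dataset

data = np.arange(100).astype('float')
target = np.array([0] * 50 + [1] * 50)
dataset = Dataset(data, target)
dataset.scale()                                    # in-place standardisation
X_train, X_test, y_train, y_test = dataset.train_test_split()
print(len(X_train), len(X_test))                   # 75 25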
src/Cipher/MultiLevelCaesarDecrypt.py | EpicTofuu/Assignment | 0 | 9755 | <reponame>EpicTofuu/Assignment
import Cipher.tk
from Cipher.tk import EncryptDecryptCoord, GetChiSquared, Mode
def MultiDecrypt (message, alphabet, usables = 3, lan = "English", transformations = [], lowestchi = 9999, ogMessage = ""):
msg = ""
prev = (9999, (0, 0)) # (chi, key)
for i in range (len(message)):
for k in range (1, len (alphabet)):
msg = EncryptDecryptCoord(message, (i,k), alphabet, Mode.DECRYPT)
chi = GetChiSquared (msg, lan)
if (round (chi, 3) < round (prev[0], 3)):
prev = (chi, (i,k))
# base case
if (prev[0] >= lowestchi):
v = ogMessage
for tr in transformations:
v = EncryptDecryptCoord (v, tr, alphabet, Mode.DECRYPT)
return (v, lowestchi, transformations)
if (len(transformations) == 0): # only set lowest chi on the first run
lowestchi = prev[0]
ogMessage = message
transformations.append (prev[1])
return MultiDecrypt (EncryptDecryptCoord (message, prev[1], alphabet, Mode.DECRYPT), alphabet, usables, lan, transformations, prev[0], ogMessage)
'''
# testing do write it here
a = " abcdefghijklmnopqrstuvwxyz"
p=[]
for c in a:
p.append (c)
print ("starting...")
print (MultiDecrypt ("dtyktckcxlbd", p))
# original 231
''' | import Cipher.tk
from Cipher.tk import EncryptDecryptCoord, GetChiSquared, Mode
def MultiDecrypt (message, alphabet, usables = 3, lan = "English", transformations = [], lowestchi = 9999, ogMessage = ""):
msg = ""
prev = (9999, (0, 0)) # (chi, key)
for i in range (len(message)):
for k in range (1, len (alphabet)):
msg = EncryptDecryptCoord(message, (i,k), alphabet, Mode.DECRYPT)
chi = GetChiSquared (msg, lan)
if (round (chi, 3) < round (prev[0], 3)):
prev = (chi, (i,k))
# base case
if (prev[0] >= lowestchi):
v = ogMessage
for tr in transformations:
v = EncryptDecryptCoord (v, tr, alphabet, Mode.DECRYPT)
return (v, lowestchi, transformations)
if (len(transformations) == 0): # only set lowest chi on the first run
lowestchi = prev[0]
ogMessage = message
transformations.append (prev[1])
return MultiDecrypt (EncryptDecryptCoord (message, prev[1], alphabet, Mode.DECRYPT), alphabet, usables, lan, transformations, prev[0], ogMessage)
'''
# testing do write it here
a = " abcdefghijklmnopqrstuvwxyz"
p=[]
for c in a:
p.append (c)
print ("starting...")
print (MultiDecrypt ("dtyktckcxlbd", p))
# original 231
''' | en | 0.620817 | # (chi, key) # base case # only set lowest chi on the first run # testing do write it here
a = " abcdefghijklmnopqrstuvwxyz"
p=[]
for c in a:
p.append (c)
print ("starting...")
print (MultiDecrypt ("dtyktckcxlbd", p))
# original 231 | 3.471786 | 3 |
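A driver sketch for record 9755, mirroring the commented-out test at the bottom of that file. One caveat worth flagging: transformations=[] is a mutable default argument, so state leaks between calls — pass a fresh list explicitly when decrypting more than one message in the same process.
from Cipher.MultiLevelCaesarDecrypt import MultiDecrypt

alphabet = list(" abcdefghijklmnopqrstuvwxyz")
plaintext, chi, keys = MultiDecrypt("dtyktckcxlbd", alphabet, transformations=[])
print(plaintext, chi, keys)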
scripts/vcf_filter.py | bunop/cyvcf | 46 | 9756 | <reponame>bunop/cyvcf<gh_stars>10-100
#!/usr/bin/env python
import sys
import argparse
import pkg_resources
import vcf
from vcf.parser import _Filter
parser = argparse.ArgumentParser(description='Filter a VCF file',
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('input', metavar='input', type=str, nargs=1,
help='File to process (use - for STDIN)')
parser.add_argument('filters', metavar='filter', type=str, nargs='+',
help='Filters to use')
parser.add_argument('--no-short-circuit', action='store_true',
help='Do not stop filter processing on a site if a single filter fails.')
parser.add_argument('--output', action='store', default=sys.stdout,
help='Filename to output (default stdout)')
parser.add_argument('--no-filtered', action='store_true',
help='Remove failed sites')
if __name__ == '__main__':
# TODO: allow filter specification by short name
# TODO: flag that writes filter output into INFO column
# TODO: argument use implies filter use
# TODO: parallelize
# TODO: prevent plugins raising an exception from crashing the script
# dynamically build the list of available filters
filters = {}
filter_help = '\n\navailable filters:'
for p in pkg_resources.iter_entry_points('vcf.filters'):
filt = p.load()
filters[filt.name] = filt
filt.customize_parser(parser)
filter_help += '\n %s:\t%s' % (filt.name, filt.description)
parser.description += filter_help
# parse command line args
args = parser.parse_args()
inp = vcf.Reader(file(args.input[0]))
# build filter chain
chain = []
for name in args.filters:
f = filters[name](args)
chain.append(f)
inp.filters[f.filter_name()] = _Filter(f.filter_name(), f.description)
oup = vcf.Writer(args.output, inp)
# apply filters
short_circuit = not args.no_short_circuit
for record in inp:
for filt in chain:
result = filt(record)
if result:
record.add_filter(filt.filter_name())
if short_circuit:
break
if (not args.no_filtered) or (record.FILTER == '.'):
oup.write_record(record)
| #!/usr/bin/env python
import sys
import argparse
import pkg_resources
import vcf
from vcf.parser import _Filter
parser = argparse.ArgumentParser(description='Filter a VCF file',
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('input', metavar='input', type=str, nargs=1,
help='File to process (use - for STDIN)')
parser.add_argument('filters', metavar='filter', type=str, nargs='+',
help='Filters to use')
parser.add_argument('--no-short-circuit', action='store_true',
help='Do not stop filter processing on a site if a single filter fails.')
parser.add_argument('--output', action='store', default=sys.stdout,
help='Filename to output (default stdout)')
parser.add_argument('--no-filtered', action='store_true',
help='Remove failed sites')
if __name__ == '__main__':
# TODO: allow filter specification by short name
# TODO: flag that writes filter output into INFO column
# TODO: argument use implies filter use
# TODO: parallelize
# TODO: prevent plugins raising an exception from crashing the script
# dynamically build the list of available filters
filters = {}
filter_help = '\n\navailable filters:'
for p in pkg_resources.iter_entry_points('vcf.filters'):
filt = p.load()
filters[filt.name] = filt
filt.customize_parser(parser)
filter_help += '\n %s:\t%s' % (filt.name, filt.description)
parser.description += filter_help
# parse command line args
args = parser.parse_args()
inp = vcf.Reader(file(args.input[0]))
# build filter chain
chain = []
for name in args.filters:
f = filters[name](args)
chain.append(f)
inp.filters[f.filter_name()] = _Filter(f.filter_name(), f.description)
oup = vcf.Writer(args.output, inp)
# apply filters
short_circuit = not args.no_short_circuit
for record in inp:
for filt in chain:
result = filt(record)
if result:
record.add_filter(filt.filter_name())
if short_circuit:
break
if (not args.no_filtered) or (record.FILTER == '.'):
oup.write_record(record) | en | 0.585351 | #!/usr/bin/env python # TODO: allow filter specification by short name # TODO: flag that writes filter output into INFO column # TODO: argument use implies filter use # TODO: parallelize # TODO: prevent plugins raising an exception from crashing the script # dynamically build the list of available filters # parse command line args # build filter chain # apply filters | 2.495073 | 2 |
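A small sketch related to record 9756: the script discovers its filters from the 'vcf.filters' entry point, so the snippet below simply lists whatever filter plugins are installed. Note the script itself is Python 2 era — it calls the removed file() builtin — so it needs porting before running under Python 3.
import pkg_resources

for entry_point in pkg_resources.iter_entry_points('vcf.filters'):
    filt = entry_point.load()
    print(filt.name, '-', filt.description)
# Typical invocation (filter names depend on the installed plugins):
#   python vcf_filter.py input.vcf <filter-name> --output filtered.vcf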
src/flocker/blueprints/red/__init__.py | Muxelmann/home-projects | 0 | 9757 | import os
from flask import Blueprint, render_template
def create_bp():
bp_red = Blueprint('red', __name__, url_prefix='/red')
@bp_red.route('/index/')
@bp_red.route('/')
def index():
return render_template('red/index.html')
return bp_red | import os
from flask import Blueprint, render_template
def create_bp():
bp_red = Blueprint('red', __name__, url_prefix='/red')
@bp_red.route('/index/')
@bp_red.route('/')
def index():
return render_template('red/index.html')
return bp_red | none | 1 | 2.296598 | 2 |
|
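A wiring sketch for the blueprint factory in record 9757, assuming the import path implied by the repo layout and that a templates/red/index.html exists on the application's template search path.
from flask import Flask
from flocker.blueprints.red import create_bp     # import path assumed from the repo layout

app = Flask(__name__)
app.register_blueprint(create_bp())              # serves /red/ and /red/index/
app.run(debug=True)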
alphacoders/__init__.py | whoiscc/alphacoders | 7 | 9758 | <reponame>whoiscc/alphacoders
#
from aiohttp.client_exceptions import ClientError
from lxml import html
from pathlib import Path
from asyncio import create_task
from functools import wraps
def start_immediately(task):
@wraps(task)
def wrapper(*args, **kwargs):
return create_task(task(*args, **kwargs))
return wrapper
@start_immediately
async def download_page(client, url):
count = 0
while True:
print(f"(retry = {count}) download url: {url}")
try:
async with client.get(url) as resp:
assert resp.status == 200
return await resp.text()
except ClientError:
pass
finally:
count += 1
@start_immediately
async def download_image(client, url, target_dir, name):
count = 0
while True:
print(f"(retry = {count}) download image: {url} -> {target_dir / name}")
try:
async with client.get(url) as resp:
content = await resp.read()
target_dir.mkdir(exist_ok=True)
(target_dir / name).write_bytes(content)
return
except ClientError:
pass
finally:
count += 1
def download_search(client, keyword, page):
safe_keyword = keyword.replace(" ", "+")
# url = f"https://mobile.alphacoders.com/by-resolution/5?search={safe_keyword}&page={page}"
url = f"https://wall.alphacoders.com/search.php?search={safe_keyword}&page={page}"
return download_page(client, url)
@start_immediately
async def query_image_id(client, keyword=None, page=None, document=None):
if document is None:
assert keyword is not None and page is not None
search = await download_search(client, keyword, page)
document = html.fromstring(search)
a_list = document.xpath('//div[@class="boxgrid"]/a')
href_list = [a.attrib["href"] for a in a_list]
return href_list
def query_page_count(document):
count_string = document.xpath('//ul[@class="pagination"]/li[last() - 1]/a/text()')[
0
]
return int(count_string)
@start_immediately
async def query_image_url(client, detail_path):
url = f"https://wall.alphacoders.com/{detail_path}"
detail = await download_page(client, url)
document = html.fromstring(detail)
image = document.xpath('//div[@class="center img-container-desktop"]/a')[0]
return image.attrib["href"]
@start_immediately
async def download_image_by_id(manager, client, image_id, target_dir):
image_url = await query_image_url(client, image_id)
name = image_url.split("/")[-1]
await download_image(client, image_url, target_dir, name)
manager.complete_count += 1
class SingleTask:
def __init__(self, keyword, limit=None):
self.keyword = keyword
self.limit = limit
self.complete_count = 0
self.triggered = False
async def run(self, client):
assert not self.triggered
self.triggered = True
first_search_doc = html.fromstring(
await download_search(client, self.keyword, 1)
)
page_count = query_page_count(first_search_doc)
download_image_task_list = []
image_count = 0
for page in range(1, page_count + 1):
if page == 1:
partial_list = await query_image_id(client, document=first_search_doc)
else:
partial_list = await query_image_id(
client, keyword=self.keyword, page=page
)
if self.limit is not None:
partial_list = partial_list[: self.limit - image_count]
image_count += len(partial_list)
for image_id in partial_list:
download_image_task_list.append(
download_image_by_id(self, client, image_id, Path(self.keyword))
)
if self.limit is not None and image_count == self.limit:
break
for task in download_image_task_list:
await task
@start_immediately
async def execute_single_task(manager, client):
return await manager.run(client)
| #
from aiohttp.client_exceptions import ClientError
from lxml import html
from pathlib import Path
from asyncio import create_task
from functools import wraps
def start_immediately(task):
@wraps(task)
def wrapper(*args, **kwargs):
return create_task(task(*args, **kwargs))
return wrapper
@start_immediately
async def download_page(client, url):
count = 0
while True:
print(f"(retry = {count}) download url: {url}")
try:
async with client.get(url) as resp:
assert resp.status == 200
return await resp.text()
except ClientError:
pass
finally:
count += 1
@start_immediately
async def download_image(client, url, target_dir, name):
count = 0
while True:
print(f"(retry = {count}) download image: {url} -> {target_dir / name}")
try:
async with client.get(url) as resp:
content = await resp.read()
target_dir.mkdir(exist_ok=True)
(target_dir / name).write_bytes(content)
return
except ClientError:
pass
finally:
count += 1
def download_search(client, keyword, page):
safe_keyword = keyword.replace(" ", "+")
# url = f"https://mobile.alphacoders.com/by-resolution/5?search={safe_keyword}&page={page}"
url = f"https://wall.alphacoders.com/search.php?search={safe_keyword}&page={page}"
return download_page(client, url)
@start_immediately
async def query_image_id(client, keyword=None, page=None, document=None):
if document is None:
assert keyword is not None and page is not None
search = await download_search(client, keyword, page)
document = html.fromstring(search)
a_list = document.xpath('//div[@class="boxgrid"]/a')
href_list = [a.attrib["href"] for a in a_list]
return href_list
def query_page_count(document):
count_string = document.xpath('//ul[@class="pagination"]/li[last() - 1]/a/text()')[
0
]
return int(count_string)
@start_immediately
async def query_image_url(client, detail_path):
url = f"https://wall.alphacoders.com/{detail_path}"
detail = await download_page(client, url)
document = html.fromstring(detail)
image = document.xpath('//div[@class="center img-container-desktop"]/a')[0]
return image.attrib["href"]
@start_immediately
async def download_image_by_id(manager, client, image_id, target_dir):
image_url = await query_image_url(client, image_id)
name = image_url.split("/")[-1]
await download_image(client, image_url, target_dir, name)
manager.complete_count += 1
class SingleTask:
def __init__(self, keyword, limit=None):
self.keyword = keyword
self.limit = limit
self.complete_count = 0
self.triggered = False
async def run(self, client):
assert not self.triggered
self.triggered = True
first_search_doc = html.fromstring(
await download_search(client, self.keyword, 1)
)
page_count = query_page_count(first_search_doc)
download_image_task_list = []
image_count = 0
for page in range(1, page_count + 1):
if page == 1:
partial_list = await query_image_id(client, document=first_search_doc)
else:
partial_list = await query_image_id(
client, keyword=self.keyword, page=page
)
if self.limit is not None:
partial_list = partial_list[: self.limit - image_count]
image_count += len(partial_list)
for image_id in partial_list:
download_image_task_list.append(
download_image_by_id(self, client, image_id, Path(self.keyword))
)
if self.limit is not None and image_count == self.limit:
break
for task in download_image_task_list:
await task
@start_immediately
async def execute_single_task(manager, client):
return await manager.run(client) | en | 0.573389 | # # url = f"https://mobile.alphacoders.com/by-resolution/5?search={safe_keyword}&page={page}" | 2.717837 | 3 |
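A driver sketch for the scraper in record 9758. SingleTask.run() expects an aiohttp client session and must run inside an event loop because the helpers call asyncio.create_task(); the search keyword and limit are illustrative.
import asyncio
import aiohttp
from alphacoders import SingleTask

async def main():
    async with aiohttp.ClientSession() as client:
        task = SingleTask("landscape", limit=5)   # illustrative keyword and limit
        await task.run(client)
        print("downloaded", task.complete_count, "images")

asyncio.run(main())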
Python/Calculating_Trimmed_Means/calculating_trimmed_means1.py | PeriscopeData/analytics-toolbox | 2 | 9759 | # SQL output is imported as a pandas dataframe variable called "df"
# Source: https://stackoverflow.com/questions/19441730/trimmed-mean-with-percentage-limit-in-python
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import tmean, scoreatpercentile
import numpy as np
def trimmean(arr, percent):
lower_limit = scoreatpercentile(arr, percent)
upper_limit = scoreatpercentile(arr, 100-percent)
return tmean(arr, limits=(lower_limit, upper_limit), inclusive=(False, False))
my_result = trimmean(df["amt_paid"].values,10) | # SQL output is imported as a pandas dataframe variable called "df"
# Source: https://stackoverflow.com/questions/19441730/trimmed-mean-with-percentage-limit-in-python
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import tmean, scoreatpercentile
import numpy as np
def trimmean(arr, percent):
lower_limit = scoreatpercentile(arr, percent)
upper_limit = scoreatpercentile(arr, 100-percent)
return tmean(arr, limits=(lower_limit, upper_limit), inclusive=(False, False))
my_result = trimmean(df["amt_paid"].values,10) | en | 0.849478 | # SQL output is imported as a pandas dataframe variable called "df" # Source: https://stackoverflow.com/questions/19441730/trimmed-mean-with-percentage-limit-in-python | 3.285858 | 3 |
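A worked example for the trimmean() helper in record 9759, repeated here so the snippet is self-contained (the original reads df from the surrounding SQL context). With percent=10, values below the 10th and above the 90th percentile are dropped before averaging, which is what blunts the outlier below.
import numpy as np
from scipy.stats import tmean, scoreatpercentile

def trimmean(arr, percent):
    lower_limit = scoreatpercentile(arr, percent)
    upper_limit = scoreatpercentile(arr, 100 - percent)
    return tmean(arr, limits=(lower_limit, upper_limit), inclusive=(False, False))

values = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 100])   # 100 is an outlier
print(np.mean(values))         # 14.5 -- pulled up by the outlier
print(trimmean(values, 10))    # 5.5  -- mean of 2..9 once both tails are trimmed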
scripts/data_extract.py | amichalski2/WBC-SHAP | 0 | 9760 | import os
import cv2
import random
import numpy as np
from tensorflow.keras.utils import to_categorical
from scripts.consts import class_dict
def get_data(path, split=0.2):
X, y = [], []
for directory in os.listdir(path):
dirpath = os.path.join(path, directory)
print(directory, len(os.listdir(dirpath)))
for file in os.listdir(dirpath):
filepath = os.path.join(dirpath, file)
img = cv2.imread(filepath, cv2.IMREAD_UNCHANGED)
if img.shape != (360, 363, 3):
img = cv2.resize(img, (360, 363), cv2.INTER_CUBIC)
X.append(img)
y.append(class_dict[directory])
data = list(zip(X, y))
random.shuffle(data)
X, y = zip(*data)
num_train = int((1.0 - split) * len(y))
X_train, X_valid = np.array(X[:num_train]).astype(
'float32'), np.array(X[num_train:]).astype('float32')
y_train, y_valid = np.array(
y[:num_train]).reshape(-1, 1), np.array(y[num_train:]).reshape((-1, 1))
X_train = X_train / 255.0
X_valid = X_valid / 255.0
y_train, y_valid = to_categorical(y_train), to_categorical(y_valid)
print(X_train.shape, y_train.shape)
print(X_valid.shape, y_valid.shape)
return X_train, y_train, X_valid, y_valid
| import os
import cv2
import random
import numpy as np
from tensorflow.keras.utils import to_categorical
from scripts.consts import class_dict
def get_data(path, split=0.2):
X, y = [], []
for directory in os.listdir(path):
dirpath = os.path.join(path, directory)
print(directory, len(os.listdir(dirpath)))
for file in os.listdir(dirpath):
filepath = os.path.join(dirpath, file)
img = cv2.imread(filepath, cv2.IMREAD_UNCHANGED)
if img.shape != (360, 363, 3):
img = cv2.resize(img, (360, 363), cv2.INTER_CUBIC)
X.append(img)
y.append(class_dict[directory])
data = list(zip(X, y))
random.shuffle(data)
X, y = zip(*data)
num_train = int((1.0 - split) * len(y))
X_train, X_valid = np.array(X[:num_train]).astype(
'float32'), np.array(X[num_train:]).astype('float32')
y_train, y_valid = np.array(
y[:num_train]).reshape(-1, 1), np.array(y[num_train:]).reshape((-1, 1))
X_train = X_train / 255.0
X_valid = X_valid / 255.0
y_train, y_valid = to_categorical(y_train), to_categorical(y_valid)
print(X_train.shape, y_train.shape)
print(X_valid.shape, y_valid.shape)
return X_train, y_train, X_valid, y_valid
| none | 1 | 2.634801 | 3 |
|
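A usage sketch for get_data() in record 9760; the directory layout (one sub-folder per class named in scripts.consts.class_dict) and the path string are assumptions. One caveat: cv2.resize takes dsize as (width, height) and its third positional parameter is dst, so to actually reach the (360, 363, 3) target shape the resize call would need to read cv2.resize(img, (363, 360), interpolation=cv2.INTER_CUBIC).
from scripts.data_extract import get_data

# 80/20 split; the path is a placeholder for the dataset root with one folder per class
X_train, y_train, X_valid, y_valid = get_data('data/images', split=0.2)
print(X_train.shape, y_train.shape)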
ironic/tests/unit/drivers/test_base.py | tzumainn/ironic | 0 | 9761 | <filename>ironic/tests/unit/drivers/test_base.py
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
from ironic.common import exception
from ironic.common import raid
from ironic.common import states
from ironic.drivers import base as driver_base
from ironic.drivers.modules import fake
from ironic.tests import base
class FakeVendorInterface(driver_base.VendorInterface):
def get_properties(self):
pass
@driver_base.passthru(['POST'])
def noexception(self):
return "Fake"
@driver_base.driver_passthru(['POST'])
def driver_noexception(self):
return "Fake"
@driver_base.passthru(['POST'])
def ironicexception(self):
raise exception.IronicException("Fake!")
@driver_base.passthru(['POST'])
def normalexception(self):
raise Exception("Fake!")
@driver_base.passthru(['POST'], require_exclusive_lock=False)
def shared_task(self):
return "shared fake"
def validate(self, task, **kwargs):
pass
def driver_validate(self, **kwargs):
pass
class PassthruDecoratorTestCase(base.TestCase):
def setUp(self):
super(PassthruDecoratorTestCase, self).setUp()
self.fvi = FakeVendorInterface()
def test_passthru_noexception(self):
result = self.fvi.noexception()
self.assertEqual("Fake", result)
@mock.patch.object(driver_base, 'LOG', autospec=True)
def test_passthru_ironicexception(self, mock_log):
self.assertRaises(exception.IronicException,
self.fvi.ironicexception, mock.ANY)
mock_log.exception.assert_called_with(
mock.ANY, 'ironicexception')
@mock.patch.object(driver_base, 'LOG', autospec=True)
def test_passthru_nonironicexception(self, mock_log):
self.assertRaises(exception.VendorPassthruException,
self.fvi.normalexception, mock.ANY)
mock_log.exception.assert_called_with(
mock.ANY, 'normalexception')
def test_passthru_shared_task_metadata(self):
self.assertIn('require_exclusive_lock',
self.fvi.shared_task._vendor_metadata[1])
self.assertFalse(
self.fvi.shared_task._vendor_metadata[1]['require_exclusive_lock'])
def test_passthru_exclusive_task_metadata(self):
self.assertIn('require_exclusive_lock',
self.fvi.noexception._vendor_metadata[1])
self.assertTrue(
self.fvi.noexception._vendor_metadata[1]['require_exclusive_lock'])
def test_passthru_check_func_references(self):
inst1 = FakeVendorInterface()
inst2 = FakeVendorInterface()
self.assertNotEqual(inst1.vendor_routes['noexception']['func'],
inst2.vendor_routes['noexception']['func'])
self.assertNotEqual(inst1.driver_routes['driver_noexception']['func'],
inst2.driver_routes['driver_noexception']['func'])
class CleanStepDecoratorTestCase(base.TestCase):
def setUp(self):
super(CleanStepDecoratorTestCase, self).setUp()
method_mock = mock.MagicMock()
del method_mock._is_clean_step
del method_mock._clean_step_priority
del method_mock._clean_step_abortable
del method_mock._clean_step_argsinfo
self.method = method_mock
def test__validate_argsinfo(self):
# None, empty dict
driver_base._validate_argsinfo(None)
driver_base._validate_argsinfo({})
# Only description specified
driver_base._validate_argsinfo({'arg1': {'description': 'desc1'}})
# Multiple args
driver_base._validate_argsinfo({'arg1': {'description': 'desc1',
'required': True},
'arg2': {'description': 'desc2'}})
def test__validate_argsinfo_not_dict(self):
self.assertRaisesRegex(exception.InvalidParameterValue,
'argsinfo.+dictionary',
driver_base._validate_argsinfo, 'not-a-dict')
def test__validate_argsinfo_arg_not_dict(self):
self.assertRaisesRegex(exception.InvalidParameterValue,
'Argument.+dictionary',
driver_base._validate_argsinfo,
{'arg1': 'not-a-dict'})
def test__validate_argsinfo_arg_empty_dict(self):
self.assertRaisesRegex(exception.InvalidParameterValue,
'description',
driver_base._validate_argsinfo,
{'arg1': {}})
def test__validate_argsinfo_arg_missing_description(self):
self.assertRaisesRegex(exception.InvalidParameterValue,
'description',
driver_base._validate_argsinfo,
{'arg1': {'required': True}})
def test__validate_argsinfo_arg_description_invalid(self):
self.assertRaisesRegex(exception.InvalidParameterValue,
'string',
driver_base._validate_argsinfo,
{'arg1': {'description': True}})
def test__validate_argsinfo_arg_required_invalid(self):
self.assertRaisesRegex(exception.InvalidParameterValue,
'Boolean',
driver_base._validate_argsinfo,
{'arg1': {'description': 'desc1',
'required': 'maybe'}})
def test__validate_argsinfo_arg_unknown_key(self):
self.assertRaisesRegex(exception.InvalidParameterValue,
'invalid',
driver_base._validate_argsinfo,
{'arg1': {'description': 'desc1',
'unknown': 'bad'}})
def test_clean_step_priority_only(self):
d = driver_base.clean_step(priority=10)
d(self.method)
self.assertTrue(self.method._is_clean_step)
self.assertEqual(10, self.method._clean_step_priority)
self.assertFalse(self.method._clean_step_abortable)
self.assertIsNone(self.method._clean_step_argsinfo)
def test_clean_step_all_args(self):
argsinfo = {'arg1': {'description': 'desc1',
'required': True}}
d = driver_base.clean_step(priority=0, abortable=True,
argsinfo=argsinfo)
d(self.method)
self.assertTrue(self.method._is_clean_step)
self.assertEqual(0, self.method._clean_step_priority)
self.assertTrue(self.method._clean_step_abortable)
self.assertEqual(argsinfo, self.method._clean_step_argsinfo)
def test_clean_step_bad_priority(self):
d = driver_base.clean_step(priority='hi')
self.assertRaisesRegex(exception.InvalidParameterValue, 'priority',
d, self.method)
self.assertTrue(self.method._is_clean_step)
self.assertFalse(hasattr(self.method, '_clean_step_priority'))
self.assertFalse(hasattr(self.method, '_clean_step_abortable'))
self.assertFalse(hasattr(self.method, '_clean_step_argsinfo'))
def test_clean_step_bad_abortable(self):
d = driver_base.clean_step(priority=0, abortable='blue')
self.assertRaisesRegex(exception.InvalidParameterValue, 'abortable',
d, self.method)
self.assertTrue(self.method._is_clean_step)
self.assertEqual(0, self.method._clean_step_priority)
self.assertFalse(hasattr(self.method, '_clean_step_abortable'))
self.assertFalse(hasattr(self.method, '_clean_step_argsinfo'))
@mock.patch.object(driver_base, '_validate_argsinfo', spec_set=True,
autospec=True)
def test_clean_step_bad_argsinfo(self, mock_valid):
mock_valid.side_effect = exception.InvalidParameterValue('bad')
d = driver_base.clean_step(priority=0, argsinfo=100)
self.assertRaises(exception.InvalidParameterValue, d, self.method)
self.assertTrue(self.method._is_clean_step)
self.assertEqual(0, self.method._clean_step_priority)
self.assertFalse(self.method._clean_step_abortable)
self.assertFalse(hasattr(self.method, '_clean_step_argsinfo'))
class CleanStepTestCase(base.TestCase):
def test_get_and_execute_clean_steps(self):
# Create a fake Driver class, create some clean steps, make sure
# they are listed correctly, and attempt to execute one of them
method_mock = mock.MagicMock(spec_set=[])
method_args_mock = mock.MagicMock(spec_set=[])
task_mock = mock.MagicMock(spec_set=[])
class BaseTestClass(driver_base.BaseInterface):
def get_properties(self):
return {}
def validate(self, task):
pass
class TestClass(BaseTestClass):
interface_type = 'test'
@driver_base.clean_step(priority=0)
def manual_method(self, task):
pass
@driver_base.clean_step(priority=10, abortable=True)
def automated_method(self, task):
method_mock(task)
def not_clean_method(self, task):
pass
class TestClass2(BaseTestClass):
interface_type = 'test2'
@driver_base.clean_step(priority=0)
def manual_method2(self, task):
pass
@driver_base.clean_step(priority=20, abortable=True)
def automated_method2(self, task):
method_mock(task)
def not_clean_method2(self, task):
pass
class TestClass3(BaseTestClass):
interface_type = 'test3'
@driver_base.clean_step(priority=0, abortable=True, argsinfo={
'arg1': {'description': 'desc1',
'required': True}})
def manual_method3(self, task, **kwargs):
method_args_mock(task, **kwargs)
@driver_base.clean_step(priority=15, argsinfo={
'arg10': {'description': 'desc10'}})
def automated_method3(self, task, **kwargs):
pass
def not_clean_method3(self, task):
pass
obj = TestClass()
obj2 = TestClass2()
obj3 = TestClass3()
self.assertEqual(2, len(obj.get_clean_steps(task_mock)))
# Ensure the steps look correct
self.assertEqual(10, obj.get_clean_steps(task_mock)[0]['priority'])
self.assertTrue(obj.get_clean_steps(task_mock)[0]['abortable'])
self.assertEqual('test', obj.get_clean_steps(
task_mock)[0]['interface'])
self.assertEqual('automated_method', obj.get_clean_steps(
task_mock)[0]['step'])
self.assertEqual(0, obj.get_clean_steps(task_mock)[1]['priority'])
self.assertFalse(obj.get_clean_steps(task_mock)[1]['abortable'])
self.assertEqual('test', obj.get_clean_steps(
task_mock)[1]['interface'])
self.assertEqual('manual_method', obj.get_clean_steps(
task_mock)[1]['step'])
        # Ensure the second obj gets different clean steps
self.assertEqual(2, len(obj2.get_clean_steps(task_mock)))
# Ensure the steps look correct
self.assertEqual(20, obj2.get_clean_steps(task_mock)[0]['priority'])
self.assertTrue(obj2.get_clean_steps(task_mock)[0]['abortable'])
self.assertEqual('test2', obj2.get_clean_steps(
task_mock)[0]['interface'])
self.assertEqual('automated_method2', obj2.get_clean_steps(
task_mock)[0]['step'])
self.assertEqual(0, obj2.get_clean_steps(task_mock)[1]['priority'])
self.assertFalse(obj2.get_clean_steps(task_mock)[1]['abortable'])
self.assertEqual('test2', obj2.get_clean_steps(
task_mock)[1]['interface'])
self.assertEqual('manual_method2', obj2.get_clean_steps(
task_mock)[1]['step'])
self.assertIsNone(obj2.get_clean_steps(task_mock)[0]['argsinfo'])
# Ensure the third obj has different clean steps
self.assertEqual(2, len(obj3.get_clean_steps(task_mock)))
self.assertEqual(15, obj3.get_clean_steps(task_mock)[0]['priority'])
self.assertFalse(obj3.get_clean_steps(task_mock)[0]['abortable'])
self.assertEqual('test3', obj3.get_clean_steps(
task_mock)[0]['interface'])
self.assertEqual('automated_method3', obj3.get_clean_steps(
task_mock)[0]['step'])
self.assertEqual({'arg10': {'description': 'desc10'}},
obj3.get_clean_steps(task_mock)[0]['argsinfo'])
self.assertEqual(0, obj3.get_clean_steps(task_mock)[1]['priority'])
self.assertTrue(obj3.get_clean_steps(task_mock)[1]['abortable'])
self.assertEqual(obj3.interface_type, obj3.get_clean_steps(
task_mock)[1]['interface'])
self.assertEqual('manual_method3', obj3.get_clean_steps(
task_mock)[1]['step'])
self.assertEqual({'arg1': {'description': 'desc1', 'required': True}},
obj3.get_clean_steps(task_mock)[1]['argsinfo'])
# Ensure we can execute the function.
obj.execute_clean_step(task_mock, obj.get_clean_steps(task_mock)[0])
method_mock.assert_called_once_with(task_mock)
args = {'arg1': 'val1'}
clean_step = {'interface': 'test3', 'step': 'manual_method3',
'args': args}
obj3.execute_clean_step(task_mock, clean_step)
method_args_mock.assert_called_once_with(task_mock, **args)
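# The fixtures above mirror how a concrete driver interface advertises cleaning
# steps. A minimal, hypothetical sketch (ErasePhysicalDisks and erase_devices
# are illustrative names only, not part of Ironic):
#
#     class ErasePhysicalDisks(driver_base.BaseInterface):
#         interface_type = 'deploy'
#
#         def get_properties(self):
#             return {}
#
#         def validate(self, task):
#             pass
#
#         @driver_base.clean_step(priority=10, abortable=True,
#                                 argsinfo={'secure': {'description': 'use secure erase',
#                                                      'required': False}})
#         def erase_devices(self, task, **kwargs):
#             pass
#
# get_clean_steps(task) would then report this step and execute_clean_step()
# would dispatch to it, as CleanStepTestCase verifies above.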
class DeployStepDecoratorTestCase(base.TestCase):
def setUp(self):
super(DeployStepDecoratorTestCase, self).setUp()
method_mock = mock.MagicMock()
del method_mock._is_deploy_step
del method_mock._deploy_step_priority
del method_mock._deploy_step_argsinfo
self.method = method_mock
def test_deploy_step_priority_only(self):
d = driver_base.deploy_step(priority=10)
d(self.method)
self.assertTrue(self.method._is_deploy_step)
self.assertEqual(10, self.method._deploy_step_priority)
self.assertIsNone(self.method._deploy_step_argsinfo)
def test_deploy_step_all_args(self):
argsinfo = {'arg1': {'description': 'desc1',
'required': True}}
d = driver_base.deploy_step(priority=0, argsinfo=argsinfo)
d(self.method)
self.assertTrue(self.method._is_deploy_step)
self.assertEqual(0, self.method._deploy_step_priority)
self.assertEqual(argsinfo, self.method._deploy_step_argsinfo)
def test_deploy_step_bad_priority(self):
d = driver_base.deploy_step(priority='hi')
self.assertRaisesRegex(exception.InvalidParameterValue, 'priority',
d, self.method)
self.assertTrue(self.method._is_deploy_step)
self.assertFalse(hasattr(self.method, '_deploy_step_priority'))
self.assertFalse(hasattr(self.method, '_deploy_step_argsinfo'))
@mock.patch.object(driver_base, '_validate_argsinfo', spec_set=True,
autospec=True)
def test_deploy_step_bad_argsinfo(self, mock_valid):
mock_valid.side_effect = exception.InvalidParameterValue('bad')
d = driver_base.deploy_step(priority=0, argsinfo=100)
self.assertRaises(exception.InvalidParameterValue, d, self.method)
self.assertTrue(self.method._is_deploy_step)
self.assertEqual(0, self.method._deploy_step_priority)
self.assertFalse(hasattr(self.method, '_deploy_step_argsinfo'))
class DeployAndCleanStepDecoratorTestCase(base.TestCase):
def setUp(self):
super(DeployAndCleanStepDecoratorTestCase, self).setUp()
method_mock = mock.MagicMock()
del method_mock._is_deploy_step
del method_mock._deploy_step_priority
del method_mock._deploy_step_argsinfo
del method_mock._is_clean_step
del method_mock._clean_step_priority
del method_mock._clean_step_abortable
del method_mock._clean_step_argsinfo
self.method = method_mock
def test_deploy_and_clean_step_priority_only(self):
dd = driver_base.deploy_step(priority=10)
dc = driver_base.clean_step(priority=11)
dd(dc(self.method))
self.assertTrue(self.method._is_deploy_step)
self.assertEqual(10, self.method._deploy_step_priority)
self.assertIsNone(self.method._deploy_step_argsinfo)
self.assertTrue(self.method._is_clean_step)
self.assertEqual(11, self.method._clean_step_priority)
self.assertFalse(self.method._clean_step_abortable)
self.assertIsNone(self.method._clean_step_argsinfo)
def test_deploy_and_clean_step_all_args(self):
dargsinfo = {'arg1': {'description': 'desc1',
'required': True}}
cargsinfo = {'arg2': {'description': 'desc2',
'required': False}}
dd = driver_base.deploy_step(priority=0, argsinfo=dargsinfo)
dc = driver_base.clean_step(priority=0, argsinfo=cargsinfo)
dd(dc(self.method))
self.assertTrue(self.method._is_deploy_step)
self.assertEqual(0, self.method._deploy_step_priority)
self.assertEqual(dargsinfo, self.method._deploy_step_argsinfo)
self.assertTrue(self.method._is_clean_step)
self.assertEqual(0, self.method._clean_step_priority)
self.assertFalse(self.method._clean_step_abortable)
self.assertEqual(cargsinfo, self.method._clean_step_argsinfo)
def test_clean_and_deploy_step_all_args(self):
# Opposite ordering, should make no difference.
dargsinfo = {'arg1': {'description': 'desc1',
'required': True}}
cargsinfo = {'arg2': {'description': 'desc2',
'required': False}}
dd = driver_base.deploy_step(priority=0, argsinfo=dargsinfo)
dc = driver_base.clean_step(priority=0, argsinfo=cargsinfo)
dc(dd(self.method))
self.assertTrue(self.method._is_deploy_step)
self.assertEqual(0, self.method._deploy_step_priority)
self.assertEqual(dargsinfo, self.method._deploy_step_argsinfo)
self.assertTrue(self.method._is_clean_step)
self.assertEqual(0, self.method._clean_step_priority)
self.assertFalse(self.method._clean_step_abortable)
self.assertEqual(cargsinfo, self.method._clean_step_argsinfo)
class DeployStepTestCase(base.TestCase):
def test_get_and_execute_deploy_steps(self):
# Create a fake Driver class, create some deploy steps, make sure
# they are listed correctly, and attempt to execute one of them
method_mock = mock.MagicMock(spec_set=[])
method_args_mock = mock.MagicMock(spec_set=[])
task_mock = mock.MagicMock(spec_set=[])
class BaseTestClass(driver_base.BaseInterface):
def get_properties(self):
return {}
def validate(self, task):
pass
class TestClass(BaseTestClass):
interface_type = 'test'
@driver_base.deploy_step(priority=0)
def deploy_zero(self, task):
pass
@driver_base.deploy_step(priority=10)
def deploy_ten(self, task):
method_mock(task)
def not_deploy_method(self, task):
pass
class TestClass2(BaseTestClass):
interface_type = 'test2'
@driver_base.deploy_step(priority=0)
def deploy_zero2(self, task):
pass
@driver_base.deploy_step(priority=20)
def deploy_twenty(self, task):
method_mock(task)
def not_deploy_method2(self, task):
pass
class TestClass3(BaseTestClass):
interface_type = 'test3'
@driver_base.deploy_step(priority=0, argsinfo={
'arg1': {'description': 'desc1',
'required': True}})
def deploy_zero3(self, task, **kwargs):
method_args_mock(task, **kwargs)
@driver_base.deploy_step(priority=15, argsinfo={
'arg10': {'description': 'desc10'}})
def deploy_fifteen(self, task, **kwargs):
pass
def not_deploy_method3(self, task):
pass
obj = TestClass()
obj2 = TestClass2()
obj3 = TestClass3()
self.assertEqual(2, len(obj.get_deploy_steps(task_mock)))
# Ensure the steps look correct
self.assertEqual(10, obj.get_deploy_steps(task_mock)[0]['priority'])
self.assertEqual('test', obj.get_deploy_steps(
task_mock)[0]['interface'])
self.assertEqual('deploy_ten', obj.get_deploy_steps(
task_mock)[0]['step'])
self.assertEqual(0, obj.get_deploy_steps(task_mock)[1]['priority'])
self.assertEqual('test', obj.get_deploy_steps(
task_mock)[1]['interface'])
self.assertEqual('deploy_zero', obj.get_deploy_steps(
task_mock)[1]['step'])
# Ensure the second obj has different deploy steps
self.assertEqual(2, len(obj2.get_deploy_steps(task_mock)))
# Ensure the steps look correct
self.assertEqual(20, obj2.get_deploy_steps(task_mock)[0]['priority'])
self.assertEqual('test2', obj2.get_deploy_steps(
task_mock)[0]['interface'])
self.assertEqual('deploy_twenty', obj2.get_deploy_steps(
task_mock)[0]['step'])
self.assertEqual(0, obj2.get_deploy_steps(task_mock)[1]['priority'])
self.assertEqual('test2', obj2.get_deploy_steps(
task_mock)[1]['interface'])
self.assertEqual('deploy_zero2', obj2.get_deploy_steps(
task_mock)[1]['step'])
self.assertIsNone(obj2.get_deploy_steps(task_mock)[0]['argsinfo'])
# Ensure the third obj has different deploy steps
self.assertEqual(2, len(obj3.get_deploy_steps(task_mock)))
self.assertEqual(15, obj3.get_deploy_steps(task_mock)[0]['priority'])
self.assertEqual('test3', obj3.get_deploy_steps(
task_mock)[0]['interface'])
self.assertEqual('deploy_fifteen', obj3.get_deploy_steps(
task_mock)[0]['step'])
self.assertEqual({'arg10': {'description': 'desc10'}},
obj3.get_deploy_steps(task_mock)[0]['argsinfo'])
self.assertEqual(0, obj3.get_deploy_steps(task_mock)[1]['priority'])
self.assertEqual(obj3.interface_type, obj3.get_deploy_steps(
task_mock)[1]['interface'])
self.assertEqual('deploy_zero3', obj3.get_deploy_steps(
task_mock)[1]['step'])
self.assertEqual({'arg1': {'description': 'desc1', 'required': True}},
obj3.get_deploy_steps(task_mock)[1]['argsinfo'])
# Ensure we can execute the function.
obj.execute_deploy_step(task_mock, obj.get_deploy_steps(task_mock)[0])
method_mock.assert_called_once_with(task_mock)
args = {'arg1': 'val1'}
deploy_step = {'interface': 'test3', 'step': 'deploy_zero3',
'args': args}
obj3.execute_deploy_step(task_mock, deploy_step)
method_args_mock.assert_called_once_with(task_mock, **args)
class MyRAIDInterface(driver_base.RAIDInterface):
def create_configuration(self, task,
create_root_volume=True,
create_nonroot_volumes=True,
delete_existing=True):
pass
def delete_configuration(self, task):
pass
class RAIDInterfaceTestCase(base.TestCase):
@mock.patch.object(driver_base.RAIDInterface, 'validate_raid_config',
autospec=True)
def test_validate(self, validate_raid_config_mock):
raid_interface = MyRAIDInterface()
node_mock = mock.MagicMock(target_raid_config='some_raid_config')
task_mock = mock.MagicMock(node=node_mock)
raid_interface.validate(task_mock)
validate_raid_config_mock.assert_called_once_with(
raid_interface, task_mock, 'some_raid_config')
@mock.patch.object(driver_base.RAIDInterface, 'validate_raid_config',
autospec=True)
def test_validate_no_target_raid_config(self, validate_raid_config_mock):
raid_interface = MyRAIDInterface()
node_mock = mock.MagicMock(target_raid_config={})
task_mock = mock.MagicMock(node=node_mock)
raid_interface.validate(task_mock)
self.assertFalse(validate_raid_config_mock.called)
@mock.patch.object(raid, 'validate_configuration', autospec=True)
def test_validate_raid_config(self, common_validate_mock):
with open(driver_base.RAID_CONFIG_SCHEMA, 'r') as raid_schema_fobj:
raid_schema = json.load(raid_schema_fobj)
raid_interface = MyRAIDInterface()
raid_interface.validate_raid_config('task', 'some_raid_config')
common_validate_mock.assert_called_once_with(
'some_raid_config', raid_schema)
@mock.patch.object(raid, 'get_logical_disk_properties',
autospec=True)
def test_get_logical_disk_properties(self, get_properties_mock):
with open(driver_base.RAID_CONFIG_SCHEMA, 'r') as raid_schema_fobj:
raid_schema = json.load(raid_schema_fobj)
raid_interface = MyRAIDInterface()
raid_interface.get_logical_disk_properties()
get_properties_mock.assert_called_once_with(raid_schema)
@mock.patch.object(MyRAIDInterface, 'create_configuration', autospec=True)
@mock.patch.object(MyRAIDInterface, 'validate_raid_config',
autospec=True)
def test_apply_configuration(self, mock_validate, mock_create):
raid_interface = MyRAIDInterface()
node_mock = mock.MagicMock(target_raid_config=None)
task_mock = mock.MagicMock(node=node_mock)
mock_create.return_value = states.DEPLOYWAIT
raid_config = 'some_raid_config'
result = raid_interface.apply_configuration(task_mock, raid_config)
self.assertEqual(states.DEPLOYWAIT, result)
mock_validate.assert_called_once_with(raid_interface, task_mock,
raid_config)
mock_create.assert_called_once_with(raid_interface, task_mock,
create_root_volume=True,
create_nonroot_volumes=True,
delete_existing=True)
self.assertEqual(raid_config, node_mock.target_raid_config)
@mock.patch.object(MyRAIDInterface, 'create_configuration', autospec=True)
@mock.patch.object(MyRAIDInterface, 'validate_raid_config',
autospec=True)
def test_apply_configuration_delete_existing(self, mock_validate,
mock_create):
raid_interface = MyRAIDInterface()
node_mock = mock.MagicMock(target_raid_config=None)
task_mock = mock.MagicMock(node=node_mock)
mock_create.return_value = states.DEPLOYWAIT
raid_config = 'some_raid_config'
result = raid_interface.apply_configuration(task_mock, raid_config,
delete_existing=True)
self.assertEqual(states.DEPLOYWAIT, result)
mock_validate.assert_called_once_with(raid_interface, task_mock,
raid_config)
mock_create.assert_called_once_with(raid_interface, task_mock,
create_root_volume=True,
create_nonroot_volumes=True,
delete_existing=True)
self.assertEqual(raid_config, node_mock.target_raid_config)
@mock.patch.object(MyRAIDInterface, 'create_configuration', autospec=True)
@mock.patch.object(MyRAIDInterface, 'validate_raid_config',
autospec=True)
def test_apply_configuration_invalid(self, mock_validate, mock_create):
raid_interface = MyRAIDInterface()
node_mock = mock.MagicMock(target_raid_config=None)
task_mock = mock.MagicMock(node=node_mock)
mock_validate.side_effect = exception.InvalidParameterValue('bad')
raid_config = 'some_raid_config'
self.assertRaises(exception.InvalidParameterValue,
raid_interface.apply_configuration, task_mock,
raid_config)
mock_validate.assert_called_once_with(raid_interface, task_mock,
raid_config)
self.assertFalse(mock_create.called)
self.assertIsNone(node_mock.target_raid_config)
class TestDeployInterface(base.TestCase):
@mock.patch.object(driver_base.LOG, 'warning', autospec=True)
def test_warning_on_heartbeat(self, mock_log):
# NOTE(dtantsur): FakeDeploy does not override heartbeat
deploy = fake.FakeDeploy()
deploy.heartbeat(mock.Mock(node=mock.Mock(uuid='uuid',
driver='driver')),
'url', '3.2.0')
self.assertTrue(mock_log.called)
class MyBIOSInterface(driver_base.BIOSInterface):
def get_properties(self):
pass
def validate(self, task):
pass
@driver_base.cache_bios_settings
def apply_configuration(self, task, settings):
return "return_value_apply_configuration"
@driver_base.cache_bios_settings
def factory_reset(self, task):
return "return_value_factory_reset"
def cache_bios_settings(self, task):
pass
class TestBIOSInterface(base.TestCase):
@mock.patch.object(MyBIOSInterface, 'cache_bios_settings', autospec=True)
def test_apply_configuration_wrapper(self, cache_bios_settings_mock):
bios = MyBIOSInterface()
task_mock = mock.MagicMock()
actual = bios.apply_configuration(task_mock, "")
cache_bios_settings_mock.assert_called_once_with(bios, task_mock)
self.assertEqual(actual, "return_value_apply_configuration")
@mock.patch.object(MyBIOSInterface, 'cache_bios_settings', autospec=True)
def test_factory_reset_wrapper(self, cache_bios_settings_mock):
bios = MyBIOSInterface()
task_mock = mock.MagicMock()
actual = bios.factory_reset(task_mock)
cache_bios_settings_mock.assert_called_once_with(bios, task_mock)
self.assertEqual(actual, "return_value_factory_reset")
class TestBootInterface(base.TestCase):
def test_validate_rescue_default_impl(self):
boot = fake.FakeBoot()
task_mock = mock.MagicMock(spec_set=['node'])
self.assertRaises(exception.UnsupportedDriverExtension,
boot.validate_rescue, task_mock)
class TestManagementInterface(base.TestCase):
def test_inject_nmi_default_impl(self):
management = fake.FakeManagement()
task_mock = mock.MagicMock(spec_set=['node'])
self.assertRaises(exception.UnsupportedDriverExtension,
management.inject_nmi, task_mock)
def test_get_supported_boot_modes_default_impl(self):
management = fake.FakeManagement()
task_mock = mock.MagicMock(spec_set=['node'])
self.assertRaises(exception.UnsupportedDriverExtension,
management.get_supported_boot_modes, task_mock)
def test_set_boot_mode_default_impl(self):
management = fake.FakeManagement()
task_mock = mock.MagicMock(spec_set=['node'])
self.assertRaises(exception.UnsupportedDriverExtension,
management.set_boot_mode, task_mock, 'whatever')
def test_get_boot_mode_default_impl(self):
management = fake.FakeManagement()
task_mock = mock.MagicMock(spec_set=['node'])
self.assertRaises(exception.UnsupportedDriverExtension,
management.get_boot_mode, task_mock)
class TestBareDriver(base.TestCase):
def test_class_variables(self):
self.assertEqual(['boot', 'deploy', 'management', 'network', 'power'],
driver_base.BareDriver().core_interfaces)
self.assertEqual(
['bios', 'console', 'inspect', 'raid', 'rescue', 'storage'],
driver_base.BareDriver().optional_interfaces
)
opentimesheet/profiles/tests/test_models.py | valerymelou/opentimesheet-server | 0 | 9762 |
import pytest
from opentimesheet.core.tests import TenantTestCase
@pytest.mark.usefixtures("profile")
class TestProfile(TenantTestCase):
def test__str__(self):
assert (
self.profile.first_name + " " + self.profile.last_name
== self.profile.__str__()
)
ami/flowchart/library/Display.py | chuckie82/ami | 6 | 9763 |
from ami.flowchart.library.DisplayWidgets import ScalarWidget, ScatterWidget, WaveformWidget, \
ImageWidget, ObjectWidget, LineWidget, TimeWidget, HistogramWidget, \
Histogram2DWidget
from ami.flowchart.library.common import CtrlNode
from amitypes import Array1d, Array2d
from typing import Any
import ami.graph_nodes as gn
class ScalarViewer(CtrlNode):
"""
ScalarViewer displays the value of a scalar.
"""
nodeName = "ScalarViewer"
uiTemplate = []
def __init__(self, name):
super().__init__(name,
terminals={"In": {"io": "in", "ttype": float}},
viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, ScalarWidget, **kwargs)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'ScalarWidget', 'terms': terms, 'topics': topics}
class WaveformViewer(CtrlNode):
"""
WaveformViewer displays 1D arrays.
"""
nodeName = "WaveformViewer"
uiTemplate = []
def __init__(self, name):
super().__init__(name, terminals={"In": {"io": "in", "ttype": Array1d}},
allowAddInput=True,
viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, WaveformWidget, **kwargs)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'WaveformWidget', 'terms': terms, 'topics': topics}
class ImageViewer(CtrlNode):
"""
ImageViewer displays 2D arrays.
"""
nodeName = "ImageViewer"
uiTemplate = []
def __init__(self, name):
super().__init__(name, terminals={"In": {"io": "in", "ttype": Array2d}}, viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, ImageWidget, **kwargs)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'ImageWidget', 'terms': terms, 'topics': topics}
class ObjectViewer(CtrlNode):
"""
ObjectViewer displays string representation of a python object.
"""
nodeName = "ObjectViewer"
uiTemplate = []
def __init__(self, name):
super().__init__(name, terminals={"In": {"io": "in", "ttype": Any}}, viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, ObjectWidget, **kwargs)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'ObjectWidget', 'terms': terms, 'topics': topics}
class Histogram(CtrlNode):
"""
Histogram plots a histogram created from Binning.
"""
nodeName = "Histogram"
uiTemplate = []
def __init__(self, name):
super().__init__(name,
terminals={"Bins": {"io": "in", "ttype": Array1d},
"Counts": {"io": "in", "ttype": Array1d}},
allowAddInput=True,
viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, HistogramWidget, **kwargs)
def addInput(self, **args):
self.addTerminal(name="Bins", io='in', ttype=Array1d, **args)
self.addTerminal(name="Counts", io='in', ttype=Array1d, **args)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'HistogramWidget', 'terms': terms, 'topics': topics}
class Histogram2D(CtrlNode):
"""
Histogram2D plots a 2d histogram created from Binning2D.
"""
nodeName = "Histogram2D"
uiTemplate = []
def __init__(self, name):
super().__init__(name,
terminals={"XBins": {"io": "in", "ttype": Array1d},
"YBins": {"io": "in", "ttype": Array1d},
"Counts": {"io": "in", "ttype": Array2d}},
viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, Histogram2DWidget, **kwargs)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'Histogram2DWidget', 'terms': terms, 'topics': topics}
class ScatterPlot(CtrlNode):
"""
Scatter Plot collects two scalars and plots them against each other.
"""
nodeName = "ScatterPlot"
uiTemplate = [("Num Points", 'intSpin', {'value': 100, 'min': 1}),
('Unique', 'check')]
def __init__(self, name):
super().__init__(name, terminals={"X": {"io": "in", "ttype": float},
"Y": {"io": "in", "ttype": float}},
allowAddInput=True,
buffered=True)
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, ScatterWidget, **kwargs)
def isChanged(self, restore_ctrl, restore_widget):
return restore_ctrl
def addInput(self, **args):
self.addTerminal(name="X", io='in', ttype=float, **args)
self.addTerminal(name="Y", io='in', ttype=float, **args)
def to_operation(self, inputs, outputs, **kwargs):
outputs = [self.name()+'.'+i for i in inputs.keys()]
buffer_output = [self.name()]
nodes = [gn.RollingBuffer(name=self.name()+"_buffer",
N=self.values['Num Points'], unique=self.values['Unique'],
inputs=inputs, outputs=buffer_output, **kwargs),
gn.Map(name=self.name()+"_operation",
inputs=buffer_output, outputs=outputs,
func=lambda a: zip(*a),
**kwargs)]
return nodes
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'ScatterWidget', 'terms': terms, 'topics': topics}
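# Note on the buffering pattern used by ScatterPlot above and reused by
# ScalarPlot and TimePlot below: the RollingBuffer accumulates the joined
# inputs and the Map's ``lambda a: zip(*a)`` transposes them back into
# per-terminal sequences for the widget. Schematically (the buffer layout
# itself is defined by ami.graph_nodes, so this is only an illustration):
#
#     buffered = [(x0, y0), (x1, y1), (x2, y2)]
#     xs, ys = zip(*buffered)   # -> (x0, x1, x2), (y0, y1, y2)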
class ScalarPlot(CtrlNode):
"""
Scalar Plot collects scalars and plots them.
"""
nodeName = "ScalarPlot"
uiTemplate = [("Num Points", 'intSpin', {'value': 100, 'min': 1})]
def __init__(self, name):
super().__init__(name, terminals={"Y": {"io": "in", "ttype": float}},
allowAddInput=True,
buffered=True)
def isChanged(self, restore_ctrl, restore_widget):
return restore_ctrl
def addInput(self, **args):
self.addTerminal(name="Y", io='in', ttype=float, **args)
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, WaveformWidget, **kwargs)
def to_operation(self, inputs, outputs, **kwargs):
outputs = [self.name()+'.'+i for i in inputs.keys()]
buffer_output = [self.name()]
if len(inputs.values()) > 1:
node = [gn.RollingBuffer(name=self.name()+"_buffer", N=self.values['Num Points'],
inputs=inputs, outputs=buffer_output, **kwargs),
gn.Map(name=self.name()+"_operation", inputs=buffer_output, outputs=outputs,
func=lambda a: zip(*a), **kwargs)]
else:
node = gn.RollingBuffer(name=self.name(), N=self.values['Num Points'],
inputs=inputs, outputs=outputs, **kwargs)
return node
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'WaveformWidget', 'terms': terms, 'topics': topics}
class LinePlot(CtrlNode):
"""
Line Plot plots arrays.
"""
nodeName = "LinePlot"
uiTemplate = []
def __init__(self, name):
super().__init__(name, terminals={"X": {"io": "in", "ttype": Array1d},
"Y": {"io": "in", "ttype": Array1d}},
allowAddInput=True,
viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, LineWidget, **kwargs)
def addInput(self, **args):
group = self.nextGroupName()
self.addTerminal(name="X", io='in', ttype=Array1d, group=group, **args)
self.addTerminal(name="Y", io='in', ttype=Array1d, group=group, **args)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'LineWidget', 'terms': terms, 'topics': topics}
class TimePlot(CtrlNode):
"""
Plot a number against time of day.
"""
nodeName = "TimePlot"
uiTemplate = [("Num Points", 'intSpin', {'value': 1000, 'min': 1})]
def __init__(self, name):
super().__init__(name, terminals={"X": {"io": "in", "ttype": float},
"Y": {"io": "in", "ttype": float}},
allowAddInput=True,
buffered=True)
def isChanged(self, restore_ctrl, restore_widget):
return restore_ctrl
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, TimeWidget, **kwargs)
def addInput(self, **args):
self.addTerminal(name="X", io='in', ttype=float, **args)
self.addTerminal(name="Y", io='in', ttype=float, **args)
def to_operation(self, inputs, outputs, **kwargs):
outputs = [self.name()+'.'+i for i in inputs.keys()]
buffer_output = [self.name()]
nodes = [gn.RollingBuffer(name=self.name()+"_buffer", N=self.values['Num Points'],
inputs=inputs, outputs=buffer_output, **kwargs),
gn.Map(name=self.name()+"_operation", inputs=buffer_output, outputs=outputs,
func=lambda a: zip(*a), **kwargs)]
return nodes
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'TimeWidget', 'terms': terms, 'topics': topics}
| from ami.flowchart.library.DisplayWidgets import ScalarWidget, ScatterWidget, WaveformWidget, \
ImageWidget, ObjectWidget, LineWidget, TimeWidget, HistogramWidget, \
Histogram2DWidget
from ami.flowchart.library.common import CtrlNode
from amitypes import Array1d, Array2d
from typing import Any
import ami.graph_nodes as gn
class ScalarViewer(CtrlNode):
"""
ScalarViewer displays the value of a scalar.
"""
nodeName = "ScalarViewer"
uiTemplate = []
def __init__(self, name):
super().__init__(name,
terminals={"In": {"io": "in", "ttype": float}},
viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, ScalarWidget, **kwargs)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'ScalarWidget', 'terms': terms, 'topics': topics}
class WaveformViewer(CtrlNode):
"""
WaveformViewer displays 1D arrays.
"""
nodeName = "WaveformViewer"
uiTemplate = []
def __init__(self, name):
super().__init__(name, terminals={"In": {"io": "in", "ttype": Array1d}},
allowAddInput=True,
viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, WaveformWidget, **kwargs)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'WaveformWidget', 'terms': terms, 'topics': topics}
class ImageViewer(CtrlNode):
"""
ImageViewer displays 2D arrays.
"""
nodeName = "ImageViewer"
uiTemplate = []
def __init__(self, name):
super().__init__(name, terminals={"In": {"io": "in", "ttype": Array2d}}, viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, ImageWidget, **kwargs)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'ImageWidget', 'terms': terms, 'topics': topics}
class ObjectViewer(CtrlNode):
"""
ObjectViewer displays string representation of a python object.
"""
nodeName = "ObjectViewer"
uiTemplate = []
def __init__(self, name):
super().__init__(name, terminals={"In": {"io": "in", "ttype": Any}}, viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, ObjectWidget, **kwargs)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'ObjectWidget', 'terms': terms, 'topics': topics}
class Histogram(CtrlNode):
"""
Histogram plots a histogram created from Binning.
"""
nodeName = "Histogram"
uiTemplate = []
def __init__(self, name):
super().__init__(name,
terminals={"Bins": {"io": "in", "ttype": Array1d},
"Counts": {"io": "in", "ttype": Array1d}},
allowAddInput=True,
viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, HistogramWidget, **kwargs)
def addInput(self, **args):
self.addTerminal(name="Bins", io='in', ttype=Array1d, **args)
self.addTerminal(name="Counts", io='in', ttype=Array1d, **args)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'HistogramWidget', 'terms': terms, 'topics': topics}
class Histogram2D(CtrlNode):
"""
Histogram2D plots a 2d histogram created from Binning2D.
"""
nodeName = "Histogram2D"
uiTemplate = []
def __init__(self, name):
super().__init__(name,
terminals={"XBins": {"io": "in", "ttype": Array1d},
"YBins": {"io": "in", "ttype": Array1d},
"Counts": {"io": "in", "ttype": Array2d}},
viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, Histogram2DWidget, **kwargs)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'Histogram2DWidget', 'terms': terms, 'topics': topics}
class ScatterPlot(CtrlNode):
"""
Scatter Plot collects two scalars and plots them against each other.
"""
nodeName = "ScatterPlot"
uiTemplate = [("Num Points", 'intSpin', {'value': 100, 'min': 1}),
('Unique', 'check')]
def __init__(self, name):
super().__init__(name, terminals={"X": {"io": "in", "ttype": float},
"Y": {"io": "in", "ttype": float}},
allowAddInput=True,
buffered=True)
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, ScatterWidget, **kwargs)
def isChanged(self, restore_ctrl, restore_widget):
return restore_ctrl
def addInput(self, **args):
self.addTerminal(name="X", io='in', ttype=float, **args)
self.addTerminal(name="Y", io='in', ttype=float, **args)
def to_operation(self, inputs, outputs, **kwargs):
outputs = [self.name()+'.'+i for i in inputs.keys()]
buffer_output = [self.name()]
nodes = [gn.RollingBuffer(name=self.name()+"_buffer",
N=self.values['Num Points'], unique=self.values['Unique'],
inputs=inputs, outputs=buffer_output, **kwargs),
gn.Map(name=self.name()+"_operation",
inputs=buffer_output, outputs=outputs,
func=lambda a: zip(*a),
**kwargs)]
return nodes
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'ScatterWidget', 'terms': terms, 'topics': topics}
class ScalarPlot(CtrlNode):
"""
Scalar Plot collects scalars and plots them.
"""
nodeName = "ScalarPlot"
uiTemplate = [("Num Points", 'intSpin', {'value': 100, 'min': 1})]
def __init__(self, name):
super().__init__(name, terminals={"Y": {"io": "in", "ttype": float}},
allowAddInput=True,
buffered=True)
def isChanged(self, restore_ctrl, restore_widget):
return restore_ctrl
def addInput(self, **args):
self.addTerminal(name="Y", io='in', ttype=float, **args)
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, WaveformWidget, **kwargs)
def to_operation(self, inputs, outputs, **kwargs):
outputs = [self.name()+'.'+i for i in inputs.keys()]
buffer_output = [self.name()]
if len(inputs.values()) > 1:
node = [gn.RollingBuffer(name=self.name()+"_buffer", N=self.values['Num Points'],
inputs=inputs, outputs=buffer_output, **kwargs),
gn.Map(name=self.name()+"_operation", inputs=buffer_output, outputs=outputs,
func=lambda a: zip(*a), **kwargs)]
else:
node = gn.RollingBuffer(name=self.name(), N=self.values['Num Points'],
inputs=inputs, outputs=outputs, **kwargs)
return node
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'WaveformWidget', 'terms': terms, 'topics': topics}
class LinePlot(CtrlNode):
"""
Line Plot plots arrays.
"""
nodeName = "LinePlot"
uiTemplate = []
def __init__(self, name):
super().__init__(name, terminals={"X": {"io": "in", "ttype": Array1d},
"Y": {"io": "in", "ttype": Array1d}},
allowAddInput=True,
viewable=True)
def isChanged(self, restore_ctrl, restore_widget):
return False
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, LineWidget, **kwargs)
def addInput(self, **args):
group = self.nextGroupName()
self.addTerminal(name="X", io='in', ttype=Array1d, group=group, **args)
self.addTerminal(name="Y", io='in', ttype=Array1d, group=group, **args)
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'LineWidget', 'terms': terms, 'topics': topics}
class TimePlot(CtrlNode):
"""
Plot a number against time of day.
"""
nodeName = "TimePlot"
uiTemplate = [("Num Points", 'intSpin', {'value': 1000, 'min': 1})]
def __init__(self, name):
super().__init__(name, terminals={"X": {"io": "in", "ttype": float},
"Y": {"io": "in", "ttype": float}},
allowAddInput=True,
buffered=True)
def isChanged(self, restore_ctrl, restore_widget):
return restore_ctrl
def display(self, topics, terms, addr, win, **kwargs):
return super().display(topics, terms, addr, win, TimeWidget, **kwargs)
def addInput(self, **args):
self.addTerminal(name="X", io='in', ttype=float, **args)
self.addTerminal(name="Y", io='in', ttype=float, **args)
def to_operation(self, inputs, outputs, **kwargs):
outputs = [self.name()+'.'+i for i in inputs.keys()]
buffer_output = [self.name()]
nodes = [gn.RollingBuffer(name=self.name()+"_buffer", N=self.values['Num Points'],
inputs=inputs, outputs=buffer_output, **kwargs),
gn.Map(name=self.name()+"_operation", inputs=buffer_output, outputs=outputs,
func=lambda a: zip(*a), **kwargs)]
return nodes
def plotMetadata(self, topics, terms, **kwargs):
return {'type': 'TimeWidget', 'terms': terms, 'topics': topics}
| en | 0.817348 | ScalarViewer displays the value of a scalar. WaveformViewer displays 1D arrays. ImageViewer displays 2D arrays. ObjectViewer displays string representation of a python object. Histogram plots a histogram created from Binning. Histogram2D plots a 2d histogram created from Binning2D. Scatter Plot collects two scalars and plots them against each other. Scalar Plot collects scalars and plots them. Line Plot plots arrays. Plot a number against time of day. | 2.432266 | 2 |
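The ScatterPlot, ScalarPlot and TimePlot nodes above all funnel a RollingBuffer of samples through a Map whose function is lambda a: zip(*a). A dependency-free sketch of what that transpose step does (the buffered values here are invented for illustration):

# Illustration of the zip(*a) transpose used by the buffered plot nodes above.
# 'buffered' stands in for the RollingBuffer output: a sequence of (x, y) samples.
buffered = [(0.0, 1.2), (1.0, 1.4), (2.0, 1.1)]

xs, ys = zip(*buffered)  # one X sequence and one Y sequence for the plot widget
print(xs)  # (0.0, 1.0, 2.0)
print(ys)  # (1.2, 1.4, 1.1)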
deep-rl/lib/python2.7/site-packages/OpenGL/GL/ARB/transform_feedback_instanced.py | ShujaKhalid/deep-rl | 210 | 9764 | <filename>deep-rl/lib/python2.7/site-packages/OpenGL/GL/ARB/transform_feedback_instanced.py<gh_stars>100-1000
'''OpenGL extension ARB.transform_feedback_instanced
This module customises the behaviour of the
OpenGL.raw.GL.ARB.transform_feedback_instanced to provide a more
Python-friendly API
Overview (from the spec)
Multiple instances of geometry may be specified to the GL by calling
functions such as DrawArraysInstanced and DrawElementsInstanced. Further,
the results of a transform feedback operation may be returned to the GL
by calling DrawTransformFeedback, or DrawTransformFeedbackStream. However,
it is not presently possible to draw multiple instances of data
transform feedback without using a query and the resulting round trip from
server to client.
This extension adds functionality to draw multiple instances of the result
of a transform feedback operation.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/transform_feedback_instanced.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.transform_feedback_instanced import *
from OpenGL.raw.GL.ARB.transform_feedback_instanced import _EXTENSION_NAME
def glInitTransformFeedbackInstancedARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | <filename>deep-rl/lib/python2.7/site-packages/OpenGL/GL/ARB/transform_feedback_instanced.py<gh_stars>100-1000
'''OpenGL extension ARB.transform_feedback_instanced
This module customises the behaviour of the
OpenGL.raw.GL.ARB.transform_feedback_instanced to provide a more
Python-friendly API
Overview (from the spec)
Multiple instances of geometry may be specified to the GL by calling
functions such as DrawArraysInstanced and DrawElementsInstanced. Further,
the results of a transform feedback operation may be returned to the GL
by calling DrawTransformFeedback, or DrawTransformFeedbackStream. However,
it is not presently possible to draw multiple instances of data
transform feedback without using a query and the resulting round trip from
server to client.
This extension adds functionality to draw multiple instances of the result
of a transform feedback operation.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/transform_feedback_instanced.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.transform_feedback_instanced import *
from OpenGL.raw.GL.ARB.transform_feedback_instanced import _EXTENSION_NAME
def glInitTransformFeedbackInstancedARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | en | 0.770066 | OpenGL extension ARB.transform_feedback_instanced This module customises the behaviour of the OpenGL.raw.GL.ARB.transform_feedback_instanced to provide a more Python-friendly API Overview (from the spec) Multiple instances of geometry may be specified to the GL by calling functions such as DrawArraysInstanced and DrawElementsInstanced. Further, the results of a transform feedback operation may be returned to the GL by calling DrawTransformFeedback, or DrawTransformFeedbackStream. However, it is not presently possible to draw multiple instances of data transform feedback without using a query and the resulting round trip from server to client. This extension adds functionality to draw multiple instances of the result of a transform feedback operation. The official definition of this extension is available here: http://www.opengl.org/registry/specs/ARB/transform_feedback_instanced.txt Return boolean indicating whether this extension is available ### END AUTOGENERATED SECTION | 1.723923 | 2 |
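Since the wrapper above only adds an availability check on top of the raw binding, a typical guard looks like the sketch below; it assumes an OpenGL context is already current (context creation is outside the scope of this module).

# Sketch: gate use of ARB_transform_feedback_instanced on runtime driver support.
# Assumes the application has already created and bound an OpenGL context.
from OpenGL.GL.ARB.transform_feedback_instanced import (
    glInitTransformFeedbackInstancedARB,
)

def supports_instanced_transform_feedback():
    # True only if the driver reports the extension; call once after context creation.
    return bool(glInitTransformFeedbackInstancedARB())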
features/cpp/simple/test.py | xbabka01/retdec-regression-tests | 8 | 9765 | <filename>features/cpp/simple/test.py<gh_stars>1-10
from regression_tests import *
class TestBase(Test):
def test_for_main(self):
assert self.out_c.has_funcs('main') or self.out_c.has_funcs('entry_point')
def test_check_main_is_not_ctor_or_dtor(self):
for c in self.out_config.classes:
assert "main" not in c.constructors
assert "main" not in c.destructors
class TestAll(TestBase):
settings = TestSettings(
input=files_in_dir('inputs/symbols'),
args='-k'
)
def test_for_string(self):
# printf() is used -> '\n' at the end of the string
# puts() is used -> no '\n' at the end of the string
assert self.out_c.has_string_literal_matching( r'ClassA::ClassA(\\n)?' )
assert self.out_c.has_string_literal_matching( r'%i %i(\\n)?' )
assert self.out_c.has_string_literal_matching( r'~ClassA::ClassA(\\n)?' )
def test_for_vtables(self):
assert self.out_config.vtable_count == 1
vtable = self.out_config.vtables[0]
assert vtable.item_count == 1
assert "doSomething" in vtable.items[0].target_name
def test_for_classes(self):
assert self.out_config.classes_count == 1
c = self.out_config.classes[0]
assert len(c.constructors) == 2
assert len(c.destructors) == 2
assert len(c.virtualMethods) == 1
class TestAllStripped(TestBase):
settings = TestSettings(
input=files_in_dir('inputs/stripped'),
args='-k'
)
def test_for_vtables(self):
assert self.out_config.vtable_count == 1
vtable = self.out_config.vtables[0]
assert vtable.item_count == 1
assert vtable.items[0].target_name # there is some (!empty) function name
def test_for_classes(self):
assert self.out_config.classes_count == 1
c = self.out_config.classes[0]
assert len(c.virtualMethods) == 1
assert len(c.constructors) == 2
assert len(c.destructors) == 2
class TestMsvc(TestBase):
settings = TestSettings(
input='inputs/msvc/simple-msvc-release.ex',
args='-k'
)
settings_d = TestSettings(
input='inputs/msvc/simple-msvc-debug.ex',
args='-k'
)
def test_for_string(self):
assert self.out_c.has_string_literal( 'ClassA::ClassA\\n' )
assert self.out_c.has_string_literal( '~ClassA::ClassA\\n' )
assert self.out_c.has_string_literal( '%i %i\\n' )
def test_for_vtables(self):
assert self.out_config.vtable_count == 2
vtable1 = self.out_config.vtables[0]
assert vtable1.item_count == 1
vtable2 = self.out_config.vtables[0]
assert vtable2.item_count == 1
| <filename>features/cpp/simple/test.py<gh_stars>1-10
from regression_tests import *
class TestBase(Test):
def test_for_main(self):
assert self.out_c.has_funcs('main') or self.out_c.has_funcs('entry_point')
def test_check_main_is_not_ctor_or_dtor(self):
for c in self.out_config.classes:
assert "main" not in c.constructors
assert "main" not in c.destructors
class TestAll(TestBase):
settings = TestSettings(
input=files_in_dir('inputs/symbols'),
args='-k'
)
def test_for_string(self):
# printf() is used -> '\n' at the end of the string
# puts() is used -> no '\n' at the end of the string
assert self.out_c.has_string_literal_matching( r'ClassA::ClassA(\\n)?' )
assert self.out_c.has_string_literal_matching( r'%i %i(\\n)?' )
assert self.out_c.has_string_literal_matching( r'~ClassA::ClassA(\\n)?' )
def test_for_vtables(self):
assert self.out_config.vtable_count == 1
vtable = self.out_config.vtables[0]
assert vtable.item_count == 1
assert "doSomething" in vtable.items[0].target_name
def test_for_classes(self):
assert self.out_config.classes_count == 1
c = self.out_config.classes[0]
assert len(c.constructors) == 2
assert len(c.destructors) == 2
assert len(c.virtualMethods) == 1
class TestAllStripped(TestBase):
settings = TestSettings(
input=files_in_dir('inputs/stripped'),
args='-k'
)
def test_for_vtables(self):
assert self.out_config.vtable_count == 1
vtable = self.out_config.vtables[0]
assert vtable.item_count == 1
assert vtable.items[0].target_name # there is some (!empty) function name
def test_for_classes(self):
assert self.out_config.classes_count == 1
c = self.out_config.classes[0]
assert len(c.virtualMethods) == 1
assert len(c.constructors) == 2
assert len(c.destructors) == 2
class TestMsvc(TestBase):
settings = TestSettings(
input='inputs/msvc/simple-msvc-release.ex',
args='-k'
)
settings_d = TestSettings(
input='inputs/msvc/simple-msvc-debug.ex',
args='-k'
)
def test_for_string(self):
assert self.out_c.has_string_literal( 'ClassA::ClassA\\n' )
assert self.out_c.has_string_literal( '~ClassA::ClassA\\n' )
assert self.out_c.has_string_literal( '%i %i\\n' )
def test_for_vtables(self):
assert self.out_config.vtable_count == 2
vtable1 = self.out_config.vtables[0]
assert vtable1.item_count == 1
vtable2 = self.out_config.vtables[0]
assert vtable2.item_count == 1
| en | 0.750024 | # printf() is used -> '\n' at the end of the string # puts() is used -> no '\n' at the end of the string # there is some (!empty) function name | 2.391305 | 2 |
src/experiment.py | windar427/find_alpha | 0 | 9766 | <filename>src/experiment.py
from .lib.DownloadData import DownloadData
| <filename>src/experiment.py
from .lib.DownloadData import DownloadData
| none | 1 | 1.060894 | 1 |
|
src/__init__.py | songchenwen/icloud-drive-docker | 0 | 9767 | <filename>src/__init__.py
__author__ = '<NAME> (<EMAIL>)'
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
| <filename>src/__init__.py
__author__ = '<NAME> (<EMAIL>)'
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
| none | 1 | 1.17847 | 1 |
|
test_basico.py | rafael-torraca/delivery | 0 | 9768 | <gh_stars>0
def test_one_plus_one_is_two():
    assert 1 + 1 == 2  # the assert expects the condition to be true; if it is false, the test fails
def test_negative_1_plus_1_is_3():
assert 1 + 1 == 3
| def test_one_plus_one_is_two():
    assert 1 + 1 == 2  # the assert expects the condition to be true; if it is false, the test fails
def test_negative_1_plus_1_is_3():
assert 1 + 1 == 3 | pt | 0.680635 | #o assert espera que algo seja verdadeiro, se for falso o teste quebrou | 3.79914 | 4 |
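The second test above asserts 1 + 1 == 3 and therefore always fails. If the failure is deliberate (for example, to show how pytest reports a broken assertion), one conventional way to keep the suite green is to mark it as an expected failure; a sketch:

import pytest

# Marks the deliberately wrong assertion as an expected failure instead of an error.
@pytest.mark.xfail(reason="demonstrates a failing assertion", strict=True)
def test_negative_1_plus_1_is_3():
    assert 1 + 1 == 3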
setup.py | rohernandezz/coldtype | 0 | 9769 | import setuptools
long_description = """
# Coldtype
### Programmatic display typography
More info available at: [coldtype.goodhertz.com](https://coldtype.goodhertz.com)
"""
setuptools.setup(
name="coldtype",
version="0.6.6",
author="<NAME> / Goodhertz",
author_email="<EMAIL>",
description="Functions for manual vectorized typesetting",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/goodhertz/coldtype",
#package_dir={"": "coldtype"},
packages=[
"coldtype",
"coldtype.sh",
"coldtype.fx",
"coldtype.img",
"coldtype.time",
"coldtype.midi",
"coldtype.pens",
"coldtype.text",
"coldtype.grid",
"coldtype.color",
"coldtype.capture",
"coldtype.blender",
"coldtype.geometry",
"coldtype.time.nle",
"coldtype.renderer",
"coldtype.webserver",
"coldtype.renderable",
"coldtype.fontgoggles",
"coldtype.interpolation",
"coldtype.renderer.winman",
"coldtype.fontgoggles.font",
"coldtype.fontgoggles.misc",
"coldtype.fontgoggles.compile",
],
include_package_data=True,
package_data={
"": [
"webserver/webviewer.html",
"demo/RecMono-CasualItalic.ttf",
"demo/ColdtypeObviously-VF.ttf",
"demo/MutatorSans.ttf",
"demo/demo.py",
"demo/midi.py",
"demo/blank.py",
"demo/boiler.py",
"renderer/picklejar.py",
"renderer/.coldtype.py"
],
},
entry_points={
'console_scripts': [
'coldtype = coldtype.renderer:main'
],
},
extras_require={
"skia": [
"skia-python>=86.0",
],
"viewer": [
"glfw",
"PyOpenGL",
"PyOpenGL-accelerate",
"skia-python>=86.0",
"skia-pathops", # can this be taken from skia-python?
"SimpleWebSocketServer",
"watchdog<2.0.0", # https://github.com/gorakhargosh/watchdog/issues/702
"noise",
"ufo2ft",
"numpy",
],
"webviewer": [
"SimpleWebSocketServer",
"watchdog<2.0.0", # https://github.com/gorakhargosh/watchdog/issues/702
],
"experimental": [
"pynput",
"rtmidi",
"noise",
],
"c": [
"srt",
"noise",
],
"unicode": [
"unicodedata2"
],
"blender": [
"skia-pathops"
],
"notebook": [
"skia-pathops",
"skia-python",
]
},
install_requires=[
"lxml",
"fonttools[ufo]",
"fontPens",
"fontParts",
"more-itertools",
"easing-functions",
"timecode",
"mido",
"defcon",
"freetype-py",
"uharfbuzz>=0.14.0",
"python-bidi"
],
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
)
| import setuptools
long_description = """
# Coldtype
### Programmatic display typography
More info available at: [coldtype.goodhertz.com](https://coldtype.goodhertz.com)
"""
setuptools.setup(
name="coldtype",
version="0.6.6",
author="<NAME> / Goodhertz",
author_email="<EMAIL>",
description="Functions for manual vectorized typesetting",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/goodhertz/coldtype",
#package_dir={"": "coldtype"},
packages=[
"coldtype",
"coldtype.sh",
"coldtype.fx",
"coldtype.img",
"coldtype.time",
"coldtype.midi",
"coldtype.pens",
"coldtype.text",
"coldtype.grid",
"coldtype.color",
"coldtype.capture",
"coldtype.blender",
"coldtype.geometry",
"coldtype.time.nle",
"coldtype.renderer",
"coldtype.webserver",
"coldtype.renderable",
"coldtype.fontgoggles",
"coldtype.interpolation",
"coldtype.renderer.winman",
"coldtype.fontgoggles.font",
"coldtype.fontgoggles.misc",
"coldtype.fontgoggles.compile",
],
include_package_data=True,
package_data={
"": [
"webserver/webviewer.html",
"demo/RecMono-CasualItalic.ttf",
"demo/ColdtypeObviously-VF.ttf",
"demo/MutatorSans.ttf",
"demo/demo.py",
"demo/midi.py",
"demo/blank.py",
"demo/boiler.py",
"renderer/picklejar.py",
"renderer/.coldtype.py"
],
},
entry_points={
'console_scripts': [
'coldtype = coldtype.renderer:main'
],
},
extras_require={
"skia": [
"skia-python>=86.0",
],
"viewer": [
"glfw",
"PyOpenGL",
"PyOpenGL-accelerate",
"skia-python>=86.0",
"skia-pathops", # can this be taken from skia-python?
"SimpleWebSocketServer",
"watchdog<2.0.0", # https://github.com/gorakhargosh/watchdog/issues/702
"noise",
"ufo2ft",
"numpy",
],
"webviewer": [
"SimpleWebSocketServer",
"watchdog<2.0.0", # https://github.com/gorakhargosh/watchdog/issues/702
],
"experimental": [
"pynput",
"rtmidi",
"noise",
],
"c": [
"srt",
"noise",
],
"unicode": [
"unicodedata2"
],
"blender": [
"skia-pathops"
],
"notebook": [
"skia-pathops",
"skia-python",
]
},
install_requires=[
"lxml",
"fonttools[ufo]",
"fontPens",
"fontParts",
"more-itertools",
"easing-functions",
"timecode",
"mido",
"defcon",
"freetype-py",
"uharfbuzz>=0.14.0",
"python-bidi"
],
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
)
| en | 0.569193 | # Coldtype ### Programmatic display typography More info available at: [coldtype.goodhertz.com](https://coldtype.goodhertz.com) #package_dir={"": "coldtype"}, # can this be taken from skia-python? # https://github.com/gorakhargosh/watchdog/issues/702 # https://github.com/gorakhargosh/watchdog/issues/702 | 1.611154 | 2 |
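The console_scripts entry point declared above ('coldtype = coldtype.renderer:main') is what pip turns into the coldtype command. A small sketch of how that mapping can be resolved programmatically once the package is installed (Python 3.10+ importlib.metadata API assumed):

# Resolve the 'coldtype' console script declared in setup() above.
# Requires Python 3.10+ and an installed coldtype distribution.
from importlib.metadata import entry_points

for ep in entry_points().select(group="console_scripts", name="coldtype"):
    print(ep.value)   # "coldtype.renderer:main"
    main = ep.load()  # imports coldtype.renderer and returns its main callable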
GFOLD_problem.py | xdedss/SuccessiveConvexification | 0 | 9770 | # -*- coding: utf-8 -*-
# GFOLD_static_p3p4
min_=min
from cvxpy import *
import cvxpy_codegen as cpg
from time import time
import numpy as np
import sys
import GFOLD_params
''' As defined in the paper...
PROBLEM 3: Minimum Landing Error (tf roughly solved)
MINIMIZE : norm of landing error vector
SUBJ TO :
0) initial conditions satisfied (position, velocity)
1) final conditions satisfied (altitude, velocity)
2) dynamics always satisfied
3) x stays in cone at all times
4) relaxed convexified mass and thrust constraints
5) thrust pointing constraint
6) sub-surface flight constraint
PROBLEM 4: Minimum Fuel Use
MAXIMIZE : landing mass, opt variables are dynamical and
SUBJ TO :
0) same constraints as p1, plus:
1) landing point must be equal or better than that found by p1
'''
def solve(params, params_super = None, codegen = False, verbose=False):
#super params
if (params_super == None):
params_super = GFOLD_params.SuperParams() # default
N = params_super.N
    # optimization variables
x =Variable(6,N,name='var_x') # state vector (3position,3velocity)
u =Variable(3,N,name='var_u') # u = Tc/mass because Tc[:,n]/m[n] is not allowed by DCP
z= Variable(1,N,name='var_z') # z = ln(mass)
s= Variable(1,N,name='var_s') # thrust slack parameter
# Parameters
x0 = Parameter(6, 1, name="x0")
xf = Parameter(6, 1, name="xf")
z0_term_inv = Parameter(1, N, name="z0_term_inv", sign='positive')
z0_term_log = Parameter(1, N, name="z0_term_log")
g = Parameter(3, 1, name="g_vec")
p_cs_cos = Parameter(1, N, name='p_cs_cos')
sparse_params = Parameter(7, 1, name="sparse_params", sign='positive')
m_wet_log = Parameter(2, 1, name='m_wet_log')
if (not codegen):
x0.value = params.x0.reshape(6, 1)
xf.value = params.xf.reshape(6, 1)
z0_term_inv.value = params.z0_term_inv.reshape(1, N)
z0_term_log.value = params.z0_term_log.reshape(1, N)
g.value = params.g.reshape(3, 1)
p_cs_cos.value = params.p_cs_cos.reshape(1, N)
m_wet_log.value = [params.m_wet_log, 0]
sparse_params.value = np.array([
params.alpha_dt,
params.G_max,
params.V_max,
params.y_gs_cot,
params.r1,
params.r2,
params.tf
]).reshape(7, 1)
alpha_dt, G_max, V_max, y_gs_cot, r1, r2, tf_ = sparse_params
dt = tf_ * (1/N) # Integration dt
# constraints
con = []
con += [x[0:3,0] == x0[0:3]] # initial pos
con += [x[3:6,0] == x0[3:6]] # initial vel
con += [x[0:3,N-1] == xf[0:3]] # final pos
con += [x[3:6,N-1]== xf[3:6]] # final vel
con += [s[0,N-1] == 0] # thrust at the end must be zero
con += [u[:,0] == s[0,0]*np.array([1,0,0])] # thrust direction starts straight
con += [u[:,N-1] == s[0,N-1]*np.array([1,0,0])] # and ends straight
con += [z[0,0] == m_wet_log[0,0]] # convexified (7)
for n in range(0,N-1):
#dynamics
con += [x[3:6,n+1] == x[3:6,n] + (dt*0.5)*((u[:,n]+g[:,0]) + (u[:,n+1]+g[:,0]))]
con += [x[0:3,n+1] == x[0:3,n] + (dt*0.5)*(x[3:6,n+1]+x[3:6,n])]
# glideslope cone
con += [ norm( (x[0:3,n])[1:3] ) - y_gs_cot*(x[0,n]) <= 0 ]
con += [ norm(x[3:6,n]) <= V_max ] # velocity
#con += [norm(u[:,n+1]-u[:,n]) <= dt*T_max/m_dry * 3]
con += [z[0,n+1] == z[0,n] - (alpha_dt*0.5)*(s[0,n] + s[0,n+1])] # mass decreases
con += [norm(u[:,n]) <= s[0,n]] # limit thrust magnitude & also therefore, mass
# Thrust pointing constraint
con += [ u[0,n] >= p_cs_cos[0,n]*s[0,n] ]
if n > 0:
#z0_term = m_wet - alpha * r2 * (n) * dt # see ref [2], eq 34,35,36
#z0 = log(z0_term)
z0 = z0_term_log[0,n]
mu_1 = r1*(z0_term_inv[0,n])
mu_2 = r2*(z0_term_inv[0,n])
            # corrected a point where the original project did not match the paper
            # diagram: https://www.desmos.com/calculator/wtcfgnepe1
con += [s[0,n] >= mu_1 * (1 - (z[0,n] - z0) + (z[0,n] - z0)**2 *0.5)] # lower thrust bound
con += [s[0,n] <= mu_2 * (1 - (z[0,n] - z0))] # upper thrust bound
#Objective
objective = Minimize(-z[0,N-1])
problem=Problem(objective, con)
if codegen:
cpg.codegen(problem, codegen_path)
else:
obj_opt = problem.solve(solver=ECOS, verbose=verbose)
return (
obj_opt,
np.array(x.value), # r,v
np.array(u.value), # u (acceleration)
np.exp(np.array(z.value)) # mass
) if type(x.value) != type(None) else (None, None, None, None)
if __name__ == '__main__':
if (len(sys.argv) > 2 and sys.argv[1] == 'codegen'):
codegen_path = sys.argv[2]
solve(None, None, True)
else:
print("invalid input")
print(sys.argv)
| # -*- coding: utf-8 -*-
# GFOLD_static_p3p4
min_=min
from cvxpy import *
import cvxpy_codegen as cpg
from time import time
import numpy as np
import sys
import GFOLD_params
''' As defined in the paper...
PROBLEM 3: Minimum Landing Error (tf roughly solved)
MINIMIZE : norm of landing error vector
SUBJ TO :
0) initial conditions satisfied (position, velocity)
1) final conditions satisfied (altitude, velocity)
2) dynamics always satisfied
3) x stays in cone at all times
4) relaxed convexified mass and thrust constraints
5) thrust pointing constraint
6) sub-surface flight constraint
PROBLEM 4: Minimum Fuel Use
MAXIMIZE : landing mass, opt variables are dynamical and
SUBJ TO :
0) same constraints as p1, plus:
1) landing point must be equal or better than that found by p1
'''
def solve(params, params_super = None, codegen = False, verbose=False):
#super params
if (params_super == None):
params_super = GFOLD_params.SuperParams() # default
N = params_super.N
    # optimization variables
x =Variable(6,N,name='var_x') # state vector (3position,3velocity)
u =Variable(3,N,name='var_u') # u = Tc/mass because Tc[:,n]/m[n] is not allowed by DCP
z= Variable(1,N,name='var_z') # z = ln(mass)
s= Variable(1,N,name='var_s') # thrust slack parameter
# Parameters
x0 = Parameter(6, 1, name="x0")
xf = Parameter(6, 1, name="xf")
z0_term_inv = Parameter(1, N, name="z0_term_inv", sign='positive')
z0_term_log = Parameter(1, N, name="z0_term_log")
g = Parameter(3, 1, name="g_vec")
p_cs_cos = Parameter(1, N, name='p_cs_cos')
sparse_params = Parameter(7, 1, name="sparse_params", sign='positive')
m_wet_log = Parameter(2, 1, name='m_wet_log')
if (not codegen):
x0.value = params.x0.reshape(6, 1)
xf.value = params.xf.reshape(6, 1)
z0_term_inv.value = params.z0_term_inv.reshape(1, N)
z0_term_log.value = params.z0_term_log.reshape(1, N)
g.value = params.g.reshape(3, 1)
p_cs_cos.value = params.p_cs_cos.reshape(1, N)
m_wet_log.value = [params.m_wet_log, 0]
sparse_params.value = np.array([
params.alpha_dt,
params.G_max,
params.V_max,
params.y_gs_cot,
params.r1,
params.r2,
params.tf
]).reshape(7, 1)
alpha_dt, G_max, V_max, y_gs_cot, r1, r2, tf_ = sparse_params
dt = tf_ * (1/N) # Integration dt
# constraints
con = []
con += [x[0:3,0] == x0[0:3]] # initial pos
con += [x[3:6,0] == x0[3:6]] # initial vel
con += [x[0:3,N-1] == xf[0:3]] # final pos
con += [x[3:6,N-1]== xf[3:6]] # final vel
con += [s[0,N-1] == 0] # thrust at the end must be zero
con += [u[:,0] == s[0,0]*np.array([1,0,0])] # thrust direction starts straight
con += [u[:,N-1] == s[0,N-1]*np.array([1,0,0])] # and ends straight
con += [z[0,0] == m_wet_log[0,0]] # convexified (7)
for n in range(0,N-1):
#dynamics
con += [x[3:6,n+1] == x[3:6,n] + (dt*0.5)*((u[:,n]+g[:,0]) + (u[:,n+1]+g[:,0]))]
con += [x[0:3,n+1] == x[0:3,n] + (dt*0.5)*(x[3:6,n+1]+x[3:6,n])]
# glideslope cone
con += [ norm( (x[0:3,n])[1:3] ) - y_gs_cot*(x[0,n]) <= 0 ]
con += [ norm(x[3:6,n]) <= V_max ] # velocity
#con += [norm(u[:,n+1]-u[:,n]) <= dt*T_max/m_dry * 3]
con += [z[0,n+1] == z[0,n] - (alpha_dt*0.5)*(s[0,n] + s[0,n+1])] # mass decreases
con += [norm(u[:,n]) <= s[0,n]] # limit thrust magnitude & also therefore, mass
# Thrust pointing constraint
con += [ u[0,n] >= p_cs_cos[0,n]*s[0,n] ]
if n > 0:
#z0_term = m_wet - alpha * r2 * (n) * dt # see ref [2], eq 34,35,36
#z0 = log(z0_term)
z0 = z0_term_log[0,n]
mu_1 = r1*(z0_term_inv[0,n])
mu_2 = r2*(z0_term_inv[0,n])
            # corrected a point where the original project did not match the paper
            # diagram: https://www.desmos.com/calculator/wtcfgnepe1
con += [s[0,n] >= mu_1 * (1 - (z[0,n] - z0) + (z[0,n] - z0)**2 *0.5)] # lower thrust bound
con += [s[0,n] <= mu_2 * (1 - (z[0,n] - z0))] # upper thrust bound
#Objective
objective = Minimize(-z[0,N-1])
problem=Problem(objective, con)
if codegen:
cpg.codegen(problem, codegen_path)
else:
obj_opt = problem.solve(solver=ECOS, verbose=verbose)
return (
obj_opt,
np.array(x.value), # r,v
np.array(u.value), # u (acceleration)
np.exp(np.array(z.value)) # mass
) if type(x.value) != type(None) else (None, None, None, None)
if __name__ == '__main__':
if (len(sys.argv) > 2 and sys.argv[1] == 'codegen'):
codegen_path = sys.argv[2]
solve(None, None, True)
else:
print("invalid input")
print(sys.argv)
| en | 0.737469 | # -*- coding: utf-8 -*- # GFOLD_static_p3p4 As defined in the paper... PROBLEM 3: Minimum Landing Error (tf roughly solved) MINIMIZE : norm of landing error vector SUBJ TO : 0) initial conditions satisfied (position, velocity) 1) final conditions satisfied (altitude, velocity) 2) dynamics always satisfied 3) x stays in cone at all times 4) relaxed convexified mass and thrust constraints 5) thrust pointing constraint 6) sub-surface flight constraint PROBLEM 4: Minimum Fuel Use MAXIMIZE : landing mass, opt variables are dynamical and SUBJ TO : 0) same constraints as p1, plus: 1) landing point must be equal or better than that found by p1 #super params # default #优化变量 # state vector (3position,3velocity) # u = Tc/mass because Tc[:,n]/m[n] is not allowed by DCP # z = ln(mass) # thrust slack parameter # Parameters # Integration dt # constraints # initial pos # initial vel # final pos # final vel # thrust at the end must be zero # thrust direction starts straight # and ends straight # convexified (7) #dynamics # glideslope cone # velocity #con += [norm(u[:,n+1]-u[:,n]) <= dt*T_max/m_dry * 3] # mass decreases # limit thrust magnitude & also therefore, mass # Thrust pointing constraint #z0_term = m_wet - alpha * r2 * (n) * dt # see ref [2], eq 34,35,36 #z0 = log(z0_term) #更正一处原项目与论文不符之处 # 示意图:https://www.desmos.com/calculator/wtcfgnepe1 # lower thrust bound # upper thrust bound #Objective # r,v # u (acceleration) # mass | 2.57138 | 3 |
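The heart of the formulation above is the lossless relaxation that bounds the thrust vector by a slack s together with the thrust-pointing constraint u[0] >= cos(theta)*s. A minimal, self-contained sketch of just that pattern, written against the modern cvxpy API (toy numbers, not the full GFOLD problem):

# Toy illustration of the norm-slack relaxation and pointing constraint used above.
# Modern cvxpy API assumed (cp.Variable(shape)); values are arbitrary for demonstration.
import numpy as np
import cvxpy as cp

N = 20
u = cp.Variable((3, N))            # thrust acceleration per step
s = cp.Variable(N, nonneg=True)    # slack upper-bounding ||u_n||
p_cs_cos = np.cos(np.deg2rad(45))  # toy thrust-pointing limit

cons = [cp.norm(u[:, n]) <= s[n] for n in range(N)]  # ||u_n|| <= s_n
cons += [u[0, :] >= p_cs_cos * s]                    # pointing constraint
cons += [s <= 3.0, cp.sum(u[0, :]) >= 10.0]          # toy bounds to keep it non-trivial
prob = cp.Problem(cp.Minimize(cp.sum(s)), cons)
prob.solve()
print(prob.status, float(prob.value))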
Hints.py | SarienFates/MMRandomizer | 36 | 9771 | import io
import hashlib
import logging
import os
import struct
import random
from HintList import getHint, getHintGroup, Hint
from Utils import local_path
#builds out general hints based on location and whether an item is required or not
def buildGossipHints(world, rom):
stoneAddresses = [0x938e4c, 0x938EA8, 0x938F04, 0x938F60, 0x938FBC, 0x939018, 0x939074, 0x9390D0, 0x93912C, 0x939188,
0x9391E4, 0x939240, 0x93929C, 0x9392F8, 0x939354, 0x9393B0, 0x93940C, 0x939468, 0x9394C4, 0x939520,
0x93957C, 0x9395D8, 0x939634, 0x939690, 0x9396EC, 0x939748, 0x9397A4, 0x939800, 0x93985C, 0x9398B8,
0x939914, 0x939970] #address for gossip stone text boxes, byte limit is 92
alwaysLocations = getHintGroup('alwaysLocation')#These location will always have a hint somewhere in the world.
sometimesSpace = (int((len(stoneAddresses) - len(alwaysLocations)*2)/2))
sometimesLocations = getHintGroup('location')#A random selection of these locations will be in the hint pool.
random.shuffle(sometimesLocations)
sometimesLocations = sometimesLocations[0:sometimesSpace]
hintList = alwaysLocations
hintList.extend(alwaysLocations)
hintList.extend(sometimesLocations)
locationData = []
for hint in hintList:
for locationWorld in world.get_locations():
if hint.name == locationWorld.name:
locationData.extend([locationWorld])
#hopefully fixes weird VC error where the last character from a previous text box would sometimes spill over into the next box.
for address in range(stoneAddresses[0], 0x9399D8):
rom.write_byte(address, 0x08)
#shuffles the stone addresses for randomization, always locations will be placed first and twice
random.shuffle(stoneAddresses)
#loops through shuffled locations and addresses and builds hint.
while locationData:
currentLoc = locationData.pop(0)
Block_code = getBytes((getHint(currentLoc.name).text))
if currentLoc.item.type == 'Map' or currentLoc.item.type == 'Compass' or currentLoc.item.type == 'BossKey' or currentLoc.item.type == 'SmallKey':
Block_code.extend(getBytes((getHint(currentLoc.item.type).text)))
else:
Block_code.extend(getBytes((getHint(currentLoc.item.name).text)))
endText(Block_code)
if len(Block_code) > 92:
print('Too many characters in hint')
Block_code = getBytes("I am Error.")
Block_code.extend(getBytes(currentLoc.name))
Block_code.extend(getBytes('&'))
Block_code.extend(getBytes(currentLoc.item.name))
rom.write_bytes(stoneAddresses.pop(0), Block_code)
junkHints = getHintGroup('junkHint')
random.shuffle(junkHints)
while stoneAddresses:
junkHint = junkHints.pop()
Block_code = getBytes(junkHint.text)
endText(Block_code)
rom.write_bytes(stoneAddresses.pop(0), Block_code)
return rom
# builds boss reward text that is displayed at the temple of time altar for child and adult, pull based off of item in a fixed order.
def buildBossRewardHints(world, rom):
bossRewardsSpiritualStones = ['Kokiri Emerald', 'Goron Ruby', 'Zora Sapphire']
bossRewardsMedallions = ['Forest Medallion', 'Fire Medallion', 'Water Medallion', 'Shadow Medallion', 'Spirit Medallion', 'Light Medallion']
# text that appears at altar as a child.
Block_code = []
Block_code = getBytes(getHint('Spiritual Stone Text Start').text)
for reward in bossRewardsSpiritualStones:
buildBossString(Block_code, reward, world)
Block_code = setRewardColor(Block_code)
Block_code.extend(getBytes(getHint('Spiritual Stone Text End').text))
Block_code.extend([0x0B])
endText(Block_code)
rom.write_bytes(0x95ED95, Block_code)
# text that appears at altar as an adult.
Block_code = []
for reward in bossRewardsMedallions:
buildBossString(Block_code, reward, world)
Block_code = setRewardColor(Block_code)
Block_code.extend(getBytes(getHint('Medallion Text End').text))
Block_code.extend([0x0B])
endText(Block_code)
rom.write_bytes(0x95DB94, Block_code)
return rom
# pulls text string from hintlist for reward after sending the location to hintlist.
def buildBossString(Block_code, reward, world):
for location in world.get_locations():
if location.item.name == reward:
Block_code.extend([0x08])
Block_code.extend(getBytes(getHint(location.name).text))
return Block_code
# alternates through color set commands in child and adult boss reward hint strings setting the colors at the start of the string to correspond with the reward found at the location.
# skips over color commands at the end of strings to set color back to white.
def setRewardColor(Block_code):
rewardColors = [0x42, 0x41, 0x43, 0x45, 0x46, 0x44]
colorWhite = True
for i, byte in enumerate(Block_code):
if byte == 0x05 and colorWhite:
Block_code[i + 1] = rewardColors.pop(0)
colorWhite = False
elif byte == 0x05 and not colorWhite:
colorWhite = True
return Block_code
#sets the end of text byte in the text box.
def endText(byteArray):
return byteArray.extend([0x02])
# reads array of characters and converts them to an array of bytes.
def getBytes(string):
byteCode = []
for char in string:
if char == '^':
byteCode.extend([0x04])#box break
elif char == '&':
byteCode.extend([0x01])#new line
elif char == '@':
byteCode.extend([0x0F])#print player name
elif char == '#':
byteCode.extend([0x05, 0x40]) #sets color to white
else:
char = char.encode('utf-8')
char = char.hex()
byte = int('0x' + char, 16)
byteCode.extend([byte])
return byteCode
| import io
import hashlib
import logging
import os
import struct
import random
from HintList import getHint, getHintGroup, Hint
from Utils import local_path
#builds out general hints based on location and whether an item is required or not
def buildGossipHints(world, rom):
stoneAddresses = [0x938e4c, 0x938EA8, 0x938F04, 0x938F60, 0x938FBC, 0x939018, 0x939074, 0x9390D0, 0x93912C, 0x939188,
0x9391E4, 0x939240, 0x93929C, 0x9392F8, 0x939354, 0x9393B0, 0x93940C, 0x939468, 0x9394C4, 0x939520,
0x93957C, 0x9395D8, 0x939634, 0x939690, 0x9396EC, 0x939748, 0x9397A4, 0x939800, 0x93985C, 0x9398B8,
0x939914, 0x939970] #address for gossip stone text boxes, byte limit is 92
alwaysLocations = getHintGroup('alwaysLocation')#These location will always have a hint somewhere in the world.
sometimesSpace = (int((len(stoneAddresses) - len(alwaysLocations)*2)/2))
sometimesLocations = getHintGroup('location')#A random selection of these locations will be in the hint pool.
random.shuffle(sometimesLocations)
sometimesLocations = sometimesLocations[0:sometimesSpace]
hintList = alwaysLocations
hintList.extend(alwaysLocations)
hintList.extend(sometimesLocations)
locationData = []
for hint in hintList:
for locationWorld in world.get_locations():
if hint.name == locationWorld.name:
locationData.extend([locationWorld])
#hopefully fixes weird VC error where the last character from a previous text box would sometimes spill over into the next box.
for address in range(stoneAddresses[0], 0x9399D8):
rom.write_byte(address, 0x08)
#shuffles the stone addresses for randomization, always locations will be placed first and twice
random.shuffle(stoneAddresses)
#loops through shuffled locations and addresses and builds hint.
while locationData:
currentLoc = locationData.pop(0)
Block_code = getBytes((getHint(currentLoc.name).text))
if currentLoc.item.type == 'Map' or currentLoc.item.type == 'Compass' or currentLoc.item.type == 'BossKey' or currentLoc.item.type == 'SmallKey':
Block_code.extend(getBytes((getHint(currentLoc.item.type).text)))
else:
Block_code.extend(getBytes((getHint(currentLoc.item.name).text)))
endText(Block_code)
if len(Block_code) > 92:
print('Too many characters in hint')
Block_code = getBytes("I am Error.")
Block_code.extend(getBytes(currentLoc.name))
Block_code.extend(getBytes('&'))
Block_code.extend(getBytes(currentLoc.item.name))
rom.write_bytes(stoneAddresses.pop(0), Block_code)
junkHints = getHintGroup('junkHint')
random.shuffle(junkHints)
while stoneAddresses:
junkHint = junkHints.pop()
Block_code = getBytes(junkHint.text)
endText(Block_code)
rom.write_bytes(stoneAddresses.pop(0), Block_code)
return rom
# builds boss reward text that is displayed at the temple of time altar for child and adult, pull based off of item in a fixed order.
def buildBossRewardHints(world, rom):
bossRewardsSpiritualStones = ['Kokiri Emerald', 'Goron Ruby', 'Zora Sapphire']
bossRewardsMedallions = ['Forest Medallion', 'Fire Medallion', 'Water Medallion', 'Shadow Medallion', 'Spirit Medallion', 'Light Medallion']
# text that appears at altar as a child.
Block_code = []
Block_code = getBytes(getHint('Spiritual Stone Text Start').text)
for reward in bossRewardsSpiritualStones:
buildBossString(Block_code, reward, world)
Block_code = setRewardColor(Block_code)
Block_code.extend(getBytes(getHint('Spiritual Stone Text End').text))
Block_code.extend([0x0B])
endText(Block_code)
rom.write_bytes(0x95ED95, Block_code)
# text that appears at altar as an adult.
Block_code = []
for reward in bossRewardsMedallions:
buildBossString(Block_code, reward, world)
Block_code = setRewardColor(Block_code)
Block_code.extend(getBytes(getHint('Medallion Text End').text))
Block_code.extend([0x0B])
endText(Block_code)
rom.write_bytes(0x95DB94, Block_code)
return rom
# pulls text string from hintlist for reward after sending the location to hintlist.
def buildBossString(Block_code, reward, world):
for location in world.get_locations():
if location.item.name == reward:
Block_code.extend([0x08])
Block_code.extend(getBytes(getHint(location.name).text))
return Block_code
# alternates through color set commands in child and adult boss reward hint strings setting the colors at the start of the string to correspond with the reward found at the location.
# skips over color commands at the end of stings to set color back to white.
def setRewardColor(Block_code):
rewardColors = [0x42, 0x41, 0x43, 0x45, 0x46, 0x44]
colorWhite = True
for i, byte in enumerate(Block_code):
if byte == 0x05 and colorWhite:
Block_code[i + 1] = rewardColors.pop(0)
colorWhite = False
elif byte == 0x05 and not colorWhite:
colorWhite = True
return Block_code
#sets the end of text byte in the text box.
def endText(byteArray):
return byteArray.extend([0x02])
# reads array of characters and converts them to an array of bytes.
def getBytes(string):
byteCode = []
for char in string:
if char == '^':
byteCode.extend([0x04])#box break
elif char == '&':
byteCode.extend([0x01])#new line
elif char == '@':
byteCode.extend([0x0F])#print player name
elif char == '#':
byteCode.extend([0x05, 0x40]) #sets color to white
else:
char = char.encode('utf-8')
char = char.hex()
byte = int('0x' + char, 16)
byteCode.extend([byte])
return byteCode
| en | 0.915753 | #builds out general hints based on location and whether an item is required or not #address for gossip stone text boxes, byte limit is 92 #These location will always have a hint somewhere in the world. #A random selection of these locations will be in the hint pool. #hopefully fixes weird VC error where the last character from a previous text box would sometimes spill over into the next box. #shuffles the stone addresses for randomization, always locations will be placed first and twice #loops through shuffled locations and addresses and builds hint. # builds boss reward text that is displayed at the temple of time altar for child and adult, pull based off of item in a fixed order. # text that appears at altar as a child. # text that appears at altar as an adult. # pulls text string from hintlist for reward after sending the location to hintlist. # alternates through color set commands in child and adult boss reward hint strings setting the colors at the start of the string to correspond with the reward found at the location. # skips over color commands at the end of stings to set color back to white. #sets the end of text byte in the text box. # reads array of characters and converts them to an array of bytes. #box break #new line #print player name #sets color to white | 2.440115 | 2 |
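getBytes() above maps the four markup characters to control bytes and everything else to UTF-8, and endText() appends the terminator. A condensed, stand-alone re-statement of that mapping (encode_hint is a hypothetical helper, not part of the randomizer):

# Stand-alone check of the text-to-byte mapping performed by getBytes()/endText() above:
# '&' -> 0x01 (new line), '^' -> 0x04 (box break), '@' -> 0x0F (player name),
# '#' -> 0x05 0x40 (reset colour to white), then 0x02 terminates the text box.
def encode_hint(text):
    control = {'^': [0x04], '&': [0x01], '@': [0x0F], '#': [0x05, 0x40]}
    out = []
    for ch in text:
        out.extend(control.get(ch, list(ch.encode('utf-8'))))
    out.append(0x02)
    return out

print(encode_hint("They say that @ is&on the way.^"))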
examen_2/p2/p2.py | Jhoselyn-Carballo/computacion_para_ingenieria | 0 | 9772 | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 17 09:10:05 2022
@author: JHOSS
"""
from tkinter import *
def contador(accion, contador):
    if accion == 'countUp':
        contador = contador + 1
    elif accion == 'countDown':
        contador = contador - 1
    elif accion == 'reset':
        contador = 0
    return contador
| # -*- coding: utf-8 -*-
"""
Created on Thu Feb 17 09:10:05 2022
@author: JHOSS
"""
from tkinter import *
def contador(accion, contador):
    if accion == 'countUp':
        contador = contador + 1
    elif accion == 'countDown':
        contador = contador - 1
    elif accion == 'reset':
        contador = 0
    return contador
| en | 0.736576 | # -*- coding: utf-8 -*- Created on Thu Feb 17 09:10:05 2022 @author: JHOSS | 3.373298 | 3 |
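A short usage sketch for the corrected helper (it assumes contador() from p2.py above is importable or already in scope):

# Hypothetical usage of contador(); action names follow the corrected function above.
valor = 0
valor = contador('countUp', valor)    # 1
valor = contador('countUp', valor)    # 2
valor = contador('countDown', valor)  # 1
valor = contador('reset', valor)      # 0
print(valor)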
bokeh/models/tests/test_callbacks.py | ndepal/bokeh | 1 | 9773 | <reponame>ndepal/bokeh
from pytest import raises
from bokeh.models import CustomJS, Slider
def test_js_callback():
slider = Slider()
cb = CustomJS(code="foo();", args=dict(x=slider))
assert 'foo()' in cb.code
assert cb.args['x'] is slider
cb = CustomJS(code="foo();", args=dict(x=3))
assert 'foo()' in cb.code
assert cb.args['x'] is 3
with raises(AttributeError): # kwargs not supported
CustomJS(code="foo();", x=slider)
def test_py_callback():
slider = Slider()
foo = None # fool pyflakes
def cb(x=slider):
foo()
cb = CustomJS.from_py_func(cb)
assert 'foo()' in cb.code
assert cb.args['x'] is slider
def cb(x=4):
foo()
cb = CustomJS.from_py_func(cb)
assert 'foo()' in cb.code
assert cb.args['x'] is 4
| from pytest import raises
from bokeh.models import CustomJS, Slider
def test_js_callback():
slider = Slider()
cb = CustomJS(code="foo();", args=dict(x=slider))
assert 'foo()' in cb.code
assert cb.args['x'] is slider
cb = CustomJS(code="foo();", args=dict(x=3))
assert 'foo()' in cb.code
assert cb.args['x'] is 3
with raises(AttributeError): # kwargs not supported
CustomJS(code="foo();", x=slider)
def test_py_callback():
slider = Slider()
foo = None # fool pyflakes
def cb(x=slider):
foo()
cb = CustomJS.from_py_func(cb)
assert 'foo()' in cb.code
assert cb.args['x'] is slider
def cb(x=4):
foo()
cb = CustomJS.from_py_func(cb)
assert 'foo()' in cb.code
assert cb.args['x'] is 4 | en | 0.353214 | # kwargs not supported # fool pyflakes | 2.172644 | 2 |
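Outside the test suite, a CustomJS like the ones constructed above is normally attached to a property-change event on the model passed in args; a sketch against the bokeh >= 1.x API:

# Sketch: wiring a CustomJS callback to a Slider's value changes (bokeh >= 1.x assumed).
from bokeh.models import CustomJS, Slider

slider = Slider(start=0, end=10, value=1, step=1, title="x")
callback = CustomJS(args=dict(s=slider), code="console.log('value: ' + s.value);")
slider.js_on_change("value", callback)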
tests/test_0150-attributeerrors.py | martindurant/awkward-1.0 | 0 | 9774 | # BSD 3-Clause License; see https://github.com/jpivarski/awkward-1.0/blob/master/LICENSE
from __future__ import absolute_import
import sys
import pytest
import numpy
import awkward1
class Dummy(awkward1.Record):
@property
def broken(self):
raise AttributeError("I'm broken!")
def test():
behavior = {}
behavior["Dummy"] = Dummy
array = awkward1.Array([{"x": 1}, {"x": 2}, {"x": 3}], behavior=behavior)
array.layout.setparameter("__record__", "Dummy")
with pytest.raises(AttributeError) as err:
array[1].broken
assert str(err.value) == "I'm broken!" # not "no field named 'broken'"
| # BSD 3-Clause License; see https://github.com/jpivarski/awkward-1.0/blob/master/LICENSE
from __future__ import absolute_import
import sys
import pytest
import numpy
import awkward1
class Dummy(awkward1.Record):
@property
def broken(self):
raise AttributeError("I'm broken!")
def test():
behavior = {}
behavior["Dummy"] = Dummy
array = awkward1.Array([{"x": 1}, {"x": 2}, {"x": 3}], behavior=behavior)
array.layout.setparameter("__record__", "Dummy")
with pytest.raises(AttributeError) as err:
array[1].broken
assert str(err.value) == "I'm broken!" # not "no field named 'broken'"
| en | 0.783693 | # BSD 3-Clause License; see https://github.com/jpivarski/awkward-1.0/blob/master/LICENSE # not "no field named 'broken'" | 1.969888 | 2 |
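For contrast with the broken property exercised above, the same registration pattern with a property that resolves normally looks like this (same awkward1 version as the test assumed):

# Counterpart sketch: a record behavior whose property works instead of raising.
import awkward1

class Point(awkward1.Record):
    @property
    def mag2(self):
        return self["x"] ** 2

behavior = {"Point": Point}
array = awkward1.Array([{"x": 1}, {"x": 2}, {"x": 3}], behavior=behavior)
array.layout.setparameter("__record__", "Point")
print(array[1].mag2)  # 4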
scripts/preprocess.py | umd-lib/solr-irroc | 0 | 9775 | #!/usr/bin/env python3
# -*- coding: utf8 -*-
#===================================================#
# cleanup.py #
# <NAME> #
# 2015-08-13 #
# #
# Data preprocessing script for IRRoC DB #
# Usage: python3 cleanup.py [in.csv] [out.csv] #
#===================================================#
import sys, csv, re
infields = ['id', 'str_resource', 'str_description', 'website', 'meta_title',
'meta_description', 'stage_list', 'task_list']
outfields = infields + ['stage_list_facet', 'task_list_facet']
with open(sys.argv[1], 'r') as infile, open(sys.argv[2], 'w') as outfile:
# skip header row in order to use own fieldnames
next(infile)
# instantiate the reader and writer objects
dr = csv.DictReader(infile, fieldnames=infields)
dw = csv.DictWriter(outfile, fieldnames=outfields)
dw.writeheader()
exp = re.compile(r'\d+::([^\b])')
# loop over the input file, writing results to output file
for row in dr:
# remove hash marks from URL
m = re.search('#(.+)#', row['website'])
if m:
row['website'] = m.group(1)
# remove spaces from all multivalued fields
row['stage_list_facet'] = row['stage_list'].replace('; ', ';')
row['task_list_facet'] = row['task_list'].replace('; ', ';')
row['meta_description'] = row['meta_description'].replace(', ', ',')
# create stage_list_facet and task_list_facet cols and strip numbers
row['stage_list'] = re.sub(exp, r'\1', row['stage_list_facet'])
row['task_list'] = re.sub(exp, r'\1', row['task_list_facet'])
# write row
dw.writerow(row)
#!/usr/bin/env python3
# -*- coding: utf8 -*-
#===================================================#
# cleanup.py #
# <NAME> #
# 2015-08-13 #
# #
# Data preprocessing script for IRRoC DB #
# Usage: python3 cleanup.py [in.csv] [out.csv] #
#===================================================#
import sys, csv, re
infields = ['id', 'str_resource', 'str_description', 'website', 'meta_title',
'meta_description', 'stage_list', 'task_list']
outfields = infields + ['stage_list_facet', 'task_list_facet']
with open(sys.argv[1], 'r') as infile, open(sys.argv[2], 'w') as outfile:
# skip header row in order to use own fieldnames
next(infile)
# instantiate the reader and writer objects
dr = csv.DictReader(infile, fieldnames=infields)
dw = csv.DictWriter(outfile, fieldnames=outfields)
dw.writeheader()
exp = re.compile(r'\d+::([^\b])')
# loop over the input file, writing results to output file
for row in dr:
# remove hash marks from URL
m = re.search('#(.+)#', row['website'])
if m:
row['website'] = m.group(1)
# remove spaces from all multivalued fields
row['stage_list_facet'] = row['stage_list'].replace('; ', ';')
row['task_list_facet'] = row['task_list'].replace('; ', ';')
row['meta_description'] = row['meta_description'].replace(', ', ',')
# create stage_list_facet and task_list_facet cols and strip numbers
row['stage_list'] = re.sub(exp, r'\1', row['stage_list_facet'])
row['task_list'] = re.sub(exp, r'\1', row['task_list_facet'])
# write row
dw.writerow(row)
| en | 0.487706 | #!/user/bin/env python3 # -*- coding: utf8 -*- #===================================================# # cleanup.py # # <NAME> # # 2015-08-13 # # # # Data preprocessing script for IRRoC DB # # Usage: python3 cleanup.py [in.csv] [out.csv] # #===================================================# # skip header row in order to use own fieldnames # instantiate the reader and writer objects # loop over the input file, writing results to output file # remove hash marks from URL #', row['website']) # remove spaces from all multivalued fields # create stage_list_facet and task_list_facet cols and strip numbers # write row | 2.57541 | 3 |
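The regex r'\d+::([^\b])' above strips the numeric prefixes that precede each stage or task value while leaving the value itself intact. A quick stand-alone check on a made-up multivalued field:

# Stand-alone check of the prefix-stripping regex used in preprocess.py above.
# The sample string is invented; real values come from the stage_list/task_list columns.
import re

exp = re.compile(r'\d+::([^\b])')
sample = '1::Planning;2::Data Collection;3::Analysis'
print(re.sub(exp, r'\1', sample))  # Planning;Data Collection;Analysis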
ievv_opensource/demo/batchframeworkdemo/apps.py | appressoas/ievv_opensource | 0 | 9776 | from django.apps import AppConfig
from ievv_opensource import ievv_batchframework
from ievv_opensource.ievv_batchframework import batchregistry
class HelloWorldAction(ievv_batchframework.Action):
def execute(self):
self.logger.info('Hello world! %r', self.kwargs)
class HelloWorldAsyncAction(ievv_batchframework.Action):
def execute(self):
self.logger.info('\n\n\n\n\n\n\n\nHello world, async! %r\n\n\n\n\n', self.kwargs)
class BatchFrameworkDemoAppConfig(AppConfig):
name = 'ievv_opensource.demo.batchframeworkdemo'
verbose_name = "IEVV Batchframework demo"
def ready(self):
batchregistry.Registry.get_instance().add_actiongroup(
batchregistry.ActionGroup(
name='batchframeworkdemo_helloworld',
mode=batchregistry.ActionGroup.MODE_SYNCHRONOUS,
actions=[
HelloWorldAction
]))
batchregistry.Registry.get_instance().add_actiongroup(
batchregistry.ActionGroup(
name='batchframeworkdemo_helloworld_async',
mode=batchregistry.ActionGroup.MODE_ASYNCHRONOUS,
actions=[
HelloWorldAsyncAction
]
)
)
| from django.apps import AppConfig
from ievv_opensource import ievv_batchframework
from ievv_opensource.ievv_batchframework import batchregistry
class HelloWorldAction(ievv_batchframework.Action):
def execute(self):
self.logger.info('Hello world! %r', self.kwargs)
class HelloWorldAsyncAction(ievv_batchframework.Action):
def execute(self):
self.logger.info('\n\n\n\n\n\n\n\nHello world, async! %r\n\n\n\n\n', self.kwargs)
class BatchFrameworkDemoAppConfig(AppConfig):
name = 'ievv_opensource.demo.batchframeworkdemo'
verbose_name = "IEVV Batchframework demo"
def ready(self):
batchregistry.Registry.get_instance().add_actiongroup(
batchregistry.ActionGroup(
name='batchframeworkdemo_helloworld',
mode=batchregistry.ActionGroup.MODE_SYNCHRONOUS,
actions=[
HelloWorldAction
]))
batchregistry.Registry.get_instance().add_actiongroup(
batchregistry.ActionGroup(
name='batchframeworkdemo_helloworld_async',
mode=batchregistry.ActionGroup.MODE_ASYNCHRONOUS,
actions=[
HelloWorldAsyncAction
]
)
)
| none | 1 | 2.009999 | 2 |
|
fmoe/gates/utils.py | GODVIX/fastmoe | 0 | 9777 | <filename>fmoe/gates/utils.py<gh_stars>0
r"""
Utilities that may be used in the gates
"""
import torch
from fmoe.functions import count_by_gate
import fmoe_cuda as fmoe_native
def limit_by_capacity(topk_idx, num_expert, world_size, capacity):
capacity = torch.ones(num_expert, dtype=torch.int32,
device=topk_idx.device) * capacity
pos, lec, gec = count_by_gate(topk_idx, num_expert, world_size,
require_pos=False)
new_gec, = fmoe_native.limit_by_capacity(gec, capacity,
num_expert, world_size)
if world_size > 1:
new_lec, = fmoe_native.expert_exchange(new_gec, num_expert, world_size)
else:
new_lec = new_gec
fmoe_native.prune_gate_by_capacity(topk_idx,
new_lec.to(torch.int32), num_expert, world_size)
return new_lec, new_gec
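

# --- Illustrative usage (a sketch, not part of the original module) ---
# Assumes a single-process run with the fmoe CUDA extension available; the
# tensor sizes and values below are made up for demonstration only.
def _demo_limit_by_capacity():
    num_expert, world_size, capacity = 4, 1, 2
    # hypothetical top-1 expert assignment for 8 tokens
    topk_idx = torch.randint(0, num_expert, (8,), device='cuda')
    new_lec, new_gec = limit_by_capacity(topk_idx, num_expert, world_size,
                                         capacity)
    # the returned counts should reflect at most `capacity` tokens per expert
    # (an assumption about the native kernel's pruning behaviour)
    return new_lec, new_gec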
| <filename>fmoe/gates/utils.py<gh_stars>0
r"""
Utilities that may be used in the gates
"""
import torch
from fmoe.functions import count_by_gate
import fmoe_cuda as fmoe_native
def limit_by_capacity(topk_idx, num_expert, world_size, capacity):
capacity = torch.ones(num_expert, dtype=torch.int32,
device=topk_idx.device) * capacity
pos, lec, gec = count_by_gate(topk_idx, num_expert, world_size,
require_pos=False)
new_gec, = fmoe_native.limit_by_capacity(gec, capacity,
num_expert, world_size)
if world_size > 1:
new_lec, = fmoe_native.expert_exchange(new_gec, num_expert, world_size)
else:
new_lec = new_gec
fmoe_native.prune_gate_by_capacity(topk_idx,
new_lec.to(torch.int32), num_expert, world_size)
return new_lec, new_gec
| en | 0.917752 | Utilities that may be used in the gates | 1.911596 | 2 |
evaluate.py | DeppMeng/DANNet | 0 | 9778 | <reponame>DeppMeng/DANNet<filename>evaluate.py
import os
import torch
import numpy as np
from PIL import Image
import torch.nn as nn
from torch.utils import data
from network import *
from dataset.zurich_night_dataset import zurich_night_DataSet
from configs.test_config import get_arguments
palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,
0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
palette.append(0)
def colorize_mask(mask):
new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
new_mask.putpalette(palette)
return new_mask
def main():
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
device = torch.device("cuda")
args = get_arguments()
if not os.path.exists(args.save):
os.makedirs(args.save)
if args.model == 'PSPNet':
model = PSPNet(num_classes=args.num_classes)
if args.model == 'DeepLab':
model = Deeplab(num_classes=args.num_classes)
if args.model == 'RefineNet':
model = RefineNet(num_classes=args.num_classes, imagenet=False)
saved_state_dict = torch.load(args.restore_from)
model_dict = model.state_dict()
saved_state_dict = {k: v for k, v in saved_state_dict.items() if k in model_dict}
model_dict.update(saved_state_dict)
model.load_state_dict(saved_state_dict)
lightnet = LightNet()
saved_state_dict = torch.load(args.restore_from_light)
model_dict = lightnet.state_dict()
saved_state_dict = {k: v for k, v in saved_state_dict.items() if k in model_dict}
model_dict.update(saved_state_dict)
lightnet.load_state_dict(saved_state_dict)
model = model.to(device)
lightnet = lightnet.to(device)
model.eval()
lightnet.eval()
testloader = data.DataLoader(zurich_night_DataSet(args.data_dir, args.data_list, set=args.set))
interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)
weights = torch.log(torch.FloatTensor(
[0.36869696, 0.06084986, 0.22824049, 0.00655399, 0.00877272, 0.01227341, 0.00207795, 0.0055127, 0.15928651,
0.01157818, 0.04018982, 0.01218957, 0.00135122, 0.06994545, 0.00267456, 0.00235192, 0.00232904, 0.00098658,
0.00413907])).cuda()
weights = (torch.mean(weights) - weights) / torch.std(weights) * args.std + 1.0
for index, batch in enumerate(testloader):
if index % 10 == 0:
print('%d processd' % index)
image, name = batch
image = image.to(device)
with torch.no_grad():
r = lightnet(image)
enhancement = image + r
if args.model == 'RefineNet':
output2 = model(enhancement)
else:
_, output2 = model(enhancement)
weights_prob = weights.expand(output2.size()[0], output2.size()[3], output2.size()[2], 19)
weights_prob = weights_prob.transpose(1, 3)
output2 = output2 * weights_prob
output = interp(output2).cpu().data[0].numpy()
output = output.transpose(1,2,0)
output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
output_col = colorize_mask(output)
output = Image.fromarray(output)
###### get the enhanced image
mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
enhancement = enhancement.cpu().data[0].numpy().transpose(1,2,0)
enhancement = enhancement * mean_std[1] + mean_std[0]
enhancement = (enhancement - enhancement.min()) / (enhancement.max()-enhancement.min())
enhancement = enhancement[:, :, ::-1] * 255 # change to BGR
enhancement = Image.fromarray(enhancement.astype(np.uint8))
###### get the light
light = r.cpu().data[0].numpy().transpose(1, 2, 0)
light = (light-light.min()) / (light.max() - light.min())
light = light[:, :, ::-1] * 255 # change to BGR
light = Image.fromarray(light.astype(np.uint8))
name = name[0].split('/')[-1]
output.save('%s/%s' % (args.save, name))
output_col.save('%s/%s_color.png' % (args.save, name.split('.')[0]))
enhancement.save('%s/%s_enhancement.png' % (args.save, name.split('.')[0]))
light.save('%s/%s_light.png' % (args.save, name.split('.')[0]))
if __name__ == '__main__':
main()
| import os
import torch
import numpy as np
from PIL import Image
import torch.nn as nn
from torch.utils import data
from network import *
from dataset.zurich_night_dataset import zurich_night_DataSet
from configs.test_config import get_arguments
palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,
0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
palette.append(0)
def colorize_mask(mask):
new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
new_mask.putpalette(palette)
return new_mask
def main():
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
device = torch.device("cuda")
args = get_arguments()
if not os.path.exists(args.save):
os.makedirs(args.save)
if args.model == 'PSPNet':
model = PSPNet(num_classes=args.num_classes)
if args.model == 'DeepLab':
model = Deeplab(num_classes=args.num_classes)
if args.model == 'RefineNet':
model = RefineNet(num_classes=args.num_classes, imagenet=False)
saved_state_dict = torch.load(args.restore_from)
model_dict = model.state_dict()
saved_state_dict = {k: v for k, v in saved_state_dict.items() if k in model_dict}
model_dict.update(saved_state_dict)
model.load_state_dict(saved_state_dict)
lightnet = LightNet()
saved_state_dict = torch.load(args.restore_from_light)
model_dict = lightnet.state_dict()
saved_state_dict = {k: v for k, v in saved_state_dict.items() if k in model_dict}
model_dict.update(saved_state_dict)
lightnet.load_state_dict(saved_state_dict)
model = model.to(device)
lightnet = lightnet.to(device)
model.eval()
lightnet.eval()
testloader = data.DataLoader(zurich_night_DataSet(args.data_dir, args.data_list, set=args.set))
interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)
weights = torch.log(torch.FloatTensor(
[0.36869696, 0.06084986, 0.22824049, 0.00655399, 0.00877272, 0.01227341, 0.00207795, 0.0055127, 0.15928651,
0.01157818, 0.04018982, 0.01218957, 0.00135122, 0.06994545, 0.00267456, 0.00235192, 0.00232904, 0.00098658,
0.00413907])).cuda()
weights = (torch.mean(weights) - weights) / torch.std(weights) * args.std + 1.0
for index, batch in enumerate(testloader):
if index % 10 == 0:
print('%d processd' % index)
image, name = batch
image = image.to(device)
with torch.no_grad():
r = lightnet(image)
enhancement = image + r
if args.model == 'RefineNet':
output2 = model(enhancement)
else:
_, output2 = model(enhancement)
weights_prob = weights.expand(output2.size()[0], output2.size()[3], output2.size()[2], 19)
weights_prob = weights_prob.transpose(1, 3)
output2 = output2 * weights_prob
output = interp(output2).cpu().data[0].numpy()
output = output.transpose(1,2,0)
output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
output_col = colorize_mask(output)
output = Image.fromarray(output)
###### get the enhanced image
mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
enhancement = enhancement.cpu().data[0].numpy().transpose(1,2,0)
enhancement = enhancement * mean_std[1] + mean_std[0]
enhancement = (enhancement - enhancement.min()) / (enhancement.max()-enhancement.min())
enhancement = enhancement[:, :, ::-1] * 255 # change to BGR
enhancement = Image.fromarray(enhancement.astype(np.uint8))
###### get the light
light = r.cpu().data[0].numpy().transpose(1, 2, 0)
light = (light-light.min()) / (light.max() - light.min())
light = light[:, :, ::-1] * 255 # change to BGR
light = Image.fromarray(light.astype(np.uint8))
name = name[0].split('/')[-1]
output.save('%s/%s' % (args.save, name))
output_col.save('%s/%s_color.png' % (args.save, name.split('.')[0]))
enhancement.save('%s/%s_enhancement.png' % (args.save, name.split('.')[0]))
light.save('%s/%s_light.png' % (args.save, name.split('.')[0]))
if __name__ == '__main__':
main() | en | 0.774967 | ###### get the enhanced image # change to BGR ###### get the light # change to BGR | 2.293945 | 2 |
decorator.py | zengboming/python | 0 | 9779 | #decorator
def now():
print "2015-11-18"
f=now
f()
print now.__name__
print f.__name__
def log(func):
def wrapper(*args,**kw):
print 'begin call %s():' %func.__name__
func(*args,**kw)
print 'end call %s():' %func.__name__
return wrapper
@log
def now1():
print now1.__name__
now1()
now1=log(now1)
now1()
def log1(text):
def decorator(func):
def wrapper(*args,**kw):
print '%s %s():' %(text,func.__name__)
return func(*args,**kw)
return wrapper
return decorator
@log1('execute')
def now2():
print now2.__name__
now2()
import functools
def log2(func):
@functools.wraps(func)
def wrapper(*args,**kw):
print 'call %s():' %func.__name__
return func(*args,**kw)
return wrapper
@log2
def now3():
print now3.__name__
now3()
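# Note: thanks to functools.wraps, now3.__name__ printed above is 'now3';
# with the plain wrappers (log/log1) the printed name is 'wrapper' instead.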
def log3(text):
def decorator(func):
@functools.wraps(func)
def wrapper(*args,**kw):
print '%s %s():' %(text,func.__name__)
return func(*args,**kw)
return wrapper
return decorator
@log3('execute')
def now4():
print now4.__name__
now4()
def log4(text):
if callable(text):
@functools.wraps(text)
def wrapper(*args,**kw):
print 'begin call %s:' %text.__name__
text(*args,**kw)
print 'end call '+text.__name__
return wrapper
else :
def decorator(func):
@functools.wraps(func)
def wrapper(*args,**kw):
print 'begin call %s %s():' %(text,func.__name__)
func(*args,**kw)
print 'end call %s %s():' %(text,func.__name__)
return wrapper
return decorator
@log4
def now5():
print 'doing'+now5.__name__
now5()
@log4('execute')
def now6():
print 'doing'+now6.__name__
now6() | #decorator
def now():
print "2015-11-18"
f=now
f()
print now.__name__
print f.__name__
def log(func):
def wrapper(*args,**kw):
print 'begin call %s():' %func.__name__
func(*args,**kw)
print 'end call %s():' %func.__name__
return wrapper
@log
def now1():
print now1.__name__
now1()
now1=log(now1)
now1()
def log1(text):
def decorator(func):
def wrapper(*args,**kw):
print '%s %s():' %(text,func.__name__)
return func(*args,**kw)
return wrapper
return decorator
@log1('execute')
def now2():
print now2.__name__
now2()
import functools
def log2(func):
@functools.wraps(func)
def wrapper(*args,**kw):
print 'call %s():' %func.__name__
return func(*args,**kw)
return wrapper
@log2
def now3():
print now3.__name__
now3()
def log3(text):
def decorator(func):
@functools.wraps(func)
def wrapper(*args,**kw):
print '%s %s():' %(text,func.__name__)
return func(*args,**kw)
return wrapper
return decorator
@log3('execute')
def now4():
print now4.__name__
now4()
def log4(text):
if callable(text):
@functools.wraps(text)
def wrapper(*args,**kw):
print 'begin call %s:' %text.__name__
text(*args,**kw)
print 'end call '+text.__name__
return wrapper
else :
def decorator(func):
@functools.wraps(func)
def wrapper(*args,**kw):
print 'begin call %s %s():' %(text,func.__name__)
func(*args,**kw)
print 'end call %s %s():' %(text,func.__name__)
return wrapper
return decorator
@log4
def now5():
print 'doing'+now5.__name__
now5()
@log4('execute')
def now6():
print 'doing'+now6.__name__
now6() | en | 0.553214 | #decorator | 3.184262 | 3 |
test/pyfrechet_visualize.py | compgeomTU/frechetForCurves | 0 | 9780 | # Author: <NAME>
# <EMAIL>
#
# Command line to run program:
# python3 pyfrechet_visualize.py
import sys, os, unittest
sys.path.insert(0, "../")
from pyfrechet.distance import StrongDistance
from pyfrechet.visualize import FreeSpaceDiagram, Trajectories
TEST_DATA = "sp500"
if TEST_DATA == "sp500":
REACHABLE_EPSILON = 5
UNREACHABLE_EPSILON = 1
REVERSE_CURVE = False
elif TEST_DATA == "trajectory":
REACHABLE_EPSILON = 70
UNREACHABLE_EPSILON = 60
REVERSE_CURVE = True
CURVE_1 = f"{TEST_DATA}_data/sample_1.txt"
CURVE_2 = f"{TEST_DATA}_data/sample_2.txt"
class pyfrechet_optimise(unittest.TestCase):
global REACHABLE_EPSILON
global UNREACHABLE_EPSILON
global REVERSE_CURVE
global CURVE_1
global CURVE_2
def test_fail_BinarySearch_instance_argument(self):
class BadClass(): pass
with self.assertRaises(TypeError):
bc = BadClass()
FreeSpaceDiagram(bc)
def test_FreeSpaceDiagram_plot(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
sd.setFreeSpace(REACHABLE_EPSILON)
fsd = FreeSpaceDiagram(sd)
fsd.plot()
def test_FreeSpaceDiagram__addEpsilonSlider(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
fsd = FreeSpaceDiagram(sd)
fsd.addEpsilonSlider(UNREACHABLE_EPSILON, REACHABLE_EPSILON, 1)
fsd.plot()
def test_FreeSpaceDiagram__weighted_cells(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
fsd = FreeSpaceDiagram(sd)
sd.setFreeSpace(REACHABLE_EPSILON)
fsd.plot(True, False)
def test_FreeSpaceDiagram__gridlines(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
fsd = FreeSpaceDiagram(sd)
sd.setFreeSpace(REACHABLE_EPSILON)
fsd.plot(True, True)
def test_Trajectories(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
t = Trajectories(sd)
t.plot()
if __name__ == '__main__':
unittest.main()
| # Author: <NAME>
# <EMAIL>
#
# Command line to run program:
# python3 pyfrechet_visualize.py
import sys, os, unittest
sys.path.insert(0, "../")
from pyfrechet.distance import StrongDistance
from pyfrechet.visualize import FreeSpaceDiagram, Trajectories
TEST_DATA = "sp500"
if TEST_DATA == "sp500":
REACHABLE_EPSILON = 5
UNREACHABLE_EPSILON = 1
REVERSE_CURVE = False
elif TEST_DATA == "trajectory":
REACHABLE_EPSILON = 70
UNREACHABLE_EPSILON = 60
REVERSE_CURVE = True
CURVE_1 = f"{TEST_DATA}_data/sample_1.txt"
CURVE_2 = f"{TEST_DATA}_data/sample_2.txt"
class pyfrechet_optimise(unittest.TestCase):
global REACHABLE_EPSILON
global UNREACHABLE_EPSILON
global REVERSE_CURVE
global CURVE_1
global CURVE_2
def test_fail_BinarySearch_instance_argument(self):
class BadClass(): pass
with self.assertRaises(TypeError):
bc = BadClass()
FreeSpaceDiagram(bc)
def test_FreeSpaceDiagram_plot(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
sd.setFreeSpace(REACHABLE_EPSILON)
fsd = FreeSpaceDiagram(sd)
fsd.plot()
def test_FreeSpaceDiagram__addEpsilonSlider(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
fsd = FreeSpaceDiagram(sd)
fsd.addEpsilonSlider(UNREACHABLE_EPSILON, REACHABLE_EPSILON, 1)
fsd.plot()
def test_FreeSpaceDiagram__weighted_cells(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
fsd = FreeSpaceDiagram(sd)
sd.setFreeSpace(REACHABLE_EPSILON)
fsd.plot(True, False)
def test_FreeSpaceDiagram__gridlines(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
fsd = FreeSpaceDiagram(sd)
sd.setFreeSpace(REACHABLE_EPSILON)
fsd.plot(True, True)
def test_Trajectories(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
t = Trajectories(sd)
t.plot()
if __name__ == '__main__':
unittest.main()
| en | 0.364627 | # Author: <NAME> # <EMAIL> # # Command line to run program: # python3 pyfrechet_visualize.py | 2.600627 | 3 |
py_ser_freeastro/core.py | nww2007/py_ser_freeastro | 0 | 9781 | #!/usr/bin/env python3
# vim:fileencoding=UTF-8
# -*- coding: UTF-8 -*-
"""
Created on 15 June 2019.
@author: <NAME> <EMAIL>
"""
import sys
import struct
import numpy as np
from progress.bar import Bar
import logging
logging.basicConfig(format = u'%(filename)s:%(lineno)d: %(levelname)-8s [%(asctime)s] %(message)s', level = logging.DEBUG, stream=sys.stdout)
# class ser(np.array):
class ser(object):
"""
A set of methods for working with a set of images in the SER format.
"""
def __init__(self, fname):
"""
Download information from file.
"""
# super.__init__()
# luids
self.MONO = 0
self.BAYER_RGGB = 8
self.BAYER_GRBG = 9
self.BAYER_GBRG = 10
self.BAYER_BGGR = 11
self.BAYER_CYYM = 16
self.BAYER_YCMY = 17
self.BAYER_YMCY = 18
self.BAYER_MYYC = 19
self.RGB = 100
self.BGR = 101
self.fname = fname
with open(self.fname, 'rb') as fd:
# Download information from the header.
self.header = fd.read(178)
self.parse_header()
# Download images.
self.frames = np.zeros((self.framecount, self.imageheight, self.imagewidth))
bar = Bar('Downloading', max=self.framecount)
for frame in range(self.framecount):
# for frame in range(1):
bar.next()
t_frame = fd.read(self.imageheight * self.imagewidth * self.pixeldepthperplane//8)
for line in range(self.imageheight):
for pixel in range(self.imagewidth):
index = (line * self.imagewidth + pixel) * 2
self.frames[frame][line][pixel] = struct.unpack('<H', t_frame[index:index+2])[0]
bar.finish()
# Download the trailer
self.trailer = fd.read(self.framecount * 8)
self.parse_trailer()
def parse_header(self):
"""
        Parse the file header.
"""
self.fileid = self.header[0:14]
self.luid = struct.unpack('<i', self.header[14:18])[0]
self.colorid = struct.unpack('<i', self.header[18:22])[0]
self.littleendian_FALSE = 0
self.littleendian_TRUE = 1
self.littleendian = struct.unpack('<i', self.header[22:26])[0]
self.imagewidth = struct.unpack('<i', self.header[26:30])[0]
self.imageheight = struct.unpack('<i', self.header[30:34])[0]
self.pixeldepthperplane = struct.unpack('<i', self.header[34:38])[0]
self.framecount = struct.unpack('<i', self.header[38:42])[0]
self.observer = self.header[42:82]
self.telescope = self.header[82:122]
self.datetime = struct.unpack('<q', self.header[122:130])[0]
self.datetime_utc = struct.unpack('<q', self.header[130:138])[0]
# logging.info('{0}x{1}'.format(self.imagewidth, self.imageheight))
def parse_trailer(self):
"""
Parse the trailer
"""
for i in range(0, self.framecount*8, 8):
tuli = (struct.unpack('<Q', self.trailer[i:i+8])[0])
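

# Illustrative usage of the ser class above (a sketch; 'capture.SER' is a
# placeholder path, not part of the original module):
#   frames = ser('capture.SER')
#   print(frames.imagewidth, frames.imageheight, frames.framecount)
#   first_frame = frames.frames[0]   # 2D numpy array of raw pixel values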
def main(argv):
logging.info('%s started.\n' % argv[0])
fn = './images/ASICAP_2019-05-10_01_43_36_523.SER'
frames = ser(fn)
# logging.debug(type(frames))
# logging.debug(type(object))
# # darks_fn = './images/ASICAP_2019-05-10_02_12_00_621.SER'
# # offsets_fn = './images/ASICAP_2019-05-10_02_30_47_294.SER'
#
# # frames = ser.ser()
# # frames.read(darks_fn)
# # frames.read(lights_fn)
# # ser_fr = serialise_frames(frames)
# # logging.debug('std1={}'.format(ser_fr.std()))
# # hist_fr = get_hist(ser_fr)
# # plt.plot(hist_fr)
# # plt.grid()
# # plt.show()
#
# fnames = [
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_34_52_584.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_36_05_343.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_34_373.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_47_276.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_58_784.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_06_703.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_17_476.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_27_330.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_36_623.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_48_239.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_20_816.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_32_118.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_47_796.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_59_999.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_41_10_321.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_41_41_276.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_07_956.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_19_287.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_31_180.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_43_981.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_43_07_152.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_43_36_180.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_01_167.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_33_214.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_58_952.SER',
# ]
#
# print('{};{};{};{};{}'.format('File', 'Temperature', 'Exposure', 'Gain', 'std'))
# for fn in fnames:
# print('{}'.format(fn), flush=True, file=sys.stderr)
# frames = ser.ser()
# frames.read(fn)
# ser_fr = serialise_frames(frames)
#
# config = configparser.ConfigParser()
# config.read(fn + '.txt')
#
# print('{};{};{};{};{}'.format(fn, config['ZWO ASI120MC']['temperature'], config['ZWO ASI120MC']['exposure'], config['ZWO ASI120MC']['gain'], ser_fr.std()))
logging.info('%s finished.\n' % argv[0])
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| #!/usr/bin/env python3
# vim:fileencoding=UTF-8
# -*- coding: UTF-8 -*-
"""
Created on 15 juny 2019 y.
@author: <NAME> <EMAIL>
"""
import sys
import struct
import numpy as np
from progress.bar import Bar
import logging
logging.basicConfig(format = u'%(filename)s:%(lineno)d: %(levelname)-8s [%(asctime)s] %(message)s', level = logging.DEBUG, stream=sys.stdout)
# class ser(np.array):
class ser(object):
"""
A set of methods for working with a set of images in the SER format.
"""
def __init__(self, fname):
"""
Download information from file.
"""
# super.__init__()
# luids
self.MONO = 0
self.BAYER_RGGB = 8
self.BAYER_GRBG = 9
self.BAYER_GBRG = 10
self.BAYER_BGGR = 11
self.BAYER_CYYM = 16
self.BAYER_YCMY = 17
self.BAYER_YMCY = 18
self.BAYER_MYYC = 19
self.RGB = 100
self.BGR = 101
self.fname = fname
with open(self.fname, 'rb') as fd:
# Download information from the header.
self.header = fd.read(178)
self.parse_header()
# Download images.
self.frames = np.zeros((self.framecount, self.imageheight, self.imagewidth))
bar = Bar('Downloading', max=self.framecount)
for frame in range(self.framecount):
# for frame in range(1):
bar.next()
t_frame = fd.read(self.imageheight * self.imagewidth * self.pixeldepthperplane//8)
for line in range(self.imageheight):
for pixel in range(self.imagewidth):
index = (line * self.imagewidth + pixel) * 2
self.frames[frame][line][pixel] = struct.unpack('<H', t_frame[index:index+2])[0]
bar.finish()
# Download the trailer
self.trailer = fd.read(self.framecount * 8)
self.parse_trailer()
def parse_header(self):
"""
Parse the title.
"""
self.fileid = self.header[0:14]
self.luid = struct.unpack('<i', self.header[14:18])[0]
self.colorid = struct.unpack('<i', self.header[18:22])[0]
self.littleendian_FALSE = 0
self.littleendian_TRUE = 1
self.littleendian = struct.unpack('<i', self.header[22:26])[0]
self.imagewidth = struct.unpack('<i', self.header[26:30])[0]
self.imageheight = struct.unpack('<i', self.header[30:34])[0]
self.pixeldepthperplane = struct.unpack('<i', self.header[34:38])[0]
self.framecount = struct.unpack('<i', self.header[38:42])[0]
self.observer = self.header[42:82]
self.telescope = self.header[82:122]
self.datetime = struct.unpack('<q', self.header[122:130])[0]
self.datetime_utc = struct.unpack('<q', self.header[130:138])[0]
# logging.info('{0}x{1}'.format(self.imagewidth, self.imageheight))
def parse_trailer(self):
"""
Parse the trailer
"""
for i in range(0, self.framecount*8, 8):
tuli = (struct.unpack('<Q', self.trailer[i:i+8])[0])
def main(argv):
logging.info('%s started.\n' % argv[0])
fn = './images/ASICAP_2019-05-10_01_43_36_523.SER'
frames = ser(fn)
# logging.debug(type(frames))
# logging.debug(type(object))
# # darks_fn = './images/ASICAP_2019-05-10_02_12_00_621.SER'
# # offsets_fn = './images/ASICAP_2019-05-10_02_30_47_294.SER'
#
# # frames = ser.ser()
# # frames.read(darks_fn)
# # frames.read(lights_fn)
# # ser_fr = serialise_frames(frames)
# # logging.debug('std1={}'.format(ser_fr.std()))
# # hist_fr = get_hist(ser_fr)
# # plt.plot(hist_fr)
# # plt.grid()
# # plt.show()
#
# fnames = [
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_34_52_584.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_36_05_343.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_34_373.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_47_276.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_58_784.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_06_703.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_17_476.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_27_330.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_36_623.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_48_239.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_20_816.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_32_118.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_47_796.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_59_999.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_41_10_321.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_41_41_276.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_07_956.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_19_287.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_31_180.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_43_981.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_43_07_152.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_43_36_180.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_01_167.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_33_214.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_58_952.SER',
# ]
#
# print('{};{};{};{};{}'.format('File', 'Temperature', 'Exposure', 'Gain', 'std'))
# for fn in fnames:
# print('{}'.format(fn), flush=True, file=sys.stderr)
# frames = ser.ser()
# frames.read(fn)
# ser_fr = serialise_frames(frames)
#
# config = configparser.ConfigParser()
# config.read(fn + '.txt')
#
# print('{};{};{};{};{}'.format(fn, config['ZWO ASI120MC']['temperature'], config['ZWO ASI120MC']['exposure'], config['ZWO ASI120MC']['gain'], ser_fr.std()))
logging.info('%s finished.\n' % argv[0])
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| en | 0.318762 | #!/usr/bin/env python3 # vim:fileencoding=UTF-8 # -*- coding: UTF-8 -*- Created on 15 juny 2019 y. @author: <NAME> <EMAIL> # class ser(np.array): A set of methods for working with a set of images in the SER format. Download information from file. # super.__init__() # luids # Download information from the header. # Download images. # for frame in range(1): # Download the trailer Parse the title. # logging.info('{0}x{1}'.format(self.imagewidth, self.imageheight)) Parse the trailer # logging.debug(type(frames)) # logging.debug(type(object)) # # darks_fn = './images/ASICAP_2019-05-10_02_12_00_621.SER' # # offsets_fn = './images/ASICAP_2019-05-10_02_30_47_294.SER' # # # frames = ser.ser() # # frames.read(darks_fn) # # frames.read(lights_fn) # # ser_fr = serialise_frames(frames) # # logging.debug('std1={}'.format(ser_fr.std())) # # hist_fr = get_hist(ser_fr) # # plt.plot(hist_fr) # # plt.grid() # # plt.show() # # fnames = [ # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_34_52_584.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_36_05_343.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_34_373.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_47_276.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_58_784.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_06_703.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_17_476.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_27_330.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_36_623.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_48_239.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_20_816.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_32_118.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_47_796.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_59_999.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_41_10_321.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_41_41_276.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_07_956.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_19_287.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_31_180.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_43_981.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_43_07_152.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_43_36_180.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_01_167.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_33_214.SER', # '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_58_952.SER', # ] # # print('{};{};{};{};{}'.format('File', 'Temperature', 'Exposure', 'Gain', 'std')) # for fn in fnames: # print('{}'.format(fn), flush=True, file=sys.stderr) # frames = ser.ser() # frames.read(fn) # ser_fr = serialise_frames(frames) # # config = configparser.ConfigParser() # config.read(fn + '.txt') # # print('{};{};{};{};{}'.format(fn, config['ZWO ASI120MC']['temperature'], config['ZWO ASI120MC']['exposure'], config['ZWO ASI120MC']['gain'], ser_fr.std())) | 2.571103 | 3 |
sgdml_dataset_generation/readers/fchk.py | humeniuka/sGDML_dataset_generation | 0 | 9782 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__all__ = ["FormattedCheckpointFile"]
# # Imports
import numpy as np
import scipy.linalg as sla
from collections import OrderedDict
import re
import logging
# # Local Imports
from sgdml_dataset_generation import units
from sgdml_dataset_generation.units import hbar
# # Logging
logger = logging.getLogger(__name__)
logging.basicConfig(format="[%(module)-12s] %(message)s", level=logging.INFO)
class FormattedCheckpointFile(object):
"""
reads all fields from formatted checkpoint files produced by the quantum chemistry
programs Gaussian 16 and QChem.
Parameters
----------
f : File
file handle opened for reading a formatted checkpoint file
The user has to ensure the file handle is opened and closed at the end.
The fields of the checkpoint file can be accessed by their names (see example below).
Array fields are stored as 1D numpy arrays of float (R) or integer (I) type.
Example
-------
>>> with open("freq.fchk") as f:
>>> fchk = FormattedCheckpointFile(f)
>>> print(fchk["Number of atoms"])
"""
def __init__(self, f):
self.filename = f.name
self.data = OrderedDict()
# accumulate all lines belonging to the same field (whithout newlines)
acc = ""
dtype = None
for line_number, line in enumerate(f.readlines()):
# count lines starting from 1
line_number += 1
# The name of a field starts in the first column and with a capital letter
if re.match(r"^[A-Z].*", line):
if len(acc) > 0 and not dtype is None:
# All lines belonging to the previous field must have been read,
# so we convert it to a numpy array.
try:
if dtype == str:
self.data[field] = acc
else:
# numerical types
array = np.fromstring(acc, dtype=dtype, sep=" ")
assert len(array) == count
self.data[field] = array
except (ValueError,AssertionError) as err:
logger.warning(f"A problem occurred reading field `{field}` in line {line_number:10} in {f.name} .")
logger.warning(err)
self.data[field] = np.zeros(count, dtype=dtype)
# reset accumulator
acc = ""
try:
if len(line) < 43:
# skip title and method
logger.debug(f"skipping line {line_number:10} in {f.name}: `{line.strip()}`")
continue
# First 43 columns are reserved for the field name
field = line[0:43].strip()
logger.debug(f"field `{field}` encountered")
# Colum 43 contains a character indicating the data type:
# I -> integer
# R -> real
type_char = line[43]
if type_char == "I":
dtype = int
elif type_char == "R":
dtype = float
elif type_char == "C":
dtype = str
else:
dtype = None
# skip lines without I or R data type markers
logger.debug(f"skipping line {line_number:10} in {f.name}: `{line.strip()}` .")
continue
# If column 47-48 contain the string "N=", we are dealing with an array
# and the last integer indicates the number of elements
if line[47:49] == "N=":
count = int(line[49:])
else:
# scalar value
self.data[field] = dtype(line[49:])
except Exception as err:
logger.error(f"An error occurred while reading line {line_number:10} in {f.name} .")
raise err
else:
acc += " " + line
# read last field
if len(acc) > 0:
self.data[field] = np.fromstring(acc, dtype=dtype, sep=" ")
assert len(self.data[field]) == count
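
    # Illustrative formatted-checkpoint lines as parsed above (the numbers are
    # made up; the field name occupies columns 0-42, the type character sits in
    # column 43, and "N=" at columns 47-48 marks an array field):
    #   Number of atoms                            I               12
    #   Current cartesian coordinates              R   N=          36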
def __getitem__(self, key):
"""
access data fields by their names
Parameters
----------
key : str
name of field that should be retrieved (e.g. 'Number of atoms')
Returns
-------
field : float, int or ndarray
a KeyError is raised if the field is not present in the formatted checkpoint file
"""
return self.data[key]
def keys(self):
"""
list names of all fields present in the formatted checkpoint file
Returns
-------
keys : list of str
field names
"""
return self.data.keys()
def harmonic_approximation(self):
"""
extract the position, gradient and Hessian of the potential energy in cartesian coordinates
The potential is expanded to second order around the current position x0:
E(x) = E(x0) + grad(E)^T.(x-x0) + 1/2 (x-x0)^T . hess(E) . (x-x0)
A frequency calculation has to be present in the formatted checkpoint file.
The frequency calculation should be performed in a separate Gaussian 16 job using the
following route line for the ground state calculation:
#P functional/basis Freq NoSymm IOp(7/32=5)
and the following route line for an excited state frequency calculation:
#P functional/basis TD=(Nstates=2, Root=1, NAC) Freq NoSymm IOp(7/32=5)
Returns
-------
pos : ndarray (3*nat,)
cartesian coordinates x0
energy : ndarray (1,)
total energy E(x0) of state of interest (in Hartree)
grad : ndarray (3*nat,)
cartesian gradient dE/dx(x0) (in Hartree/bohr)
hess : ndarray (3*nat,3*nat)
cartesian force constants d^2E/(dxdx)(x0) (in Hartree/bohr^2)
"""
try:
nat = self.data["Number of atoms"]
# total energy of state of interest
energy = np.array(self.data["Total Energy"])
# geometry
pos = self.data["Current cartesian coordinates"]
# cartesian gradient
grad = self.data["Cartesian Gradient"]
# Only the lower triangular part of the Hessian is stored.
hess = np.zeros((3*nat,3*nat))
row, col = np.tril_indices(3*nat)
hess[row,col] = self.data["Cartesian Force Constants"]
# Hessian is symmetric, H^T = H
hess[col,row] = hess[row,col]
except KeyError as err:
logger.error(f"A required field could not be found in formatted checkpoint file {self.filename} .")
raise err
return pos, energy, grad, hess
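
    # A worked use of the quadratic expansion documented above (dx is a
    # hypothetical displacement with the same shape as pos):
    #   dx = x - pos
    #   E_harm = energy + grad @ dx + 0.5 * dx @ hess @ dx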
def nonadiabatic_coupling(self):
"""
extract non-adiabatic coupling vector between ground and excited state (Root=I), if present.
Only Gaussian 16 saves the NAC vector in the checkpoint file, while QChem writes it to the output file.
Returns
-------
nac : ndarray (3*nat,)
1st order derivative coupling <0|d/dx|I>
"""
try:
nac = self.data["Nonadiabatic coupling"]
except KeyError as err:
logger.error(f"The field `Nonadiabatic coupling` could not be found in the formatted checkpoint file {self.filename} .")
raise err
if (nac == 0.0).all():
logger.warning(f"All components of non-adiabatic coupling vector in {self.filename} are zero.")
return nac
def vibrational_groundstate(self, zero_threshold=100.0):
"""
The vibrational ground state belonging to the harmonic potential is given by
1/4 T
psi (x) = (det(Gamma ) / pi^N) exp{ -1/2 (x-x ) Gamma (x-x ) }
0 0 0 0 0
provided that x0 is the minimum. This function computes the width parameter matrix
Gamma_0 from the Hessian at the minimum.
Optional
--------
zero_threshold : float > 0
threshold for considering normal mode frequencies as zero (in cm-1)
Returns
-------
x0 : ndarray (3*nat,)
center of Gaussian, in cartesian coordinates (bohr)
Gamma0 : ndarray (3*nat,3*nat)
symmetric, positive semi-definite matrix of width parameters (bohr^{-2})
en_zpt : float
zero-point energy (Hartree)
"""
x0, energy, grad, hess = self.harmonic_approximation()
mass = self.masses()
# diagonals of M^{1/2} and M^{-1/2}
msq = np.sqrt(mass)
imsq = 1.0/msq
# mass-weighted Hessian H
hess_mwc = np.einsum('i,ij,j->ij', imsq, hess, imsq)
# diagonalize symmetric H = V.diag(w).V^T
w2,V = sla.eigh(hess_mwc)
# vibrational energies
w = np.sqrt(w2)
# zero-point energy
en_zpt = 0.5 * hbar * np.sum(w)
logger.info("Normal mode frequencies (cm-1)")
logger.info(w*units.hartree_to_wavenumbers)
if not (w * units.hartree_to_wavenumbers > zero_threshold).all():
logger.warning("At a minimum all frequencies should be positive, found imaginary ones.")
# select non-zero vibrational modes
non_zero = (w * units.hartree_to_wavenumbers) > zero_threshold
# number of non singular dimensions
num_non_zero = np.count_nonzero( non_zero )
dim = x0.shape[0]
logger.info(f"number of zero modes : {dim - num_non_zero}")
# L = hbar^{-1/2} M^{1/2} V w^{1/2}
L = hbar**(-1/2) * np.einsum('i,ij,j->ij', msq, V[:,non_zero], np.sqrt(w[non_zero]))
# Gamma_0 = L . L^T
Gamma_0 = np.einsum('ij,kj->ik', L, L)
return x0, Gamma_0, en_zpt
def masses(self):
"""
atomic masses in a.u.
Returns
-------
masses : ndarray (3*nat,)
masses for each cartesian coordinate in multiples of electron mass
"""
mass = self.data["Real atomic weights"] * units.amu_to_aumass
mass = np.repeat(mass, 3)
return mass
def atomic_numbers(self):
"""
atomic numbers
Returns
-------
numbers : ndarray(nat,)
atomic number for each atom
"""
return self.data["Atomic numbers"]
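

# --- Illustrative usage (a sketch; "freq.fchk" is a placeholder filename) ---
if __name__ == "__main__":
    with open("freq.fchk") as f:
        fchk = FormattedCheckpointFile(f)
    pos, energy, grad, hess = fchk.harmonic_approximation()
    x0, Gamma_0, en_zpt = fchk.vibrational_groundstate()
    print("zero-point energy (Hartree):", en_zpt)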
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
__all__ = ["FormattedCheckpointFile"]
# # Imports
import numpy as np
import scipy.linalg as sla
from collections import OrderedDict
import re
import logging
# # Local Imports
from sgdml_dataset_generation import units
from sgdml_dataset_generation.units import hbar
# # Logging
logger = logging.getLogger(__name__)
logging.basicConfig(format="[%(module)-12s] %(message)s", level=logging.INFO)
class FormattedCheckpointFile(object):
"""
reads all fields from formatted checkpoint files produced by the quantum chemistry
programs Gaussian 16 and QChem.
Parameters
----------
f : File
file handle opened for reading a formatted checkpoint file
The user has to ensure the file handle is opened and closed at the end.
The fields of the checkpoint file can be accessed by their names (see example below).
Array fields are stored as 1D numpy arrays of float (R) or integer (I) type.
Example
-------
>>> with open("freq.fchk") as f:
>>> fchk = FormattedCheckpointFile(f)
>>> print(fchk["Number of atoms"])
"""
def __init__(self, f):
self.filename = f.name
self.data = OrderedDict()
# accumulate all lines belonging to the same field (whithout newlines)
acc = ""
dtype = None
for line_number, line in enumerate(f.readlines()):
# count lines starting from 1
line_number += 1
# The name of a field starts in the first column and with a capital letter
if re.match(r"^[A-Z].*", line):
if len(acc) > 0 and not dtype is None:
# All lines belonging to the previous field must have been read,
# so we convert it to a numpy array.
try:
if dtype == str:
self.data[field] = acc
else:
# numerical types
array = np.fromstring(acc, dtype=dtype, sep=" ")
assert len(array) == count
self.data[field] = array
except (ValueError,AssertionError) as err:
logger.warning(f"A problem occurred reading field `{field}` in line {line_number:10} in {f.name} .")
logger.warning(err)
self.data[field] = np.zeros(count, dtype=dtype)
# reset accumulator
acc = ""
try:
if len(line) < 43:
# skip title and method
logger.debug(f"skipping line {line_number:10} in {f.name}: `{line.strip()}`")
continue
# First 43 columns are reserved for the field name
field = line[0:43].strip()
logger.debug(f"field `{field}` encountered")
# Colum 43 contains a character indicating the data type:
# I -> integer
# R -> real
type_char = line[43]
if type_char == "I":
dtype = int
elif type_char == "R":
dtype = float
elif type_char == "C":
dtype = str
else:
dtype = None
# skip lines without I or R data type markers
logger.debug(f"skipping line {line_number:10} in {f.name}: `{line.strip()}` .")
continue
# If column 47-48 contain the string "N=", we are dealing with an array
# and the last integer indicates the number of elements
if line[47:49] == "N=":
count = int(line[49:])
else:
# scalar value
self.data[field] = dtype(line[49:])
except Exception as err:
logger.error(f"An error occurred while reading line {line_number:10} in {f.name} .")
raise err
else:
acc += " " + line
# read last field
if len(acc) > 0:
self.data[field] = np.fromstring(acc, dtype=dtype, sep=" ")
assert len(self.data[field]) == count
def __getitem__(self, key):
"""
access data fields by their names
Parameters
----------
key : str
name of field that should be retrieved (e.g. 'Number of atoms')
Returns
-------
field : float, int or ndarray
a KeyError is raised if the field is not present in the formatted checkpoint file
"""
return self.data[key]
def keys(self):
"""
list names of all fields present in the formatted checkpoint file
Returns
-------
keys : list of str
field names
"""
return self.data.keys()
def harmonic_approximation(self):
"""
extract the position, gradient and Hessian of the potential energy in cartesian coordinates
The potential is expanded to second order around the current position x0:
E(x) = E(x0) + grad(E)^T.(x-x0) + 1/2 (x-x0)^T . hess(E) . (x-x0)
A frequency calculation has to be present in the formatted checkpoint file.
The frequency calculation should be performed in a separate Gaussian 16 job using the
following route line for the ground state calculation:
#P functional/basis Freq NoSymm IOp(7/32=5)
and the following route line for an excited state frequency calculation:
#P functional/basis TD=(Nstates=2, Root=1, NAC) Freq NoSymm IOp(7/32=5)
Returns
-------
pos : ndarray (3*nat,)
cartesian coordinates x0
energy : ndarray (1,)
total energy E(x0) of state of interest (in Hartree)
grad : ndarray (3*nat,)
cartesian gradient dE/dx(x0) (in Hartree/bohr)
hess : ndarray (3*nat,3*nat)
cartesian force constants d^2E/(dxdx)(x0) (in Hartree/bohr^2)
"""
try:
nat = self.data["Number of atoms"]
# total energy of state of interest
energy = np.array(self.data["Total Energy"])
# geometry
pos = self.data["Current cartesian coordinates"]
# cartesian gradient
grad = self.data["Cartesian Gradient"]
# Only the lower triangular part of the Hessian is stored.
hess = np.zeros((3*nat,3*nat))
row, col = np.tril_indices(3*nat)
hess[row,col] = self.data["Cartesian Force Constants"]
# Hessian is symmetric, H^T = H
hess[col,row] = hess[row,col]
except KeyError as err:
logger.error(f"A required field could not be found in formatted checkpoint file {self.filename} .")
raise err
return pos, energy, grad, hess
def nonadiabatic_coupling(self):
"""
extract non-adiabatic coupling vector between ground and excited state (Root=I), if present.
Only Gaussian 16 saves the NAC vector in the checkpoint file, while QChem writes it to the output file.
Returns
-------
nac : ndarray (3*nat,)
1st order derivative coupling <0|d/dx|I>
"""
try:
nac = self.data["Nonadiabatic coupling"]
except KeyError as err:
logger.error(f"The field `Nonadiabatic coupling` could not be found in the formatted checkpoint file {self.filename} .")
raise err
if (nac == 0.0).all():
logger.warning(f"All components of non-adiabatic coupling vector in {self.filename} are zero.")
return nac
def vibrational_groundstate(self, zero_threshold=100.0):
"""
The vibrational ground state belonging to the harmonic potential is given by
1/4 T
psi (x) = (det(Gamma ) / pi^N) exp{ -1/2 (x-x ) Gamma (x-x ) }
0 0 0 0 0
provided that x0 is the minimum. This function computes the width parameter matrix
Gamma_0 from the Hessian at the minimum.
Optional
--------
zero_threshold : float > 0
threshold for considering normal mode frequencies as zero (in cm-1)
Returns
-------
x0 : ndarray (3*nat,)
center of Gaussian, in cartesian coordinates (bohr)
Gamma0 : ndarray (3*nat,3*nat)
symmetric, positive semi-definite matrix of width parameters (bohr^{-2})
en_zpt : float
zero-point energy (Hartree)
"""
x0, energy, grad, hess = self.harmonic_approximation()
mass = self.masses()
# diagonals of M^{1/2} and M^{-1/2}
msq = np.sqrt(mass)
imsq = 1.0/msq
# mass-weighted Hessian H
hess_mwc = np.einsum('i,ij,j->ij', imsq, hess, imsq)
# diagonalize symmetric H = V.diag(w).V^T
w2,V = sla.eigh(hess_mwc)
# vibrational energies
w = np.sqrt(w2)
# zero-point energy
en_zpt = 0.5 * hbar * np.sum(w)
logger.info("Normal mode frequencies (cm-1)")
logger.info(w*units.hartree_to_wavenumbers)
if not (w * units.hartree_to_wavenumbers > zero_threshold).all():
logger.warning("At a minimum all frequencies should be positive, found imaginary ones.")
# select non-zero vibrational modes
non_zero = (w * units.hartree_to_wavenumbers) > zero_threshold
# number of non singular dimensions
num_non_zero = np.count_nonzero( non_zero )
dim = x0.shape[0]
logger.info(f"number of zero modes : {dim - num_non_zero}")
# L = hbar^{-1/2} M^{1/2} V w^{1/2}
L = hbar**(-1/2) * np.einsum('i,ij,j->ij', msq, V[:,non_zero], np.sqrt(w[non_zero]))
# Gamma_0 = L . L^T
Gamma_0 = np.einsum('ij,kj->ik', L, L)
return x0, Gamma_0, en_zpt
def masses(self):
"""
atomic masses in a.u.
Returns
-------
masses : ndarray (3*nat,)
masses for each cartesian coordinate in multiples of electron mass
"""
mass = self.data["Real atomic weights"] * units.amu_to_aumass
mass = np.repeat(mass, 3)
return mass
def atomic_numbers(self):
"""
atomic numbers
Returns
-------
numbers : ndarray(nat,)
atomic number for each atom
"""
return self.data["Atomic numbers"] | en | 0.768298 | #!/usr/bin/env python # -*- coding: utf-8 -*- # # Imports # # Local Imports # # Logging reads all fields from formatted checkpoint files produced by the quantum chemistry programs Gaussian 16 and QChem. Parameters ---------- f : File file handle opened for reading a formatted checkpoint file The user has to ensure the file handle is opened and closed at the end. The fields of the checkpoint file can be accessed by their names (see example below). Array fields are stored as 1D numpy arrays of float (R) or integer (I) type. Example ------- >>> with open("freq.fchk") as f: >>> fchk = FormattedCheckpointFile(f) >>> print(fchk["Number of atoms"]) # accumulate all lines belonging to the same field (whithout newlines) # count lines starting from 1 # The name of a field starts in the first column and with a capital letter # All lines belonging to the previous field must have been read, # so we convert it to a numpy array. # numerical types # reset accumulator # skip title and method # First 43 columns are reserved for the field name # Colum 43 contains a character indicating the data type: # I -> integer # R -> real # skip lines without I or R data type markers # If column 47-48 contain the string "N=", we are dealing with an array # and the last integer indicates the number of elements # scalar value # read last field access data fields by their names Parameters ---------- key : str name of field that should be retrieved (e.g. 'Number of atoms') Returns ------- field : float, int or ndarray a KeyError is raised if the field is not present in the formatted checkpoint file list names of all fields present in the formatted checkpoint file Returns ------- keys : list of str field names extract the position, gradient and Hessian of the potential energy in cartesian coordinates The potential is expanded to second order around the current position x0: E(x) = E(x0) + grad(E)^T.(x-x0) + 1/2 (x-x0)^T . hess(E) . (x-x0) A frequency calculation has to be present in the formatted checkpoint file. The frequency calculation should be performed in a separate Gaussian 16 job using the following route line for the ground state calculation: #P functional/basis Freq NoSymm IOp(7/32=5) and the following route line for an excited state frequency calculation: #P functional/basis TD=(Nstates=2, Root=1, NAC) Freq NoSymm IOp(7/32=5) Returns ------- pos : ndarray (3*nat,) cartesian coordinates x0 energy : ndarray (1,) total energy E(x0) of state of interest (in Hartree) grad : ndarray (3*nat,) cartesian gradient dE/dx(x0) (in Hartree/bohr) hess : ndarray (3*nat,3*nat) cartesian force constants d^2E/(dxdx)(x0) (in Hartree/bohr^2) # total energy of state of interest # geometry # cartesian gradient # Only the lower triangular part of the Hessian is stored. # Hessian is symmetric, H^T = H extract non-adiabatic coupling vector between ground and excited state (Root=I), if present. Only Gaussian 16 saves the NAC vector in the checkpoint file, while QChem writes it to the output file. Returns ------- nac : ndarray (3*nat,) 1st order derivative coupling <0|d/dx|I> The vibrational ground state belonging to the harmonic potential is given by 1/4 T psi (x) = (det(Gamma ) / pi^N) exp{ -1/2 (x-x ) Gamma (x-x ) } 0 0 0 0 0 provided that x0 is the minimum. This function computes the width parameter matrix Gamma_0 from the Hessian at the minimum. 
Optional -------- zero_threshold : float > 0 threshold for considering normal mode frequencies as zero (in cm-1) Returns ------- x0 : ndarray (3*nat,) center of Gaussian, in cartesian coordinates (bohr) Gamma0 : ndarray (3*nat,3*nat) symmetric, positive semi-definite matrix of width parameters (bohr^{-2}) en_zpt : float zero-point energy (Hartree) # diagonals of M^{1/2} and M^{-1/2} # mass-weighted Hessian H # diagonalize symmetric H = V.diag(w).V^T # vibrational energies # zero-point energy # select non-zero vibrational modes # number of non singular dimensions # L = hbar^{-1/2} M^{1/2} V w^{1/2} # Gamma_0 = L . L^T atomic masses in a.u. Returns ------- masses : ndarray (3*nat,) masses for each cartesian coordinate in multiples of electron mass atomic numbers Returns ------- numbers : ndarray(nat,) atomic number for each atom | 2.94844 | 3 |
2020_01_01/max_values/max_values.py | 94JuHo/Algorithm_study | 0 | 9783 | values = []
for i in range(9):
values.append(int(input('')))
max_value = 0
location = 0
for i in range(9):
if values[i] > max_value:
max_value = values[i]
location = i+1
print(max_value)
print(location) | values = []
for i in range(9):
values.append(int(input('')))
max_value = 0
location = 0
for i in range(9):
if values[i] > max_value:
max_value = values[i]
location = i+1
print(max_value)
print(location) | none | 1 | 3.786208 | 4 |
|
fuzzywuzzy/process.py | rhasspy/fuzzywuzzy | 3 | 9784 | #!/usr/bin/env python
# encoding: utf-8
"""
process.py
Copyright (c) 2011 <NAME>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from fuzz import *
import sys, os
import utils
#######################################
# Find Best Matchs In List Of Choices #
#######################################
def extract(query, choices, processor=None, scorer=None, limit=5):
# choices = a list of objects we are attempting to extract values from
# query = an object representing the thing we want to find
# scorer f(OBJ, QUERY) --> INT. We will return the objects with the highest score
# by default, we use score.WRatio() and both OBJ and QUERY should be strings
# processor f(OBJ_A) --> OBJ_B, where the output is an input to scorer
# for example, "processor = lambda x: x[0]" would return the first element in a collection x (of, say, strings)
# this would then be used in the scoring collection
if choices is None or len(choices) == 0:
return []
# default, turn whatever the choice is into a string
if processor is None:
processor = lambda x: utils.asciidammit(x)
# default: wratio
if scorer is None:
scorer = WRatio
sl = list()
for choice in choices:
processed = processor(choice)
score = scorer(query, processed)
tuple = (choice, score)
sl.append(tuple)
sl.sort(key=lambda i: -1*i[1])
return sl[:limit]
##########################
# Find Single Best Match #
##########################
def extractOne(query, choices, processor=None, scorer=None, score_cutoff=0):
# convenience method which returns the single best choice
# optional parameter: score_cutoff.
# If the best choice has a score of less than score_cutoff
# we will return none (intuition: not a good enough match)
best_list = extract(query, choices, processor, scorer, limit=1)
if len(best_list) > 0:
best = best_list[0]
if best[1] > score_cutoff:
return best
else:
return None
else:
return None
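

##############################
# Illustrative Usage Example #
##############################

# A minimal sketch (not part of the original module); the choices are made up.
if __name__ == '__main__':
    choices = ["new york jets", "new york giants", "dallas cowboys"]
    print(extract("new york", choices, limit=2))            # top-2 (choice, score) pairs
    print(extractOne("cowboys", choices, score_cutoff=70))  # best match or None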
| #!/usr/bin/env python
# encoding: utf-8
"""
process.py
Copyright (c) 2011 <NAME>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from fuzz import *
import sys, os
import utils
#######################################
# Find Best Matchs In List Of Choices #
#######################################
def extract(query, choices, processor=None, scorer=None, limit=5):
# choices = a list of objects we are attempting to extract values from
# query = an object representing the thing we want to find
# scorer f(OBJ, QUERY) --> INT. We will return the objects with the highest score
# by default, we use score.WRatio() and both OBJ and QUERY should be strings
# processor f(OBJ_A) --> OBJ_B, where the output is an input to scorer
# for example, "processor = lambda x: x[0]" would return the first element in a collection x (of, say, strings)
# this would then be used in the scoring collection
if choices is None or len(choices) == 0:
return []
# default, turn whatever the choice is into a string
if processor is None:
processor = lambda x: utils.asciidammit(x)
# default: wratio
if scorer is None:
scorer = WRatio
sl = list()
for choice in choices:
processed = processor(choice)
score = scorer(query, processed)
tuple = (choice, score)
sl.append(tuple)
sl.sort(key=lambda i: -1*i[1])
return sl[:limit]
##########################
# Find Single Best Match #
##########################
def extractOne(query, choices, processor=None, scorer=None, score_cutoff=0):
# convenience method which returns the single best choice
# optional parameter: score_cutoff.
# If the best choice has a score of less than score_cutoff
# we will return none (intuition: not a good enough match)
best_list = extract(query, choices, processor, scorer, limit=1)
if len(best_list) > 0:
best = best_list[0]
if best[1] > score_cutoff:
return best
else:
return None
else:
return None
| en | 0.735952 | #!/usr/bin/env python # encoding: utf-8 process.py Copyright (c) 2011 <NAME> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ####################################### # Find Best Matchs In List Of Choices # ####################################### # choices = a list of objects we are attempting to extract values from # query = an object representing the thing we want to find # scorer f(OBJ, QUERY) --> INT. We will return the objects with the highest score # by default, we use score.WRatio() and both OBJ and QUERY should be strings # processor f(OBJ_A) --> OBJ_B, where the output is an input to scorer # for example, "processor = lambda x: x[0]" would return the first element in a collection x (of, say, strings) # this would then be used in the scoring collection # default, turn whatever the choice is into a string # default: wratio ########################## # Find Single Best Match # ########################## # convenience method which returns the single best choice # optional parameter: score_cutoff. # If the best choice has a score of less than score_cutoff # we will return none (intuition: not a good enough match) | 2.392474 | 2 |
day03/day03.py | robfalck/AoC2017 | 0 | 9785 | <gh_stars>0
from __future__ import print_function, division, absolute_import
import numpy as np
INPUT = 265149
def part1(number):
    # Square 1 is the access port itself, so its distance is zero.
    if number == 1:
        return 0
    skip = 2
d = 1
row = None
col = None
for shell_idx in range(1, 10000):
size = shell_idx * 2 + 1
a = d + skip
b = a + skip
c = b + skip
d = c + skip
skip = skip + 2
if a <= number <= b:
# top
col = -(size // 2) + (b - number)
row = size // 2
elif b <= number <= c:
# left
row = size // 2 - (c - number)
col = -(size // 2)
elif c <= number <= d:
# bottom
row = -(size // 2)
col = row + (number - c)
elif number < a:
# right
col = size // 2
row = col - (a - number)
if row is not None and col is not None:
manh_dist = abs(row) + abs(col)
return manh_dist
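# Sanity checks (editorial illustration; these are the well-known examples for
# this spiral-memory puzzle and are easy to verify by hand):
#     part1(1) == 0   # square 1 is the access port itself
#     part1(12) == 3  # data from square 12 is carried 3 steps
#     part1(23) == 2  # data from square 23 is carried 2 steps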
def part2(number):
"""
A brute-force approach to part 2.
"""
map = np.zeros((11, 11), dtype=int)
row = 5
col = 5
map[row, col] = 1
heading = 'RIGHT'
dcol = 1
drow = 0
nsteps = 70
for i in range(nsteps):
row += drow
col += dcol
sum_at_next = map[row-1:row+2, col-1:col+2].sum()
map[row, col] = sum_at_next
if sum_at_next > number:
return sum_at_next
# Determine if we need to change heading
if heading == 'RIGHT' and map[row-1, col] == 0:
heading = 'UP'
drow = -1
dcol = 0
elif heading == 'UP' and map[row, col-1] == 0:
heading = 'LEFT'
drow = 0
dcol = -1
elif heading == 'LEFT' and map[row+1, col] == 0:
heading = 'DOWN'
drow = 1
dcol = 0
elif heading == 'DOWN' and map[row, col+1] == 0:
heading = 'RIGHT'
drow = 0
dcol = 1
if __name__ == '__main__':
print(part1(number=INPUT))
print(part2(number=INPUT))
| from __future__ import print_function, division, absolute_import
import numpy as np
INPUT = 265149
def part1(number):
    # Square 1 is the access port itself, so its distance is zero.
    if number == 1:
        return 0
    skip = 2
d = 1
row = None
col = None
for shell_idx in range(1, 10000):
size = shell_idx * 2 + 1
a = d + skip
b = a + skip
c = b + skip
d = c + skip
skip = skip + 2
if a <= number <= b:
# top
col = -(size // 2) + (b - number)
row = size // 2
elif b <= number <= c:
# left
row = size // 2 - (c - number)
col = -(size // 2)
elif c <= number <= d:
# bottom
row = -(size // 2)
col = row + (number - c)
elif number < a:
# right
col = size // 2
row = col - (a - number)
if row is not None and col is not None:
manh_dist = abs(row) + abs(col)
return manh_dist
def part2(number):
"""
A brute-force approach to part 2.
"""
map = np.zeros((11, 11), dtype=int)
row = 5
col = 5
map[row, col] = 1
heading = 'RIGHT'
dcol = 1
drow = 0
nsteps = 70
for i in range(nsteps):
row += drow
col += dcol
sum_at_next = map[row-1:row+2, col-1:col+2].sum()
map[row, col] = sum_at_next
if sum_at_next > number:
return sum_at_next
# Determine if we need to change heading
if heading == 'RIGHT' and map[row-1, col] == 0:
heading = 'UP'
drow = -1
dcol = 0
elif heading == 'UP' and map[row, col-1] == 0:
heading = 'LEFT'
drow = 0
dcol = -1
elif heading == 'LEFT' and map[row+1, col] == 0:
heading = 'DOWN'
drow = 1
dcol = 0
elif heading == 'DOWN' and map[row, col+1] == 0:
heading = 'RIGHT'
drow = 0
dcol = 1
if __name__ == '__main__':
print(part1(number=INPUT))
print(part2(number=INPUT)) | en | 0.422821 | # top # left # bottom # right A brute-force approach to part 2. # Determine if we need to change heading | 2.724455 | 3 |
core/migrations/0004_auto_20210929_2354.py | codefair114/Inventory-App-Django | 0 | 9786 | <filename>core/migrations/0004_auto_20210929_2354.py
# Generated by Django 3.2.7 on 2021-09-29 23:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20210929_2353'),
]
operations = [
migrations.AlterField(
model_name='order',
name='client',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.orderclient'),
),
migrations.AlterField(
model_name='order',
name='payment',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.paymentmethod'),
),
migrations.AlterField(
model_name='order',
name='shipment',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.shipment'),
),
]
| <filename>core/migrations/0004_auto_20210929_2354.py
# Generated by Django 3.2.7 on 2021-09-29 23:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20210929_2353'),
]
operations = [
migrations.AlterField(
model_name='order',
name='client',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.orderclient'),
),
migrations.AlterField(
model_name='order',
name='payment',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.paymentmethod'),
),
migrations.AlterField(
model_name='order',
name='shipment',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.shipment'),
),
]
| en | 0.823453 | # Generated by Django 3.2.7 on 2021-09-29 23:54 | 1.38346 | 1 |
nova/api/openstack/compute/legacy_v2/contrib/console_auth_tokens.py | bopopescu/nova-token | 0 | 9787 | begin_unit
comment|'# Copyright 2013 Cloudbase Solutions Srl'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'webob'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
op|'.'
name|'api'
op|'.'
name|'openstack'
name|'import'
name|'extensions'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'api'
op|'.'
name|'openstack'
name|'import'
name|'wsgi'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'consoleauth'
name|'import'
name|'rpcapi'
name|'as'
name|'consoleauth_rpcapi'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'i18n'
name|'import'
name|'_'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|authorize
name|'authorize'
op|'='
name|'extensions'
op|'.'
name|'extension_authorizer'
op|'('
string|"'compute'"
op|','
string|"'console_auth_tokens'"
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|ConsoleAuthTokensController
name|'class'
name|'ConsoleAuthTokensController'
op|'('
name|'wsgi'
op|'.'
name|'Controller'
op|')'
op|':'
newline|'\n'
DECL|member|__init__
indent|' '
name|'def'
name|'__init__'
op|'('
name|'self'
op|','
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_consoleauth_rpcapi'
op|'='
name|'consoleauth_rpcapi'
op|'.'
name|'ConsoleAuthAPI'
op|'('
op|')'
newline|'\n'
name|'super'
op|'('
name|'ConsoleAuthTokensController'
op|','
name|'self'
op|')'
op|'.'
name|'__init__'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
newline|'\n'
nl|'\n'
DECL|member|show
dedent|''
name|'def'
name|'show'
op|'('
name|'self'
op|','
name|'req'
op|','
name|'id'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Checks a console auth token and returns the related connect info."""'
newline|'\n'
name|'context'
op|'='
name|'req'
op|'.'
name|'environ'
op|'['
string|"'nova.context'"
op|']'
newline|'\n'
name|'authorize'
op|'('
name|'context'
op|')'
newline|'\n'
nl|'\n'
name|'token'
op|'='
name|'id'
newline|'\n'
name|'connect_info'
op|'='
name|'self'
op|'.'
name|'_consoleauth_rpcapi'
op|'.'
name|'check_token'
op|'('
name|'context'
op|','
name|'token'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'connect_info'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|'('
name|'explanation'
op|'='
name|'_'
op|'('
string|'"Token not found"'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'console_type'
op|'='
name|'connect_info'
op|'.'
name|'get'
op|'('
string|"'console_type'"
op|')'
newline|'\n'
comment|'# This is currently required only for RDP consoles'
nl|'\n'
name|'if'
name|'console_type'
op|'!='
string|'"rdp-html5"'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPUnauthorized'
op|'('
nl|'\n'
name|'explanation'
op|'='
name|'_'
op|'('
string|'"The requested console type details are not "'
nl|'\n'
string|'"accessible"'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'return'
op|'{'
string|"'console'"
op|':'
nl|'\n'
op|'{'
name|'i'
op|':'
name|'connect_info'
op|'['
name|'i'
op|']'
nl|'\n'
name|'for'
name|'i'
name|'in'
op|'['
string|"'instance_uuid'"
op|','
string|"'host'"
op|','
string|"'port'"
op|','
nl|'\n'
string|"'internal_access_path'"
op|']'
nl|'\n'
name|'if'
name|'i'
name|'in'
name|'connect_info'
op|'}'
op|'}'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|Console_auth_tokens
dedent|''
dedent|''
name|'class'
name|'Console_auth_tokens'
op|'('
name|'extensions'
op|'.'
name|'ExtensionDescriptor'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Console token authentication support."""'
newline|'\n'
DECL|variable|name
name|'name'
op|'='
string|'"ConsoleAuthTokens"'
newline|'\n'
DECL|variable|alias
name|'alias'
op|'='
string|'"os-console-auth-tokens"'
newline|'\n'
DECL|variable|namespace
name|'namespace'
op|'='
op|'('
string|'"http://docs.openstack.org/compute/ext/"'
nl|'\n'
string|'"consoles-auth-tokens/api/v2"'
op|')'
newline|'\n'
DECL|variable|updated
name|'updated'
op|'='
string|'"2013-08-13T00:00:00Z"'
newline|'\n'
nl|'\n'
DECL|member|get_resources
name|'def'
name|'get_resources'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'controller'
op|'='
name|'ConsoleAuthTokensController'
op|'('
op|')'
newline|'\n'
name|'ext'
op|'='
name|'extensions'
op|'.'
name|'ResourceExtension'
op|'('
string|"'os-console-auth-tokens'"
op|','
nl|'\n'
name|'controller'
op|')'
newline|'\n'
name|'return'
op|'['
name|'ext'
op|']'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| begin_unit
comment|'# Copyright 2013 Cloudbase Solutions Srl'
nl|'\n'
comment|'# All Rights Reserved.'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may'
nl|'\n'
comment|'# not use this file except in compliance with the License. You may obtain'
nl|'\n'
comment|'# a copy of the License at'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# http://www.apache.org/licenses/LICENSE-2.0'
nl|'\n'
comment|'#'
nl|'\n'
comment|'# Unless required by applicable law or agreed to in writing, software'
nl|'\n'
comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT'
nl|'\n'
comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the'
nl|'\n'
comment|'# License for the specific language governing permissions and limitations'
nl|'\n'
comment|'# under the License.'
nl|'\n'
nl|'\n'
name|'import'
name|'webob'
newline|'\n'
nl|'\n'
name|'from'
name|'nova'
op|'.'
name|'api'
op|'.'
name|'openstack'
name|'import'
name|'extensions'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'api'
op|'.'
name|'openstack'
name|'import'
name|'wsgi'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'consoleauth'
name|'import'
name|'rpcapi'
name|'as'
name|'consoleauth_rpcapi'
newline|'\n'
name|'from'
name|'nova'
op|'.'
name|'i18n'
name|'import'
name|'_'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|variable|authorize
name|'authorize'
op|'='
name|'extensions'
op|'.'
name|'extension_authorizer'
op|'('
string|"'compute'"
op|','
string|"'console_auth_tokens'"
op|')'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|ConsoleAuthTokensController
name|'class'
name|'ConsoleAuthTokensController'
op|'('
name|'wsgi'
op|'.'
name|'Controller'
op|')'
op|':'
newline|'\n'
DECL|member|__init__
indent|' '
name|'def'
name|'__init__'
op|'('
name|'self'
op|','
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
op|':'
newline|'\n'
indent|' '
name|'self'
op|'.'
name|'_consoleauth_rpcapi'
op|'='
name|'consoleauth_rpcapi'
op|'.'
name|'ConsoleAuthAPI'
op|'('
op|')'
newline|'\n'
name|'super'
op|'('
name|'ConsoleAuthTokensController'
op|','
name|'self'
op|')'
op|'.'
name|'__init__'
op|'('
op|'*'
name|'args'
op|','
op|'**'
name|'kwargs'
op|')'
newline|'\n'
nl|'\n'
DECL|member|show
dedent|''
name|'def'
name|'show'
op|'('
name|'self'
op|','
name|'req'
op|','
name|'id'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Checks a console auth token and returns the related connect info."""'
newline|'\n'
name|'context'
op|'='
name|'req'
op|'.'
name|'environ'
op|'['
string|"'nova.context'"
op|']'
newline|'\n'
name|'authorize'
op|'('
name|'context'
op|')'
newline|'\n'
nl|'\n'
name|'token'
op|'='
name|'id'
newline|'\n'
name|'connect_info'
op|'='
name|'self'
op|'.'
name|'_consoleauth_rpcapi'
op|'.'
name|'check_token'
op|'('
name|'context'
op|','
name|'token'
op|')'
newline|'\n'
name|'if'
name|'not'
name|'connect_info'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPNotFound'
op|'('
name|'explanation'
op|'='
name|'_'
op|'('
string|'"Token not found"'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'console_type'
op|'='
name|'connect_info'
op|'.'
name|'get'
op|'('
string|"'console_type'"
op|')'
newline|'\n'
comment|'# This is currently required only for RDP consoles'
nl|'\n'
name|'if'
name|'console_type'
op|'!='
string|'"rdp-html5"'
op|':'
newline|'\n'
indent|' '
name|'raise'
name|'webob'
op|'.'
name|'exc'
op|'.'
name|'HTTPUnauthorized'
op|'('
nl|'\n'
name|'explanation'
op|'='
name|'_'
op|'('
string|'"The requested console type details are not "'
nl|'\n'
string|'"accessible"'
op|')'
op|')'
newline|'\n'
nl|'\n'
dedent|''
name|'return'
op|'{'
string|"'console'"
op|':'
nl|'\n'
op|'{'
name|'i'
op|':'
name|'connect_info'
op|'['
name|'i'
op|']'
nl|'\n'
name|'for'
name|'i'
name|'in'
op|'['
string|"'instance_uuid'"
op|','
string|"'host'"
op|','
string|"'port'"
op|','
nl|'\n'
string|"'internal_access_path'"
op|']'
nl|'\n'
name|'if'
name|'i'
name|'in'
name|'connect_info'
op|'}'
op|'}'
newline|'\n'
nl|'\n'
nl|'\n'
DECL|class|Console_auth_tokens
dedent|''
dedent|''
name|'class'
name|'Console_auth_tokens'
op|'('
name|'extensions'
op|'.'
name|'ExtensionDescriptor'
op|')'
op|':'
newline|'\n'
indent|' '
string|'"""Console token authentication support."""'
newline|'\n'
DECL|variable|name
name|'name'
op|'='
string|'"ConsoleAuthTokens"'
newline|'\n'
DECL|variable|alias
name|'alias'
op|'='
string|'"os-console-auth-tokens"'
newline|'\n'
DECL|variable|namespace
name|'namespace'
op|'='
op|'('
string|'"http://docs.openstack.org/compute/ext/"'
nl|'\n'
string|'"consoles-auth-tokens/api/v2"'
op|')'
newline|'\n'
DECL|variable|updated
name|'updated'
op|'='
string|'"2013-08-13T00:00:00Z"'
newline|'\n'
nl|'\n'
DECL|member|get_resources
name|'def'
name|'get_resources'
op|'('
name|'self'
op|')'
op|':'
newline|'\n'
indent|' '
name|'controller'
op|'='
name|'ConsoleAuthTokensController'
op|'('
op|')'
newline|'\n'
name|'ext'
op|'='
name|'extensions'
op|'.'
name|'ResourceExtension'
op|'('
string|"'os-console-auth-tokens'"
op|','
nl|'\n'
name|'controller'
op|')'
newline|'\n'
name|'return'
op|'['
name|'ext'
op|']'
newline|'\n'
dedent|''
dedent|''
endmarker|''
end_unit
| en | 0.53614 | Checks a console auth token and returns the related connect info. Console token authentication support. | 1.236482 | 1 |
ahrs/common/geometry.py | jaluebbe/ahrs | 184 | 9788 | <filename>ahrs/common/geometry.py
# -*- coding: utf-8 -*-
"""
Geometrical functions
---------------------
References
----------
.. [W1] Wikipedia: https://de.wikipedia.org/wiki/Ellipse#Ellipsengleichung_(Parameterform)
.. [WAE] Wolfram Alpha: Ellipse. (http://mathworld.wolfram.com/Ellipse.html)
"""
import numpy as np
from typing import Union
def circle(center: Union[list, np.ndarray], radius: float = 1.0, num_points: int = 20) -> np.ndarray:
"""
Build a circle with the given characteristics.
Parameters
----------
    center : array-like
        2D Coordinates of center.
    radius : float
        Radius of the circle.
num_points : int
Number of points to build.
Returns
-------
points : numpy.ndarray
N-by-2 array with the coordinates of the circle.
"""
R = np.linspace(0.0, 2.0*np.pi, num_points+1)
x = center[0] + radius*np.cos(R)
y = center[1] + radius*np.sin(R)
return np.array([x, y]).transpose()
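# Example (editorial illustration, not part of the original module): four
# points on a unit circle around the origin. The output has num_points + 1
# rows because the first and last samples both sit at angle 0 == 2*pi.
#     pts = circle([0.0, 0.0], radius=1.0, num_points=4)
#     pts.shape                     # (5, 2)
#     np.allclose(pts[0], pts[-1])  # True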
def ellipse(center: Union[list, np.ndarray], phi: float, axes: Union[list, np.ndarray], num_points: int = 20) -> np.ndarray:
"""
Build an ellipse with the given characteristics.
Parameters
----------
center : array-like
2D Coordinates of center.
phi : float
Angle, in radians, of the major axis w.r.t. the X-axis
axes : array-like
Lengths of major and minor axes, respectively.
num_points : int
Number of points. Defaults to 20.
Returns
-------
points : numpy.ndarray
N-by-2 array with the coordinates of the ellipse.
"""
R = np.linspace(0.0, 2.0*np.pi, num_points+1)
a, b = axes
x = center[0] + a*np.cos(R)*np.cos(phi) - b*np.sin(R)*np.sin(phi)
y = center[1] + a*np.cos(R)*np.sin(phi) + b*np.sin(R)*np.cos(phi)
return np.array([x, y]).transpose()
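# Example (editorial illustration): an axis-aligned ellipse (phi = 0) with
# semi-axes a = 2 and b = 1 centred at (1, 1); the first sampled point lies
# on the major axis at (1 + a, 1).
#     pts = ellipse([1.0, 1.0], phi=0.0, axes=[2.0, 1.0], num_points=8)
#     np.allclose(pts[0], [3.0, 1.0])  # True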
| <filename>ahrs/common/geometry.py
# -*- coding: utf-8 -*-
"""
Geometrical functions
---------------------
References
----------
.. [W1] Wikipedia: https://de.wikipedia.org/wiki/Ellipse#Ellipsengleichung_(Parameterform)
.. [WAE] Wolfram Alpha: Ellipse. (http://mathworld.wolfram.com/Ellipse.html)
"""
import numpy as np
from typing import Union
def circle(center: Union[list, np.ndarray], radius: float = 1.0, num_points: int = 20) -> np.ndarray:
"""
Build a circle with the given characteristics.
Parameters
----------
    center : array-like
        2D Coordinates of center.
    radius : float
        Radius of the circle.
num_points : int
Number of points to build.
Returns
-------
points : numpy.ndarray
N-by-2 array with the coordinates of the circle.
"""
R = np.linspace(0.0, 2.0*np.pi, num_points+1)
x = center[0] + radius*np.cos(R)
y = center[1] + radius*np.sin(R)
return np.array([x, y]).transpose()
def ellipse(center: Union[list, np.ndarray], phi: float, axes: Union[list, np.ndarray], num_points: int = 20) -> np.ndarray:
"""
Build an ellipse with the given characteristics.
Parameters
----------
center : array-like
2D Coordinates of center.
phi : float
Angle, in radians, of the major axis w.r.t. the X-axis
axes : array-like
Lengths of major and minor axes, respectively.
num_points : int
Number of points. Defaults to 20.
Returns
-------
points : numpy.ndarray
N-by-2 array with the coordinates of the ellipse.
"""
R = np.linspace(0.0, 2.0*np.pi, num_points+1)
a, b = axes
x = center[0] + a*np.cos(R)*np.cos(phi) - b*np.sin(R)*np.sin(phi)
y = center[1] + a*np.cos(R)*np.sin(phi) + b*np.sin(R)*np.cos(phi)
return np.array([x, y]).transpose()
| en | 0.52084 | # -*- coding: utf-8 -*- Geometrical functions --------------------- References ---------- .. [W1] Wikipedia: https://de.wikipedia.org/wiki/Ellipse#Ellipsengleichung_(Parameterform) .. [WAE] Wolfram Alpha: Ellipse. (http://mathworld.wolfram.com/Ellipse.html) Build a circle with the given characteristics. Parameters ---------- c : array-like 2D Coordinates of center. r : float Radius of the circle. num_points : int Number of points to build. Returns ------- points : numpy.ndarray N-by-2 array with the coordinates of the circle. Build an ellipse with the given characteristics. Parameters ---------- center : array-like 2D Coordinates of center. phi : float Angle, in radians, of the major axis w.r.t. the X-axis axes : array-like Lengths of major and minor axes, respectively. num_points : int Number of points. Defaults to 20. Returns ------- points : numpy.ndarray N-by-2 array with the coordinates of the ellipse. | 3.937743 | 4 |
htdocs/plotting/auto/scripts100/p116.py | jamayfieldjr/iem | 1 | 9789 | """Monthly HDD/CDD Totals."""
import datetime
from pandas.io.sql import read_sql
from pyiem.plot.use_agg import plt
from pyiem.util import get_dbconn, get_autoplot_context
from pyiem.exceptions import NoDataFound
PDICT = {'cdd': 'Cooling Degree Days',
'hdd': 'Heating Degree Days'}
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc['data'] = True
desc['report'] = True
desc['description'] = """This chart presents monthly cooling degree days
or heating degree days for a 20 year period of your choice. The 20 year
limit is for plot usability only, the data download has all available
years contained."""
y20 = datetime.date.today().year - 19
desc['arguments'] = [
dict(type='station', name='station', default='IATDSM',
label='Select Station', network='IACLIMATE'),
dict(type='select', options=PDICT, default='cdd', name='var',
label='Select Variable'),
dict(type='year', name='syear', default=y20,
label='For plotting, year to start 20 years of plot'),
]
return desc
def plotter(fdict):
""" Go """
import seaborn as sns
ctx = get_autoplot_context(fdict, get_description())
pgconn = get_dbconn('coop')
station = ctx['station']
varname = ctx['var']
table = "alldata_%s" % (station[:2], )
df = read_sql("""
SELECT year, month, sum(precip) as sum_precip,
avg(high) as avg_high,
avg(low) as avg_low,
sum(cdd(high,low,60)) as cdd60,
sum(cdd(high,low,65)) as cdd65,
sum(hdd(high,low,60)) as hdd60,
sum(hdd(high,low,65)) as hdd65,
sum(case when precip >= 0.01 then 1 else 0 end) as rain_days,
sum(case when snow >= 0.1 then 1 else 0 end) as snow_days
from """+table+""" WHERE station = %s GROUP by year, month
""", pgconn, params=(station,), index_col=None)
if df.empty:
raise NoDataFound("No Data Found.")
df['monthdate'] = df[['year', 'month']].apply(
lambda x: datetime.date(x[0], x[1], 1), axis=1)
df.set_index('monthdate', inplace=True)
res = """\
# IEM Climodat https://mesonet.agron.iastate.edu/climodat/
# Report Generated: %s
# Climate Record: %s -> %s
# Site Information: [%s] %s
# Contact Information: <NAME> <EMAIL> 515.294.5978
""" % (datetime.date.today().strftime("%d %b %Y"),
ctx['_nt'].sts[station]['archive_begin'].date(),
datetime.date.today(), station, ctx['_nt'].sts[station]['name'])
res += """# THESE ARE THE MONTHLY %s (base=65) FOR STATION %s
YEAR JAN FEB MAR APR MAY JUN JUL AUG SEP \
OCT NOV DEC
""" % (PDICT[varname].upper(), station)
second = """# THESE ARE THE MONTHLY %s (base=60) FOR STATION %s
YEAR JAN FEB MAR APR MAY JUN JUL AUG SEP \
OCT NOV DEC
""" % (
PDICT[varname].upper(), station)
minyear = df['year'].min()
maxyear = df['year'].max()
for yr in range(minyear, maxyear + 1):
res += ("%4i" % (yr,))
second += "%4i" % (yr,)
for mo in range(1, 13):
ts = datetime.date(yr, mo, 1)
if ts not in df.index:
res += ("%7s" % ("M",))
second += "%7s" % ("M",)
continue
row = df.loc[ts]
res += ("%7.0f" % (row[varname+"65"],))
second += "%7.0f" % (row[varname+"60"],)
res += ("\n")
second += "\n"
res += ("MEAN")
second += "MEAN"
for mo in range(1, 13):
df2 = df[df['month'] == mo]
res += ("%7.0f" % (df2[varname+"65"].mean(), ))
second += "%7.0f" % (df2[varname+"60"].mean(), )
res += ("\n")
second += "\n"
res += second
y1 = int(fdict.get('syear', 1990))
fig, ax = plt.subplots(1, 1, figsize=(8., 6.))
fig.text(0.5, 0.95, "[%s] %s (%s-%s)" % (
station, ctx['_nt'].sts[station]['name'], y1, y1 + 20), ha='center',
fontsize=16)
ax.set_title(r"%s base=60$^\circ$F" % (PDICT[varname], ))
filtered = df[(df['year'] >= y1) & (df['year'] <= (y1 + 20))]
df2 = filtered[
['month', 'year', varname + '60']
].pivot('year', 'month', varname + '60')
sns.heatmap(df2, annot=True, fmt=".0f", linewidths=.5, ax=ax)
return fig, df, res
if __name__ == '__main__':
plotter(dict(syear=1990))
| """Monthly HDD/CDD Totals."""
import datetime
from pandas.io.sql import read_sql
from pyiem.plot.use_agg import plt
from pyiem.util import get_dbconn, get_autoplot_context
from pyiem.exceptions import NoDataFound
PDICT = {'cdd': 'Cooling Degree Days',
'hdd': 'Heating Degree Days'}
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc['data'] = True
desc['report'] = True
desc['description'] = """This chart presents monthly cooling degree days
or heating degree days for a 20 year period of your choice. The 20 year
limit is for plot usability only, the data download has all available
years contained."""
y20 = datetime.date.today().year - 19
desc['arguments'] = [
dict(type='station', name='station', default='IATDSM',
label='Select Station', network='IACLIMATE'),
dict(type='select', options=PDICT, default='cdd', name='var',
label='Select Variable'),
dict(type='year', name='syear', default=y20,
label='For plotting, year to start 20 years of plot'),
]
return desc
def plotter(fdict):
""" Go """
import seaborn as sns
ctx = get_autoplot_context(fdict, get_description())
pgconn = get_dbconn('coop')
station = ctx['station']
varname = ctx['var']
table = "alldata_%s" % (station[:2], )
df = read_sql("""
SELECT year, month, sum(precip) as sum_precip,
avg(high) as avg_high,
avg(low) as avg_low,
sum(cdd(high,low,60)) as cdd60,
sum(cdd(high,low,65)) as cdd65,
sum(hdd(high,low,60)) as hdd60,
sum(hdd(high,low,65)) as hdd65,
sum(case when precip >= 0.01 then 1 else 0 end) as rain_days,
sum(case when snow >= 0.1 then 1 else 0 end) as snow_days
from """+table+""" WHERE station = %s GROUP by year, month
""", pgconn, params=(station,), index_col=None)
if df.empty:
raise NoDataFound("No Data Found.")
df['monthdate'] = df[['year', 'month']].apply(
lambda x: datetime.date(x[0], x[1], 1), axis=1)
df.set_index('monthdate', inplace=True)
res = """\
# IEM Climodat https://mesonet.agron.iastate.edu/climodat/
# Report Generated: %s
# Climate Record: %s -> %s
# Site Information: [%s] %s
# Contact Information: <NAME> <EMAIL> 515.294.5978
""" % (datetime.date.today().strftime("%d %b %Y"),
ctx['_nt'].sts[station]['archive_begin'].date(),
datetime.date.today(), station, ctx['_nt'].sts[station]['name'])
res += """# THESE ARE THE MONTHLY %s (base=65) FOR STATION %s
YEAR JAN FEB MAR APR MAY JUN JUL AUG SEP \
OCT NOV DEC
""" % (PDICT[varname].upper(), station)
second = """# THESE ARE THE MONTHLY %s (base=60) FOR STATION %s
YEAR JAN FEB MAR APR MAY JUN JUL AUG SEP \
OCT NOV DEC
""" % (
PDICT[varname].upper(), station)
minyear = df['year'].min()
maxyear = df['year'].max()
for yr in range(minyear, maxyear + 1):
res += ("%4i" % (yr,))
second += "%4i" % (yr,)
for mo in range(1, 13):
ts = datetime.date(yr, mo, 1)
if ts not in df.index:
res += ("%7s" % ("M",))
second += "%7s" % ("M",)
continue
row = df.loc[ts]
res += ("%7.0f" % (row[varname+"65"],))
second += "%7.0f" % (row[varname+"60"],)
res += ("\n")
second += "\n"
res += ("MEAN")
second += "MEAN"
for mo in range(1, 13):
df2 = df[df['month'] == mo]
res += ("%7.0f" % (df2[varname+"65"].mean(), ))
second += "%7.0f" % (df2[varname+"60"].mean(), )
res += ("\n")
second += "\n"
res += second
y1 = int(fdict.get('syear', 1990))
fig, ax = plt.subplots(1, 1, figsize=(8., 6.))
fig.text(0.5, 0.95, "[%s] %s (%s-%s)" % (
station, ctx['_nt'].sts[station]['name'], y1, y1 + 20), ha='center',
fontsize=16)
ax.set_title(r"%s base=60$^\circ$F" % (PDICT[varname], ))
filtered = df[(df['year'] >= y1) & (df['year'] <= (y1 + 20))]
df2 = filtered[
['month', 'year', varname + '60']
].pivot('year', 'month', varname + '60')
sns.heatmap(df2, annot=True, fmt=".0f", linewidths=.5, ax=ax)
return fig, df, res
if __name__ == '__main__':
plotter(dict(syear=1990))
| en | 0.787647 | Monthly HDD/CDD Totals. Return a dict describing how to call this plotter This chart presents monthly cooling degree days or heating degree days for a 20 year period of your choice. The 20 year limit is for plot usability only, the data download has all available years contained. Go SELECT year, month, sum(precip) as sum_precip, avg(high) as avg_high, avg(low) as avg_low, sum(cdd(high,low,60)) as cdd60, sum(cdd(high,low,65)) as cdd65, sum(hdd(high,low,60)) as hdd60, sum(hdd(high,low,65)) as hdd65, sum(case when precip >= 0.01 then 1 else 0 end) as rain_days, sum(case when snow >= 0.1 then 1 else 0 end) as snow_days from WHERE station = %s GROUP by year, month \ # IEM Climodat https://mesonet.agron.iastate.edu/climodat/ # Report Generated: %s # Climate Record: %s -> %s # Site Information: [%s] %s # Contact Information: <NAME> <EMAIL> 515.294.5978 # THESE ARE THE MONTHLY %s (base=65) FOR STATION %s YEAR JAN FEB MAR APR MAY JUN JUL AUG SEP \ OCT NOV DEC # THESE ARE THE MONTHLY %s (base=60) FOR STATION %s YEAR JAN FEB MAR APR MAY JUN JUL AUG SEP \ OCT NOV DEC | 2.888843 | 3 |
examples/horovod/ray_torch_shuffle.py | krfricke/ray_shuffling_data_loader | 16 | 9790 | import os
import pickle
import time
import timeit
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import torch
import tempfile
import horovod.torch as hvd
from horovod.ray import RayExecutor
from ray_shuffling_data_loader.torch_dataset import (TorchShufflingDataset)
from ray_shuffling_data_loader.data_generation import (generate_data,
DATA_SPEC)
import argparse
DEFAULT_DATA_DIR = "s3://shuffling-data-loader-benchmarks/data/"
numpy_to_torch_dtype = {
np.bool: torch.bool,
np.uint8: torch.uint8,
np.int8: torch.int8,
np.int16: torch.int16,
np.int32: torch.int32,
np.int64: torch.int64,
np.float16: torch.float16,
np.float32: torch.float32,
np.float64: torch.float64,
np.complex64: torch.complex64,
np.complex128: torch.complex128
}
# Training settings
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=250000,
metavar="N",
help="input batch size for training (default: 64)")
parser.add_argument(
"--test-batch-size",
type=int,
default=250000,
metavar="N",
help="input batch size for testing (default: 1000)")
parser.add_argument(
"--epochs",
type=int,
default=10,
metavar="N",
help="number of epochs to train (default: 10)")
parser.add_argument(
"--lr",
type=float,
default=0.01,
metavar="LR",
help="learning rate (default: 0.01)")
parser.add_argument(
"--momentum",
type=float,
default=0.5,
metavar="M",
help="SGD momentum (default: 0.5)")
parser.add_argument(
"--no-cuda",
action="store_true",
default=False,
help="disables CUDA training")
parser.add_argument(
"--seed",
type=int,
default=42,
metavar="S",
help="random seed (default: 42)")
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help=("how many batches to wait before logging training "
"status"))
parser.add_argument(
"--fp16-allreduce",
action="store_true",
default=False,
help="use fp16 compression during allreduce")
parser.add_argument(
"--use-adasum",
action="store_true",
default=False,
help="use adasum algorithm to do reduction")
parser.add_argument(
"--gradient-predivide-factor",
type=float,
default=1.0,
help=("apply gradient predivide factor in optimizer "
"(default: 1.0)"))
parser.add_argument("--num-workers", type=int, default=None)
parser.add_argument("--num-hosts", type=int, default=None)
parser.add_argument("--num-workers-per-host", type=int, default=None)
parser.add_argument("--cpus-per-worker", type=int, default=1)
parser.add_argument("--mock-train-step-time", type=float, default=1.0)
# Synthetic training data generation settings.
parser.add_argument("--cache-files", action="store_true", default=False)
parser.add_argument("--num-rows", type=int, default=2 * (10**7))
parser.add_argument("--num-files", type=int, default=25)
parser.add_argument("--max-row-group-skew", type=float, default=0.0)
parser.add_argument("--num-row-groups-per-file", type=int, default=5)
parser.add_argument("--data-dir", type=str, default=DEFAULT_DATA_DIR)
# Shuffling data loader settings.
parser.add_argument("--num-reducers", type=int, default=32)
parser.add_argument("--max-concurrent-epochs", type=int, default=2)
parser.add_argument("--address", default="auto")
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x)
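# Shape sketch (editorial comment): with the standard 1x28x28 MNIST input,
# conv1 (k=5) plus max-pool gives 10x12x12, conv2 (k=5) plus max-pool gives
# 20x4x4 = 320 features, matching the fc1 input size above; the output is a
# (batch, 10) tensor of log-probabilities.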
def train_main(args, filenames):
# Horovod: initialize library.
hvd.init()
torch.manual_seed(args.seed)
if torch.cuda.is_available() and not args.no_cuda:
# Horovod: pin GPU to local rank.
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(args.seed)
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(1)
rank = hvd.rank()
train_dataset = create_dataset(
filenames,
batch_size=args.batch_size,
rank=rank,
num_epochs=args.epochs,
world_size=hvd.size(),
num_reducers=args.num_reducers,
max_concurrent_epochs=args.max_concurrent_epochs)
model = Net()
    # By default, Adasum doesn't need scaling up learning rate.
lr_scaler = hvd.size() if not args.use_adasum else 1
if torch.cuda.is_available() and not args.no_cuda:
# Move model to GPU.
model.cuda()
# If using GPU Adasum allreduce, scale learning rate by local_size.
if args.use_adasum and hvd.nccl_built():
lr_scaler = hvd.local_size()
# Horovod: scale learning rate by lr_scaler.
optimizer = optim.SGD(
model.parameters(), lr=args.lr * lr_scaler, momentum=args.momentum)
# Horovod: broadcast parameters & optimizer state.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
# Horovod: (optional) compression algorithm.
compression = (hvd.Compression.fp16
if args.fp16_allreduce else hvd.Compression.none)
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(
optimizer,
named_parameters=model.named_parameters(),
compression=compression,
op=hvd.Adasum if args.use_adasum else hvd.Average,
gradient_predivide_factor=args.gradient_predivide_factor)
def _train(epoch):
model.train()
# Horovod: set epoch to sampler for shuffling.
train_dataset.set_epoch(epoch)
start_epoch = timeit.default_timer()
last_batch_time = start_epoch
batch_wait_times = []
for batch_idx, (data, target) in enumerate(train_dataset):
batch_wait_times.append(timeit.default_timer() - last_batch_time)
if torch.cuda.is_available() and not args.no_cuda:
if isinstance(data, list):
data = [t.cuda() for t in data]
target = target.cuda()
optimizer.zero_grad()
# output = model(data)
if batch_idx % args.log_interval == 0:
print(
f"Processing batch {batch_idx} in epoch {epoch} on worker "
f"{rank}.")
time.sleep(args.mock_train_step_time)
# TODO(Clark): Add worker synchronization barrier here.
# loss = F.nll_loss(output, target)
# loss.backward()
# optimizer.step()
last_batch_time = timeit.default_timer()
epoch_duration = timeit.default_timer() - start_epoch
avg_batch_wait_time = np.mean(batch_wait_times)
std_batch_wait_time = np.std(batch_wait_times)
max_batch_wait_time = np.max(batch_wait_times)
min_batch_wait_time = np.min(batch_wait_times)
print(f"\nEpoch {epoch}, worker {rank} stats over "
f"{len(batch_wait_times)} steps: {epoch_duration:.3f}")
print(f"Mean batch wait time: {avg_batch_wait_time:.3f}s +- "
f"{std_batch_wait_time}")
print(f"Max batch wait time: {max_batch_wait_time:.3f}s")
print(f"Min batch wait time: {min_batch_wait_time:.3f}s")
return batch_wait_times
print(f"Starting training on worker {rank}.")
batch_wait_times = []
for epoch in range(args.epochs):
batch_wait_times.extend(_train(epoch))
batch_wait_times.pop(0)
print(f"Done training on worker {rank}.")
avg_batch_wait_time = np.mean(batch_wait_times)
std_batch_wait_time = np.std(batch_wait_times)
max_batch_wait_time = np.max(batch_wait_times)
min_batch_wait_time = np.min(batch_wait_times)
print(f"\nWorker {rank} training stats over {args.epochs} epochs:")
print(f"Mean batch wait time: {avg_batch_wait_time:.3f}s +- "
f"{std_batch_wait_time}")
print(f"Max batch wait time: {max_batch_wait_time:.3f}s")
print(f"Min batch wait time: {min_batch_wait_time:.3f}s")
# TODO(Clark): Add logic to the dataset abstraction so we don't have to do
# this.
if rank == 0:
print("Waiting in rank 0 worker to let other workers consume queue...")
time.sleep(10)
print("Done waiting in rank 0 worker.")
def create_dataset(filenames, *, batch_size, rank, num_epochs, world_size,
num_reducers, max_concurrent_epochs):
print(f"Creating Torch shuffling dataset for worker {rank} with "
f"{batch_size} batch size, {num_epochs} epochs, {num_reducers} "
f"reducers, and {world_size} trainers.")
feature_columns = list(DATA_SPEC.keys())
feature_types = [
numpy_to_torch_dtype[dtype] for _, _, dtype in DATA_SPEC.values()
]
label_column = feature_columns.pop()
label_type = feature_types.pop()
return TorchShufflingDataset(
filenames,
num_epochs,
world_size,
batch_size,
rank,
num_reducers=num_reducers,
max_concurrent_epochs=max_concurrent_epochs,
feature_columns=feature_columns,
feature_types=feature_types,
label_column=label_column,
label_type=label_type)
if __name__ == "__main__":
args = parser.parse_args()
from ray_shuffling_data_loader.stats import human_readable_size
import ray
print("Connecting to Ray cluster...")
ray.init(address=args.address)
num_rows = args.num_rows
num_files = args.num_files
num_row_groups_per_file = args.num_row_groups_per_file
max_row_group_skew = args.max_row_group_skew
data_dir = args.data_dir
cache_path = os.path.join(tempfile.gettempdir(), "data_cache")
filenames = None
if args.cache_files and os.path.exists(cache_path):
try:
with open(cache_path, "rb") as f:
filenames, num_bytes = pickle.load(f)
except Exception as exc:
print(f"Cache load failed - {exc}")
if not filenames:
print(f"Generating {num_rows} rows over {num_files} files, with "
f"{num_row_groups_per_file} row groups per file and at most "
f"{100 * max_row_group_skew:.1f}% row group skew.")
filenames, num_bytes = generate_data(num_rows, num_files,
num_row_groups_per_file,
max_row_group_skew, data_dir)
if args.cache_files:
with open(os.path.join(tempfile.gettempdir(), "data_cache"),
"wb") as f:
pickle.dump((filenames, num_bytes), f)
print(f"Generated {len(filenames)} files containing {num_rows} rows "
f"with {num_row_groups_per_file} row groups per file, totalling "
f"{human_readable_size(num_bytes)}.")
print("Create Ray executor")
worker_kwargs = {}
num_workers = args.num_workers
num_hosts = args.num_hosts
num_workers_per_host = args.num_workers_per_host
if num_workers is not None:
if num_hosts is not None:
raise ValueError(
"Only one of --num-workers and --num-hosts should be used.")
worker_kwargs["num_workers"] = num_workers
elif num_hosts is not None:
worker_kwargs["num_hosts"] = num_hosts
if num_workers_per_host is None:
raise ValueError("When giving --num-hosts, --num-workers-per-host "
"must also be given.")
worker_kwargs["num_workers_per_host"] = num_workers_per_host
cpus_per_worker = args.cpus_per_worker
settings = RayExecutor.create_settings(timeout_s=30)
executor = RayExecutor(
settings,
use_gpu=True,
gpus_per_worker=1,
cpus_per_worker=cpus_per_worker,
**worker_kwargs)
executor.start()
executor.run(train_main, args=[args, filenames])
executor.shutdown()
print("Done consuming batches.")
| import os
import pickle
import time
import timeit
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import torch
import tempfile
import horovod.torch as hvd
from horovod.ray import RayExecutor
from ray_shuffling_data_loader.torch_dataset import (TorchShufflingDataset)
from ray_shuffling_data_loader.data_generation import (generate_data,
DATA_SPEC)
import argparse
DEFAULT_DATA_DIR = "s3://shuffling-data-loader-benchmarks/data/"
numpy_to_torch_dtype = {
np.bool: torch.bool,
np.uint8: torch.uint8,
np.int8: torch.int8,
np.int16: torch.int16,
np.int32: torch.int32,
np.int64: torch.int64,
np.float16: torch.float16,
np.float32: torch.float32,
np.float64: torch.float64,
np.complex64: torch.complex64,
np.complex128: torch.complex128
}
# Training settings
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=250000,
metavar="N",
help="input batch size for training (default: 64)")
parser.add_argument(
"--test-batch-size",
type=int,
default=250000,
metavar="N",
help="input batch size for testing (default: 1000)")
parser.add_argument(
"--epochs",
type=int,
default=10,
metavar="N",
help="number of epochs to train (default: 10)")
parser.add_argument(
"--lr",
type=float,
default=0.01,
metavar="LR",
help="learning rate (default: 0.01)")
parser.add_argument(
"--momentum",
type=float,
default=0.5,
metavar="M",
help="SGD momentum (default: 0.5)")
parser.add_argument(
"--no-cuda",
action="store_true",
default=False,
help="disables CUDA training")
parser.add_argument(
"--seed",
type=int,
default=42,
metavar="S",
help="random seed (default: 42)")
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help=("how many batches to wait before logging training "
"status"))
parser.add_argument(
"--fp16-allreduce",
action="store_true",
default=False,
help="use fp16 compression during allreduce")
parser.add_argument(
"--use-adasum",
action="store_true",
default=False,
help="use adasum algorithm to do reduction")
parser.add_argument(
"--gradient-predivide-factor",
type=float,
default=1.0,
help=("apply gradient predivide factor in optimizer "
"(default: 1.0)"))
parser.add_argument("--num-workers", type=int, default=None)
parser.add_argument("--num-hosts", type=int, default=None)
parser.add_argument("--num-workers-per-host", type=int, default=None)
parser.add_argument("--cpus-per-worker", type=int, default=1)
parser.add_argument("--mock-train-step-time", type=float, default=1.0)
# Synthetic training data generation settings.
parser.add_argument("--cache-files", action="store_true", default=False)
parser.add_argument("--num-rows", type=int, default=2 * (10**7))
parser.add_argument("--num-files", type=int, default=25)
parser.add_argument("--max-row-group-skew", type=float, default=0.0)
parser.add_argument("--num-row-groups-per-file", type=int, default=5)
parser.add_argument("--data-dir", type=str, default=DEFAULT_DATA_DIR)
# Shuffling data loader settings.
parser.add_argument("--num-reducers", type=int, default=32)
parser.add_argument("--max-concurrent-epochs", type=int, default=2)
parser.add_argument("--address", default="auto")
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x)
def train_main(args, filenames):
# Horovod: initialize library.
hvd.init()
torch.manual_seed(args.seed)
if torch.cuda.is_available() and not args.no_cuda:
# Horovod: pin GPU to local rank.
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(args.seed)
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(1)
rank = hvd.rank()
train_dataset = create_dataset(
filenames,
batch_size=args.batch_size,
rank=rank,
num_epochs=args.epochs,
world_size=hvd.size(),
num_reducers=args.num_reducers,
max_concurrent_epochs=args.max_concurrent_epochs)
model = Net()
    # By default, Adasum doesn't need scaling up learning rate.
lr_scaler = hvd.size() if not args.use_adasum else 1
if torch.cuda.is_available() and not args.no_cuda:
# Move model to GPU.
model.cuda()
# If using GPU Adasum allreduce, scale learning rate by local_size.
if args.use_adasum and hvd.nccl_built():
lr_scaler = hvd.local_size()
# Horovod: scale learning rate by lr_scaler.
optimizer = optim.SGD(
model.parameters(), lr=args.lr * lr_scaler, momentum=args.momentum)
# Horovod: broadcast parameters & optimizer state.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
# Horovod: (optional) compression algorithm.
compression = (hvd.Compression.fp16
if args.fp16_allreduce else hvd.Compression.none)
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(
optimizer,
named_parameters=model.named_parameters(),
compression=compression,
op=hvd.Adasum if args.use_adasum else hvd.Average,
gradient_predivide_factor=args.gradient_predivide_factor)
def _train(epoch):
model.train()
# Horovod: set epoch to sampler for shuffling.
train_dataset.set_epoch(epoch)
start_epoch = timeit.default_timer()
last_batch_time = start_epoch
batch_wait_times = []
for batch_idx, (data, target) in enumerate(train_dataset):
batch_wait_times.append(timeit.default_timer() - last_batch_time)
if torch.cuda.is_available() and not args.no_cuda:
if isinstance(data, list):
data = [t.cuda() for t in data]
target = target.cuda()
optimizer.zero_grad()
# output = model(data)
if batch_idx % args.log_interval == 0:
print(
f"Processing batch {batch_idx} in epoch {epoch} on worker "
f"{rank}.")
time.sleep(args.mock_train_step_time)
# TODO(Clark): Add worker synchronization barrier here.
# loss = F.nll_loss(output, target)
# loss.backward()
# optimizer.step()
last_batch_time = timeit.default_timer()
epoch_duration = timeit.default_timer() - start_epoch
avg_batch_wait_time = np.mean(batch_wait_times)
std_batch_wait_time = np.std(batch_wait_times)
max_batch_wait_time = np.max(batch_wait_times)
min_batch_wait_time = np.min(batch_wait_times)
print(f"\nEpoch {epoch}, worker {rank} stats over "
f"{len(batch_wait_times)} steps: {epoch_duration:.3f}")
print(f"Mean batch wait time: {avg_batch_wait_time:.3f}s +- "
f"{std_batch_wait_time}")
print(f"Max batch wait time: {max_batch_wait_time:.3f}s")
print(f"Min batch wait time: {min_batch_wait_time:.3f}s")
return batch_wait_times
print(f"Starting training on worker {rank}.")
batch_wait_times = []
for epoch in range(args.epochs):
batch_wait_times.extend(_train(epoch))
batch_wait_times.pop(0)
print(f"Done training on worker {rank}.")
avg_batch_wait_time = np.mean(batch_wait_times)
std_batch_wait_time = np.std(batch_wait_times)
max_batch_wait_time = np.max(batch_wait_times)
min_batch_wait_time = np.min(batch_wait_times)
print(f"\nWorker {rank} training stats over {args.epochs} epochs:")
print(f"Mean batch wait time: {avg_batch_wait_time:.3f}s +- "
f"{std_batch_wait_time}")
print(f"Max batch wait time: {max_batch_wait_time:.3f}s")
print(f"Min batch wait time: {min_batch_wait_time:.3f}s")
# TODO(Clark): Add logic to the dataset abstraction so we don't have to do
# this.
if rank == 0:
print("Waiting in rank 0 worker to let other workers consume queue...")
time.sleep(10)
print("Done waiting in rank 0 worker.")
def create_dataset(filenames, *, batch_size, rank, num_epochs, world_size,
num_reducers, max_concurrent_epochs):
print(f"Creating Torch shuffling dataset for worker {rank} with "
f"{batch_size} batch size, {num_epochs} epochs, {num_reducers} "
f"reducers, and {world_size} trainers.")
feature_columns = list(DATA_SPEC.keys())
feature_types = [
numpy_to_torch_dtype[dtype] for _, _, dtype in DATA_SPEC.values()
]
label_column = feature_columns.pop()
label_type = feature_types.pop()
return TorchShufflingDataset(
filenames,
num_epochs,
world_size,
batch_size,
rank,
num_reducers=num_reducers,
max_concurrent_epochs=max_concurrent_epochs,
feature_columns=feature_columns,
feature_types=feature_types,
label_column=label_column,
label_type=label_type)
if __name__ == "__main__":
args = parser.parse_args()
from ray_shuffling_data_loader.stats import human_readable_size
import ray
print("Connecting to Ray cluster...")
ray.init(address=args.address)
num_rows = args.num_rows
num_files = args.num_files
num_row_groups_per_file = args.num_row_groups_per_file
max_row_group_skew = args.max_row_group_skew
data_dir = args.data_dir
cache_path = os.path.join(tempfile.gettempdir(), "data_cache")
filenames = None
if args.cache_files and os.path.exists(cache_path):
try:
with open(cache_path, "rb") as f:
filenames, num_bytes = pickle.load(f)
except Exception as exc:
print(f"Cache load failed - {exc}")
if not filenames:
print(f"Generating {num_rows} rows over {num_files} files, with "
f"{num_row_groups_per_file} row groups per file and at most "
f"{100 * max_row_group_skew:.1f}% row group skew.")
filenames, num_bytes = generate_data(num_rows, num_files,
num_row_groups_per_file,
max_row_group_skew, data_dir)
if args.cache_files:
with open(os.path.join(tempfile.gettempdir(), "data_cache"),
"wb") as f:
pickle.dump((filenames, num_bytes), f)
print(f"Generated {len(filenames)} files containing {num_rows} rows "
f"with {num_row_groups_per_file} row groups per file, totalling "
f"{human_readable_size(num_bytes)}.")
print("Create Ray executor")
worker_kwargs = {}
num_workers = args.num_workers
num_hosts = args.num_hosts
num_workers_per_host = args.num_workers_per_host
if num_workers is not None:
if num_hosts is not None:
raise ValueError(
"Only one of --num-workers and --num-hosts should be used.")
worker_kwargs["num_workers"] = num_workers
elif num_hosts is not None:
worker_kwargs["num_hosts"] = num_hosts
if num_workers_per_host is None:
raise ValueError("When giving --num-hosts, --num-workers-per-host "
"must also be given.")
worker_kwargs["num_workers_per_host"] = num_workers_per_host
cpus_per_worker = args.cpus_per_worker
settings = RayExecutor.create_settings(timeout_s=30)
executor = RayExecutor(
settings,
use_gpu=True,
gpus_per_worker=1,
cpus_per_worker=cpus_per_worker,
**worker_kwargs)
executor.start()
executor.run(train_main, args=[args, filenames])
executor.shutdown()
print("Done consuming batches.")
| en | 0.656283 | # Training settings # Synthetic training data generation settings. # Shuffling data loader settings. # Horovod: initialize library. # Horovod: pin GPU to local rank. # Horovod: limit # of CPU threads to be used per worker. # By default, Adasum doesn"t need scaling up learning rate. # Move model to GPU. # If using GPU Adasum allreduce, scale learning rate by local_size. # Horovod: scale learning rate by lr_scaler. # Horovod: broadcast parameters & optimizer state. # Horovod: (optional) compression algorithm. # Horovod: wrap optimizer with DistributedOptimizer. # Horovod: set epoch to sampler for shuffling. # output = model(data) # TODO(Clark): Add worker synchronization barrier here. # loss = F.nll_loss(output, target) # loss.backward() # optimizer.step() # TODO(Clark): Add logic to the dataset abstraction so we don't have to do # this. | 2.270872 | 2 |
tests/test_main/test_base/tests.py | PitonX60/django-firebird | 51 | 9791 | <reponame>PitonX60/django-firebird
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.conf import settings
from django.db import connection, DatabaseError
from django.db.models import F, DateField, DateTimeField, IntegerField, TimeField, CASCADE
from django.db.models.fields.related import ForeignKey
from django.db.models.functions import (
Extract, ExtractDay, ExtractHour, ExtractMinute, ExtractMonth,
ExtractSecond, ExtractWeek, ExtractWeekDay, ExtractYear, Trunc, TruncDate,
TruncDay, TruncHour, TruncMinute, TruncMonth, TruncSecond, TruncTime,
TruncYear,
)
from django.test import TestCase, TransactionTestCase, override_settings
from django.utils import timezone
from .models import BigS, FieldsTest, Foo, Bar, DTModel
def microsecond_support(value):
return value if connection.features.supports_microsecond_precision else value.replace(microsecond=0)
def truncate_to(value, kind, tzinfo=None):
# Convert to target timezone before truncation
if tzinfo is not None:
value = value.astimezone(tzinfo)
def truncate(value, kind):
if kind == 'second':
return value.replace(microsecond=0)
if kind == 'minute':
return value.replace(second=0, microsecond=0)
if kind == 'hour':
return value.replace(minute=0, second=0, microsecond=0)
if kind == 'day':
if isinstance(value, datetime):
return value.replace(hour=0, minute=0, second=0, microsecond=0)
return value
if kind == 'month':
if isinstance(value, datetime):
return value.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
return value.replace(day=1)
# otherwise, truncate to year
if isinstance(value, datetime):
return value.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
return value.replace(month=1, day=1)
value = truncate(value, kind)
if tzinfo is not None:
# If there was a daylight saving transition, then reset the timezone.
value = timezone.make_aware(value.replace(tzinfo=None), tzinfo)
return value
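# Illustrative example of the helper above (editorial addition): truncating to
# 'month' zeroes everything below the month, e.g.
#     truncate_to(datetime(2015, 6, 15, 14, 30, 50), 'month')
#     # -> datetime(2015, 6, 1, 0, 0)
# and passing tzinfo converts to that zone before truncating.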
class FirebirdTest(TestCase):
def setUp(self):
pass
def test_server_version(self):
version = connection.server_version
self.assertNotEqual(version, '')
def test_firebird_version(self):
version = connection.ops.firebird_version
self.assertNotEqual(version, [])
class DatabaseOperationsTest(TestCase):
def setUp(self):
self.ops = connection.ops
def test_get_sequence_name(self):
sq_name = self.ops.get_sequence_name('TEST')
self.assertEqual(sq_name, '"TEST_SQ"')
def test_drop_sequence_sql(self):
sql = self.ops.drop_sequence_sql('TEST')
self.assertEqual(sql, 'DROP SEQUENCE "TEST_SQ"')
def test_date_extract_sql(self):
sql = self.ops.date_extract_sql('week_day', 'DATE_FIELD')
value = "EXTRACT(WEEKDAY FROM DATE_FIELD) + 1"
self.assertEqual(sql, value)
sql = self.ops.date_extract_sql('year', 'DATE_FIELD')
value = "EXTRACT(YEAR FROM DATE_FIELD)"
self.assertEqual(sql, value)
sql = self.ops.date_extract_sql('month', 'DATE_FIELD')
value = "EXTRACT(MONTH FROM DATE_FIELD)"
self.assertEqual(sql, value)
sql = self.ops.date_extract_sql('day', 'DATE_FIELD')
value = "EXTRACT(DAY FROM DATE_FIELD)"
self.assertEqual(sql, value)
def test_datetime_trunc_sql(self):
sql = self.ops.datetime_trunc_sql('year', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-01-01 00:00:00' AS TIMESTAMP)"
self.assertEqual(sql, value)
sql = self.ops.datetime_trunc_sql('month', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-'||EXTRACT(month FROM DATE_FIELD)||'-01 00:00:00' AS TIMESTAMP)"
self.assertEqual(sql, value)
sql = self.ops.datetime_trunc_sql('day', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-'||EXTRACT(month FROM DATE_FIELD)||'-'||EXTRACT(day FROM DATE_FIELD)||' 00:00:00' AS TIMESTAMP)"
self.assertEqual(sql, value)
sql = self.ops.datetime_trunc_sql('hour', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-'||EXTRACT(month FROM DATE_FIELD)||'-'||EXTRACT(day FROM DATE_FIELD)||' '||EXTRACT(hour FROM DATE_FIELD)||':00:00' AS TIMESTAMP)"
self.assertEqual(sql, value)
sql = self.ops.datetime_trunc_sql('minute', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-'||EXTRACT(month FROM DATE_FIELD)||'-'||EXTRACT(day FROM DATE_FIELD)||' '||EXTRACT(hour FROM DATE_FIELD)||':'||EXTRACT(minute FROM DATE_FIELD)||':00' AS TIMESTAMP)"
self.assertEqual(sql, value)
sql = self.ops.datetime_trunc_sql('second', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-'||EXTRACT(month FROM DATE_FIELD)||'-'||EXTRACT(day FROM DATE_FIELD)||' '||EXTRACT(hour FROM DATE_FIELD)||':'||EXTRACT(minute FROM DATE_FIELD)||':'||TRUNC(EXTRACT(second FROM DATE_FIELD)) AS TIMESTAMP)"
self.assertEqual(sql, value)
def test_time_trunc_sql(self):
sql = self.ops.time_trunc_sql('hour', 'TIME_FIELD')
out = "CAST(EXTRACT(hour FROM TIME_FIELD) || ':00:00' AS TIME)"
self.assertEqual(sql, out)
sql = self.ops.time_trunc_sql('minute', 'TIME_FIELD')
out = "CAST(EXTRACT(hour FROM TIME_FIELD) || ':' || EXTRACT(minute FROM TIME_FIELD) || ':00' AS TIME)"
self.assertEqual(sql, out)
sql = self.ops.time_trunc_sql('second', 'TIME_FIELD')
out = "CAST(EXTRACT(hour FROM TIME_FIELD) || ':' || EXTRACT(minute FROM TIME_FIELD) || ':' || TRUNC(EXTRACT(second FROM TIME_FIELD)) AS TIME)"
self.assertEqual(sql, out)
class DatabaseSchemaTests(TransactionTestCase):
def test_no_index_for_foreignkey(self):
"""
FirebirdSQL already creates indexes automatically for foreign keys. (#70).
"""
index_sql = connection.schema_editor()._model_indexes_sql(Bar)
self.assertEqual(index_sql, [])
def test_fk_index_creation(self):
new_field = ForeignKey(Foo, on_delete=CASCADE)
new_field.set_attributes_from_name(None)
with connection.schema_editor() as editor:
editor.add_field(
Bar,
new_field
)
# Just return indexes others that not automaically created by Fk
indexes = editor._get_field_indexes(Bar, new_field)
self.assertEqual(indexes, [])
def test_fk_remove_issue70(self):
with connection.schema_editor() as editor:
editor.remove_field(
Bar,
Bar._meta.get_field("a")
)
self.assertRaises(DatabaseError)
class SlugFieldTests(TestCase):
def test_slugfield_max_length(self):
"""
Make sure SlugField honors max_length (#9706)
"""
bs = BigS.objects.create(s='slug' * 50)
bs = BigS.objects.get(pk=bs.pk)
self.assertEqual(bs.s, 'slug' * 50)
class DateFieldTests(TestCase):
def tests_date_interval(self):
obj = FieldsTest()
obj.pub_date = datetime.now()
obj.mod_date = obj.pub_date + timedelta(days=3)
obj.save()
objs = FieldsTest.objects.filter(mod_date__gte=F('pub_date') + timedelta(days=3)).all()
self.assertEqual(len(objs), 1)
@override_settings(USE_TZ=False)
class DateFunctionTests(TestCase):
def create_model(self, start_datetime, end_datetime):
return DTModel.objects.create(
name=start_datetime.isoformat(),
start_datetime=start_datetime, end_datetime=end_datetime,
start_date=start_datetime.date(), end_date=end_datetime.date(),
start_time=start_datetime.time(), end_time=end_datetime.time(),
duration=(end_datetime - start_datetime),
)
def test_trunc_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
msg = 'output_field must be either DateField, TimeField, or DateTimeField'
with self.assertRaisesMessage(ValueError, msg):
list(DTModel.objects.annotate(truncated=Trunc('start_datetime', 'year', output_field=IntegerField())))
with self.assertRaisesMessage(AssertionError, "'name' isn't a DateField, TimeField, or DateTimeField."):
list(DTModel.objects.annotate(truncated=Trunc('name', 'year', output_field=DateTimeField())))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_date', 'second')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_time', 'month')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_date', 'month', output_field=DateTimeField())))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_time', 'second', output_field=DateTimeField())))
def test_datetime_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_datetime', kind, output_field=DateTimeField())
).order_by('start_datetime'),
[
(truncate_to(start_datetime, kind)),
(truncate_to(end_datetime, kind))
],
lambda m: (m.truncated)
)
def test_date_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_date', kind, output_field=DateField())
).order_by('start_datetime'),
[
(truncate_to(start_datetime.date(), kind)),
(truncate_to(end_datetime.date(), kind))
],
lambda m: (m.truncated)
)
def test_time_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_time', kind, output_field=TimeField())
).order_by('start_datetime'),
[
(truncate_to(start_datetime.time(), kind)),
(truncate_to(end_datetime.time(), kind))
],
lambda m: (m.truncated)
)
test_date_kind('year')
test_date_kind('month')
test_date_kind('day')
test_time_kind('hour')
test_time_kind('minute')
test_time_kind('second')
test_datetime_kind('year')
test_datetime_kind('month')
test_datetime_kind('day')
test_datetime_kind('hour')
test_datetime_kind('minute')
test_datetime_kind('second')
qs = DTModel.objects.filter(start_datetime__date=Trunc('start_datetime', 'day', output_field=DateField()))
self.assertEqual(qs.count(), 2)
def test_trunc_time_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321000))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123000))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncTime('start_datetime')).order_by('start_datetime'),
[
(start_datetime.time()),
(end_datetime.time()),
],
lambda m: (m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__time=TruncTime('start_datetime')).count(), 2)
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to TimeField"):
list(DTModel.objects.annotate(truncated=TruncTime('start_date')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to TimeField"):
list(DTModel.objects.annotate(truncated=TruncTime('start_date', output_field=DateField())))
| # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.conf import settings
from django.db import connection, DatabaseError
from django.db.models import F, DateField, DateTimeField, IntegerField, TimeField, CASCADE
from django.db.models.fields.related import ForeignKey
from django.db.models.functions import (
Extract, ExtractDay, ExtractHour, ExtractMinute, ExtractMonth,
ExtractSecond, ExtractWeek, ExtractWeekDay, ExtractYear, Trunc, TruncDate,
TruncDay, TruncHour, TruncMinute, TruncMonth, TruncSecond, TruncTime,
TruncYear,
)
from django.test import TestCase, TransactionTestCase, override_settings
from django.utils import timezone
from .models import BigS, FieldsTest, Foo, Bar, DTModel
def microsecond_support(value):
return value if connection.features.supports_microsecond_precision else value.replace(microsecond=0)
def truncate_to(value, kind, tzinfo=None):
# Convert to target timezone before truncation
if tzinfo is not None:
value = value.astimezone(tzinfo)
def truncate(value, kind):
if kind == 'second':
return value.replace(microsecond=0)
if kind == 'minute':
return value.replace(second=0, microsecond=0)
if kind == 'hour':
return value.replace(minute=0, second=0, microsecond=0)
if kind == 'day':
if isinstance(value, datetime):
return value.replace(hour=0, minute=0, second=0, microsecond=0)
return value
if kind == 'month':
if isinstance(value, datetime):
return value.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
return value.replace(day=1)
# otherwise, truncate to year
if isinstance(value, datetime):
return value.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
return value.replace(month=1, day=1)
value = truncate(value, kind)
if tzinfo is not None:
# If there was a daylight saving transition, then reset the timezone.
value = timezone.make_aware(value.replace(tzinfo=None), tzinfo)
return value
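# Illustrative sketch of how `truncate_to` behaves (not part of the original
# test module; the inputs below are hypothetical):
#
#     truncate_to(datetime(2015, 6, 15, 14, 30, 50, 321), 'month')
#     # -> datetime(2015, 6, 1, 0, 0)
#     truncate_to(datetime(2015, 6, 15, 14, 30, 50, 321), 'minute')
#     # -> datetime(2015, 6, 15, 14, 30)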
class FirebirdTest(TestCase):
def setUp(self):
pass
def test_server_version(self):
version = connection.server_version
self.assertNotEqual(version, '')
def test_firebird_version(self):
version = connection.ops.firebird_version
self.assertNotEqual(version, [])
class DatabaseOperationsTest(TestCase):
def setUp(self):
self.ops = connection.ops
def test_get_sequence_name(self):
sq_name = self.ops.get_sequence_name('TEST')
self.assertEqual(sq_name, '"TEST_SQ"')
def test_drop_sequence_sql(self):
sql = self.ops.drop_sequence_sql('TEST')
self.assertEqual(sql, 'DROP SEQUENCE "TEST_SQ"')
def test_date_extract_sql(self):
sql = self.ops.date_extract_sql('week_day', 'DATE_FIELD')
value = "EXTRACT(WEEKDAY FROM DATE_FIELD) + 1"
self.assertEqual(sql, value)
sql = self.ops.date_extract_sql('year', 'DATE_FIELD')
value = "EXTRACT(YEAR FROM DATE_FIELD)"
self.assertEqual(sql, value)
sql = self.ops.date_extract_sql('month', 'DATE_FIELD')
value = "EXTRACT(MONTH FROM DATE_FIELD)"
self.assertEqual(sql, value)
sql = self.ops.date_extract_sql('day', 'DATE_FIELD')
value = "EXTRACT(DAY FROM DATE_FIELD)"
self.assertEqual(sql, value)
def test_datetime_trunc_sql(self):
sql = self.ops.datetime_trunc_sql('year', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-01-01 00:00:00' AS TIMESTAMP)"
self.assertEqual(sql, value)
sql = self.ops.datetime_trunc_sql('month', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-'||EXTRACT(month FROM DATE_FIELD)||'-01 00:00:00' AS TIMESTAMP)"
self.assertEqual(sql, value)
sql = self.ops.datetime_trunc_sql('day', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-'||EXTRACT(month FROM DATE_FIELD)||'-'||EXTRACT(day FROM DATE_FIELD)||' 00:00:00' AS TIMESTAMP)"
self.assertEqual(sql, value)
sql = self.ops.datetime_trunc_sql('hour', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-'||EXTRACT(month FROM DATE_FIELD)||'-'||EXTRACT(day FROM DATE_FIELD)||' '||EXTRACT(hour FROM DATE_FIELD)||':00:00' AS TIMESTAMP)"
self.assertEqual(sql, value)
sql = self.ops.datetime_trunc_sql('minute', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-'||EXTRACT(month FROM DATE_FIELD)||'-'||EXTRACT(day FROM DATE_FIELD)||' '||EXTRACT(hour FROM DATE_FIELD)||':'||EXTRACT(minute FROM DATE_FIELD)||':00' AS TIMESTAMP)"
self.assertEqual(sql, value)
sql = self.ops.datetime_trunc_sql('second', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-'||EXTRACT(month FROM DATE_FIELD)||'-'||EXTRACT(day FROM DATE_FIELD)||' '||EXTRACT(hour FROM DATE_FIELD)||':'||EXTRACT(minute FROM DATE_FIELD)||':'||TRUNC(EXTRACT(second FROM DATE_FIELD)) AS TIMESTAMP)"
self.assertEqual(sql, value)
def test_time_trunc_sql(self):
sql = self.ops.time_trunc_sql('hour', 'TIME_FIELD')
out = "CAST(EXTRACT(hour FROM TIME_FIELD) || ':00:00' AS TIME)"
self.assertEqual(sql, out)
sql = self.ops.time_trunc_sql('minute', 'TIME_FIELD')
out = "CAST(EXTRACT(hour FROM TIME_FIELD) || ':' || EXTRACT(minute FROM TIME_FIELD) || ':00' AS TIME)"
self.assertEqual(sql, out)
sql = self.ops.time_trunc_sql('second', 'TIME_FIELD')
out = "CAST(EXTRACT(hour FROM TIME_FIELD) || ':' || EXTRACT(minute FROM TIME_FIELD) || ':' || TRUNC(EXTRACT(second FROM TIME_FIELD)) AS TIME)"
self.assertEqual(sql, out)
class DatabaseSchemaTests(TransactionTestCase):
def test_no_index_for_foreignkey(self):
"""
FirebirdSQL already creates indexes automatically for foreign keys. (#70).
"""
index_sql = connection.schema_editor()._model_indexes_sql(Bar)
self.assertEqual(index_sql, [])
def test_fk_index_creation(self):
new_field = ForeignKey(Foo, on_delete=CASCADE)
new_field.set_attributes_from_name(None)
with connection.schema_editor() as editor:
editor.add_field(
Bar,
new_field
)
            # Only return the indexes that are not created automatically for the FK
indexes = editor._get_field_indexes(Bar, new_field)
self.assertEqual(indexes, [])
def test_fk_remove_issue70(self):
with connection.schema_editor() as editor:
editor.remove_field(
Bar,
Bar._meta.get_field("a")
)
self.assertRaises(DatabaseError)
class SlugFieldTests(TestCase):
def test_slugfield_max_length(self):
"""
Make sure SlugField honors max_length (#9706)
"""
bs = BigS.objects.create(s='slug' * 50)
bs = BigS.objects.get(pk=bs.pk)
self.assertEqual(bs.s, 'slug' * 50)
class DateFieldTests(TestCase):
def tests_date_interval(self):
obj = FieldsTest()
obj.pub_date = datetime.now()
obj.mod_date = obj.pub_date + timedelta(days=3)
obj.save()
objs = FieldsTest.objects.filter(mod_date__gte=F('pub_date') + timedelta(days=3)).all()
self.assertEqual(len(objs), 1)
@override_settings(USE_TZ=False)
class DateFunctionTests(TestCase):
def create_model(self, start_datetime, end_datetime):
return DTModel.objects.create(
name=start_datetime.isoformat(),
start_datetime=start_datetime, end_datetime=end_datetime,
start_date=start_datetime.date(), end_date=end_datetime.date(),
start_time=start_datetime.time(), end_time=end_datetime.time(),
duration=(end_datetime - start_datetime),
)
def test_trunc_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
msg = 'output_field must be either DateField, TimeField, or DateTimeField'
with self.assertRaisesMessage(ValueError, msg):
list(DTModel.objects.annotate(truncated=Trunc('start_datetime', 'year', output_field=IntegerField())))
with self.assertRaisesMessage(AssertionError, "'name' isn't a DateField, TimeField, or DateTimeField."):
list(DTModel.objects.annotate(truncated=Trunc('name', 'year', output_field=DateTimeField())))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_date', 'second')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_time', 'month')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_date', 'month', output_field=DateTimeField())))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_time', 'second', output_field=DateTimeField())))
def test_datetime_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_datetime', kind, output_field=DateTimeField())
).order_by('start_datetime'),
[
(truncate_to(start_datetime, kind)),
(truncate_to(end_datetime, kind))
],
lambda m: (m.truncated)
)
def test_date_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_date', kind, output_field=DateField())
).order_by('start_datetime'),
[
(truncate_to(start_datetime.date(), kind)),
(truncate_to(end_datetime.date(), kind))
],
lambda m: (m.truncated)
)
def test_time_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_time', kind, output_field=TimeField())
).order_by('start_datetime'),
[
(truncate_to(start_datetime.time(), kind)),
(truncate_to(end_datetime.time(), kind))
],
lambda m: (m.truncated)
)
test_date_kind('year')
test_date_kind('month')
test_date_kind('day')
test_time_kind('hour')
test_time_kind('minute')
test_time_kind('second')
test_datetime_kind('year')
test_datetime_kind('month')
test_datetime_kind('day')
test_datetime_kind('hour')
test_datetime_kind('minute')
test_datetime_kind('second')
qs = DTModel.objects.filter(start_datetime__date=Trunc('start_datetime', 'day', output_field=DateField()))
self.assertEqual(qs.count(), 2)
def test_trunc_time_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321000))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123000))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncTime('start_datetime')).order_by('start_datetime'),
[
(start_datetime.time()),
(end_datetime.time()),
],
lambda m: (m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__time=TruncTime('start_datetime')).count(), 2)
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to TimeField"):
list(DTModel.objects.annotate(truncated=TruncTime('start_date')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to TimeField"):
list(DTModel.objects.annotate(truncated=TruncTime('start_date', output_field=DateField()))) | en | 0.75436 | # -*- coding: utf-8 -*- # Convert to target timezone before truncation # otherwise, truncate to year # If there was a daylight saving transition, then reset the timezone. FirebirdSQL already creates indexes automatically for foreign keys. (#70). # Just return indexes others that not automaically created by Fk Make sure SlugField honors max_length (#9706) | 2.096732 | 2 |
tests/test_past_failures.py | justinbois/eqtk | 2 | 9792 | import pytest
import numpy as np
import eqtk
def test_promiscuous_binding_failure():
A = np.array(
[
[
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
1.0,
1.0,
1.0,
1.0,
0.0,
0.0,
0.0,
0.0,
],
[
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
0.0,
0.0,
1.0,
1.0,
1.0,
1.0,
],
[
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
],
[
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
],
[
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
1.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
],
[
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
1.0,
1.0,
],
]
)
G = np.array(
[
-0.51720535,
-0.69471304,
-1.78260496,
-1.32337777,
-0.63267947,
-0.57923893,
-0.78718634,
-0.27521037,
-0.13733511,
-0.69433251,
1.6858364,
-0.43683479,
0.39312096,
-0.0625205,
0.23139303,
0.07680628,
-0.52774543,
1.74592678,
]
)
x0 = np.array(
[
[
2.48257788e01,
1.72132293e-01,
1.14833731e-02,
5.00547317e-02,
1.38949549e-01,
1.93069773e01,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
]
]
)
def test_spontaneous_production_failure():
N = np.array(
[[1, 0, 1, 0, -1, 0], [1, 0, 0, 1, 0, -1], [1, 1, 1, 0, 0, 0]], dtype=float
)
A = np.array(
[[0, 0, 0, 1, 0, 1], [1, 0, -1, 0, 0, 1], [0, -1, 1, 0, 1, 0]], dtype=float
)
G = np.array([0, 1, 2, 3, 4, 5])
K = np.exp(-np.dot(N, G))
for x0_val in [
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 0, 0],
[1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0],
]:
x0 = np.array(x0_val, dtype=float)
x_NK = eqtk.solve(c0=x0, N=N, K=K)
with pytest.raises(ValueError) as excinfo:
x_AG = eqtk.solve(c0=x0, A=A, G=G)
excinfo.match("`A` must have all nonnegative entries.")
assert eqtk.eqcheck(x_NK, x0, N=N, K=K)
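# Note on the setup above (an explanatory sketch, not part of the original
# test suite): eqtk accepts either a stoichiometric matrix `N` with
# equilibrium constants `K`, or a conservation matrix `A` with free
# energies `G`.  The two descriptions are linked by ln K = -N . G, which is
# why the constants here are built as:
#
#     K = np.exp(-np.dot(N, G))
#
# so a solve with (N, K) should agree with a solve with (A, G) whenever `A`
# is a valid (nonnegative) conservation matrix.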
def test_scale_factor_failure():
A = np.array([[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]])
G = np.array([0.0, 0.0, 0.77428976, -5.64873697, -0.95863043])
x0 = np.array(
[
[
5.50293892e-05,
6.49273515e-08,
2.75796219e-05,
1.29854703e-07,
3.24636758e-08,
]
]
)
x = eqtk.solve(c0=x0, A=A, G=G)
assert eqtk.eqcheck(x, x0, A=A, G=G)
def test_trivial_elemental_failure():
A = np.array([[1.0, 0.0], [0.0, 1.0]])
G = np.array([0.0, 0.0])
x0 = np.array([[3.48219906e-06, 1.32719868e-10]])
assert np.allclose(eqtk.solve(c0=x0, A=A, G=G), x0)
A = np.array([[1.0, 0.0], [0.0, 1.0]])
G = np.array([0.0, 0.0])
x0 = np.array([[2.24222410e-08, 1.63359284e-04]])
assert np.allclose(eqtk.solve(c0=x0, A=A, G=G), x0)
A = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
G = np.array([0.0, 0.0, 0.0])
x0 = np.array([[2.63761955e-04, 4.93360042e-07, 4.88340687e-07]])
assert np.allclose(eqtk.solve(c0=x0, A=A, G=G), x0)
def test_past_failure_1():
A = np.array([[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]])
G = np.array([0.0, 0.0, -16.76857677, -2.38430181, 1.22028775])
x0 = np.array(
[
[
1.65989040e-10,
1.07630096e-04,
1.65989040e-10,
1.65989040e-10,
5.38150479e-05,
]
]
)
x = eqtk.solve(x0, A=A, G=G)
assert eqtk.eqcheck(x, x0, A=A, G=G)
def test_past_failure_2():
N = np.array([[-2.0, 1.0, 0.0, 0.0], [-3.0, 0.0, 1.0, 0.0], [-4.0, 0.0, 0.0, 1.0]])
minus_log_K = np.array([-43.66660344, -68.14676841, -92.28023823])
x0 = np.array([[1.87852623e-06, 3.75705246e-06, 1.25235082e-06, 4.69631557e-07]])
K = np.exp(-minus_log_K)
x = eqtk.solve(x0, N, K)
assert eqtk.eqcheck(x, x0, N, K)
def test_small_conc_failure():
A = np.array(
[
[1.0, 0.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 2.0],
[1.0, 0.0, 0.0, 1.0, 2.0],
]
)
G = np.array(
[
-1.1323012373599138e02,
-2.7028447814426110e-01,
-2.3382656193096754e01,
-1.0088531260804201e02,
-5.7676558386243052e01,
]
)
x0 = np.array(
[
[
1.8134373707286439e-08,
3.5913242229740680e-14,
3.5913242229740680e-14,
3.5913242229740680e-14,
1.7956621114870340e-14,
]
]
)
x = eqtk.solve(c0=x0, A=A, G=G)
assert eqtk.eqcheck(x, x0, A=A, G=G)
| import pytest
import numpy as np
import eqtk
def test_promiscuous_binding_failure():
A = np.array(
[
[
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
1.0,
1.0,
1.0,
1.0,
0.0,
0.0,
0.0,
0.0,
],
[
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
0.0,
0.0,
1.0,
1.0,
1.0,
1.0,
],
[
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
],
[
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
],
[
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
1.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
],
[
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
1.0,
0.0,
1.0,
0.0,
0.0,
1.0,
1.0,
0.0,
0.0,
1.0,
1.0,
],
]
)
G = np.array(
[
-0.51720535,
-0.69471304,
-1.78260496,
-1.32337777,
-0.63267947,
-0.57923893,
-0.78718634,
-0.27521037,
-0.13733511,
-0.69433251,
1.6858364,
-0.43683479,
0.39312096,
-0.0625205,
0.23139303,
0.07680628,
-0.52774543,
1.74592678,
]
)
x0 = np.array(
[
[
2.48257788e01,
1.72132293e-01,
1.14833731e-02,
5.00547317e-02,
1.38949549e-01,
1.93069773e01,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
0.00000000e00,
]
]
)
def test_spontaneous_production_failure():
N = np.array(
[[1, 0, 1, 0, -1, 0], [1, 0, 0, 1, 0, -1], [1, 1, 1, 0, 0, 0]], dtype=float
)
A = np.array(
[[0, 0, 0, 1, 0, 1], [1, 0, -1, 0, 0, 1], [0, -1, 1, 0, 1, 0]], dtype=float
)
G = np.array([0, 1, 2, 3, 4, 5])
K = np.exp(-np.dot(N, G))
for x0_val in [
[1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 0, 0],
[1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0],
]:
x0 = np.array(x0_val, dtype=float)
x_NK = eqtk.solve(c0=x0, N=N, K=K)
with pytest.raises(ValueError) as excinfo:
x_AG = eqtk.solve(c0=x0, A=A, G=G)
excinfo.match("`A` must have all nonnegative entries.")
assert eqtk.eqcheck(x_NK, x0, N=N, K=K)
def test_scale_factor_failure():
A = np.array([[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]])
G = np.array([0.0, 0.0, 0.77428976, -5.64873697, -0.95863043])
x0 = np.array(
[
[
5.50293892e-05,
6.49273515e-08,
2.75796219e-05,
1.29854703e-07,
3.24636758e-08,
]
]
)
x = eqtk.solve(c0=x0, A=A, G=G)
assert eqtk.eqcheck(x, x0, A=A, G=G)
def test_trivial_elemental_failure():
A = np.array([[1.0, 0.0], [0.0, 1.0]])
G = np.array([0.0, 0.0])
x0 = np.array([[3.48219906e-06, 1.32719868e-10]])
assert np.allclose(eqtk.solve(c0=x0, A=A, G=G), x0)
A = np.array([[1.0, 0.0], [0.0, 1.0]])
G = np.array([0.0, 0.0])
x0 = np.array([[2.24222410e-08, 1.63359284e-04]])
assert np.allclose(eqtk.solve(c0=x0, A=A, G=G), x0)
A = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
G = np.array([0.0, 0.0, 0.0])
x0 = np.array([[2.63761955e-04, 4.93360042e-07, 4.88340687e-07]])
assert np.allclose(eqtk.solve(c0=x0, A=A, G=G), x0)
def test_past_failure_1():
A = np.array([[1.0, 0.0, 2.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0, 2.0]])
G = np.array([0.0, 0.0, -16.76857677, -2.38430181, 1.22028775])
x0 = np.array(
[
[
1.65989040e-10,
1.07630096e-04,
1.65989040e-10,
1.65989040e-10,
5.38150479e-05,
]
]
)
x = eqtk.solve(x0, A=A, G=G)
assert eqtk.eqcheck(x, x0, A=A, G=G)
def test_past_failure_2():
N = np.array([[-2.0, 1.0, 0.0, 0.0], [-3.0, 0.0, 1.0, 0.0], [-4.0, 0.0, 0.0, 1.0]])
minus_log_K = np.array([-43.66660344, -68.14676841, -92.28023823])
x0 = np.array([[1.87852623e-06, 3.75705246e-06, 1.25235082e-06, 4.69631557e-07]])
K = np.exp(-minus_log_K)
x = eqtk.solve(x0, N, K)
assert eqtk.eqcheck(x, x0, N, K)
def test_small_conc_failure():
A = np.array(
[
[1.0, 0.0, 1.0, 1.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 2.0],
[1.0, 0.0, 0.0, 1.0, 2.0],
]
)
G = np.array(
[
-1.1323012373599138e02,
-2.7028447814426110e-01,
-2.3382656193096754e01,
-1.0088531260804201e02,
-5.7676558386243052e01,
]
)
x0 = np.array(
[
[
1.8134373707286439e-08,
3.5913242229740680e-14,
3.5913242229740680e-14,
3.5913242229740680e-14,
1.7956621114870340e-14,
]
]
)
x = eqtk.solve(c0=x0, A=A, G=G)
assert eqtk.eqcheck(x, x0, A=A, G=G)
| none | 1 | 1.945195 | 2 |
|
sdk/python/pulumi_azure/lb/outbound_rule.py | suresh198526/pulumi-azure | 0 | 9793 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['OutboundRule']
class OutboundRule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allocated_outbound_ports: Optional[pulumi.Input[int]] = None,
backend_address_pool_id: Optional[pulumi.Input[str]] = None,
enable_tcp_reset: Optional[pulumi.Input[bool]] = None,
frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundRuleFrontendIpConfigurationArgs']]]]] = None,
idle_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
loadbalancer_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Manages a Load Balancer Outbound Rule.
> **NOTE** When using this resource, the Load Balancer needs to have a FrontEnd IP Configuration and a Backend Address Pool Attached.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West US")
example_public_ip = azure.network.PublicIp("examplePublicIp",
location="West US",
resource_group_name=example_resource_group.name,
allocation_method="Static")
example_load_balancer = azure.lb.LoadBalancer("exampleLoadBalancer",
location="West US",
resource_group_name=example_resource_group.name,
frontend_ip_configurations=[azure.lb.LoadBalancerFrontendIpConfigurationArgs(
name="PublicIPAddress",
public_ip_address_id=example_public_ip.id,
)])
example_backend_address_pool = azure.lb.BackendAddressPool("exampleBackendAddressPool",
resource_group_name=example_resource_group.name,
loadbalancer_id=example_load_balancer.id)
example_outbound_rule = azure.lb.OutboundRule("exampleOutboundRule",
resource_group_name=example_resource_group.name,
loadbalancer_id=example_load_balancer.id,
protocol="Tcp",
backend_address_pool_id=example_backend_address_pool.id,
frontend_ip_configurations=[azure.lb.OutboundRuleFrontendIpConfigurationArgs(
name="PublicIPAddress",
)])
```
## Import
Load Balancer Outbound Rules can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:lb/outboundRule:OutboundRule example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/lb1/outboundRules/rule1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] allocated_outbound_ports: The number of outbound ports to be used for NAT.
:param pulumi.Input[str] backend_address_pool_id: The ID of the Backend Address Pool. Outbound traffic is randomly load balanced across IPs in the backend IPs.
:param pulumi.Input[bool] enable_tcp_reset: Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundRuleFrontendIpConfigurationArgs']]]] frontend_ip_configurations: One or more `frontend_ip_configuration` blocks as defined below.
:param pulumi.Input[int] idle_timeout_in_minutes: The timeout for the TCP idle connection
:param pulumi.Input[str] loadbalancer_id: The ID of the Load Balancer in which to create the Outbound Rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Outbound Rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] protocol: The transport protocol for the external endpoint. Possible values are `Udp`, `Tcp` or `All`.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the resource. Changing this forces a new resource to be created.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['allocated_outbound_ports'] = allocated_outbound_ports
if backend_address_pool_id is None:
raise TypeError("Missing required property 'backend_address_pool_id'")
__props__['backend_address_pool_id'] = backend_address_pool_id
__props__['enable_tcp_reset'] = enable_tcp_reset
__props__['frontend_ip_configurations'] = frontend_ip_configurations
__props__['idle_timeout_in_minutes'] = idle_timeout_in_minutes
if loadbalancer_id is None:
raise TypeError("Missing required property 'loadbalancer_id'")
__props__['loadbalancer_id'] = loadbalancer_id
__props__['name'] = name
if protocol is None:
raise TypeError("Missing required property 'protocol'")
__props__['protocol'] = protocol
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
super(OutboundRule, __self__).__init__(
'azure:lb/outboundRule:OutboundRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
allocated_outbound_ports: Optional[pulumi.Input[int]] = None,
backend_address_pool_id: Optional[pulumi.Input[str]] = None,
enable_tcp_reset: Optional[pulumi.Input[bool]] = None,
frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundRuleFrontendIpConfigurationArgs']]]]] = None,
idle_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
loadbalancer_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None) -> 'OutboundRule':
"""
Get an existing OutboundRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] allocated_outbound_ports: The number of outbound ports to be used for NAT.
:param pulumi.Input[str] backend_address_pool_id: The ID of the Backend Address Pool. Outbound traffic is randomly load balanced across IPs in the backend IPs.
:param pulumi.Input[bool] enable_tcp_reset: Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundRuleFrontendIpConfigurationArgs']]]] frontend_ip_configurations: One or more `frontend_ip_configuration` blocks as defined below.
:param pulumi.Input[int] idle_timeout_in_minutes: The timeout for the TCP idle connection
:param pulumi.Input[str] loadbalancer_id: The ID of the Load Balancer in which to create the Outbound Rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Outbound Rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] protocol: The transport protocol for the external endpoint. Possible values are `Udp`, `Tcp` or `All`.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the resource. Changing this forces a new resource to be created.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["allocated_outbound_ports"] = allocated_outbound_ports
__props__["backend_address_pool_id"] = backend_address_pool_id
__props__["enable_tcp_reset"] = enable_tcp_reset
__props__["frontend_ip_configurations"] = frontend_ip_configurations
__props__["idle_timeout_in_minutes"] = idle_timeout_in_minutes
__props__["loadbalancer_id"] = loadbalancer_id
__props__["name"] = name
__props__["protocol"] = protocol
__props__["resource_group_name"] = resource_group_name
return OutboundRule(resource_name, opts=opts, __props__=__props__)
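    # Illustrative (hypothetical) lookup of an existing rule by its Azure
    # resource id -- a sketch only, mirroring the Import example in the
    # class docstring above:
    #
    #     rule = OutboundRule.get(
    #         "imported-rule",
    #         id="/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1"
    #            "/providers/Microsoft.Network/loadBalancers/lb1/outboundRules/rule1",
    #     )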
@property
@pulumi.getter(name="allocatedOutboundPorts")
def allocated_outbound_ports(self) -> pulumi.Output[Optional[int]]:
"""
The number of outbound ports to be used for NAT.
"""
return pulumi.get(self, "allocated_outbound_ports")
@property
@pulumi.getter(name="backendAddressPoolId")
def backend_address_pool_id(self) -> pulumi.Output[str]:
"""
The ID of the Backend Address Pool. Outbound traffic is randomly load balanced across IPs in the backend IPs.
"""
return pulumi.get(self, "backend_address_pool_id")
@property
@pulumi.getter(name="enableTcpReset")
def enable_tcp_reset(self) -> pulumi.Output[Optional[bool]]:
"""
Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP.
"""
return pulumi.get(self, "enable_tcp_reset")
@property
@pulumi.getter(name="frontendIpConfigurations")
def frontend_ip_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.OutboundRuleFrontendIpConfiguration']]]:
"""
One or more `frontend_ip_configuration` blocks as defined below.
"""
return pulumi.get(self, "frontend_ip_configurations")
@property
@pulumi.getter(name="idleTimeoutInMinutes")
def idle_timeout_in_minutes(self) -> pulumi.Output[Optional[int]]:
"""
The timeout for the TCP idle connection
"""
return pulumi.get(self, "idle_timeout_in_minutes")
@property
@pulumi.getter(name="loadbalancerId")
def loadbalancer_id(self) -> pulumi.Output[str]:
"""
The ID of the Load Balancer in which to create the Outbound Rule. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "loadbalancer_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the Outbound Rule. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[str]:
"""
The transport protocol for the external endpoint. Possible values are `Udp`, `Tcp` or `All`.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which to create the resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['OutboundRule']
class OutboundRule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allocated_outbound_ports: Optional[pulumi.Input[int]] = None,
backend_address_pool_id: Optional[pulumi.Input[str]] = None,
enable_tcp_reset: Optional[pulumi.Input[bool]] = None,
frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundRuleFrontendIpConfigurationArgs']]]]] = None,
idle_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
loadbalancer_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Manages a Load Balancer Outbound Rule.
> **NOTE** When using this resource, the Load Balancer needs to have a FrontEnd IP Configuration and a Backend Address Pool Attached.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West US")
example_public_ip = azure.network.PublicIp("examplePublicIp",
location="West US",
resource_group_name=example_resource_group.name,
allocation_method="Static")
example_load_balancer = azure.lb.LoadBalancer("exampleLoadBalancer",
location="West US",
resource_group_name=example_resource_group.name,
frontend_ip_configurations=[azure.lb.LoadBalancerFrontendIpConfigurationArgs(
name="PublicIPAddress",
public_ip_address_id=example_public_ip.id,
)])
example_backend_address_pool = azure.lb.BackendAddressPool("exampleBackendAddressPool",
resource_group_name=example_resource_group.name,
loadbalancer_id=example_load_balancer.id)
example_outbound_rule = azure.lb.OutboundRule("exampleOutboundRule",
resource_group_name=example_resource_group.name,
loadbalancer_id=example_load_balancer.id,
protocol="Tcp",
backend_address_pool_id=example_backend_address_pool.id,
frontend_ip_configurations=[azure.lb.OutboundRuleFrontendIpConfigurationArgs(
name="PublicIPAddress",
)])
```
## Import
Load Balancer Outbound Rules can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:lb/outboundRule:OutboundRule example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/lb1/outboundRules/rule1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] allocated_outbound_ports: The number of outbound ports to be used for NAT.
:param pulumi.Input[str] backend_address_pool_id: The ID of the Backend Address Pool. Outbound traffic is randomly load balanced across IPs in the backend IPs.
:param pulumi.Input[bool] enable_tcp_reset: Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundRuleFrontendIpConfigurationArgs']]]] frontend_ip_configurations: One or more `frontend_ip_configuration` blocks as defined below.
:param pulumi.Input[int] idle_timeout_in_minutes: The timeout for the TCP idle connection
:param pulumi.Input[str] loadbalancer_id: The ID of the Load Balancer in which to create the Outbound Rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Outbound Rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] protocol: The transport protocol for the external endpoint. Possible values are `Udp`, `Tcp` or `All`.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the resource. Changing this forces a new resource to be created.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['allocated_outbound_ports'] = allocated_outbound_ports
if backend_address_pool_id is None:
raise TypeError("Missing required property 'backend_address_pool_id'")
__props__['backend_address_pool_id'] = backend_address_pool_id
__props__['enable_tcp_reset'] = enable_tcp_reset
__props__['frontend_ip_configurations'] = frontend_ip_configurations
__props__['idle_timeout_in_minutes'] = idle_timeout_in_minutes
if loadbalancer_id is None:
raise TypeError("Missing required property 'loadbalancer_id'")
__props__['loadbalancer_id'] = loadbalancer_id
__props__['name'] = name
if protocol is None:
raise TypeError("Missing required property 'protocol'")
__props__['protocol'] = protocol
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
super(OutboundRule, __self__).__init__(
'azure:lb/outboundRule:OutboundRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
allocated_outbound_ports: Optional[pulumi.Input[int]] = None,
backend_address_pool_id: Optional[pulumi.Input[str]] = None,
enable_tcp_reset: Optional[pulumi.Input[bool]] = None,
frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundRuleFrontendIpConfigurationArgs']]]]] = None,
idle_timeout_in_minutes: Optional[pulumi.Input[int]] = None,
loadbalancer_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None) -> 'OutboundRule':
"""
Get an existing OutboundRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] allocated_outbound_ports: The number of outbound ports to be used for NAT.
:param pulumi.Input[str] backend_address_pool_id: The ID of the Backend Address Pool. Outbound traffic is randomly load balanced across IPs in the backend IPs.
:param pulumi.Input[bool] enable_tcp_reset: Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundRuleFrontendIpConfigurationArgs']]]] frontend_ip_configurations: One or more `frontend_ip_configuration` blocks as defined below.
:param pulumi.Input[int] idle_timeout_in_minutes: The timeout for the TCP idle connection
:param pulumi.Input[str] loadbalancer_id: The ID of the Load Balancer in which to create the Outbound Rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Outbound Rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] protocol: The transport protocol for the external endpoint. Possible values are `Udp`, `Tcp` or `All`.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the resource. Changing this forces a new resource to be created.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["allocated_outbound_ports"] = allocated_outbound_ports
__props__["backend_address_pool_id"] = backend_address_pool_id
__props__["enable_tcp_reset"] = enable_tcp_reset
__props__["frontend_ip_configurations"] = frontend_ip_configurations
__props__["idle_timeout_in_minutes"] = idle_timeout_in_minutes
__props__["loadbalancer_id"] = loadbalancer_id
__props__["name"] = name
__props__["protocol"] = protocol
__props__["resource_group_name"] = resource_group_name
return OutboundRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allocatedOutboundPorts")
def allocated_outbound_ports(self) -> pulumi.Output[Optional[int]]:
"""
The number of outbound ports to be used for NAT.
"""
return pulumi.get(self, "allocated_outbound_ports")
@property
@pulumi.getter(name="backendAddressPoolId")
def backend_address_pool_id(self) -> pulumi.Output[str]:
"""
The ID of the Backend Address Pool. Outbound traffic is randomly load balanced across IPs in the backend IPs.
"""
return pulumi.get(self, "backend_address_pool_id")
@property
@pulumi.getter(name="enableTcpReset")
def enable_tcp_reset(self) -> pulumi.Output[Optional[bool]]:
"""
Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP.
"""
return pulumi.get(self, "enable_tcp_reset")
@property
@pulumi.getter(name="frontendIpConfigurations")
def frontend_ip_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.OutboundRuleFrontendIpConfiguration']]]:
"""
One or more `frontend_ip_configuration` blocks as defined below.
"""
return pulumi.get(self, "frontend_ip_configurations")
@property
@pulumi.getter(name="idleTimeoutInMinutes")
def idle_timeout_in_minutes(self) -> pulumi.Output[Optional[int]]:
"""
The timeout for the TCP idle connection
"""
return pulumi.get(self, "idle_timeout_in_minutes")
@property
@pulumi.getter(name="loadbalancerId")
def loadbalancer_id(self) -> pulumi.Output[str]:
"""
The ID of the Load Balancer in which to create the Outbound Rule. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "loadbalancer_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the Outbound Rule. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[str]:
"""
The transport protocol for the external endpoint. Possible values are `Udp`, `Tcp` or `All`.
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which to create the resource. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| en | 0.640442 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** Manages a Load Balancer Outbound Rule. > **NOTE** When using this resource, the Load Balancer needs to have a FrontEnd IP Configuration and a Backend Address Pool Attached. ## Example Usage ```python import pulumi import pulumi_azure as azure example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West US") example_public_ip = azure.network.PublicIp("examplePublicIp", location="West US", resource_group_name=example_resource_group.name, allocation_method="Static") example_load_balancer = azure.lb.LoadBalancer("exampleLoadBalancer", location="West US", resource_group_name=example_resource_group.name, frontend_ip_configurations=[azure.lb.LoadBalancerFrontendIpConfigurationArgs( name="PublicIPAddress", public_ip_address_id=example_public_ip.id, )]) example_backend_address_pool = azure.lb.BackendAddressPool("exampleBackendAddressPool", resource_group_name=example_resource_group.name, loadbalancer_id=example_load_balancer.id) example_outbound_rule = azure.lb.OutboundRule("exampleOutboundRule", resource_group_name=example_resource_group.name, loadbalancer_id=example_load_balancer.id, protocol="Tcp", backend_address_pool_id=example_backend_address_pool.id, frontend_ip_configurations=[azure.lb.OutboundRuleFrontendIpConfigurationArgs( name="PublicIPAddress", )]) ``` ## Import Load Balancer Outbound Rules can be imported using the `resource id`, e.g. ```sh $ pulumi import azure:lb/outboundRule:OutboundRule example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/lb1/outboundRules/rule1 ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[int] allocated_outbound_ports: The number of outbound ports to be used for NAT. :param pulumi.Input[str] backend_address_pool_id: The ID of the Backend Address Pool. Outbound traffic is randomly load balanced across IPs in the backend IPs. :param pulumi.Input[bool] enable_tcp_reset: Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundRuleFrontendIpConfigurationArgs']]]] frontend_ip_configurations: One or more `frontend_ip_configuration` blocks as defined below. :param pulumi.Input[int] idle_timeout_in_minutes: The timeout for the TCP idle connection :param pulumi.Input[str] loadbalancer_id: The ID of the Load Balancer in which to create the Outbound Rule. Changing this forces a new resource to be created. :param pulumi.Input[str] name: Specifies the name of the Outbound Rule. Changing this forces a new resource to be created. :param pulumi.Input[str] protocol: The transport protocol for the external endpoint. Possible values are `Udp`, `Tcp` or `All`. :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the resource. Changing this forces a new resource to be created. Get an existing OutboundRule resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. 
:param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[int] allocated_outbound_ports: The number of outbound ports to be used for NAT. :param pulumi.Input[str] backend_address_pool_id: The ID of the Backend Address Pool. Outbound traffic is randomly load balanced across IPs in the backend IPs. :param pulumi.Input[bool] enable_tcp_reset: Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OutboundRuleFrontendIpConfigurationArgs']]]] frontend_ip_configurations: One or more `frontend_ip_configuration` blocks as defined below. :param pulumi.Input[int] idle_timeout_in_minutes: The timeout for the TCP idle connection :param pulumi.Input[str] loadbalancer_id: The ID of the Load Balancer in which to create the Outbound Rule. Changing this forces a new resource to be created. :param pulumi.Input[str] name: Specifies the name of the Outbound Rule. Changing this forces a new resource to be created. :param pulumi.Input[str] protocol: The transport protocol for the external endpoint. Possible values are `Udp`, `Tcp` or `All`. :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the resource. Changing this forces a new resource to be created. The number of outbound ports to be used for NAT. The ID of the Backend Address Pool. Outbound traffic is randomly load balanced across IPs in the backend IPs. Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination. This element is only used when the protocol is set to TCP. One or more `frontend_ip_configuration` blocks as defined below. The timeout for the TCP idle connection The ID of the Load Balancer in which to create the Outbound Rule. Changing this forces a new resource to be created. Specifies the name of the Outbound Rule. Changing this forces a new resource to be created. The transport protocol for the external endpoint. Possible values are `Udp`, `Tcp` or `All`. The name of the resource group in which to create the resource. Changing this forces a new resource to be created. | 1.544875 | 2 |
orbit_predictor/predictors/base.py | Juanlu001/orbit-predictor | 0 | 9794 |
# MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime as dt
import logging
import warnings
from collections import namedtuple
from math import pi, acos, degrees, radians
import numpy as np
try:
from scipy.optimize import brentq, minimize_scalar
except ImportError:
warnings.warn('scipy module was not found, some features may not work properly.',
ImportWarning)
from orbit_predictor.constants import MU_E
from orbit_predictor.exceptions import NotReachable, PropagationError
from orbit_predictor import coordinate_systems
from orbit_predictor.keplerian import rv2coe
from orbit_predictor.utils import (
angle_between,
cross_product,
dot_product,
reify,
vector_diff,
vector_norm,
gstime_from_datetime,
get_shadow,
get_sun,
eclipse_duration,
get_satellite_minus_penumbra_verticals,
)
logger = logging.getLogger(__name__)
ONE_SECOND = dt.timedelta(seconds=1)
def round_datetime(dt_):
return dt_
class Position(namedtuple(
"Position", ['when_utc', 'position_ecef', 'velocity_ecef', 'error_estimate'])):
@reify
def position_llh(self):
"""Latitude (deg), longitude (deg), altitude (km)."""
return coordinate_systems.ecef_to_llh(self.position_ecef)
@reify
def osculating_elements(self):
"""Osculating Keplerian orbital elements.
Semimajor axis (km), eccentricity, inclination (deg),
right ascension of the ascending node or RAAN (deg),
argument of perigee (deg), true anomaly (deg).
"""
gmst = gstime_from_datetime(self.when_utc)
position_eci = coordinate_systems.ecef_to_eci(self.position_ecef, gmst)
velocity_eci = coordinate_systems.ecef_to_eci(self.velocity_ecef, gmst)
# Convert position to Keplerian osculating elements
p, ecc, inc, raan, argp, ta = rv2coe(
MU_E, np.array(position_eci), np.array(velocity_eci)
)
# Transform to more familiar semimajor axis
sma = p / (1 - ecc ** 2)
return sma, ecc, degrees(inc), degrees(raan), degrees(argp), degrees(ta)
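    # Sketch of reading the osculating elements off a Position (illustrative
    # only; a real Position would come from a predictor's get_position call,
    # which this snippet assumes exists elsewhere):
    #
    #     sma, ecc, inc, raan, argp, ta = position.osculating_elements
    #     apogee_alt_km = sma * (1 + ecc) - 6378.137  # minus Earth's equatorial radius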
class PredictedPass:
def __init__(self, location, sate_id,
max_elevation_deg,
aos, los, duration_s,
max_elevation_position=None,
max_elevation_date=None):
self.location = location
self.sate_id = sate_id
self.max_elevation_position = max_elevation_position
self.max_elevation_date = max_elevation_date
self.max_elevation_deg = max_elevation_deg
self.aos = aos
self.los = los
self.duration_s = duration_s
@property
def midpoint(self):
"""Returns a datetime of the midpoint of the pass"""
return self.aos + (self.los - self.aos) / 2
def __repr__(self):
return "<PredictedPass {} over {} on {}>".format(self.sate_id, self.location, self.aos)
def __eq__(self, other):
return all([issubclass(other.__class__, PredictedPass),
self.location == other.location,
self.sate_id == other.sate_id,
self.max_elevation_position == other.max_elevation_position,
self.max_elevation_date == other.max_elevation_date,
self.max_elevation_deg == other.max_elevation_deg,
self.aos == other.aos,
self.los == other.los,
self.duration_s == other.duration_s])
def get_off_nadir_angle(self):
warnings.warn("This method is deprecated!", DeprecationWarning)
return self.off_nadir_deg
@reify
def off_nadir_deg(self):
"""Computes off-nadir angle calculation
Given satellite position ``sate_pos``, velocity ``sate_vel``, and
location ``target`` in a common frame, off-nadir angle ``off_nadir_angle``
is given by:
t2b = sate_pos - target
cos(off_nadir_angle) = (sate_pos · t2b) # Vectorial dot product
_______________________
|| sate_pos || || t2b||
Sign for the rotation is calculated this way
cross = target ⨯ sate_pos
sign = cross · sate_vel
____________________
| cross · sate_vel |
"""
sate_pos = self.max_elevation_position.position_ecef
sate_vel = self.max_elevation_position.velocity_ecef
target = self.location.position_ecef
t2b = vector_diff(sate_pos, target)
angle = acos(
dot_product(sate_pos, t2b) / (vector_norm(sate_pos) * vector_norm(t2b))
)
cross = cross_product(target, sate_pos)
dot = dot_product(cross, sate_vel)
try:
sign = dot / abs(dot)
except ZeroDivisionError:
sign = 1
return degrees(angle) * sign
class Predictor:
@property
def sate_id(self):
raise NotImplementedError
def propagate_eci(self, when_utc=None):
raise NotImplementedError
def get_position(self, when_utc=None):
raise NotImplementedError("You have to implement it!")
def get_shadow(self, when_utc=None):
"""Gives illumination at given time (2 for illuminated, 1 for penumbra, 0 for umbra)."""
if when_utc is None:
when_utc = dt.datetime.utcnow()
return get_shadow(
self.get_position(when_utc).position_ecef,
when_utc
)
def get_normal_vector(self, when_utc=None):
"""Gets unitary normal vector (orthogonal to orbital plane) at given time."""
if when_utc is None:
when_utc = dt.datetime.utcnow()
position, velocity = self.propagate_eci(when_utc)
orbital_plane_normal = np.cross(position, velocity)
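# r x v points along the orbital angular momentum, i.e. normal to the orbital plane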
return orbital_plane_normal / vector_norm(orbital_plane_normal)
def get_beta(self, when_utc=None):
"""Gets angle between orbital plane and Sun direction (beta) at given time, in degrees."""
if when_utc is None:
when_utc = dt.datetime.utcnow()
# Here we calculate the complementary angle of beta,
# because we use the normal vector of the orbital plane
beta_comp = angle_between(
get_sun(when_utc),
self.get_normal_vector(when_utc)
)
# We subtract from 90 degrees to return the real beta angle
return 90 - beta_comp
class CartesianPredictor(Predictor):
def _propagate_ecef(self, when_utc=None):
"""Return position and velocity in the given date using ECEF coordinate system."""
if when_utc is None:
when_utc = dt.datetime.utcnow()
position_eci, velocity_eci = self.propagate_eci(when_utc)
gmst = gstime_from_datetime(when_utc)
position_ecef = coordinate_systems.eci_to_ecef(position_eci, gmst)
velocity_ecef = coordinate_systems.eci_to_ecef(velocity_eci, gmst)
return position_ecef, velocity_ecef
@reify
def mean_motion(self):
"""Mean motion, in radians per minute"""
raise NotImplementedError
@reify
def period(self):
"""Orbital period, in minutes"""
return 2 * pi / self.mean_motion
def get_position(self, when_utc=None):
"""Return a Position namedtuple in ECEF coordinate system"""
if when_utc is None:
when_utc = dt.datetime.utcnow()
position_ecef, velocity_ecef = self._propagate_ecef(when_utc)
return Position(when_utc=when_utc, position_ecef=position_ecef,
velocity_ecef=velocity_ecef, error_estimate=None)
def get_only_position(self, when_utc=None):
"""Return a tuple in ECEF coordinate system"""
return self.get_position(when_utc).position_ecef
def get_eclipse_duration(self, when_utc=None, tolerance=1e-1):
"""Gets eclipse duration at given time, in minutes"""
ecc = self.get_position(when_utc).osculating_elements[1]
if ecc > tolerance:
raise NotImplementedError("Non circular orbits are not supported")
beta = self.get_beta(when_utc)
return eclipse_duration(beta, self.period)
def passes_over(self, location, when_utc, limit_date=None, max_elevation_gt=0, aos_at_dg=0):
return LocationPredictor(location, self, when_utc, limit_date,
max_elevation_gt, aos_at_dg)
def get_next_pass(self, location, when_utc=None, max_elevation_gt=5,
aos_at_dg=0, limit_date=None):
"""Return a PredictedPass instance with the data of the next pass over the given location
location_llh: point on Earth we want to see from the satellite.
when_utc: datetime UTC after which the pass is calculated, default to now.
max_elevation_gt: filter passes with max_elevation under it.
aos_at_dg: This is if we want to start the pass at a specific elevation.
The next pass with a LOS strictly after when_utc will be returned,
possibly the current pass.
"""
if when_utc is None:
when_utc = dt.datetime.utcnow()
for pass_ in self.passes_over(location, when_utc, limit_date,
max_elevation_gt=max_elevation_gt,
aos_at_dg=aos_at_dg):
return pass_
else:
raise NotReachable('Propagation limit date exceeded')
def eclipses_since(self, when_utc=None, limit_date=None):
"""
An iterator that yields the start and end times of all eclipses between
when_utc and limit_date.
The next eclipse with an end strictly after when_utc will be returned,
possibly the current eclipse.
The last eclipse returned starts before limit_date, but it can end
strictly after limit_date.
Non-circular orbits are not supported and will raise NotImplementedError.
"""
def _get_illumination(t):
my_start = start + dt.timedelta(seconds=t)
result = get_satellite_minus_penumbra_verticals(
self.get_only_position(my_start),
my_start
)
return result
if when_utc is None:
when_utc = dt.datetime.utcnow()
orbital_period_s = self.period * 60
# A third of the orbit period is used as the base window of the search.
# This window ensures the function get_satellite_minus_penumbra_verticals
# will not have more than one local minimum (one in the illuminated phase and
# the other in penumbra).
base_search_window_s = orbital_period_s / 3
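# e.g. for a ~92 minute LEO orbit this gives a search window of roughly 30 minutes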
start = when_utc
while limit_date is None or start < limit_date:
# a negative minimum is approximately the midpoint of the eclipse
minimum_illumination = minimize_scalar(
_get_illumination,
bounds=(0, base_search_window_s),
method="bounded",
options={"xatol": 1e-2},
)
eclipse_center_candidate_delta_s = minimum_illumination.x
# If the minimum found is not illuminated, there is an eclipse here
if _get_illumination(eclipse_center_candidate_delta_s) < 0:
# The small time interval to search zeros around the center
# is estimated with the expected eclipse duration (which generally
# is smaller than expected, hence the 1.5 coefficient).
# Also a minimum of 180 seconds was added because
# in some cases the estimation is 0 even though there is an eclipse.
eclipse_duration_estimation_s = self.get_eclipse_duration(start) * 60
zero_search_window_s = max(180, 1.5 * eclipse_duration_estimation_s)
# Search now both zeros to get the start and end of the eclipse
eclipse_start_delta_s = brentq(
_get_illumination,
eclipse_center_candidate_delta_s - zero_search_window_s,
eclipse_center_candidate_delta_s,
xtol=1e-2,
full_output=False,
)
eclipse_end_delta_s = brentq(
_get_illumination,
eclipse_center_candidate_delta_s,
eclipse_center_candidate_delta_s + zero_search_window_s,
xtol=1e-2,
full_output=False,
)
eclipse_start = start + dt.timedelta(seconds=eclipse_start_delta_s)
eclipse_end = start + dt.timedelta(seconds=eclipse_end_delta_s)
yield eclipse_start, eclipse_end
start = eclipse_end + dt.timedelta(seconds=base_search_window_s)
else:
start += dt.timedelta(seconds=base_search_window_s)
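# Minimal usage sketch (`predictor` is an illustrative name for any concrete
# CartesianPredictor subclass, e.g. one built from a TLE):
#     for eclipse_start, eclipse_end in predictor.eclipses_since(limit_date=limit):
#         print(eclipse_start, eclipse_end)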
class GPSPredictor(Predictor):
pass
class LocationPredictor:
"""Predicts passes over a given location
Exposes an iterable interface
"""
def __init__(self, location, predictor, start_date, limit_date=None,
max_elevation_gt=0, aos_at_dg=0, *, propagator=None):
if propagator is not None:
warnings.warn(
"propagator parameter was renamed to predictor "
"and will be removed in a future release",
DeprecationWarning
)
predictor = propagator
self.location = location
self.predictor = predictor
self.start_date = start_date
self.limit_date = limit_date
self.max_elevation_gt = radians(max([max_elevation_gt, aos_at_dg]))
self.aos_at = radians(aos_at_dg)
@property
def propagator(self):
warnings.warn(
"propagator parameter was renamed to predictor "
"and will be removed in a future release",
DeprecationWarning
)
return self.predictor
def __iter__(self):
"""Returns one pass each time"""
current_date = self.start_date
while True:
if self.is_ascending(current_date):
# we need a descending point
ascending_date = current_date
descending_date = self._find_nearest_descending(ascending_date)
pass_ = self._refine_pass(ascending_date, descending_date)
if pass_.valid:
if self.limit_date is not None and pass_.aos > self.limit_date:
break
yield self._build_predicted_pass(pass_)
if self.limit_date is not None and current_date > self.limit_date:
break
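# Advance 0.6 of an orbit past this pass' TCA so the next iteration cannot
# re-detect the same pass.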
current_date = pass_.tca + self._orbit_step(0.6)
else:
current_date = self._find_nearest_ascending(current_date)
def _build_predicted_pass(self, accuratepass):
"""Returns a classic predicted pass"""
tca_position = self.predictor.get_position(accuratepass.tca)
return PredictedPass(self.location, self.predictor.sate_id,
max_elevation_deg=accuratepass.max_elevation_deg,
aos=accuratepass.aos,
los=accuratepass.los,
duration_s=accuratepass.duration.total_seconds(),
max_elevation_position=tca_position,
max_elevation_date=accuratepass.tca,
)
def _find_nearest_descending(self, ascending_date):
for candidate in self._sample_points(ascending_date):
if not self.is_ascending(candidate):
return candidate
else:
logger.error('Could not find a descending pass over %s start date: %s - TLE: %s',
self.location, ascending_date, self.predictor.tle)
raise PropagationError("Can not find an descending phase")
def _find_nearest_ascending(self, descending_date):
for candidate in self._sample_points(descending_date):
if self.is_ascending(candidate):
return candidate
else:
logger.error('Could not find an ascending pass over %s start date: %s - TLE: %s',
self.location, descending_date, self.predictor.tle)
raise PropagationError('Cannot find an ascending phase')
def _sample_points(self, date):
"""Helper method to found ascending or descending phases of elevation"""
start = date
end = date + self._orbit_step(0.99)
mid = self.midpoint(start, end)
mid_right = self.midpoint(mid, end)
mid_left = self.midpoint(start, mid)
return [end, mid, mid_right, mid_left]
def _refine_pass(self, ascending_date, descending_date):
tca = self._find_tca(ascending_date, descending_date)
elevation = self._elevation_at(tca)
if elevation > self.max_elevation_gt:
aos = self._find_aos(tca)
los = self._find_los(tca)
else:
aos = los = None
return AccuratePredictedPass(aos, tca, los, elevation)
def _find_tca(self, ascending_date, descending_date):
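# Bisect between an ascending and a descending sample; the elevation maximum
# (time of closest approach) stays bracketed until the interval is below ONE_SECOND.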
while not self._precision_reached(ascending_date, descending_date):
midpoint = self.midpoint(ascending_date, descending_date)
if self.is_ascending(midpoint):
ascending_date = midpoint
else:
descending_date = midpoint
return ascending_date
def _precision_reached(self, start, end):
# TODO: Allow the precision to change from the outside
return end - start <= ONE_SECOND
@staticmethod
def midpoint(start, end):
"""Returns the midpoint between two dates"""
return start + (end - start) / 2
def _elevation_at(self, when_utc):
position = self.predictor.get_only_position(when_utc)
return self.location.elevation_for(position)
def is_passing(self, when_utc):
"""Returns a boolean indicating if satellite is actually visible"""
return bool(self._elevation_at(when_utc))
def is_ascending(self, when_utc):
"""Check is elevation is ascending or descending on a given point"""
elevation = self._elevation_at(when_utc)
next_elevation = self._elevation_at(when_utc + ONE_SECOND)
return elevation <= next_elevation
def _orbit_step(self, size):
"""Returns a time step, that will make the satellite advance a given number of orbits"""
step_in_radians = size * 2 * pi
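# mean_motion is in radians per minute: radians / mean_motion gives minutes, * 60 gives seconds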
seconds = (step_in_radians / self.predictor.mean_motion) * 60
return dt.timedelta(seconds=seconds)
def _find_aos(self, tca):
end = tca
start = tca - self._orbit_step(0.34) # One third of the orbit
elevation = self._elevation_at(start)
assert elevation < 0
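# One third of an orbit before TCA the satellite is assumed to be below the
# horizon, so [start, end] brackets the AOS crossing for the bisection below.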
while not self._precision_reached(start, end):
midpoint = self.midpoint(start, end)
elevation = self._elevation_at(midpoint)
if elevation < self.aos_at:
start = midpoint
else:
end = midpoint
return end
def _find_los(self, tca):
start = tca
end = tca + self._orbit_step(0.34)
while not self._precision_reached(start, end):
midpoint = self.midpoint(start, end)
elevation = self._elevation_at(midpoint)
if elevation < self.aos_at:
end = midpoint
else:
start = midpoint
return start
class AccuratePredictedPass:
def __init__(self, aos, tca, los, max_elevation):
self.aos = round_datetime(aos) if aos is not None else None
self.tca = round_datetime(tca)
self.los = round_datetime(los) if los is not None else None
self.max_elevation = max_elevation
@property
def valid(self):
return self.max_elevation > 0 and self.aos is not None and self.los is not None
@reify
def max_elevation_deg(self):
return degrees(self.max_elevation)
@reify
def duration(self):
return self.los - self.aos
| # MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime as dt
import logging
import warnings
from collections import namedtuple
from math import pi, acos, degrees, radians
import numpy as np
try:
from scipy.optimize import brentq, minimize_scalar
except ImportError:
warnings.warn('scipy module was not found, some features may not work properly.',
ImportWarning)
from orbit_predictor.constants import MU_E
from orbit_predictor.exceptions import NotReachable, PropagationError
from orbit_predictor import coordinate_systems
from orbit_predictor.keplerian import rv2coe
from orbit_predictor.utils import (
angle_between,
cross_product,
dot_product,
reify,
vector_diff,
vector_norm,
gstime_from_datetime,
get_shadow,
get_sun,
eclipse_duration,
get_satellite_minus_penumbra_verticals,
)
logger = logging.getLogger(__name__)
ONE_SECOND = dt.timedelta(seconds=1)
def round_datetime(dt_):
return dt_
class Position(namedtuple(
"Position", ['when_utc', 'position_ecef', 'velocity_ecef', 'error_estimate'])):
@reify
def position_llh(self):
"""Latitude (deg), longitude (deg), altitude (km)."""
return coordinate_systems.ecef_to_llh(self.position_ecef)
@reify
def osculating_elements(self):
"""Osculating Keplerian orbital elements.
Semimajor axis (km), eccentricity, inclination (deg),
right ascension of the ascending node or RAAN (deg),
argument of perigee (deg), true anomaly (deg).
"""
gmst = gstime_from_datetime(self.when_utc)
position_eci = coordinate_systems.ecef_to_eci(self.position_ecef, gmst)
velocity_eci = coordinate_systems.ecef_to_eci(self.velocity_ecef, gmst)
# Convert position to Keplerian osculating elements
p, ecc, inc, raan, argp, ta = rv2coe(
MU_E, np.array(position_eci), np.array(velocity_eci)
)
# Transform to more familiar semimajor axis
sma = p / (1 - ecc ** 2)
return sma, ecc, degrees(inc), degrees(raan), degrees(argp), degrees(ta)
class PredictedPass:
def __init__(self, location, sate_id,
max_elevation_deg,
aos, los, duration_s,
max_elevation_position=None,
max_elevation_date=None):
self.location = location
self.sate_id = sate_id
self.max_elevation_position = max_elevation_position
self.max_elevation_date = max_elevation_date
self.max_elevation_deg = max_elevation_deg
self.aos = aos
self.los = los
self.duration_s = duration_s
@property
def midpoint(self):
"""Returns a datetime of the midpoint of the pass"""
return self.aos + (self.los - self.aos) / 2
def __repr__(self):
return "<PredictedPass {} over {} on {}>".format(self.sate_id, self.location, self.aos)
def __eq__(self, other):
return all([issubclass(other.__class__, PredictedPass),
self.location == other.location,
self.sate_id == other.sate_id,
self.max_elevation_position == other.max_elevation_position,
self.max_elevation_date == other.max_elevation_date,
self.max_elevation_deg == other.max_elevation_deg,
self.aos == other.aos,
self.los == other.los,
self.duration_s == other.duration_s])
def get_off_nadir_angle(self):
warnings.warn("This method is deprecated!", DeprecationWarning)
return self.off_nadir_deg
@reify
def off_nadir_deg(self):
"""Computes off-nadir angle calculation
Given satellite position ``sate_pos``, velocity ``sate_vel``, and
location ``target`` in a common frame, off-nadir angle ``off_nadir_angle``
is given by:
t2b = sate_pos - target
cos(off_nadir_angle) = (sate_pos · t2b) # Vectorial dot product
_______________________
|| sate_pos || || t2b||
Sign for the rotation is calculated this way
cross = target ⨯ sate_pos
sign = cross · sate_vel
____________________
| cross · sate_vel |
"""
sate_pos = self.max_elevation_position.position_ecef
sate_vel = self.max_elevation_position.velocity_ecef
target = self.location.position_ecef
t2b = vector_diff(sate_pos, target)
angle = acos(
dot_product(sate_pos, t2b) / (vector_norm(sate_pos) * vector_norm(t2b))
)
cross = cross_product(target, sate_pos)
dot = dot_product(cross, sate_vel)
try:
sign = dot / abs(dot)
except ZeroDivisionError:
sign = 1
return degrees(angle) * sign
class Predictor:
@property
def sate_id(self):
raise NotImplementedError
def propagate_eci(self, when_utc=None):
raise NotImplementedError
def get_position(self, when_utc=None):
raise NotImplementedError("You have to implement it!")
def get_shadow(self, when_utc=None):
"""Gives illumination at given time (2 for illuminated, 1 for penumbra, 0 for umbra)."""
if when_utc is None:
when_utc = dt.datetime.utcnow()
return get_shadow(
self.get_position(when_utc).position_ecef,
when_utc
)
def get_normal_vector(self, when_utc=None):
"""Gets unitary normal vector (orthogonal to orbital plane) at given time."""
if when_utc is None:
when_utc = dt.datetime.utcnow()
position, velocity = self.propagate_eci(when_utc)
orbital_plane_normal = np.cross(position, velocity)
return orbital_plane_normal / vector_norm(orbital_plane_normal)
def get_beta(self, when_utc=None):
"""Gets angle between orbital plane and Sun direction (beta) at given time, in degrees."""
if when_utc is None:
when_utc = dt.datetime.utcnow()
# Here we calculate the complementary angle of beta,
# because we use the normal vector of the orbital plane
beta_comp = angle_between(
get_sun(when_utc),
self.get_normal_vector(when_utc)
)
# We subtract from 90 degrees to return the real beta angle
return 90 - beta_comp
class CartesianPredictor(Predictor):
def _propagate_ecef(self, when_utc=None):
"""Return position and velocity in the given date using ECEF coordinate system."""
if when_utc is None:
when_utc = dt.datetime.utcnow()
position_eci, velocity_eci = self.propagate_eci(when_utc)
gmst = gstime_from_datetime(when_utc)
position_ecef = coordinate_systems.eci_to_ecef(position_eci, gmst)
velocity_ecef = coordinate_systems.eci_to_ecef(velocity_eci, gmst)
return position_ecef, velocity_ecef
@reify
def mean_motion(self):
"""Mean motion, in radians per minute"""
raise NotImplementedError
@reify
def period(self):
"""Orbital period, in minutes"""
return 2 * pi / self.mean_motion
def get_position(self, when_utc=None):
"""Return a Position namedtuple in ECEF coordinate system"""
if when_utc is None:
when_utc = dt.datetime.utcnow()
position_ecef, velocity_ecef = self._propagate_ecef(when_utc)
return Position(when_utc=when_utc, position_ecef=position_ecef,
velocity_ecef=velocity_ecef, error_estimate=None)
def get_only_position(self, when_utc=None):
"""Return a tuple in ECEF coordinate system"""
return self.get_position(when_utc).position_ecef
def get_eclipse_duration(self, when_utc=None, tolerance=1e-1):
"""Gets eclipse duration at given time, in minutes"""
ecc = self.get_position(when_utc).osculating_elements[1]
if ecc > tolerance:
raise NotImplementedError("Non circular orbits are not supported")
beta = self.get_beta(when_utc)
return eclipse_duration(beta, self.period)
def passes_over(self, location, when_utc, limit_date=None, max_elevation_gt=0, aos_at_dg=0):
return LocationPredictor(location, self, when_utc, limit_date,
max_elevation_gt, aos_at_dg)
def get_next_pass(self, location, when_utc=None, max_elevation_gt=5,
aos_at_dg=0, limit_date=None):
"""Return a PredictedPass instance with the data of the next pass over the given location
location_llh: point on Earth we want to see from the satellite.
when_utc: datetime UTC after which the pass is calculated, default to now.
max_elevation_gt: filter passes with max_elevation under it.
aos_at_dg: This is if we want to start the pass at a specific elevation.
The next pass with a LOS strictly after when_utc will be returned,
possibly the current pass.
"""
if when_utc is None:
when_utc = dt.datetime.utcnow()
for pass_ in self.passes_over(location, when_utc, limit_date,
max_elevation_gt=max_elevation_gt,
aos_at_dg=aos_at_dg):
return pass_
else:
raise NotReachable('Propagation limit date exceeded')
def eclipses_since(self, when_utc=None, limit_date=None):
"""
An iterator that yields the start and end times of all eclipses between
when_utc and limit_date.
The next eclipse with an end strictly after when_utc will be returned,
possibly the current eclipse.
The last eclipse returned starts before limit_date, but it can end
strictly after limit_date.
Non-circular orbits are not supported and will raise NotImplementedError.
"""
def _get_illumination(t):
my_start = start + dt.timedelta(seconds=t)
result = get_satellite_minus_penumbra_verticals(
self.get_only_position(my_start),
my_start
)
return result
if when_utc is None:
when_utc = dt.datetime.utcnow()
orbital_period_s = self.period * 60
# A third of the orbit period is used as the base window of the search.
# This window ensures the function get_satellite_minus_penumbra_verticals
# will not have more than one local minimum (one in the illuminated phase and
# the other in penumbra).
base_search_window_s = orbital_period_s / 3
start = when_utc
while limit_date is None or start < limit_date:
# a negative minimum is approximately the midpoint of the eclipse
minimum_illumination = minimize_scalar(
_get_illumination,
bounds=(0, base_search_window_s),
method="bounded",
options={"xatol": 1e-2},
)
eclipse_center_candidate_delta_s = minimum_illumination.x
# If the minimum found is not illuminated, there is an eclipse here
if _get_illumination(eclipse_center_candidate_delta_s) < 0:
# The small time interval to search zeros around the center
# is estimated with the expected eclipse duration (which generally
# is smaller than expected, hence the 1.5 coefficient).
# Also a minimum of 180 seconds was added because
# in some cases the estimation is 0 even though there is an eclipse.
eclipse_duration_estimation_s = self.get_eclipse_duration(start) * 60
zero_search_window_s = max(180, 1.5 * eclipse_duration_estimation_s)
# Search now both zeros to get the start and end of the eclipse
eclipse_start_delta_s = brentq(
_get_illumination,
eclipse_center_candidate_delta_s - zero_search_window_s,
eclipse_center_candidate_delta_s,
xtol=1e-2,
full_output=False,
)
eclipse_end_delta_s = brentq(
_get_illumination,
eclipse_center_candidate_delta_s,
eclipse_center_candidate_delta_s + zero_search_window_s,
xtol=1e-2,
full_output=False,
)
eclipse_start = start + dt.timedelta(seconds=eclipse_start_delta_s)
eclipse_end = start + dt.timedelta(seconds=eclipse_end_delta_s)
yield eclipse_start, eclipse_end
start = eclipse_end + dt.timedelta(seconds=base_search_window_s)
else:
start += dt.timedelta(seconds=base_search_window_s)
class GPSPredictor(Predictor):
pass
class LocationPredictor:
"""Predicts passes over a given location
Exposes an iterable interface
"""
def __init__(self, location, predictor, start_date, limit_date=None,
max_elevation_gt=0, aos_at_dg=0, *, propagator=None):
if propagator is not None:
warnings.warn(
"propagator parameter was renamed to predictor "
"and will be removed in a future release",
DeprecationWarning
)
predictor = propagator
self.location = location
self.predictor = predictor
self.start_date = start_date
self.limit_date = limit_date
self.max_elevation_gt = radians(max([max_elevation_gt, aos_at_dg]))
self.aos_at = radians(aos_at_dg)
@property
def propagator(self):
warnings.warn(
"propagator parameter was renamed to predictor "
"and will be removed in a future release",
DeprecationWarning
)
return self.predictor
def __iter__(self):
"""Returns one pass each time"""
current_date = self.start_date
while True:
if self.is_ascending(current_date):
# we need a descending point
ascending_date = current_date
descending_date = self._find_nearest_descending(ascending_date)
pass_ = self._refine_pass(ascending_date, descending_date)
if pass_.valid:
if self.limit_date is not None and pass_.aos > self.limit_date:
break
yield self._build_predicted_pass(pass_)
if self.limit_date is not None and current_date > self.limit_date:
break
current_date = pass_.tca + self._orbit_step(0.6)
else:
current_date = self._find_nearest_ascending(current_date)
def _build_predicted_pass(self, accuratepass):
"""Returns a classic predicted pass"""
tca_position = self.predictor.get_position(accuratepass.tca)
return PredictedPass(self.location, self.predictor.sate_id,
max_elevation_deg=accuratepass.max_elevation_deg,
aos=accuratepass.aos,
los=accuratepass.los,
duration_s=accuratepass.duration.total_seconds(),
max_elevation_position=tca_position,
max_elevation_date=accuratepass.tca,
)
def _find_nearest_descending(self, ascending_date):
for candidate in self._sample_points(ascending_date):
if not self.is_ascending(candidate):
return candidate
else:
logger.error('Could not find a descending pass over %s start date: %s - TLE: %s',
self.location, ascending_date, self.predictor.tle)
raise PropagationError("Can not find an descending phase")
def _find_nearest_ascending(self, descending_date):
for candidate in self._sample_points(descending_date):
if self.is_ascending(candidate):
return candidate
else:
logger.error('Could not find an ascending pass over %s start date: %s - TLE: %s',
self.location, descending_date, self.predictor.tle)
raise PropagationError('Cannot find an ascending phase')
def _sample_points(self, date):
"""Helper method to found ascending or descending phases of elevation"""
start = date
end = date + self._orbit_step(0.99)
mid = self.midpoint(start, end)
mid_right = self.midpoint(mid, end)
mid_left = self.midpoint(start, mid)
return [end, mid, mid_right, mid_left]
def _refine_pass(self, ascending_date, descending_date):
tca = self._find_tca(ascending_date, descending_date)
elevation = self._elevation_at(tca)
if elevation > self.max_elevation_gt:
aos = self._find_aos(tca)
los = self._find_los(tca)
else:
aos = los = None
return AccuratePredictedPass(aos, tca, los, elevation)
def _find_tca(self, ascending_date, descending_date):
while not self._precision_reached(ascending_date, descending_date):
midpoint = self.midpoint(ascending_date, descending_date)
if self.is_ascending(midpoint):
ascending_date = midpoint
else:
descending_date = midpoint
return ascending_date
def _precision_reached(self, start, end):
# TODO: Allow the precision to change from the outside
return end - start <= ONE_SECOND
@staticmethod
def midpoint(start, end):
"""Returns the midpoint between two dates"""
return start + (end - start) / 2
def _elevation_at(self, when_utc):
position = self.predictor.get_only_position(when_utc)
return self.location.elevation_for(position)
def is_passing(self, when_utc):
"""Returns a boolean indicating if satellite is actually visible"""
return bool(self._elevation_at(when_utc))
def is_ascending(self, when_utc):
"""Check is elevation is ascending or descending on a given point"""
elevation = self._elevation_at(when_utc)
next_elevation = self._elevation_at(when_utc + ONE_SECOND)
return elevation <= next_elevation
def _orbit_step(self, size):
"""Returns a time step, that will make the satellite advance a given number of orbits"""
step_in_radians = size * 2 * pi
seconds = (step_in_radians / self.predictor.mean_motion) * 60
return dt.timedelta(seconds=seconds)
def _find_aos(self, tca):
end = tca
start = tca - self._orbit_step(0.34) # One third of the orbit
elevation = self._elevation_at(start)
assert elevation < 0
while not self._precision_reached(start, end):
midpoint = self.midpoint(start, end)
elevation = self._elevation_at(midpoint)
if elevation < self.aos_at:
start = midpoint
else:
end = midpoint
return end
def _find_los(self, tca):
start = tca
end = tca + self._orbit_step(0.34)
while not self._precision_reached(start, end):
midpoint = self.midpoint(start, end)
elevation = self._elevation_at(midpoint)
if elevation < self.aos_at:
end = midpoint
else:
start = midpoint
return start
class AccuratePredictedPass:
def __init__(self, aos, tca, los, max_elevation):
self.aos = round_datetime(aos) if aos is not None else None
self.tca = round_datetime(tca)
self.los = round_datetime(los) if los is not None else None
self.max_elevation = max_elevation
@property
def valid(self):
return self.max_elevation > 0 and self.aos is not None and self.los is not None
@reify
def max_elevation_deg(self):
return degrees(self.max_elevation)
@reify
def duration(self):
return self.los - self.aos | en | 0.799172 | # MIT License # # Copyright (c) 2017 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. Latitude (deg), longitude (deg), altitude (km). Osculating Keplerian orbital elements. Semimajor axis (km), eccentricity, inclination (deg), right ascension of the ascending node or RAAN (deg), argument of perigee (deg), true anomaly (deg). # Convert position to Keplerian osculating elements # Transform to more familiar semimajor axis Returns a datetime of the midpoint of the pass Computes off-nadir angle calculation Given satellite position ``sate_pos``, velocity ``sate_vel``, and location ``target`` in a common frame, off-nadir angle ``off_nadir_angle`` is given by: t2b = sate_pos - target cos(off_nadir_angle) = (sate_pos · t2b) # Vectorial dot product _______________________ || sate_pos || || t2b|| Sign for the rotation is calculated this way cross = target ⨯ sate_pos sign = cross · sate_vel ____________________ | cross · sate_vel | Gives illumination at given time (2 for illuminated, 1 for penumbra, 0 for umbra). Gets unitary normal vector (orthogonal to orbital plane) at given time. Gets angle between orbital plane and Sun direction (beta) at given time, in degrees. # Here we calculate the complementary angle of beta, # because we use the normal vector of the orbital plane # We subtract from 90 degrees to return the real beta angle Return position and velocity in the given date using ECEF coordinate system. Mean motion, in radians per minute Orbital period, in minutes Return a Position namedtuple in ECEF coordinate system Return a tuple in ECEF coordinate system Gets eclipse duration at given time, in minutes Return a PredictedPass instance with the data of the next pass over the given location location_llh: point on Earth we want to see from the satellite. when_utc: datetime UTC after which the pass is calculated, default to now. max_elevation_gt: filter passes with max_elevation under it. aos_at_dg: This is if we want to start the pass at a specific elevation. The next pass with a LOS strictly after when_utc will be returned, possibly the current pass. An iterator that yields all eclipses start and end times between when_utc and limit_date. The next eclipse with a end strictly after when_utc will be returned, possibly the current eclipse. The last eclipse returned starts before limit_date, but it can end strictly after limit_date. No circular orbits are not supported, and will raise NotImplementedError. 
# A third of the orbit period is used as the base window of the search. # This window ensures the function get_satellite_minus_penumbra_verticals # will not have more than one local minimum (one in the illuminated phase and # the other in penumbra). # a minimum negative value is aproximatelly the middle point of the eclipse # If found a minimum that is not illuminated, there is an eclipse here # The small time interval to search zeros around the center # is estimated with the expected eclipse duration (which generally # is smaller than expected, and that is the reason of the 1.5 coeficient). # Also a minimum of 180 seconds was added because # in some cases the estimation is 0 even though there is an eclipse. # Search now both zeros to get the start and end of the eclipse Predicts passes over a given location Exposes an iterable interface Returns one pass each time # we need a descending point Returns a classic predicted pass Helper method to found ascending or descending phases of elevation # TODO: Allow the precision to change from the outside Returns the midpoint between two dates Returns a boolean indicating if satellite is actually visible Check is elevation is ascending or descending on a given point Returns a time step, that will make the satellite advance a given number of orbits # On third of the orbit | 1.711189 | 2 |
vilmedic/scorers/NLG/__init__.py | jbdel/vilmedic | 15 | 9795 | <reponame>jbdel/vilmedic
from .rouge import ROUGEScorer
from .bleu.bleu import BLEUScorer
from .meteor.meteor import METEORScorer
from .cider.cider import Cider
from .ciderd.ciderd import CiderD
| from .rouge import ROUGEScorer
from .bleu.bleu import BLEUScorer
from .meteor.meteor import METEORScorer
from .cider.cider import Cider
from .ciderd.ciderd import CiderD | none | 1 | 0.973505 | 1 |
|
tests/test_liif.py | Yshuo-Li/mmediting-test | 2 | 9796 | import numpy as np
import torch
import torch.nn as nn
from mmcv.runner import obj_from_dict
from mmcv.utils.config import Config
from mmedit.models import build_model
from mmedit.models.losses import L1Loss
from mmedit.models.registry import COMPONENTS
@COMPONENTS.register_module()
class BP(nn.Module):
"""A simple BP network for testing LIIF.
Args:
in_dim (int): Input dimension.
out_dim (int): Output dimension.
"""
def __init__(self, in_dim, out_dim):
super().__init__()
self.layer = nn.Linear(in_dim, out_dim)
def forward(self, x):
shape = x.shape[:-1]
x = self.layer(x.view(-1, x.shape[-1]))
return x.view(*shape, -1)
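# The linear layer is applied over the flattened leading dimensions and the
# original shape is restored, i.e. an MLP evaluated independently per coordinate.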
def test_liif():
model_cfg = dict(
type='LIIF',
generator=dict(
type='EDSR',
in_channels=3,
out_channels=3,
mid_channels=8,
num_blocks=1),
imnet=dict(type='BP', in_dim=8, out_dim=3),
local_ensemble=True,
feat_unfold=True,
cell_decode=True,
rgb_mean=(0.4488, 0.4371, 0.4040),
rgb_std=(1., 1., 1.),
eval_bsize=30000,
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))
scale_max = 4
train_cfg = None
test_cfg = Config(dict(metrics=['PSNR', 'SSIM'], crop_border=scale_max))
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert restorer.__class__.__name__ == 'LIIF'
assert isinstance(restorer.imnet, BP)
assert isinstance(restorer.pixel_loss, L1Loss)
# prepare data
inputs = torch.rand(1, 3, 22, 11)
targets = torch.rand(1, 128 * 64, 3)
coord = torch.rand(1, 128 * 64, 2)
cell = torch.rand(1, 128 * 64, 2)
data_batch = {'lq': inputs, 'gt': targets, 'coord': coord, 'cell': cell}
# prepare optimizer
optim_cfg = dict(type='Adam', lr=1e-4, betas=(0.9, 0.999))
optimizer = obj_from_dict(optim_cfg, torch.optim,
dict(params=restorer.parameters()))
# test train_step and forward_test (cpu)
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert outputs['results']['lq'].shape == data_batch['lq'].shape
assert outputs['results']['gt'].shape == data_batch['gt'].shape
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 128 * 64, 3)
# test train_step and forward_test (gpu)
if torch.cuda.is_available():
restorer = restorer.cuda()
data_batch = {
'lq': inputs.cuda(),
'gt': targets.cuda(),
'coord': coord.cuda(),
'cell': cell.cuda()
}
# train_step
optimizer = obj_from_dict(optim_cfg, torch.optim,
dict(params=restorer.parameters()))
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert outputs['results']['lq'].shape == data_batch['lq'].shape
assert outputs['results']['gt'].shape == data_batch['gt'].shape
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 128 * 64, 3)
# val_step
result = restorer.val_step(data_batch, meta=[{'gt_path': ''}])
assert isinstance(result, dict)
assert isinstance(result['eval_result'], dict)
assert result['eval_result'].keys() == set({'PSNR', 'SSIM'})
assert isinstance(result['eval_result']['PSNR'], np.float64)
assert isinstance(result['eval_result']['SSIM'], np.float64)
| import numpy as np
import torch
import torch.nn as nn
from mmcv.runner import obj_from_dict
from mmcv.utils.config import Config
from mmedit.models import build_model
from mmedit.models.losses import L1Loss
from mmedit.models.registry import COMPONENTS
@COMPONENTS.register_module()
class BP(nn.Module):
"""A simple BP network for testing LIIF.
Args:
in_dim (int): Input dimension.
out_dim (int): Output dimension.
"""
def __init__(self, in_dim, out_dim):
super().__init__()
self.layer = nn.Linear(in_dim, out_dim)
def forward(self, x):
shape = x.shape[:-1]
x = self.layer(x.view(-1, x.shape[-1]))
return x.view(*shape, -1)
def test_liif():
model_cfg = dict(
type='LIIF',
generator=dict(
type='EDSR',
in_channels=3,
out_channels=3,
mid_channels=8,
num_blocks=1),
imnet=dict(type='BP', in_dim=8, out_dim=3),
local_ensemble=True,
feat_unfold=True,
cell_decode=True,
rgb_mean=(0.4488, 0.4371, 0.4040),
rgb_std=(1., 1., 1.),
eval_bsize=30000,
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))
scale_max = 4
train_cfg = None
test_cfg = Config(dict(metrics=['PSNR', 'SSIM'], crop_border=scale_max))
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert restorer.__class__.__name__ == 'LIIF'
assert isinstance(restorer.imnet, BP)
assert isinstance(restorer.pixel_loss, L1Loss)
# prepare data
inputs = torch.rand(1, 3, 22, 11)
targets = torch.rand(1, 128 * 64, 3)
coord = torch.rand(1, 128 * 64, 2)
cell = torch.rand(1, 128 * 64, 2)
data_batch = {'lq': inputs, 'gt': targets, 'coord': coord, 'cell': cell}
# prepare optimizer
optim_cfg = dict(type='Adam', lr=1e-4, betas=(0.9, 0.999))
optimizer = obj_from_dict(optim_cfg, torch.optim,
dict(params=restorer.parameters()))
# test train_step and forward_test (cpu)
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert outputs['results']['lq'].shape == data_batch['lq'].shape
assert outputs['results']['gt'].shape == data_batch['gt'].shape
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 128 * 64, 3)
# test train_step and forward_test (gpu)
if torch.cuda.is_available():
restorer = restorer.cuda()
data_batch = {
'lq': inputs.cuda(),
'gt': targets.cuda(),
'coord': coord.cuda(),
'cell': cell.cuda()
}
# train_step
optimizer = obj_from_dict(optim_cfg, torch.optim,
dict(params=restorer.parameters()))
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert outputs['results']['lq'].shape == data_batch['lq'].shape
assert outputs['results']['gt'].shape == data_batch['gt'].shape
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 128 * 64, 3)
# val_step
result = restorer.val_step(data_batch, meta=[{'gt_path': ''}])
assert isinstance(result, dict)
assert isinstance(result['eval_result'], dict)
assert result['eval_result'].keys() == set({'PSNR', 'SSIM'})
assert isinstance(result['eval_result']['PSNR'], np.float64)
assert isinstance(result['eval_result']['SSIM'], np.float64)
| en | 0.508143 | A simple BP network for testing LIIF. Args: in_dim (int): Input dimension. out_dim (int): Output dimension. # build restorer # test attributes # prepare data # prepare optimizer # test train_step and forward_test (cpu) # test train_step and forward_test (gpu) # train_step # val_step | 2.107408 | 2 |
database/signals.py | ccraddock/beiwe-backend-cc | 0 | 9797 | <gh_stars>0
from django.utils import timezone
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from database.study_models import DeviceSettings, Study, Survey, SurveyArchive
@receiver(post_save, sender=Study)
def populate_study_device_settings(sender, **kwargs):
"""
Ensure that every newly created Study object has a DeviceSettings object. This essentially
makes the OneToOneField have null=False in both directions.
"""
my_study = kwargs['instance']
if kwargs['created'] and not hasattr(my_study, 'device_settings'):
# If my_study has just been created and doesn't have a DeviceSettings
# attached to it, create one with the default parameters.
DeviceSettings.objects.create(study=my_study)
@receiver(pre_save, sender=Survey)
def create_survey_archive(sender, **kwargs):
"""
Ensure that every time a Survey is edited, a SurveyArchive (SA) is stored which holds the
current contents of the Survey before saving, as well as a pair of timestamps marking the
time range over which the SA applies.
"""
# The Survey instance being passed has the updated contents of the Survey. To get
# the preexisting contents of the Survey, make a database call using the passed
# instance's primary key. If we get an ObjectDoesNotExist error short-circuit because
# that means it is the initial save operation.
my_survey_plus_updates = kwargs['instance']
try:
my_survey = Survey.objects.get(pk=my_survey_plus_updates.pk)
except ObjectDoesNotExist:
return
# All fields present in AbstractSurvey, plus the study foreign key which is
# separately present in Survey and SurveyArchive.
survey_fields = [f.name for f in super(Survey, my_survey)._meta.fields]
survey_fields.append('study_id')
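# 'study_id' is the attribute name Django generates for the `study` ForeignKey,
# which lives on Survey/SurveyArchive rather than on AbstractSurvey.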
# Prepare a new archive containing the archive-specific information
new_archive = SurveyArchive(survey=my_survey, archive_start=my_survey.last_updated)
try:
# Get the most recent archive for this Survey, to check whether the Survey has been edited
last_archive = my_survey.archives.latest('archive_end')
except SurveyArchive.DoesNotExist:
survey_dirty = True # If there is no previous archive, we automatically make a new one
else:
survey_dirty = False
for shared_field in survey_fields:
# Update all of the shared fields in the archive to have the original survey's values
if shared_field == 'name':
setattr(new_archive, shared_field, '{0} {1}'.format(getattr(my_survey, shared_field), timezone.now().isoformat()))
else:
setattr(new_archive, shared_field, getattr(my_survey, shared_field))
if not survey_dirty and getattr(my_survey, shared_field) != getattr(last_archive, shared_field):
# If the survey has been edited since the last archive was made, mark the survey as
# dirty. This tells us that we have to make a new archive object.
survey_dirty = True
if survey_dirty:
# If the survey has been edited, save the new archive. This automatically sets the
# archive_end field to be the current time.
new_archive.save()
else:
# If the survey has not been edited, we don't save the new archive. Update the
# previous archive to extend to the current time. Note that object.update saves the
# object, unlike QuerySet.update. See base_models.AbstractModel for details.
last_archive.update(archive_end=timezone.now())
| from django.utils import timezone
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from database.study_models import DeviceSettings, Study, Survey, SurveyArchive
@receiver(post_save, sender=Study)
def populate_study_device_settings(sender, **kwargs):
"""
Ensure that every newly created Study object has a DeviceSettings object. This essentially
makes the OneToOneField have null=False in both directions.
"""
my_study = kwargs['instance']
if kwargs['created'] and not hasattr(my_study, 'device_settings'):
# If my_study has just been created and doesn't have a DeviceSettings
# attached to it, create one with the default parameters.
DeviceSettings.objects.create(study=my_study)
@receiver(pre_save, sender=Survey)
def create_survey_archive(sender, **kwargs):
"""
Ensure that every time a Survey is edited, a SurveyArchive (SA) is stored which holds the
current contents of the Survey before saving, as well as a pair of timestamps marking the
time range over which the SA applies.
"""
# The Survey instance being passed has the updated contents of the Survey. To get
# the preexisting contents of the Survey, make a database call using the passed
# instance's primary key. If we get an ObjectDoesNotExist error short-circuit because
# that means it is the initial save operation.
my_survey_plus_updates = kwargs['instance']
try:
my_survey = Survey.objects.get(pk=my_survey_plus_updates.pk)
except ObjectDoesNotExist:
return
# All fields present in AbstractSurvey, plus the study foreign key which is
# separately present in Survey and SurveyArchive.
survey_fields = [f.name for f in super(Survey, my_survey)._meta.fields]
survey_fields.append('study_id')
# Prepare a new archive containing the archive-specific information
new_archive = SurveyArchive(survey=my_survey, archive_start=my_survey.last_updated)
try:
# Get the most recent archive for this Survey, to check whether the Survey has been edited
last_archive = my_survey.archives.latest('archive_end')
except SurveyArchive.DoesNotExist:
survey_dirty = True # If there is no previous archive, we automatically make a new one
else:
survey_dirty = False
for shared_field in survey_fields:
# Update all of the shared fields in the archive to have the original survey's values
if shared_field == 'name':
setattr(new_archive, shared_field, '{0} {1}'.format(getattr(my_survey, shared_field), timezone.now().isoformat()))
else:
setattr(new_archive, shared_field, getattr(my_survey, shared_field))
if not survey_dirty and getattr(my_survey, shared_field) != getattr(last_archive, shared_field):
# If the survey has been edited since the last archive was made, mark the survey as
# dirty. This tells us that we have to make a new archive object.
survey_dirty = True
if survey_dirty:
# If the survey has been edited, save the new archive. This automatically sets the
# archive_end field to be the current time.
new_archive.save()
else:
# If the survey has not been edited, we don't save the new archive. Update the
# previous archive to extend to the current time. Note that object.update saves the
# object, unlike QuerySet.update. See base_models.AbstractModel for details.
last_archive.update(archive_end=timezone.now()) | en | 0.915589 | Ensure that every newly created Study object has a DeviceSettings object. This essentially
makes the OneToOneField have null=False in both directions. # If my_study has just been created and doesn't have a DeviceSettings # attached to it, create one with the default parameters. Ensure that every time a Survey is edited, a SurveyArchive (SA) is stored which holds the
current contents of the Survey before saving, as well as a pair of timestamps marking the
time range over which the SA applies. # The Survey instance being passed has the updated contents of the Survey. To get # the preexisting contents of the Survey, make a database call using the passed # instance's primary key. If we get an ObjectDoesNotExist error short-circuit because # that means it is the initial save operation. # All fields present in AbstractSurvey, plus the study foreign key which is # separately present in Survey and SurveyArchive. # Prepare a new archive containing the archive-specific information # Get the most recent archive for this Survey, to check whether the Survey has been edited # If there is no previous archive, we automatically make a new one # Update all of the shared fields in the archive to have the original survey's values # If the survey has been edited since the last archive was made, mark the survey as # dirty. This tells us that we have to make a new archive object. # If the survey has been edited, save the new archive. This automatically sets the # archive_end field to be the current time. # If the survey has not been edited, we don't save the new archive. Update the # previous archive to extend to the current time. Note that object.update saves the # object, unlike QuerySet.update. See base_models.AbstractModel for details. | 2.296067 | 2 |
docs/examples/notify/notify_skeleton.py | Blakstar26/npyscreen | 0 | 9798 | import npyscreen
class NotifyBaseExample(npyscreen.Form):
def create(self):
key_of_choice = 'p'
what_to_display = 'Press {} for popup \n Press escape key to quit'.format(key_of_choice)
self.how_exited_handers[npyscreen.wgwidget.EXITED_ESCAPE] = self.exit_application
self.add(npyscreen.FixedText, value=what_to_display)
def exit_application(self):
self.parentApp.setNextForm(None)
self.editing = False
class MyApplication(npyscreen.NPSAppManaged):
def onStart(self):
self.addForm('MAIN', NotifyBaseExample, name='To be improved upon')
if __name__ == '__main__':
TestApp = MyApplication().run() | import npyscreen
class NotifyBaseExample(npyscreen.Form):
def create(self):
key_of_choice = 'p'
what_to_display = 'Press {} for popup \n Press escape key to quit'.format(key_of_choice)
self.how_exited_handers[npyscreen.wgwidget.EXITED_ESCAPE] = self.exit_application
self.add(npyscreen.FixedText, value=what_to_display)
def exit_application(self):
self.parentApp.setNextForm(None)
self.editing = False
class MyApplication(npyscreen.NPSAppManaged):
def onStart(self):
self.addForm('MAIN', NotifyBaseExample, name='To be improved upon')
if __name__ == '__main__':
TestApp = MyApplication().run() | none | 1 | 2.371183 | 2 |
|
practicioner_bundle/ch15-neural_style/pyimagesearch/nn/conv/minigooglenet.py | romanroson/pis_code | 1 | 9799 | # -*- coding: utf-8 -*-
"""Implementation of MiniGoogLeNet architecture.
This implementation is based on the original implementation of GoogLeNet.
The authors of the net used BN before Activation layer.
This should be switched.
"""
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import AveragePooling2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Dropout
from keras.layers.core import Dense
from keras.layers import Flatten
from keras.layers import Input
from keras.models import Model
from keras.layers import concatenate
from keras import backend as K
class MiniGoogLeNet:
"""Implementation of MiniGoogLeNet architecture
"""
@staticmethod
def conv_module(x, filter_num, filter_x_size, filter_y_size, stride, chanel_dim, padding="same"):
"""Define conv layer
Arguments:
x {Tensor} -- input layer to the function
filter_num {int} -- number of filters our CONV layer is going to learn
filter_x_size {int} -- x-size of each of the filter_num filters that will be learned
filter_y_size {int} -- y-size of each of the filter_num filters that will be learned
stride {int} -- stride of the CONV layer
chanel_dim {int} -- channel dimension, derived from “channels last” or “channels first”
Keyword Arguments:
padding {str} -- type of padding to be applied to the CONV layer (default: {"same"})
Returns:
Tensor -- convolutional module
"""
# define a CONV => BN => RELU pattern
x = Conv2D(filter_num, (filter_x_size, filter_y_size), strides=stride, padding=padding)(x)
x = BatchNormalization(axis=chanel_dim)(x)
x = Activation("relu")(x)
# return the block
return x
@staticmethod
def inception_module(x, numK1x1, numK3x3, chanel_dim): # pylint: disable=invalid-name
"""Define inception module
Arguments:
x {Tensor} -- input layer
numK1x1 {int} -- number of 1x1 filters
numK3x3 {int} -- number of 3x3 filters
chanel_dim {int} -- channel dimension, derived from “channels last” or “channels first”
Returns:
Tensor -- inception module
"""
# define two CONV modules, then concatenate across the channel dimension
conv_1x1 = MiniGoogLeNet.conv_module(x, numK1x1, 1, 1, (1, 1), chanel_dim)
conv_3x3 = MiniGoogLeNet.conv_module(x, numK3x3, 3, 3, (1, 1), chanel_dim)
x = concatenate([conv_1x1, conv_3x3], axis=chanel_dim)
# return the block
return x
@staticmethod
def downsample_module(x, filter_num, chanel_dim):
"""Define downsample module
Arguments:
x {Tensor} -- input layer
filter_num {int} -- number of filters our CONV layer is going to learn
chanel_dim {int} -- channel dimension, derived from “channels last” or “channels first”
Returns:
Tensor -- downsample module
"""
# define the CONV module and POOL, then concatenate across the channel dimensions
conv_3x3 = MiniGoogLeNet.conv_module(x, filter_num, 3, 3, (2, 2), chanel_dim, padding="valid")
pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = concatenate([conv_3x3, pool], axis=chanel_dim)
# return the block
return x
@staticmethod
def build(width, height, depth, classes):
"""Build MiniGoogLeNet architecture
Arguments:
width {int} -- spatial width of the input images
height {int} -- spatial height of the input images
depth {int} -- number of channels of the input images
classes {int} -- number of output classes
Returns:
obj -- MiniGoogLeNet model
"""
# initialize the input shape to be "channels last" and the channels dimension itself
input_shape = (height, width, depth)
chanel_dim = -1
# if we are using "channels first", update the input shape and channels dimension
if K.image_data_format() == "channels_first":
input_shape = (depth, height, width)
chanel_dim = 1
# define the model input and first CONV module
inputs = Input(shape=input_shape)
x = MiniGoogLeNet.conv_module(inputs, 96, 3, 3, (1, 1), chanel_dim)
# two Inception modules followed by a downsample module
x = MiniGoogLeNet.inception_module(x, 32, 32, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 32, 48, chanel_dim)
x = MiniGoogLeNet.downsample_module(x, 80, chanel_dim)
# four Inception modules followed by a downsample module
x = MiniGoogLeNet.inception_module(x, 112, 48, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 96, 64, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 80, 80, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 48, 96, chanel_dim)
x = MiniGoogLeNet.downsample_module(x, 96, chanel_dim)
# two Inception modules followed by global POOL and dropout
x = MiniGoogLeNet.inception_module(x, 176, 160, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 176, 160, chanel_dim)
x = AveragePooling2D((7, 7))(x)
x = Dropout(0.5)(x)
# softmax classifier
x = Flatten()(x)
x = Dense(classes)(x)
x = Activation("softmax")(x)
# create the model
model = Model(inputs, x, name="googlenet")
# return the constructed network architecture
return model
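# Minimal usage sketch (the 32x32x3 input size and 10 classes are illustrative
# assumptions, e.g. CIFAR-10; any input that reaches a 7x7 feature map before the
# 7x7 average pooling will work):
#     model = MiniGoogLeNet.build(width=32, height=32, depth=3, classes=10)
#     model.compile(optimizer="sgd", loss="categorical_crossentropy", metrics=["accuracy"])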
| # -*- coding: utf-8 -*-
"""Implementation of MiniGoogLeNet architecture.
This implementation is based on the original implementation of GoogLeNet.
The authors of the net used BN before Activation layer.
This should be switched.
"""
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import AveragePooling2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Dropout
from keras.layers.core import Dense
from keras.layers import Flatten
from keras.layers import Input
from keras.models import Model
from keras.layers import concatenate
from keras import backend as K
class MiniGoogLeNet:
"""Implementation of MiniGoogLeNet architecture
"""
@staticmethod
def conv_module(x, filter_num, filter_x_size, filter_y_size, stride, chanel_dim, padding="same"):
"""Define conv layer
Arguments:
x {Tensor} -- input layer to the function
filter_num {int} -- number of filters our CONV layer is going to learn
filter_x_size {int} -- x-size of each of the filter_num filters that will be learned
filter_y_size {int} -- y-size of each of the filter_num filters that will be learned
stride {int} -- stride of the CONV layer
chanel_dim {int} -- channel dimension, derived from “channels last” or “channels first”
Keyword Arguments:
padding {str} -- type of padding to be applied to the CONV layer (default: {"same"})
Returns:
Tensor -- convolutional module
"""
# define a CONV => BN => RELU pattern
x = Conv2D(filter_num, (filter_x_size, filter_y_size), strides=stride, padding=padding)(x)
x = BatchNormalization(axis=chanel_dim)(x)
x = Activation("relu")(x)
# return the block
return x
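    # Sketch added for illustration (not in the original source): the module
    # docstring notes that the BN-before-Activation ordering should be
    # switched; this variant shows the alternative CONV => RELU => BN block.
    @staticmethod
    def conv_module_act_first(x, filter_num, filter_x_size, filter_y_size, stride, chanel_dim, padding="same"):
        """Alternative conv block applying the activation before BN."""
        x = Conv2D(filter_num, (filter_x_size, filter_y_size), strides=stride, padding=padding)(x)
        x = Activation("relu")(x)
        x = BatchNormalization(axis=chanel_dim)(x)
        return x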
@staticmethod
def inception_module(x, numK1x1, numK3x3, chanel_dim): # pylint: disable=invalid-name
"""Define inception module
Arguments:
x {Tensor} -- input layer
numK1x1 {int} -- number of 1x1 filters
numK3x3 {int} -- number of 3x3 filters
chanel_dim {int} -- channel dimension, derived from “channels last” or “channels first”
Returns:
Tensor -- inception module
"""
# define two CONV modules, then concatenate across the channel dimension
conv_1x1 = MiniGoogLeNet.conv_module(x, numK1x1, 1, 1, (1, 1), chanel_dim)
conv_3x3 = MiniGoogLeNet.conv_module(x, numK3x3, 3, 3, (1, 1), chanel_dim)
x = concatenate([conv_1x1, conv_3x3], axis=chanel_dim)
# return the block
return x
@staticmethod
def downsample_module(x, filter_num, chanel_dim):
"""Define downsample module
Arguments:
x {Tensor} -- input layer
filter_num {int} -- number of filters our CONV layer is going to learn
chanel_dim {int} -- channel dimension, derived from “channels last” or “channels first”
Returns:
Tensor -- downsample module
"""
# define the CONV module and POOL, then concatenate across the channel dimensions
conv_3x3 = MiniGoogLeNet.conv_module(x, filter_num, 3, 3, (2, 2), chanel_dim, padding="valid")
pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = concatenate([conv_3x3, pool], axis=chanel_dim)
# return the block
return x
@staticmethod
def build(width, height, depth, classes):
"""Build MiniGoogLeNet architecture
Arguments:
            width {int} -- width of the input images in pixels
            height {int} -- height of the input images in pixels
            depth {int} -- number of channels in the input images
            classes {int} -- number of output classes to predict
Returns:
obj -- MiniGoogLeNet model
"""
# initialize the input shape to be "channels last" and the channels dimension itself
input_shape = (height, width, depth)
chanel_dim = -1
# if we are using "channels first", update the input shape and channels dimension
if K.image_data_format() == "channels_first":
input_shape = (depth, height, width)
chanel_dim = 1
# define the model input and first CONV module
inputs = Input(shape=input_shape)
x = MiniGoogLeNet.conv_module(inputs, 96, 3, 3, (1, 1), chanel_dim)
# two Inception modules followed by a downsample module
x = MiniGoogLeNet.inception_module(x, 32, 32, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 32, 48, chanel_dim)
x = MiniGoogLeNet.downsample_module(x, 80, chanel_dim)
# four Inception modules followed by a downsample module
x = MiniGoogLeNet.inception_module(x, 112, 48, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 96, 64, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 80, 80, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 48, 96, chanel_dim)
x = MiniGoogLeNet.downsample_module(x, 96, chanel_dim)
# two Inception modules followed by global POOL and dropout
x = MiniGoogLeNet.inception_module(x, 176, 160, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 176, 160, chanel_dim)
x = AveragePooling2D((7, 7))(x)
x = Dropout(0.5)(x)
# softmax classifier
x = Flatten()(x)
x = Dense(classes)(x)
x = Activation("softmax")(x)
# create the model
model = Model(inputs, x, name="googlenet")
# return the constructed network architecture
return model
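# --- Training-setup sketch (illustrative only; not part of the original source) ---
# One plausible way to compile the model for CIFAR-10-sized data. The optimizer
# choice, learning rate, and input dimensions are assumptions.
if __name__ == "__main__":
    from keras.optimizers import SGD  # available in the standalone Keras used above
    model = MiniGoogLeNet.build(width=32, height=32, depth=3, classes=10)
    model.compile(loss="categorical_crossentropy",
                  optimizer=SGD(lr=0.01, momentum=0.9),
                  metrics=["accuracy"])
    model.summary()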
| en | 0.613197 | # -*- coding: utf-8 -*- Implementation of MiniGoogLeNet architecture. This implementation is based on the original implemetation of GoogLeNet. The authors of the net used BN before Activation layer. This should be switched. Implementation of MiniGoogLeNet architecture Define conv layer Arguments: x {Tensor} -- input layer to the function filter_num {int} -- number of filters our CONV layer is going to learn filter_x_size {int} -- x-size of each of the filter_num filters that will be learned filter_y_size {int} -- y-size of each of the filter_num filters that will be learned stride {int} -- stride of the CONV layer chanel_dim {int} -- channel dimension, derived from “channels last” or “channels first” Keyword Arguments: padding {str} -- type of padding to be applied to the CONV layer (default: {"same"}) Returns: Tensor -- convolutional module # define a CONV => BN => RELU pattern # return the block # pylint: disable=invalid-name Define inception module Arguments: x {Tensor} -- input layer numK1x1 {int} -- number of 1x1 filters numK3x3 {int} -- number of 3x3 filters chanel_dim {int} -- channel dimension, derived from “channels last” or “channels first” Returns: Tensor -- inception module # define two CONV modules, then concatenate across the channel dimension # return the block Define downsample module Arguments: x {Tensor} -- input layer filter_num {int} -- number of filters our CONV layer is going to learn chanel_dim {int} -- channel dimension, derived from “channels last” or “channels first” Returns: Tensor -- downsample module # define the CONV module and POOL, then concatenate across the channel dimensions # return the block Build MiniGoogLeNet architecture Arguments: width {int} -- [description] height {int} -- [description] depth {int} -- [description] classes {int} -- [description] Returns: obj -- MiniGoogLeNet model # initialize the input shape to be "channels last" and the channels dimension itself # if we are using "channels first", update the input shape and channels dimension # define the model input and first CONV module # two Inception modules followed by a downsample module # four Inception modules followed by a downsample module # two Inception modules followed by global POOL and dropout # softmax classifier # create the model # return the constructed network architecture | 3.501246 | 4 |